bool InBounds = EatIfPresent(lltok::kw_inbounds);
- if (ParseTypeAndValue(Ptr, Loc, PFS)) return true;
+ Type *Ty = nullptr;
+ LocTy ExplicitTypeLoc = Lex.getLoc();
+ if (ParseType(Ty) ||
+ ParseToken(lltok::comma, "expected comma after getelementptr's type") ||
+ ParseTypeAndValue(Ptr, Loc, PFS))
+ return true;
+
+ Type *PtrTy = Ptr->getType();
+ if (VectorType *VT = dyn_cast<VectorType>(PtrTy))
+ PtrTy = VT->getElementType();
+ if (Ty != cast<SequentialType>(PtrTy)->getElementType())
+ return Error(ExplicitTypeLoc,
+ "explicit pointee type doesn't match operand's pointee type");
Type *BaseType = Ptr->getType();
PointerType *BasePointerType = dyn_cast<PointerType>(BaseType->getScalarType());
Out << ", ";
TypePrinter.print(I.getType(), Out);
} else if (Operand) { // Print the normal way.
+ if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(GEP->getSourceElementType(), Out);
+ Out << ',';
+ }
// PrintAllTypes - Instructions who have operands of all the same type
// omit the type from all but the first operand. If the instruction has
store i32 0, i32* %A
%X = load i32* %A
%B = bitcast i32* %A to i8*
- %C = getelementptr i8* %B, i64 1
+ %C = getelementptr i8, i8* %B, i64 1
store i8 1, i8* %C ; Aliases %A
%Y.DONOTREMOVE = load i32* %A
%Z = sub i32 %X, %Y.DONOTREMOVE
; RUN: opt < %s -basicaa -aa-eval -disable-output 2>/dev/null
; Test for a bug in BasicAA which caused a crash when querying equality of P1&P2
define void @test({[2 x i32],[2 x i32]}* %A, i64 %X, i64 %Y) {
- %P1 = getelementptr {[2 x i32],[2 x i32]}* %A, i64 0, i32 0, i64 %X
- %P2 = getelementptr {[2 x i32],[2 x i32]}* %A, i64 0, i32 1, i64 %Y
+ %P1 = getelementptr {[2 x i32],[2 x i32]}, {[2 x i32],[2 x i32]}* %A, i64 0, i32 0, i64 %X
+ %P2 = getelementptr {[2 x i32],[2 x i32]}, {[2 x i32],[2 x i32]}* %A, i64 0, i32 1, i64 %Y
ret void
}
define i32 @test(i32 *%Ptr, i64 %V) {
; CHECK: sub i32 %X, %Y
- %P2 = getelementptr i32* %Ptr, i64 1
- %P1 = getelementptr i32* %Ptr, i64 %V
+ %P2 = getelementptr i32, i32* %Ptr, i64 1
+ %P1 = getelementptr i32, i32* %Ptr, i64 %V
%X = load i32* %P1
store i32 5, i32* %P2
%Y = load i32* %P1
; RUN: opt < %s -basicaa -aa-eval -disable-output 2>/dev/null
; Test for a bug in BasicAA which caused a crash when querying equality of P1&P2
define void @test([17 x i16]* %mask_bits) {
- %P1 = getelementptr [17 x i16]* %mask_bits, i64 0, i64 0
- %P2 = getelementptr [17 x i16]* %mask_bits, i64 252645134, i64 0
+ %P1 = getelementptr [17 x i16], [17 x i16]* %mask_bits, i64 0, i64 0
+ %P2 = getelementptr [17 x i16], [17 x i16]* %mask_bits, i64 252645134, i64 0
ret void
}
br label %loopentry
loopentry: ; preds = %0, %no_exit
- %tmp.101 = getelementptr %struct..apr_table_t* %t.1, i64 0, i32 0, i32 2
+ %tmp.101 = getelementptr %struct..apr_table_t, %struct..apr_table_t* %t.1, i64 0, i32 0, i32 2
%tmp.11 = load i32* %tmp.101 ; <i32> [#uses=0]
br i1 false, label %no_exit, label %UnifiedExitNode
no_exit: ; preds = %loopentry
%tmp.25 = sext i32 0 to i64 ; <i64> [#uses=1]
- %tmp.261 = getelementptr %struct..apr_table_t* %t.1, i64 0, i32 3, i64 %tmp.25 ; <i32*> [#uses=1]
+ %tmp.261 = getelementptr %struct..apr_table_t, %struct..apr_table_t* %t.1, i64 0, i32 3, i64 %tmp.25 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.261
br label %loopentry
; RUN: opt < %s -basicaa -aa-eval -disable-output 2>/dev/null
define i32 @MTConcat([3 x i32]* %a.1) {
- %tmp.961 = getelementptr [3 x i32]* %a.1, i64 0, i64 4
+ %tmp.961 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4
%tmp.97 = load i32* %tmp.961
- %tmp.119 = getelementptr [3 x i32]* %a.1, i64 1, i64 0
+ %tmp.119 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 1, i64 0
%tmp.120 = load i32* %tmp.119
- %tmp.1541 = getelementptr [3 x i32]* %a.1, i64 0, i64 4
+ %tmp.1541 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4
%tmp.155 = load i32* %tmp.1541
ret i32 0
}
%struct..RefRect = type { %struct..RefPoint, %struct..RefPoint }
define i32 @BMT_CommitPartDrawObj() {
- %tmp.19111 = getelementptr %struct..RefRect* null, i64 0, i32 0, i32 1, i32 2
- %tmp.20311 = getelementptr %struct..RefRect* null, i64 0, i32 1, i32 1, i32 2
+ %tmp.19111 = getelementptr %struct..RefRect, %struct..RefRect* null, i64 0, i32 0, i32 1, i32 2
+ %tmp.20311 = getelementptr %struct..RefRect, %struct..RefRect* null, i64 0, i32 1, i32 1, i32 2
ret i32 0
}
; CHECK-NOT: MayAlias:
define void @test(%T* %P) {
- %A = getelementptr %T* %P, i64 0
- %B = getelementptr %T* %P, i64 0, i32 0
- %C = getelementptr %T* %P, i64 0, i32 1
- %D = getelementptr %T* %P, i64 0, i32 1, i64 0
- %E = getelementptr %T* %P, i64 0, i32 1, i64 5
+ %A = getelementptr %T, %T* %P, i64 0
+ %B = getelementptr %T, %T* %P, i64 0, i32 0
+ %C = getelementptr %T, %T* %P, i64 0, i32 1
+ %D = getelementptr %T, %T* %P, i64 0, i32 1, i64 0
+ %E = getelementptr %T, %T* %P, i64 0, i32 1, i64 5
ret void
}
; CHECK-NOT: MayAlias:
define void @test() {
- %D = getelementptr %T* @G, i64 0, i32 0
- %E = getelementptr %T* @G, i64 0, i32 1, i64 5
- %F = getelementptr i32* getelementptr (%T* @G, i64 0, i32 0), i64 0
- %X = getelementptr [10 x i8]* getelementptr (%T* @G, i64 0, i32 1), i64 0, i64 5
+ %D = getelementptr %T, %T* @G, i64 0, i32 0
+ %E = getelementptr %T, %T* @G, i64 0, i32 1, i64 5
+ %F = getelementptr i32, i32* getelementptr (%T* @G, i64 0, i32 0), i64 0
+ %X = getelementptr [10 x i8], [10 x i8]* getelementptr (%T* @G, i64 0, i32 1), i64 0, i64 5
ret void
}
define void @test({i32,i32 }* %P) {
; CHECK: store i32 0, i32* %X
- %Q = getelementptr {i32,i32}* %P, i32 1
- %X = getelementptr {i32,i32}* %Q, i32 0, i32 1
- %Y = getelementptr {i32,i32}* %Q, i32 1, i32 1
+ %Q = getelementptr {i32,i32}, {i32,i32}* %P, i32 1
+ %X = getelementptr {i32,i32}, {i32,i32}* %Q, i32 0, i32 1
+ %Y = getelementptr {i32,i32}, {i32,i32}* %Q, i32 1, i32 1
store i32 0, i32* %X
store i32 1, i32* %Y
ret void
no_exit: ; preds = %no_exit, %entry
%i.0.0 = phi i32 [ 0, %entry ], [ %inc, %no_exit ] ; <i32> [#uses=2]
- %tmp.6 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
+ %tmp.6 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp.6
- %tmp.8 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.8 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp.9 = load i32* %tmp.8 ; <i32> [#uses=1]
- %tmp.11 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 1, i32 0 ; <i32*> [#uses=1]
+ %tmp.11 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 1, i32 0 ; <i32*> [#uses=1]
%tmp.12 = load i32* %tmp.11 ; <i32> [#uses=1]
%tmp.13 = add i32 %tmp.12, %tmp.9 ; <i32> [#uses=1]
%inc = add i32 %i.0.0, 1 ; <i32> [#uses=2]
loopexit: ; preds = %no_exit, %entry
%Y.0.1 = phi i32 [ 0, %entry ], [ %tmp.13, %no_exit ] ; <i32> [#uses=1]
- %tmp.4 = getelementptr [3 x [3 x i32]]* %X, i32 0, i32 0 ; <[3 x i32]*> [#uses=1]
+ %tmp.4 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0 ; <[3 x i32]*> [#uses=1]
%tmp.15 = call i32 (...)* @foo( [3 x i32]* %tmp.4, i32 %Y.0.1 ) ; <i32> [#uses=0]
ret void
}
ret void
cond_false277.i: ; preds = %bb239.i
- %tmp1062.i = getelementptr [2 x <4 x i32>]* null, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
+ %tmp1062.i = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* null, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
store <4 x i32> zeroinitializer, <4 x i32>* %tmp1062.i
br i1 false, label %cond_true1032.i, label %cond_false1063.i85
ret void
cond_true1032.i: ; preds = %cond_false277.i
- %tmp1187.i = getelementptr [2 x <4 x i32>]* null, i32 0, i32 0, i32 7 ; <i32*> [#uses=1]
+ %tmp1187.i = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* null, i32 0, i32 0, i32 7 ; <i32*> [#uses=1]
store i32 0, i32* %tmp1187.i
br label %bb2037.i
; CHECK: ret i32 %Z
define i32 @test(%struct.closure_type* %tmp18169) {
- %tmp18174 = getelementptr %struct.closure_type* %tmp18169, i32 0, i32 4, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp18174 = getelementptr %struct.closure_type, %struct.closure_type* %tmp18169, i32 0, i32 4, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp18269 = bitcast i32* %tmp18174 to %struct.STYLE* ; <%struct.STYLE*> [#uses=1]
%A = load i32* %tmp18174 ; <i32> [#uses=1]
- %tmp18272 = getelementptr %struct.STYLE* %tmp18269, i32 0, i32 0, i32 0, i32 2 ; <i16*> [#uses=1]
+ %tmp18272 = getelementptr %struct.STYLE, %struct.STYLE* %tmp18269, i32 0, i32 0, i32 0, i32 2 ; <i16*> [#uses=1]
store i16 123, i16* %tmp18272
%Q = load i32* %tmp18174 ; <i32> [#uses=1]
; CHECK: 6 partial alias responses
define void @foo(i32* noalias %p, i32* noalias %q, i32 %i, i32 %j) {
- %Ipointer = getelementptr i32* %p, i32 %i
- %qi = getelementptr i32* %q, i32 %i
- %Jpointer = getelementptr i32* %p, i32 %j
- %qj = getelementptr i32* %q, i32 %j
+ %Ipointer = getelementptr i32, i32* %p, i32 %i
+ %qi = getelementptr i32, i32* %q, i32 %i
+ %Jpointer = getelementptr i32, i32* %p, i32 %j
+ %qj = getelementptr i32, i32* %q, i32 %j
store i32 0, i32* %p
store i32 0, i32* %Ipointer
store i32 0, i32* %Jpointer
; CHECK: ret i32 %tmp7
entry:
store i32 1, i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8
- %tmp4 = getelementptr %struct.A* %b, i32 0, i32 0 ;<i32*> [#uses=1]
+ %tmp4 = getelementptr %struct.A, %struct.A* %b, i32 0, i32 0 ;<i32*> [#uses=1]
store i32 0, i32* %tmp4, align 4
%tmp7 = load i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 ; <i32> [#uses=1]
ret i32 %tmp7
define i32 @uhci_suspend(%struct.usb_hcd* %hcd) {
entry:
- %tmp17 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 2, i64 1
+ %tmp17 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 2, i64 1
; <i64*> [#uses=1]
%tmp1718 = bitcast i64* %tmp17 to i32* ; <i32*> [#uses=1]
%tmp19 = load i32* %tmp1718, align 4 ; <i32> [#uses=0]
br i1 false, label %cond_true34, label %done_okay
cond_true34: ; preds = %entry
- %tmp631 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 2, i64
+ %tmp631 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 2, i64
2305843009213693950 ; <i64*> [#uses=1]
%tmp70 = bitcast i64* %tmp631 to %struct.device**
define i32 @ehci_pci_setup(%struct.usb_hcd* %hcd) {
entry:
- %tmp14 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 0, i32 0 ; <%struct.device**> [#uses=1]
+ %tmp14 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 0, i32 0 ; <%struct.device**> [#uses=1]
%tmp15 = load %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0]
br i1 false, label %bb25, label %return
br i1 false, label %cond_true, label %return
cond_true: ; preds = %bb25
- %tmp601 = getelementptr %struct.usb_hcd* %hcd, i32 0, i32 1, i64 2305843009213693951 ; <i64*> [#uses=1]
+ %tmp601 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 1, i64 2305843009213693951 ; <i64*> [#uses=1]
%tmp67 = bitcast i64* %tmp601 to %struct.device** ; <%struct.device**> [#uses=1]
%tmp68 = load %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0]
ret i32 undef
define void @foo(%struct.x* byval align 4 %X) nounwind {
; CHECK: store i32 2, i32* %tmp1
entry:
- %tmp = getelementptr %struct.x* %X, i32 0, i32 0 ; <[4 x i32]*> [#uses=1]
- %tmp1 = getelementptr [4 x i32]* %tmp, i32 0, i32 3 ; <i32*> [#uses=1]
+ %tmp = getelementptr %struct.x, %struct.x* %X, i32 0, i32 0 ; <[4 x i32]*> [#uses=1]
+ %tmp1 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 3 ; <i32*> [#uses=1]
store i32 2, i32* %tmp1, align 4
%tmp2 = call i32 (...)* @bar( %struct.x* byval align 4 %X ) nounwind ; <i32> [#uses=0]
br label %return
; CHECK: load i32* %a
%a = call i32* @noalias()
store i32 1, i32* %a
- %b = getelementptr i32* %a, i32 %x
+ %b = getelementptr i32, i32* %a, i32 %x
store i32 2, i32* %b
%c = load i32* %a
target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
define i8 @foo(i8* %ptr) {
- %P = getelementptr i8* %ptr, i32 0
- %Q = getelementptr i8* %ptr, i32 1
+ %P = getelementptr i8, i8* %ptr, i32 0
+ %Q = getelementptr i8, i8* %ptr, i32 1
; CHECK: getelementptr
%X = load i8* %P
%Y = atomicrmw add i8* %Q, i8 1 monotonic
br i1 %tmp, label %bb, label %bb1
bb:
- %b = getelementptr i32* %a, i32 0
+ %b = getelementptr i32, i32* %a, i32 0
br label %bb2
bb1:
define i32 @test(i32* %tab, i32 %indvar) nounwind {
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
- %t.5 = getelementptr i32* %tab, i32 %tmp32
+ %t.5 = getelementptr i32, i32* %tab, i32 %tmp32
%loada = load i32* %tab
store i32 0, i32* %t.5
%loadb = load i32* %tab
define i32 @main() {
%t = alloca %struct.foo, align 4
- %1 = getelementptr inbounds %struct.foo* %t, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.foo, %struct.foo* %t, i32 0, i32 0
store i32 1, i32* %1, align 4
- %2 = getelementptr inbounds %struct.foo* %t, i64 1
+ %2 = getelementptr inbounds %struct.foo, %struct.foo* %t, i64 1
%3 = bitcast %struct.foo* %2 to i8*
- %4 = getelementptr inbounds i8* %3, i32 -1
+ %4 = getelementptr inbounds i8, i8* %3, i32 -1
store i8 0, i8* %4
- %5 = getelementptr inbounds i8* %4, i32 -1
+ %5 = getelementptr inbounds i8, i8* %4, i32 -1
store i8 0, i8* %5
- %6 = getelementptr inbounds i8* %5, i32 -1
+ %6 = getelementptr inbounds i8, i8* %5, i32 -1
store i8 0, i8* %6
- %7 = getelementptr inbounds i8* %6, i32 -1
+ %7 = getelementptr inbounds i8, i8* %6, i32 -1
store i8 0, i8* %7
- %8 = getelementptr inbounds i8* %7, i32 -1
+ %8 = getelementptr inbounds i8, i8* %7, i32 -1
store i8 0, i8* %8
- %9 = getelementptr inbounds i8* %8, i32 -1
+ %9 = getelementptr inbounds i8, i8* %8, i32 -1
store i8 0, i8* %9
- %10 = getelementptr inbounds i8* %9, i32 -1
+ %10 = getelementptr inbounds i8, i8* %9, i32 -1
store i8 0, i8* %10
- %11 = getelementptr inbounds i8* %10, i32 -1
+ %11 = getelementptr inbounds i8, i8* %10, i32 -1
store i8 0, i8* %11
%12 = load i32* %1, align 4
ret i32 %12
define i32 @foo(%struct.x* byval %a) nounwind {
; CHECK: ret i32 1
%tmp1 = tail call i32 (...)* @bar( %struct.x* %a ) nounwind ; <i32> [#uses=0]
- %tmp2 = getelementptr %struct.x* %a, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 ; <i32*> [#uses=2]
store i32 1, i32* %tmp2, align 4
store i32 2, i32* @g, align 4
%tmp4 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
define void @foo([3 x [3 x double]]* noalias %p) {
entry:
- %p3 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
+ %p3 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
br label %loop
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- %p.0.i.0 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
+ %p.0.i.0 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
store volatile double 0.0, double* %p3
store volatile double 0.1, double* %p.0.i.0
define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
entry:
- %q = getelementptr i8* %p, i64 16
+ %q = getelementptr i8, i8* %p, i64 16
%a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
%b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
define void @test2b(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 12
+ %R = getelementptr i8, i8* %P, i64 12
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
define void @test2c(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 11
+ %R = getelementptr i8, i8* %P, i64 11
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
define void @test2d(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 -12
+ %R = getelementptr i8, i8* %P, i64 -12
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
define void @test2e(i8* noalias %P, i8* noalias %Q) nounwind ssp {
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
- %R = getelementptr i8* %P, i64 -11
+ %R = getelementptr i8, i8* %P, i64 -11
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
ret void
call void @external(i32* %Array1)
call void @external(i32* %Array2)
- %pointer = getelementptr i32* %Array1, i64 %A
+ %pointer = getelementptr i32, i32* %Array1, i64 %A
%val = load i32* %pointer
- %pointer2 = getelementptr i32* %Array2, i64 %B
+ %pointer2 = getelementptr i32, i32* %Array2, i64 %B
store i32 7, i32* %pointer2
%REMOVE = load i32* %pointer ; redundant with above load
%Array = alloca i32, i32 100
call void @external(i32* %Array)
- %P1 = getelementptr i32* %Array, i64 7
- %P2 = getelementptr i32* %Array, i64 6
+ %P1 = getelementptr i32, i32* %Array, i64 7
+ %P2 = getelementptr i32, i32* %Array, i64 6
%A = load i32* %P1
store i32 1, i32* %P2 ; Should not invalidate load
; they cannot alias.
define i32 @gep_distance_test(i32* %A) {
%REMOVEu = load i32* %A
- %B = getelementptr i32* %A, i64 2 ; Cannot alias A
+ %B = getelementptr i32, i32* %A, i64 2 ; Cannot alias A
store i32 7, i32* %B
%REMOVEv = load i32* %A
%r = sub i32 %REMOVEu, %REMOVEv
; Test that if two pointers are spaced out by a constant offset, that they
; cannot alias, even if there is a variable offset between them...
define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) {
- %A1 = getelementptr {i32,i32}* %A, i64 0, i32 0
+ %A1 = getelementptr {i32,i32}, {i32,i32}* %A, i64 0, i32 0
%REMOVEu = load i32* %A1
- %B = getelementptr {i32,i32}* %A, i64 %distance, i32 1
+ %B = getelementptr {i32,i32}, {i32,i32}* %A, i64 %distance, i32 1
store i32 7, i32* %B ; B cannot alias A, it's at least 4 bytes away
%REMOVEv = load i32* %A1
%r = sub i32 %REMOVEu, %REMOVEv
define i32 @gep_distance_test3(i32 * %A) {
%X = load i32* %A
%B = bitcast i32* %A to i8*
- %C = getelementptr i8* %B, i64 4
+ %C = getelementptr i8, i8* %B, i64 4
store i8 42, i8* %C
%Y = load i32* %A
%R = sub i32 %X, %Y
define i16 @zext_sext_confusion(i16* %row2col, i5 %j) nounwind{
entry:
%sum5.cast = zext i5 %j to i64 ; <i64> [#uses=1]
- %P1 = getelementptr i16* %row2col, i64 %sum5.cast
+ %P1 = getelementptr i16, i16* %row2col, i64 %sum5.cast
%row2col.load.1.2 = load i16* %P1, align 1 ; <i16> [#uses=1]
%sum13.cast31 = sext i5 %j to i6 ; <i6> [#uses=1]
%sum13.cast = zext i6 %sum13.cast31 to i64 ; <i64> [#uses=1]
- %P2 = getelementptr i16* %row2col, i64 %sum13.cast
+ %P2 = getelementptr i16, i16* %row2col, i64 %sum13.cast
%row2col.load.1.6 = load i16* %P2, align 1 ; <i16> [#uses=1]
%.ret = sub i16 %row2col.load.1.6, %row2col.load.1.2 ; <i16> [#uses=1]
; CHECK: ret i32 0
entry:
%u = alloca %union.anon, align 8
- %tmp9 = getelementptr inbounds %union.anon* %u, i64 0, i32 0
+ %tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
%tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
- %arrayidx = getelementptr inbounds [2 x i32]* %tmp4, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
%tmp5 = load i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
define i32 @test1(i8 * %P) {
entry:
%Q = bitcast i8* %P to {i32, i32}*
- %R = getelementptr {i32, i32}* %Q, i32 0, i32 1
+ %R = getelementptr {i32, i32}, {i32, i32}* %Q, i32 0, i32 1
%S = load i32* %R
%q = bitcast i8* %P to {i32, i32}*
- %r = getelementptr {i32, i32}* %q, i32 0, i32 1
+ %r = getelementptr {i32, i32}, {i32, i32}* %q, i32 0, i32 1
%s = load i32* %r
%t = sub i32 %S, %s
define i32 @test2(i8 * %P) {
entry:
%Q = bitcast i8* %P to {i32, i32, i32}*
- %R = getelementptr {i32, i32, i32}* %Q, i32 0, i32 1
+ %R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 1
%S = load i32* %R
- %r = getelementptr {i32, i32, i32}* %Q, i32 0, i32 2
+ %r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 2
store i32 42, i32* %r
%s = load i32* %R
; This was a miscompilation.
define i32 @test3({float, {i32, i32, i32}}* %P) {
entry:
- %P2 = getelementptr {float, {i32, i32, i32}}* %P, i32 0, i32 1
- %R = getelementptr {i32, i32, i32}* %P2, i32 0, i32 1
+ %P2 = getelementptr {float, {i32, i32, i32}}, {float, {i32, i32, i32}}* %P, i32 0, i32 1
+ %R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 1
%S = load i32* %R
- %r = getelementptr {i32, i32, i32}* %P2, i32 0, i32 2
+ %r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 2
store i32 42, i32* %r
%s = load i32* %R
define i32 @test4(%SmallPtrSet64* %P) {
entry:
- %tmp2 = getelementptr inbounds %SmallPtrSet64* %P, i64 0, i32 0, i32 1
+ %tmp2 = getelementptr inbounds %SmallPtrSet64, %SmallPtrSet64* %P, i64 0, i32 0, i32 1
store i32 64, i32* %tmp2, align 8
- %tmp3 = getelementptr inbounds %SmallPtrSet64* %P, i64 0, i32 0, i32 4, i64 64
+ %tmp3 = getelementptr inbounds %SmallPtrSet64, %SmallPtrSet64* %P, i64 0, i32 0, i32 4, i64 64
store i8* null, i8** %tmp3, align 8
%tmp4 = load i32* %tmp2, align 8
ret i32 %tmp4
; P[i] != p[i+1]
define i32 @test5(i32* %p, i64 %i) {
- %pi = getelementptr i32* %p, i64 %i
+ %pi = getelementptr i32, i32* %p, i64 %i
%i.next = add i64 %i, 1
- %pi.next = getelementptr i32* %p, i64 %i.next
+ %pi.next = getelementptr i32, i32* %p, i64 %i.next
%x = load i32* %pi
store i32 42, i32* %pi.next
%y = load i32* %pi
}
define i32 @test5_as1_smaller_size(i32 addrspace(1)* %p, i8 %i) {
- %pi = getelementptr i32 addrspace(1)* %p, i8 %i
+ %pi = getelementptr i32, i32 addrspace(1)* %p, i8 %i
%i.next = add i8 %i, 1
- %pi.next = getelementptr i32 addrspace(1)* %p, i8 %i.next
+ %pi.next = getelementptr i32, i32 addrspace(1)* %p, i8 %i.next
%x = load i32 addrspace(1)* %pi
store i32 42, i32 addrspace(1)* %pi.next
%y = load i32 addrspace(1)* %pi
}
define i32 @test5_as1_same_size(i32 addrspace(1)* %p, i16 %i) {
- %pi = getelementptr i32 addrspace(1)* %p, i16 %i
+ %pi = getelementptr i32, i32 addrspace(1)* %p, i16 %i
%i.next = add i16 %i, 1
- %pi.next = getelementptr i32 addrspace(1)* %p, i16 %i.next
+ %pi.next = getelementptr i32, i32 addrspace(1)* %p, i16 %i.next
%x = load i32 addrspace(1)* %pi
store i32 42, i32 addrspace(1)* %pi.next
%y = load i32 addrspace(1)* %pi
; P[i] != p[(i*4)|1]
define i32 @test6(i32* %p, i64 %i1) {
%i = shl i64 %i1, 2
- %pi = getelementptr i32* %p, i64 %i
+ %pi = getelementptr i32, i32* %p, i64 %i
%i.next = or i64 %i, 1
- %pi.next = getelementptr i32* %p, i64 %i.next
+ %pi.next = getelementptr i32, i32* %p, i64 %i.next
%x = load i32* %pi
store i32 42, i32* %pi.next
%y = load i32* %pi
; P[1] != P[i*4]
define i32 @test7(i32* %p, i64 %i) {
- %pi = getelementptr i32* %p, i64 1
+ %pi = getelementptr i32, i32* %p, i64 1
%i.next = shl i64 %i, 2
- %pi.next = getelementptr i32* %p, i64 %i.next
+ %pi.next = getelementptr i32, i32* %p, i64 %i.next
%x = load i32* %pi
store i32 42, i32* %pi.next
%y = load i32* %pi
; PR1143
define i32 @test8(i32* %p, i16 %i) {
%i1 = zext i16 %i to i32
- %pi = getelementptr i32* %p, i32 %i1
+ %pi = getelementptr i32, i32* %p, i32 %i1
%i.next = add i16 %i, 1
%i.next2 = zext i16 %i.next to i32
- %pi.next = getelementptr i32* %p, i32 %i.next2
+ %pi.next = getelementptr i32, i32* %p, i32 %i.next2
%x = load i32* %pi
store i32 42, i32* %pi.next
%y = load i32* %pi
%i2 = shl i32 %i, 2
%i3 = add i32 %i2, 1
; P2 = P + 1 + 4*i
- %P2 = getelementptr [4 x i8] *%P, i32 0, i32 %i3
+ %P2 = getelementptr [4 x i8], [4 x i8] *%P, i32 0, i32 %i3
%j2 = shl i32 %j, 2
; P4 = P + 4*j
- %P4 = getelementptr [4 x i8]* %P, i32 0, i32 %j2
+ %P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %j2
%x = load i8* %P2
store i8 42, i8* %P4
%i2 = shl i32 %i, 2
%i3 = add i32 %i2, 4
; P2 = P + 4 + 4*i
- %P2 = getelementptr [4 x i8] *%P, i32 0, i32 %i3
+ %P2 = getelementptr [4 x i8], [4 x i8] *%P, i32 0, i32 %i3
; P4 = P + 4*i
- %P4 = getelementptr [4 x i8]* %P, i32 0, i32 %i2
+ %P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %i2
%x = load i8* %P2
store i8 42, i8* %P4
define float @test11(i32 %indvar, [4 x [2 x float]]* %q) nounwind ssp {
%tmp = mul i32 %indvar, -1
%dec = add i32 %tmp, 3
- %scevgep = getelementptr [4 x [2 x float]]* %q, i32 0, i32 %dec
+ %scevgep = getelementptr [4 x [2 x float]], [4 x [2 x float]]* %q, i32 0, i32 %dec
%scevgep35 = bitcast [2 x float]* %scevgep to i64*
- %arrayidx28 = getelementptr inbounds [4 x [2 x float]]* %q, i32 0, i32 0
- %y29 = getelementptr inbounds [2 x float]* %arrayidx28, i32 0, i32 1
+ %arrayidx28 = getelementptr inbounds [4 x [2 x float]], [4 x [2 x float]]* %q, i32 0, i32 0
+ %y29 = getelementptr inbounds [2 x float], [2 x float]* %arrayidx28, i32 0, i32 1
store float 1.0, float* %y29, align 4
store i64 0, i64* %scevgep35, align 4
%tmp30 = load float* %y29, align 4
; (This was a miscompilation.)
define i32 @test12(i32 %x, i32 %y, i8* %p) nounwind {
%a = bitcast i8* %p to [13 x i8]*
- %b = getelementptr [13 x i8]* %a, i32 %x
+ %b = getelementptr [13 x i8], [13 x i8]* %a, i32 %x
%c = bitcast [13 x i8]* %b to [15 x i8]*
- %d = getelementptr [15 x i8]* %c, i32 %y, i32 8
+ %d = getelementptr [15 x i8], [15 x i8]* %c, i32 %y, i32 8
%castd = bitcast i8* %d to i32*
%castp = bitcast i8* %p to i32*
store i32 1, i32* %castp
; CHECK-LABEL: @test2(
define i8 @test2(i32 %tmp79, i32 %w.2, i32 %indvar89) nounwind {
%tmp92 = add i32 %tmp79, %indvar89
- %arrayidx412 = getelementptr [0 x i8]* @window, i32 0, i32 %tmp92
+ %arrayidx412 = getelementptr [0 x i8], [0 x i8]* @window, i32 0, i32 %tmp92
%tmp93 = add i32 %w.2, %indvar89
- %arrayidx416 = getelementptr [0 x i8]* @window, i32 0, i32 %tmp93
+ %arrayidx416 = getelementptr [0 x i8], [0 x i8]* @window, i32 0, i32 %tmp93
%A = load i8* %arrayidx412, align 1
store i8 4, i8* %arrayidx416, align 1
; CHECK: define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: %q = getelementptr i8* %p, i64 16
+; CHECK-NEXT: %q = getelementptr i8, i8* %p, i64 16
; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) [[ATTR]]
; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
; CHECK-NEXT: %c = add <8 x i16> %a, %a
define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
entry:
- %q = getelementptr i8* %p, i64 16
+ %q = getelementptr i8, i8* %p, i64 16
%a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
%b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
define i8 @test2(i8* %P) {
; CHECK-LABEL: @test2
- %P2 = getelementptr i8* %P, i32 127
+ %P2 = getelementptr i8, i8* %P, i32 127
store i8 1, i8* %P2 ;; Not dead across memset
call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false)
%A = load i8* %P2
define i8 @test2a(i8* %P) {
; CHECK-LABEL: @test2
- %P2 = getelementptr i8* %P, i32 126
+ %P2 = getelementptr i8, i8* %P, i32 126
;; FIXME: DSE isn't zapping this dead store.
store i8 1, i8* %P2 ;; Dead, clobbered by memset.
; CHECK-NOT: %Y
%Y = add i8 %X, 1 ;; Dead, because the only use (the store) is dead.
- %P2 = getelementptr i8* %P, i32 2
+ %P2 = getelementptr i8, i8* %P, i32 2
store i8 %Y, i8* %P2 ;; Not read by lifetime.end, should be removed.
; CHECK: store i8 2, i8* %P2
call void @llvm.lifetime.end(i64 1, i8* %P)
; CHECK-LABEL: @test3a
%Y = add i8 %X, 1 ;; Dead, because the only use (the store) is dead.
- %P2 = getelementptr i8* %P, i32 2
+ %P2 = getelementptr i8, i8* %P, i32 2
store i8 %Y, i8* %P2
; CHECK-NEXT: call void @llvm.lifetime.end
call void @llvm.lifetime.end(i64 10, i8* %P)
entry:
%x = alloca i32, align 4
store i32 0, i32* %x, align 4
- %add.ptr = getelementptr inbounds i32* %x, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %x, i64 1
call void @test7decl(i32* %add.ptr)
%tmp = load i32* %x, align 4
ret i32 %tmp
; CHECK: PartialAlias: i16* %bigbase0, i8* %phi
define i8 @test0(i8* %base, i1 %x) {
entry:
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
br i1 %x, label %red, label %green
red:
br label %green
; CHECK: PartialAlias: i16* %bigbase1, i8* %sel
define i8 @test1(i8* %base, i1 %x) {
entry:
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
%sel = select i1 %x, i8* %baseplusone, i8* %base
store i8 0, i8* %sel
entry:
%arr = alloca [10 x i8*] ; <[10 x i8*]*> [#uses=1]
%tmp2 = call i8* @getPtr( ) nounwind ; <i8*> [#uses=2]
- %tmp4 = getelementptr [10 x i8*]* %arr, i32 0, i32 %i ; <i8**> [#uses=2]
+ %tmp4 = getelementptr [10 x i8*], [10 x i8*]* %arr, i32 0, i32 %i ; <i8**> [#uses=2]
store i8* %tmp2, i8** %tmp4, align 4
- %tmp10 = getelementptr i8* %tmp2, i32 10 ; <i8*> [#uses=1]
+ %tmp10 = getelementptr i8, i8* %tmp2, i32 10 ; <i8*> [#uses=1]
store i8 42, i8* %tmp10, align 1
%tmp14 = load i8** %tmp4, align 4 ; <i8*> [#uses=1]
- %tmp16 = getelementptr i8* %tmp14, i32 10 ; <i8*> [#uses=1]
+ %tmp16 = getelementptr i8, i8* %tmp14, i32 10 ; <i8*> [#uses=1]
%tmp17 = load i8* %tmp16, align 1 ; <i8> [#uses=1]
%tmp19 = icmp eq i8 %tmp17, 42 ; <i1> [#uses=1]
ret i1 %tmp19
define i64 @testcase(%nested * noalias %p1, %nested * noalias %p2,
i32 %a, i32 %b) {
- %ptr = getelementptr inbounds %nested* %p1, i64 -1, i32 0
- %ptr.64 = getelementptr inbounds %nested.i64* %ptr, i64 0, i32 0
- %ptr2= getelementptr inbounds %nested* %p2, i64 0, i32 0
+ %ptr = getelementptr inbounds %nested, %nested* %p1, i64 -1, i32 0
+ %ptr.64 = getelementptr inbounds %nested.i64, %nested.i64* %ptr, i64 0, i32 0
+ %ptr2= getelementptr inbounds %nested, %nested* %p2, i64 0, i32 0
%cmp = icmp ult i32 %a, %b
%either_ptr = select i1 %cmp, %nested.i64* %ptr2, %nested.i64* %ptr
- %either_ptr.64 = getelementptr inbounds %nested.i64* %either_ptr, i64 0, i32 0
+ %either_ptr.64 = getelementptr inbounds %nested.i64, %nested.i64* %either_ptr, i64 0, i32 0
; Because either_ptr.64 and ptr.64 can alias (we used to return noalias)
; elimination of the first store is not valid.
; Check that geps with equal base offsets of noalias base pointers stay noalias.
define i32 @test(i32* %p, i16 %i) {
; CHECK-LABEL: Function: test:
- %pi = getelementptr i32* %p, i32 0
- %pi.next = getelementptr i32* %p, i32 1
+ %pi = getelementptr i32, i32* %p, i32 0
+ %pi.next = getelementptr i32, i32* %p, i32 1
%b = icmp eq i16 %i, 0
br i1 %b, label %bb1, label %bb2
bb1:
- %f = getelementptr i32* %pi, i32 1
- %g = getelementptr i32* %pi.next, i32 1
+ %f = getelementptr i32, i32* %pi, i32 1
+ %g = getelementptr i32, i32* %pi.next, i32 1
br label %bb3
bb2:
- %f2 = getelementptr i32* %pi, i32 1
- %g2 = getelementptr i32* %pi.next, i32 1
+ %f2 = getelementptr i32, i32* %pi, i32 1
+ %g2 = getelementptr i32, i32* %pi.next, i32 1
br label %bb3
bb3:
%ptr_phi = phi i32* [ %f, %bb1 ], [ %f2, %bb2 ]
%ptr_phi2 = phi i32* [ %g, %bb1 ], [ %g2, %bb2 ]
; CHECK: NoAlias: i32* %f1, i32* %g1
- %f1 = getelementptr i32* %ptr_phi , i32 1
- %g1 = getelementptr i32* %ptr_phi2 , i32 1
+ %f1 = getelementptr i32, i32* %ptr_phi , i32 1
+ %g1 = getelementptr i32, i32* %ptr_phi2 , i32 1
ret i32 0
}
; Check that geps with equal indices of noalias base pointers stay noalias.
define i32 @test2([2 x i32]* %p, i32 %i) {
; CHECK-LABEL: Function: test2:
- %pi = getelementptr [2 x i32]* %p, i32 0
- %pi.next = getelementptr [2 x i32]* %p, i32 1
+ %pi = getelementptr [2 x i32], [2 x i32]* %p, i32 0
+ %pi.next = getelementptr [2 x i32], [2 x i32]* %p, i32 1
%b = icmp eq i32 %i, 0
br i1 %b, label %bb1, label %bb2
bb1:
- %f = getelementptr [2 x i32]* %pi, i32 1
- %g = getelementptr [2 x i32]* %pi.next, i32 1
+ %f = getelementptr [2 x i32], [2 x i32]* %pi, i32 1
+ %g = getelementptr [2 x i32], [2 x i32]* %pi.next, i32 1
br label %bb3
bb2:
- %f2 = getelementptr [2 x i32]* %pi, i32 1
- %g2 = getelementptr [2 x i32]* %pi.next, i32 1
+ %f2 = getelementptr [2 x i32], [2 x i32]* %pi, i32 1
+ %g2 = getelementptr [2 x i32], [2 x i32]* %pi.next, i32 1
br label %bb3
bb3:
%ptr_phi = phi [2 x i32]* [ %f, %bb1 ], [ %f2, %bb2 ]
%ptr_phi2 = phi [2 x i32]* [ %g, %bb1 ], [ %g2, %bb2 ]
; CHECK: NoAlias: i32* %f1, i32* %g1
- %f1 = getelementptr [2 x i32]* %ptr_phi , i32 1, i32 %i
- %g1 = getelementptr [2 x i32]* %ptr_phi2 , i32 1, i32 %i
+ %f1 = getelementptr [2 x i32], [2 x i32]* %ptr_phi , i32 1, i32 %i
+ %g1 = getelementptr [2 x i32], [2 x i32]* %ptr_phi2 , i32 1, i32 %i
ret i32 0
}
for.body:
%1 = load i32* %jj7, align 4
%idxprom4 = zext i32 %1 to i64
- %arrayidx5 = getelementptr inbounds [100 x i32]* %oa5, i64 0, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %oa5, i64 0, i64 %idxprom4
%2 = load i32* %arrayidx5, align 4
%sub6 = sub i32 %2, 6
store i32 %sub6, i32* %arrayidx5, align 4
store i32 %3, i32* %arrayidx5, align 4
%sub11 = add i32 %1, -1
%idxprom12 = zext i32 %sub11 to i64
- %arrayidx13 = getelementptr inbounds [100 x i32]* %oa5, i64 0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds [100 x i32], [100 x i32]* %oa5, i64 0, i64 %idxprom12
call void @inc(i32* %jj7)
br label %codeRepl
%lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
%lsr.iv46 = bitcast [16000 x double]* %lsr.iv4 to <4 x double>*
%lsr.iv12 = bitcast [16000 x double]* %lsr.iv1 to <4 x double>*
- %scevgep11 = getelementptr <4 x double>* %lsr.iv46, i64 -2
+ %scevgep11 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -2
%i6 = load <4 x double>* %scevgep11, align 32
%add = fadd <4 x double> %i6, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
store <4 x double> %add, <4 x double>* %lsr.iv12, align 32
- %scevgep10 = getelementptr <4 x double>* %lsr.iv46, i64 -1
+ %scevgep10 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -1
%i7 = load <4 x double>* %scevgep10, align 32
%add.4 = fadd <4 x double> %i7, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %scevgep9 = getelementptr <4 x double>* %lsr.iv12, i64 1
+ %scevgep9 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 1
store <4 x double> %add.4, <4 x double>* %scevgep9, align 32
%i8 = load <4 x double>* %lsr.iv46, align 32
%add.8 = fadd <4 x double> %i8, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %scevgep8 = getelementptr <4 x double>* %lsr.iv12, i64 2
+ %scevgep8 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 2
store <4 x double> %add.8, <4 x double>* %scevgep8, align 32
- %scevgep7 = getelementptr <4 x double>* %lsr.iv46, i64 1
+ %scevgep7 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 1
%i9 = load <4 x double>* %scevgep7, align 32
%add.12 = fadd <4 x double> %i9, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
- %scevgep3 = getelementptr <4 x double>* %lsr.iv12, i64 3
+ %scevgep3 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 3
store <4 x double> %add.12, <4 x double>* %scevgep3, align 32
; CHECK: NoAlias:{{[ \t]+}}<4 x double>* %scevgep11, <4 x double>* %scevgep7
; CHECK: NoAlias:{{[ \t]+}}<4 x double>* %scevgep3, <4 x double>* %scevgep9
%lsr.iv.next = add i32 %lsr.iv, -16
- %scevgep = getelementptr [16000 x double]* %lsr.iv1, i64 0, i64 16
+ %scevgep = getelementptr [16000 x double], [16000 x double]* %lsr.iv1, i64 0, i64 16
%i10 = bitcast double* %scevgep to [16000 x double]*
- %scevgep5 = getelementptr [16000 x double]* %lsr.iv4, i64 0, i64 16
+ %scevgep5 = getelementptr [16000 x double], [16000 x double]* %lsr.iv4, i64 0, i64 16
%i11 = bitcast double* %scevgep5 to [16000 x double]*
%exitcond.15 = icmp eq i32 %lsr.iv.next, 0
br i1 %exitcond.15, label %for.end, label %for.body4
; CHECK: NoAlias: i32* %ptr2_phi, i32* %ptr_phi
define i32 @test_noalias_1(i32* %ptr2, i32 %count, i32* %coeff) {
entry:
- %ptr = getelementptr inbounds i32* %ptr2, i64 1
+ %ptr = getelementptr inbounds i32, i32* %ptr2, i64 1
br label %while.body
while.body:
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %mul, %result.09
%tobool = icmp eq i32 %dec, 0
- %ptr_inc = getelementptr inbounds i32* %ptr_phi, i64 1
- %ptr2_inc = getelementptr inbounds i32* %ptr2_phi, i64 1
+ %ptr_inc = getelementptr inbounds i32, i32* %ptr_phi, i64 1
+ %ptr2_inc = getelementptr inbounds i32, i32* %ptr2_phi, i64 1
br i1 %tobool, label %the_exit, label %while.body
the_exit:
; CHECK: NoAlias: i32* %ptr2_phi, i32* %ptr_phi
define i32 @test_noalias_2(i32* %ptr2, i32 %count, i32* %coeff) {
entry:
- %ptr = getelementptr inbounds i32* %ptr2, i64 1
+ %ptr = getelementptr inbounds i32, i32* %ptr2, i64 1
br label %outer.while.header
outer.while.header:
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %mul, %result.09
%tobool = icmp eq i32 %dec, 0
- %ptr_inc = getelementptr inbounds i32* %ptr_phi, i64 1
- %ptr2_inc = getelementptr inbounds i32* %ptr2_phi, i64 1
+ %ptr_inc = getelementptr inbounds i32, i32* %ptr_phi, i64 1
+ %ptr2_inc = getelementptr inbounds i32, i32* %ptr2_phi, i64 1
br i1 %tobool, label %outer.while.backedge, label %while.body
outer.while.backedge:
- %ptr_inc_outer = getelementptr inbounds i32* %ptr_phi, i64 1
- %ptr2_inc_outer = getelementptr inbounds i32* %ptr2_phi, i64 1
+ %ptr_inc_outer = getelementptr inbounds i32, i32* %ptr_phi, i64 1
+ %ptr2_inc_outer = getelementptr inbounds i32, i32* %ptr2_phi, i64 1
%dec.outer = add nsw i32 %num.outer, -1
%br.cond = icmp eq i32 %dec.outer, 0
br i1 %br.cond, label %the_exit, label %outer.while.header
define <8 x float> @foo1(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
allocas:
%vix = load <8 x i32>* %vix.ptr, align 4
- %t1.ptr = getelementptr i8* %arr.ptr, i8 4
+ %t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4
%v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
store i8 1, i8* %t1.ptr, align 4
define <8 x float> @foo2(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
allocas:
%vix = load <8 x i32>* %vix.ptr, align 4
- %t1.ptr = getelementptr i8* %arr.ptr, i8 4
+ %t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4
%v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
store i8 1, i8* %t2.ptr, align 4
Loop: ; preds = %Loop, %0
%AVal = load i32* @A ; <i32> [#uses=2]
- %C0 = getelementptr [2 x i32]* @C, i64 0, i64 0 ; <i32*> [#uses=1]
+ %C0 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 0 ; <i32*> [#uses=1]
store i32 %AVal, i32* %C0
%BVal = load i32* @B ; <i32> [#uses=2]
- %C1 = getelementptr [2 x i32]* @C, i64 0, i64 1 ; <i32*> [#uses=1]
+ %C1 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 1 ; <i32*> [#uses=1]
store i32 %BVal, i32* %C1
br i1 %c, label %Out, label %Loop
; CHECK-DAG: MustAlias: i32* %y, i80* %y_10
define void @test_simple(%struct* %st, i64 %i, i64 %j, i64 %k) {
- %x = getelementptr %struct* %st, i64 %i, i32 0
- %y = getelementptr %struct* %st, i64 %j, i32 1
- %z = getelementptr %struct* %st, i64 %k, i32 2
+ %x = getelementptr %struct, %struct* %st, i64 %i, i32 0
+ %y = getelementptr %struct, %struct* %st, i64 %j, i32 1
+ %z = getelementptr %struct, %struct* %st, i64 %k, i32 2
%y_12 = bitcast i32* %y to %struct*
%y_10 = bitcast i32* %y to i80*
%y_8 = bitcast i32* %y to i64*
; CHECK-DAG: MustAlias: i32* %y, i80* %y_10
define void @test_in_array([1 x %struct]* %st, i64 %i, i64 %j, i64 %k, i64 %i1, i64 %j1, i64 %k1) {
- %x = getelementptr [1 x %struct]* %st, i64 %i, i64 %i1, i32 0
- %y = getelementptr [1 x %struct]* %st, i64 %j, i64 %j1, i32 1
- %z = getelementptr [1 x %struct]* %st, i64 %k, i64 %k1, i32 2
+ %x = getelementptr [1 x %struct], [1 x %struct]* %st, i64 %i, i64 %i1, i32 0
+ %y = getelementptr [1 x %struct], [1 x %struct]* %st, i64 %j, i64 %j1, i32 1
+ %z = getelementptr [1 x %struct], [1 x %struct]* %st, i64 %k, i64 %k1, i32 2
%y_12 = bitcast i32* %y to %struct*
%y_10 = bitcast i32* %y to i80*
%y_8 = bitcast i32* %y to i64*
; CHECK-DAG: MustAlias: i32* %y, i80* %y_10
define void @test_in_3d_array([1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %j, i64 %k, i64 %i1, i64 %j1, i64 %k1, i64 %i2, i64 %j2, i64 %k2, i64 %i3, i64 %j3, i64 %k3) {
- %x = getelementptr [1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %i1, i64 %i2, i64 %i3, i32 0
- %y = getelementptr [1 x [1 x [1 x %struct]]]* %st, i64 %j, i64 %j1, i64 %j2, i64 %j3, i32 1
- %z = getelementptr [1 x [1 x [1 x %struct]]]* %st, i64 %k, i64 %k1, i64 %k2, i64 %k3, i32 2
+ %x = getelementptr [1 x [1 x [1 x %struct]]], [1 x [1 x [1 x %struct]]]* %st, i64 %i, i64 %i1, i64 %i2, i64 %i3, i32 0
+ %y = getelementptr [1 x [1 x [1 x %struct]]], [1 x [1 x [1 x %struct]]]* %st, i64 %j, i64 %j1, i64 %j2, i64 %j3, i32 1
+ %z = getelementptr [1 x [1 x [1 x %struct]]], [1 x [1 x [1 x %struct]]]* %st, i64 %k, i64 %k1, i64 %k2, i64 %k3, i32 2
%y_12 = bitcast i32* %y to %struct*
%y_10 = bitcast i32* %y to i80*
%y_8 = bitcast i32* %y to i64*
; CHECK-DAG: PartialAlias: i32* %y2, i32* %z
define void @test_same_underlying_object_same_indices(%struct* %st, i64 %i, i64 %j, i64 %k) {
- %st2 = getelementptr %struct* %st, i32 10
- %x2 = getelementptr %struct* %st2, i64 %i, i32 0
- %y2 = getelementptr %struct* %st2, i64 %j, i32 1
- %z2 = getelementptr %struct* %st2, i64 %k, i32 2
- %x = getelementptr %struct* %st, i64 %i, i32 0
- %y = getelementptr %struct* %st, i64 %j, i32 1
- %z = getelementptr %struct* %st, i64 %k, i32 2
+ %st2 = getelementptr %struct, %struct* %st, i32 10
+ %x2 = getelementptr %struct, %struct* %st2, i64 %i, i32 0
+ %y2 = getelementptr %struct, %struct* %st2, i64 %j, i32 1
+ %z2 = getelementptr %struct, %struct* %st2, i64 %k, i32 2
+ %x = getelementptr %struct, %struct* %st, i64 %i, i32 0
+ %y = getelementptr %struct, %struct* %st, i64 %j, i32 1
+ %z = getelementptr %struct, %struct* %st, i64 %k, i32 2
ret void
}
; CHECK-DAG: PartialAlias: i32* %y2, i32* %z
define void @test_same_underlying_object_different_indices(%struct* %st, i64 %i1, i64 %j1, i64 %k1, i64 %i2, i64 %k2, i64 %j2) {
- %st2 = getelementptr %struct* %st, i32 10
- %x2 = getelementptr %struct* %st2, i64 %i2, i32 0
- %y2 = getelementptr %struct* %st2, i64 %j2, i32 1
- %z2 = getelementptr %struct* %st2, i64 %k2, i32 2
- %x = getelementptr %struct* %st, i64 %i1, i32 0
- %y = getelementptr %struct* %st, i64 %j1, i32 1
- %z = getelementptr %struct* %st, i64 %k1, i32 2
+ %st2 = getelementptr %struct, %struct* %st, i32 10
+ %x2 = getelementptr %struct, %struct* %st2, i64 %i2, i32 0
+ %y2 = getelementptr %struct, %struct* %st2, i64 %j2, i32 1
+ %z2 = getelementptr %struct, %struct* %st2, i64 %k2, i32 2
+ %x = getelementptr %struct, %struct* %st, i64 %i1, i32 0
+ %y = getelementptr %struct, %struct* %st, i64 %j1, i32 1
+ %z = getelementptr %struct, %struct* %st, i64 %k1, i32 2
ret void
}
; CHECK-LABEL: test_struct_in_array
; CHECK-DAG: MustAlias: i32* %x, i32* %y
define void @test_struct_in_array(%struct2* %st, i64 %i, i64 %j, i64 %k) {
- %x = getelementptr %struct2* %st, i32 0, i32 1, i32 1, i32 0
- %y = getelementptr %struct2* %st, i32 0, i32 0, i32 1, i32 1
+ %x = getelementptr %struct2, %struct2* %st, i32 0, i32 1, i32 1, i32 0
+ %y = getelementptr %struct2, %struct2* %st, i32 0, i32 0, i32 1, i32 1
ret void
}
br i1 false, label %for.body5, label %for.cond
for.body5: ; preds = %for.cond2
- %arrayidx = getelementptr inbounds [2 x i64]* undef, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 0
%tmp7 = load i64* %arrayidx, align 8
- %arrayidx9 = getelementptr inbounds [2 x i64]* undef, i32 0, i64 undef
+ %arrayidx9 = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 undef
%tmp10 = load i64* %arrayidx9, align 8
br label %for.cond2
%t = select i1 undef, i32* %t, i32* undef
%p = select i1 undef, i32* %p, i32* %p
%q = select i1 undef, i32* undef, i32* %p
- %a = getelementptr i8* %a, i32 0
+ %a = getelementptr i8, i8* %a, i32 0
unreachable
}
define void @test_with_zext() {
%1 = tail call i8* @malloc(i64 120)
- %a = getelementptr inbounds i8* %1, i64 8
- %2 = getelementptr inbounds i8* %1, i64 16
+ %a = getelementptr inbounds i8, i8* %1, i64 8
+ %2 = getelementptr inbounds i8, i8* %1, i64 16
%3 = zext i32 3 to i64
- %b = getelementptr inbounds i8* %2, i64 %3
+ %b = getelementptr inbounds i8, i8* %2, i64 %3
ret void
}
define void @test_with_lshr(i64 %i) {
%1 = tail call i8* @malloc(i64 120)
- %a = getelementptr inbounds i8* %1, i64 8
- %2 = getelementptr inbounds i8* %1, i64 16
+ %a = getelementptr inbounds i8, i8* %1, i64 8
+ %2 = getelementptr inbounds i8, i8* %1, i64 16
%3 = lshr i64 %i, 2
- %b = getelementptr inbounds i8* %2, i64 %3
+ %b = getelementptr inbounds i8, i8* %2, i64 %3
ret void
}
for.loop:
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
- %a = getelementptr inbounds i8* %mem, i64 8
- %a.plus1 = getelementptr inbounds i8* %mem, i64 16
+ %a = getelementptr inbounds i8, i8* %mem, i64 8
+ %a.plus1 = getelementptr inbounds i8, i8* %mem, i64 16
%i.64 = zext i32 %i to i64
- %b = getelementptr inbounds i8* %a.plus1, i64 %i.64
+ %b = getelementptr inbounds i8, i8* %a.plus1, i64 %i.64
%i.plus1 = add nuw nsw i32 %i, 1
%cmp = icmp eq i32 %i.plus1, 10
br i1 %cmp, label %for.loop.exit, label %for.loop
for.loop:
%mem = phi i8* [ %mem.orig, %0 ], [ %mem.plus1, %for.loop ]
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
- %a = getelementptr inbounds i8* %mem, i64 8
- %a.plus1 = getelementptr inbounds i8* %mem, i64 16
+ %a = getelementptr inbounds i8, i8* %mem, i64 8
+ %a.plus1 = getelementptr inbounds i8, i8* %mem, i64 16
%i.64 = zext i32 %i to i64
- %b = getelementptr inbounds i8* %a.plus1, i64 %i.64
+ %b = getelementptr inbounds i8, i8* %a.plus1, i64 %i.64
%i.plus1 = add nuw nsw i32 %i, 1
- %mem.plus1 = getelementptr inbounds i8* %mem, i64 8
+ %mem.plus1 = getelementptr inbounds i8, i8* %mem, i64 8
%cmp = icmp eq i32 %i.plus1, 10
br i1 %cmp, label %for.loop.exit, label %for.loop
define void @test_sign_extension(i32 %p) {
%1 = tail call i8* @malloc(i64 120)
%p.64 = zext i32 %p to i64
- %a = getelementptr inbounds i8* %1, i64 %p.64
+ %a = getelementptr inbounds i8, i8* %1, i64 %p.64
%p.minus1 = add i32 %p, -1
%p.minus1.64 = zext i32 %p.minus1 to i64
- %b.i8 = getelementptr inbounds i8* %1, i64 %p.minus1.64
+ %b.i8 = getelementptr inbounds i8, i8* %1, i64 %p.minus1.64
%b.i64 = bitcast i8* %b.i8 to i64*
ret void
}
for.loop:
%i = phi i32 [ 0, %reorder ], [ %i.next, %for.loop ]
%idxprom = zext i32 %i to i64
- %b = getelementptr inbounds [8 x i32]* %values, i64 0, i64 %idxprom
+ %b = getelementptr inbounds [8 x i32], [8 x i32]* %values, i64 0, i64 %idxprom
%i.next = add nuw nsw i32 %i, 1
%1 = icmp eq i32 %i.next, 10
br i1 %1, label %for.loop.exit, label %for.loop
reorder:
- %a = getelementptr inbounds [8 x i32]* %values, i64 0, i64 1
+ %a = getelementptr inbounds [8 x i32], [8 x i32]* %values, i64 0, i64 1
br label %for.loop
for.loop.exit:
; <label>:2 ; preds = %.lr.ph, %2
%i = phi i32 [ %d.val, %.lr.ph ], [ %i.plus1, %2 ]
%i.promoted = sext i32 %i to i64
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %d.promoted, i64 %i.promoted
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %d.promoted, i64 %i.promoted
%i.plus1 = add nsw i32 %i, 1
%cmp = icmp slt i32 %i.plus1, 2
br i1 %cmp, label %2, label %3
; <label>:3 ; preds = %._crit_edge, %0
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
ret void
}
define void @test_modulo_analysis_easy_case(i64 %i) {
%h = alloca [1 x [2 x i32*]], align 16
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %i, i64 0
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %i, i64 0
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
ret void
}
for.loop:
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
%i.promoted = sext i32 %i to i64
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 0
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 0
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
%i.plus1 = add nsw i32 %i, 1
%cmp = icmp slt i32 %i.plus1, 2
br i1 %cmp, label %for.loop, label %for.loop.exit
for.loop:
%i = phi i32 [ 0, %0 ], [ %i.plus1, %for.loop ]
%i.promoted = sext i32 %i to i64
- %x = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 %b.promoted
- %y = getelementptr inbounds [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
+ %x = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 %i.promoted, i64 %b.promoted
+ %y = getelementptr inbounds [1 x [2 x i32*]], [1 x [2 x i32*]]* %h, i64 0, i64 0, i64 1
%i.plus1 = add nsw i32 %i, 1
%cmp = icmp slt i32 %i.plus1, 2
br i1 %cmp, label %for.loop, label %for.loop.exit
; CHECK-LABEL: test_const_eval
; CHECK: NoAlias: i8* %a, i8* %b
define void @test_const_eval(i8* %ptr, i64 %offset) {
- %a = getelementptr inbounds i8* %ptr, i64 %offset
- %a.dup = getelementptr inbounds i8* %ptr, i64 %offset
+ %a = getelementptr inbounds i8, i8* %ptr, i64 %offset
+ %a.dup = getelementptr inbounds i8, i8* %ptr, i64 %offset
%three = zext i32 3 to i64
- %b = getelementptr inbounds i8* %a.dup, i64 %three
+ %b = getelementptr inbounds i8, i8* %a.dup, i64 %three
ret void
}
define void @test_const_eval_scaled(i8* %ptr) {
%three = zext i32 3 to i64
%six = mul i64 %three, 2
- %a = getelementptr inbounds i8* %ptr, i64 %six
- %b = getelementptr inbounds i8* %ptr, i64 6
+ %a = getelementptr inbounds i8, i8* %ptr, i64 %six
+ %b = getelementptr inbounds i8, i8* %ptr, i64 6
ret void
}
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
; CHECK: edge entry -> else probability is 64 / 68 = 94.1176% [HOT edge]
entry:
- %gep1 = getelementptr i32* %a, i32 1
+ %gep1 = getelementptr i32, i32* %a, i32 1
%val1 = load i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
br label %exit
else:
- %gep2 = getelementptr i32* %a, i32 2
+ %gep2 = getelementptr i32, i32* %a, i32 2
%val2 = load i32* %gep2
%val3 = call i32 @regular_function(i32 %val2)
br label %exit
for.body.lr.ph:
%cmp216 = icmp sgt i32 %b, 0
- %arrayidx5 = getelementptr inbounds i32* %c, i64 1
- %arrayidx9 = getelementptr inbounds i32* %c, i64 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %c, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
br label %for.body
; CHECK: edge for.body.lr.ph -> for.body probability is 16 / 16 = 100%
%d.addr.010 = phi i32* [ %d, %while.body.lr.ph ], [ %incdec.ptr4, %if.end ]
%c.addr.09 = phi i32* [ %c, %while.body.lr.ph ], [ %c.addr.1, %if.end ]
%indvars.iv.next = add nsw i64 %indvars.iv, -1
- %arrayidx = getelementptr inbounds float* %f0, i64 %indvars.iv.next
+ %arrayidx = getelementptr inbounds float, float* %f0, i64 %indvars.iv.next
%1 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %f1, i64 %indvars.iv.next
+ %arrayidx2 = getelementptr inbounds float, float* %f1, i64 %indvars.iv.next
%2 = load float* %arrayidx2, align 4
%cmp = fcmp une float %1, %2
br i1 %cmp, label %if.then, label %if.else
if.then:
- %incdec.ptr = getelementptr inbounds i32* %b.addr.011, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.011, i64 1
%3 = load i32* %b.addr.011, align 4
%add = add nsw i32 %3, 12
store i32 %add, i32* %b.addr.011, align 4
br label %if.end
if.else:
- %incdec.ptr3 = getelementptr inbounds i32* %c.addr.09, i64 1
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %c.addr.09, i64 1
%4 = load i32* %c.addr.09, align 4
%sub = add nsw i32 %4, -13
store i32 %sub, i32* %c.addr.09, align 4
if.end:
%c.addr.1 = phi i32* [ %c.addr.09, %if.then ], [ %incdec.ptr3, %if.else ]
%b.addr.1 = phi i32* [ %incdec.ptr, %if.then ], [ %b.addr.011, %if.else ]
- %incdec.ptr4 = getelementptr inbounds i32* %d.addr.010, i64 1
+ %incdec.ptr4 = getelementptr inbounds i32, i32* %d.addr.010, i64 1
store i32 14, i32* %d.addr.010, align 4
%5 = trunc i64 %indvars.iv.next to i32
%tobool = icmp eq i32 %5, 0
; CHECK-NOT: May:
define void @test() {
- %D = getelementptr %T* @G, i64 0, i32 0
- %E = getelementptr %T* @G, i64 0, i32 1, i64 5
- %F = getelementptr i32* getelementptr (%T* @G, i64 0, i32 0), i64 0
- %X = getelementptr [10 x i8]* getelementptr (%T* @G, i64 0, i32 1), i64 0, i64 5
+ %D = getelementptr %T, %T* @G, i64 0, i32 0
+ %E = getelementptr %T, %T* @G, i64 0, i32 1, i64 5
+ %F = getelementptr i32, i32* getelementptr (%T* @G, i64 0, i32 0), i64 0
+ %X = getelementptr [10 x i8], [10 x i8]* getelementptr (%T* @G, i64 0, i32 1), i64 0, i64 5
ret void
}
define void @foo([3 x [3 x double]]* noalias %p) {
entry:
- %p3 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
+ %p3 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 0, i64 3
br label %loop
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
- %p.0.i.0 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
+ %p.0.i.0 = getelementptr [3 x [3 x double]], [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
store volatile double 0.0, double* %p3
store volatile double 0.1, double* %p.0.i.0
; CHECK: ret i32 0
entry:
%u = alloca %union.anon, align 8
- %tmp9 = getelementptr inbounds %union.anon* %u, i64 0, i32 0
+ %tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
%tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
- %arrayidx = getelementptr inbounds [2 x i32]* %tmp4, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
%tmp5 = load i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
%tab = alloca i32, align 4
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
- %t.5 = getelementptr i32* %tab, i32 %tmp32
+ %t.5 = getelementptr i32, i32* %tab, i32 %tmp32
%loada = load i32* %tab
store i32 0, i32* %t.5
%loadb = load i32* %tab
define i8 @test0(i1 %x) {
entry:
%base = alloca i8, align 4
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
br i1 %x, label %red, label %green
red:
br label %green
define i8 @test1(i1 %x) {
entry:
%base = alloca i8, align 4
- %baseplusone = getelementptr i8* %base, i64 1
+ %baseplusone = getelementptr i8, i8* %base, i64 1
%sel = select i1 %x, i8* %baseplusone, i8* %base
store i8 0, i8* %sel
; even if they are nocapture
; CHECK: MayAlias: double* %A, double* %Index
define void @testr2(double* nocapture readonly %A, double* nocapture readonly %Index) {
- %arrayidx22 = getelementptr inbounds double* %Index, i64 2
+ %arrayidx22 = getelementptr inbounds double, double* %Index, i64 2
%1 = load double* %arrayidx22
- %arrayidx25 = getelementptr inbounds double* %A, i64 2
+ %arrayidx25 = getelementptr inbounds double, double* %A, i64 2
%2 = load double* %arrayidx25
%mul26 = fmul double %1, %2
ret void
; CHECK-NOT: May:
define void @test(%T* %P) {
- %A = getelementptr %T* %P, i64 0
- %B = getelementptr %T* %P, i64 0, i32 0
- %C = getelementptr %T* %P, i64 0, i32 1
- %D = getelementptr %T* %P, i64 0, i32 1, i64 0
- %E = getelementptr %T* %P, i64 0, i32 1, i64 5
+ %A = getelementptr %T, %T* %P, i64 0
+ %B = getelementptr %T, %T* %P, i64 0, i32 0
+ %C = getelementptr %T, %T* %P, i64 0, i32 1
+ %D = getelementptr %T, %T* %P, i64 0, i32 1, i64 0
+ %E = getelementptr %T, %T* %P, i64 0, i32 1, i64 5
ret void
}
define void @test_geps() {
; Cost of scalar integer geps should be one. We can't always expect it to be
; folded into the instruction addressing mode.
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8*
- %a0 = getelementptr inbounds i8* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16*
- %a1 = getelementptr inbounds i16* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32*
- %a2 = getelementptr inbounds i32* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i8, i8*
+ %a0 = getelementptr inbounds i8, i8* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i16, i16*
+ %a1 = getelementptr inbounds i16, i16* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i32, i32*
+ %a2 = getelementptr inbounds i32, i32* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64*
- %a3 = getelementptr inbounds i64* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds i64, i64*
+ %a3 = getelementptr inbounds i64, i64* undef, i32 0
; Cost of scalar floating point geps should be one. We cannot fold the address
; computation.
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds float*
- %a4 = getelementptr inbounds float* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds double*
- %a5 = getelementptr inbounds double* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds float, float*
+ %a4 = getelementptr inbounds float, float* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds double, double*
+ %a5 = getelementptr inbounds double, double* undef, i32 0
; Cost of vector geps should be one. We cannot fold the address computation.
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i8>*
- %a7 = getelementptr inbounds <4 x i8>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i16>*
- %a8 = getelementptr inbounds <4 x i16>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i32>*
- %a9 = getelementptr inbounds <4 x i32>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i64>*
- %a10 = getelementptr inbounds <4 x i64>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x float>*
- %a11 = getelementptr inbounds <4 x float>* undef, i32 0
-;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x double>*
- %a12 = getelementptr inbounds <4 x double>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i8>, <4 x i8>*
+ %a7 = getelementptr inbounds <4 x i8>, <4 x i8>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i16>, <4 x i16>*
+ %a8 = getelementptr inbounds <4 x i16>, <4 x i16>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i32>, <4 x i32>*
+ %a9 = getelementptr inbounds <4 x i32>, <4 x i32>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x i64>, <4 x i64>*
+ %a10 = getelementptr inbounds <4 x i64>, <4 x i64>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x float>, <4 x float>*
+ %a11 = getelementptr inbounds <4 x float>, <4 x float>* undef, i32 0
+;CHECK: cost of 1 for instruction: {{.*}} getelementptr inbounds <4 x double>, <4 x double>*
+ %a12 = getelementptr inbounds <4 x double>, <4 x double>* undef, i32 0
ret void
define void @test_geps() {
; Cost of these geps should be zero. We expect them to be folded into
; the instruction addressing mode.
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8*
- %a0 = getelementptr inbounds i8* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16*
- %a1 = getelementptr inbounds i16* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32*
- %a2 = getelementptr inbounds i32* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64*
- %a3 = getelementptr inbounds i64* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i8, i8*
+ %a0 = getelementptr inbounds i8, i8* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i16, i16*
+ %a1 = getelementptr inbounds i16, i16* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i32, i32*
+ %a2 = getelementptr inbounds i32, i32* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds i64, i64*
+ %a3 = getelementptr inbounds i64, i64* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float*
- %a4 = getelementptr inbounds float* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds double*
- %a5 = getelementptr inbounds double* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds float, float*
+ %a4 = getelementptr inbounds float, float* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds double, double*
+ %a5 = getelementptr inbounds double, double* undef, i32 0
; Vector geps should also have zero cost.
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i8>*
- %a7 = getelementptr inbounds <4 x i8>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i16>*
- %a8 = getelementptr inbounds <4 x i16>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i32>*
- %a9 = getelementptr inbounds <4 x i32>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i64>*
- %a10 = getelementptr inbounds <4 x i64>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x float>*
- %a11 = getelementptr inbounds <4 x float>* undef, i32 0
-;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x double>*
- %a12 = getelementptr inbounds <4 x double>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i8>, <4 x i8>*
+ %a7 = getelementptr inbounds <4 x i8>, <4 x i8>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i16>, <4 x i16>*
+ %a8 = getelementptr inbounds <4 x i16>, <4 x i16>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i32>, <4 x i32>*
+ %a9 = getelementptr inbounds <4 x i32>, <4 x i32>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x i64>, <4 x i64>*
+ %a10 = getelementptr inbounds <4 x i64>, <4 x i64>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x float>, <4 x float>*
+ %a11 = getelementptr inbounds <4 x float>, <4 x float>* undef, i32 0
+;CHECK: cost of 0 for instruction: {{.*}} getelementptr inbounds <4 x double>, <4 x double>*
+ %a12 = getelementptr inbounds <4 x double>, <4 x double>* undef, i32 0
ret void
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %f, i64 %index
+ %0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
%wide.load = load <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %f, i64 %index
+ %0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
%wide.load = load <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %f, i64 %index
+ %0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
%wide.load = load <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%vec.phi = phi <2 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
- %0 = getelementptr inbounds i32* %A, i64 %index
+ %0 = getelementptr inbounds i32, i32* %A, i64 %index
%1 = bitcast i32* %0 to <2 x i32>*
%2 = load <2 x i32>* %1, align 4
%3 = sext <2 x i32> %2 to <2 x i64>
;CHECK: cost of 1 {{.*}} extract
%4 = extractelement <2 x i64> %3, i32 0
- %5 = getelementptr inbounds i32* %A, i64 %4
+ %5 = getelementptr inbounds i32, i32* %A, i64 %4
;CHECK: cost of 1 {{.*}} extract
%6 = extractelement <2 x i64> %3, i32 1
- %7 = getelementptr inbounds i32* %A, i64 %6
+ %7 = getelementptr inbounds i32, i32* %A, i64 %6
%8 = load i32* %5, align 4
;CHECK: cost of 1 {{.*}} insert
%9 = insertelement <2 x i32> undef, i32 %8, i32 0
vector.body: ; preds = %for.body.lr.ph, %vector.body
%index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body.lr.ph ]
%3 = add i64 %index, 2
- %4 = getelementptr inbounds i32* %B, i64 %3
+ %4 = getelementptr inbounds i32, i32* %B, i64 %3
;CHECK: cost of 0 {{.*}} bitcast
%5 = bitcast i32* %4 to <8 x i32>*
;CHECK: cost of 2 {{.*}} load
%6 = load <8 x i32>* %5, align 4
;CHECK: cost of 4 {{.*}} mul
%7 = mul nsw <8 x i32> %6, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
- %8 = getelementptr inbounds i32* %A, i64 %index
+ %8 = getelementptr inbounds i32, i32* %A, i64 %index
%9 = bitcast i32* %8 to <8 x i32>*
;CHECK: cost of 2 {{.*}} load
%10 = load <8 x i32>* %9, align 4
for.body: ; preds = %middle.block, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %end.idx.rnd.down, %middle.block ]
%13 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %B, i64 %13
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %13
;CHECK: cost of 1 {{.*}} load
%14 = load i32* %arrayidx, align 4
;CHECK: cost of 1 {{.*}} mul
%mul = mul nsw i32 %14, 5
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
;CHECK: cost of 1 {{.*}} load
%15 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %15, %mul
%mul.us.us = mul nsw i64 %k.029.us.us, 5
%arrayidx.sum.us.us = add i64 %mul.us.us, 7
%arrayidx10.sum.us.us = add i64 %arrayidx.sum.us.us, %tmp27.us.us
- %arrayidx11.us.us = getelementptr inbounds i32* %A, i64 %arrayidx10.sum.us.us
+ %arrayidx11.us.us = getelementptr inbounds i32, i32* %A, i64 %arrayidx10.sum.us.us
store i32 1, i32* %arrayidx11.us.us, align 4
%inc.us.us = add nsw i64 %k.029.us.us, 1
%exitcond = icmp eq i64 %inc.us.us, %o
for.body4.i:
%8 = phi i32 [ %inc.7.i, %for.body4.i ], [ %.pr.i, %for.body4.i.preheader ]
%arrayidx.sum1 = add i32 %add.i, %8
- %arrayidx.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum1
+ %arrayidx.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum1
%9 = load i8* %arrayidx.i, align 1
%conv.i = sext i8 %9 to i32
store i32 %conv.i, i32* @c, align 4
%inc.i = add nsw i32 %8, 1
store i32 %inc.i, i32* @b, align 4
%arrayidx.sum2 = add i32 %add.i, %inc.i
- %arrayidx.1.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum2
+ %arrayidx.1.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum2
%10 = load i8* %arrayidx.1.i, align 1
%conv.1.i = sext i8 %10 to i32
store i32 %conv.1.i, i32* @c, align 4
%inc.1.i = add nsw i32 %8, 2
store i32 %inc.1.i, i32* @b, align 4
%arrayidx.sum3 = add i32 %add.i, %inc.1.i
- %arrayidx.2.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum3
+ %arrayidx.2.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum3
%11 = load i8* %arrayidx.2.i, align 1
%conv.2.i = sext i8 %11 to i32
store i32 %conv.2.i, i32* @c, align 4
%inc.2.i = add nsw i32 %8, 3
store i32 %inc.2.i, i32* @b, align 4
%arrayidx.sum4 = add i32 %add.i, %inc.2.i
- %arrayidx.3.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum4
+ %arrayidx.3.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum4
%12 = load i8* %arrayidx.3.i, align 1
%conv.3.i = sext i8 %12 to i32
store i32 %conv.3.i, i32* @c, align 4
%inc.3.i = add nsw i32 %8, 4
store i32 %inc.3.i, i32* @b, align 4
%arrayidx.sum5 = add i32 %add.i, %inc.3.i
- %arrayidx.4.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum5
+ %arrayidx.4.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum5
%13 = load i8* %arrayidx.4.i, align 1
%conv.4.i = sext i8 %13 to i32
store i32 %conv.4.i, i32* @c, align 4
%inc.4.i = add nsw i32 %8, 5
store i32 %inc.4.i, i32* @b, align 4
%arrayidx.sum6 = add i32 %add.i, %inc.4.i
- %arrayidx.5.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum6
+ %arrayidx.5.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum6
%14 = load i8* %arrayidx.5.i, align 1
%conv.5.i = sext i8 %14 to i32
store i32 %conv.5.i, i32* @c, align 4
%inc.5.i = add nsw i32 %8, 6
store i32 %inc.5.i, i32* @b, align 4
%arrayidx.sum7 = add i32 %add.i, %inc.5.i
- %arrayidx.6.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum7
+ %arrayidx.6.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum7
%15 = load i8* %arrayidx.6.i, align 1
%conv.6.i = sext i8 %15 to i32
store i32 %conv.6.i, i32* @c, align 4
%inc.6.i = add nsw i32 %8, 7
store i32 %inc.6.i, i32* @b, align 4
%arrayidx.sum8 = add i32 %add.i, %inc.6.i
- %arrayidx.7.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum8
+ %arrayidx.7.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum8
%16 = load i8* %arrayidx.7.i, align 1
%conv.7.i = sext i8 %16 to i32
store i32 %conv.7.i, i32* @c, align 4
for.body4.ur.i:
%20 = phi i32 [ %inc.ur.i, %for.body4.ur.i ], [ %.ph, %for.body4.ur.i.preheader ]
%arrayidx.sum = add i32 %add.i, %20
- %arrayidx.ur.i = getelementptr inbounds i8* %3, i32 %arrayidx.sum
+ %arrayidx.ur.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum
%21 = load i8* %arrayidx.ur.i, align 1
%conv.ur.i = sext i8 %21 to i32
store i32 %conv.ur.i, i32* @c, align 4
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
entry:
- %p.rows.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 2
+ %p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2
%p.rows = load i32* %p.rows.ptr
%p.rows.sub = add i32 %p.rows, -1
%p.rows.sext = sext i32 %p.rows.sub to i64
- %p.cols.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 3
+ %p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3
%p.cols = load i32* %p.cols.ptr
%p.cols.sub = add i32 %p.cols, -1
%p.cols.sext = sext i32 %p.cols.sub to i64
- %p.deps.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 4
+ %p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4
%p.deps = load i32* %p.deps.ptr
%p.deps.sub = add i32 %p.deps, -1
%p.deps.sext = sext i32 %p.deps.sub to i64
- %a.cols.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 3
+ %a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3
%a.cols = load i32* %a.cols.ptr
- %a.deps.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 4
+ %a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4
%a.deps = load i32* %a.deps.ptr
- %a.base.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 0
+ %a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0
%a.base = load float** %a.base.ptr, align 8
br label %for.i
%tmp2 = add i64 %tmp1, %j
%tmp3 = mul i64 %tmp2, %a.deps.sext
%tmp4 = add nsw i64 %k, %tmp3
- %arrayidx = getelementptr inbounds float* %a.base, i64 %tmp4
+ %arrayidx = getelementptr inbounds float, float* %a.base, i64 %tmp4
store float 1.000000e+00, float* %arrayidx
%k.inc = add nsw i64 %k, 1
%k.exitcond = icmp eq i64 %k.inc, %p.deps.sext
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
entry:
- %p.rows.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 2
+ %p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2
%p.rows = load i32* %p.rows.ptr
%p.rows.sub = add i32 %p.rows, -1
%p.rows.sext = sext i32 %p.rows.sub to i64
- %p.cols.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 3
+ %p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3
%p.cols = load i32* %p.cols.ptr
%p.cols.sub = add i32 %p.cols, -1
%p.cols.sext = sext i32 %p.cols.sub to i64
- %p.deps.ptr = getelementptr inbounds %struct.Mat* %p, i64 0, i32 4
+ %p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4
%p.deps = load i32* %p.deps.ptr
%p.deps.sub = add i32 %p.deps, -1
%p.deps.sext = sext i32 %p.deps.sub to i64
- %a.cols.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 3
+ %a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3
%a.cols = load i32* %a.cols.ptr
%a.cols.sext = sext i32 %a.cols to i64
- %a.deps.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 4
+ %a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4
%a.deps = load i32* %a.deps.ptr
%a.deps.sext = sext i32 %a.deps to i64
- %a.base.ptr = getelementptr inbounds %struct.Mat* %a, i64 0, i32 0
+ %a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0
%a.base = load float** %a.base.ptr, align 8
br label %for.i
%tmp2 = add i64 %tmp1, %j
%tmp3 = mul i64 %tmp2, %a.deps.sext
%tmp4 = add nsw i64 %k, %tmp3
- %arrayidx = getelementptr inbounds float* %a.base, i64 %tmp4
+ %arrayidx = getelementptr inbounds float, float* %a.base, i64 %tmp4
store float 1.000000e+00, float* %arrayidx
%k.inc = add nsw i64 %k, 1
%k.exitcond = icmp eq i64 %k.inc, %p.deps.sext
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%prodj = mul i64 %j, 2
%vlaarrayidx.sum = add i64 %prodj, %tmp
- %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
store double 1.0, double* %arrayidx
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
%subscript2 = mul i64 %subscript1, %o
%offset2 = add nsw i64 %k, 7
%subscript = add i64 %subscript2, %offset2
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
%k.019.us.us = phi i64 [ 0, %for.body6.lr.ph.us.us ], [ %inc.us.us, %for.body6.us.us ]
%arrayidx.sum.us.us = add i64 %k.019.us.us, 7
%arrayidx9.sum.us.us = add i64 %arrayidx.sum.us.us, %tmp17.us.us
- %arrayidx10.us.us = getelementptr inbounds double* %A, i64 %arrayidx9.sum.us.us
+ %arrayidx10.us.us = getelementptr inbounds double, double* %A, i64 %arrayidx9.sum.us.us
store double 1.000000e+00, double* %arrayidx10.us.us, align 8
%inc.us.us = add nsw i64 %k.019.us.us, 1
%exitcond = icmp eq i64 %inc.us.us, %o
%subscript2 = mul i64 %subscript1, %o
%offset2 = add nsw i64 %k, %r
%subscript = add i64 %subscript2, %offset2
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%vlaarrayidx.sum = add i64 %j, %tmp
- %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
%val = load double* %arrayidx
store double %val, double* %arrayidx
%j.inc = add nsw i64 %j, 1
for.body9.us.us: ; preds = %for.body9.us.us, %for.body9.lr.ph.us.us
%j.021.us.us = phi i64 [ 0, %for.body9.lr.ph.us.us ], [ %inc.us.us, %for.body9.us.us ]
%arrayidx.sum.us.us = add i64 %j.021.us.us, %0
- %arrayidx10.us.us = getelementptr inbounds double* %vla.us, i64 %arrayidx.sum.us.us
+ %arrayidx10.us.us = getelementptr inbounds double, double* %vla.us, i64 %arrayidx.sum.us.us
store double 1.000000e+00, double* %arrayidx10.us.us, align 8
%inc.us.us = add nsw i64 %j.021.us.us, 1
%exitcond50 = icmp eq i64 %inc.us.us, %indvars.iv48
%subscript1 = add i64 %j, %subscript0
%subscript2 = mul i64 %subscript1, %o
%subscript = add i64 %subscript2, %k
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
%tmp.us.us = add i64 %j, %tmp
%tmp17.us.us = mul i64 %tmp.us.us, %n_zext
%subscript = add i64 %tmp17.us.us, %k
- %idx = getelementptr inbounds double* %A, i64 %subscript
+ %idx = getelementptr inbounds double, double* %A, i64 %subscript
store double 1.0, double* %idx
br label %for.k.inc
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%tmp = mul nsw i64 %i, %m
%vlaarrayidx.sum = add i64 %j, %tmp
- %arrayidx = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
store double 1.0, double* %arrayidx
%tmp1 = mul nsw i64 %j, %n
%vlaarrayidx.sum1 = add i64 %i, %tmp1
- %arrayidx1 = getelementptr inbounds double* %A, i64 %vlaarrayidx.sum1
+ %arrayidx1 = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum1
store double 1.0, double* %arrayidx1
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
%tmp5 = add i64 %iy.067, %0
%tmp6 = mul i64 %tmp5, undef
%arrayidx69.sum = add i64 undef, %tmp6
- %arrayidx70 = getelementptr inbounds double* %Ey, i64 %arrayidx69.sum
+ %arrayidx70 = getelementptr inbounds double, double* %Ey, i64 %arrayidx69.sum
%1 = load double* %arrayidx70, align 8
%inc = add nsw i64 %ix.062, 1
br i1 false, label %for.body60, label %for.end
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -1
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
%0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 11
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 11
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%B.addr.12 = phi i64* [ %incdec.ptr, %for.body3 ], [ %B.addr.06, %for.body3.preheader ]
%mul = mul nsw i64 %i.05, 10
%add = add nsw i64 %mul, %j.03
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.05, 10
%add5 = add nsw i64 %mul4, %j.03
%sub = add nsw i64 %add5, -1
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
%2 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.12, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.12, i64 1
store i64 %2, i64* %B.addr.12, align 8
%inc = add nsw i64 %j.03, 1
%exitcond = icmp eq i64 %inc, %1
br i1 %exitcond, label %for.inc7.loopexit, label %for.body3
for.inc7.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.06, i64 %m
+ %scevgep = getelementptr i64, i64* %B.addr.06, i64 %m
br label %for.inc7
for.inc7: ; preds = %for.inc7.loopexit, %for.cond1.preheader
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 100
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
%0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 99
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
%0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -100
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
%0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -99
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
%0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 9
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
%0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 10
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
%0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 10
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 10
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 11
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
%0 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 10
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 10
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 10
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%mul = mul nsw i64 %i.03, 30
%mul4 = mul nsw i64 %j.02, 500
%add = add nsw i64 %mul, %mul4
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%0 = mul i64 %j.02, -500
%sub = add i64 %i.03, %0
%add6 = add nsw i64 %sub, 11
- %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
%1 = load i64* %arrayidx7, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %1, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %j.02, 500
%add = add nsw i64 %i.03, %mul
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%0 = mul i64 %j.02, -500
%sub = add i64 %i.03, %0
%add5 = add nsw i64 %sub, 11
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
%1 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %1, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 300
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 250
%sub = sub nsw i64 %mul4, %j.02
%add5 = add nsw i64 %sub, 11
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
%0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%mul = mul nsw i64 %i.03, 100
%add = add nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i64* %A, i64 %add
+ %arrayidx = getelementptr inbounds i64, i64* %A, i64 %add
store i64 0, i64* %arrayidx, align 8
%mul4 = mul nsw i64 %i.03, 100
%sub = sub nsw i64 %mul4, %j.02
%add5 = add nsw i64 %sub, 11
- %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5
+ %arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
%0 = load i64* %arrayidx6, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 20
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i64* %B.addr.04, i64 20
+ %scevgep = getelementptr i64, i64* %B.addr.04, i64 20
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 20
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
store i32 %conv, i32* %arrayidx1, align 4
%add = add nsw i64 %i.02, 9
%add2 = add nsw i64 %i.02, 10
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %add2, i64 %add
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
store i32 %conv, i32* %arrayidx1, align 4
%add = add nsw i64 %i.02, 9
%add2 = add nsw i64 %i.02, 9
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %add2, i64 %add
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%sub = add nsw i64 %mul, -6
%mul1 = mul nsw i64 %i.02, 3
%sub2 = add nsw i64 %mul1, -6
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %sub2, i64 %sub
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%sub = add nsw i64 %mul, -5
%mul1 = mul nsw i64 %i.02, 3
%sub2 = add nsw i64 %mul1, -6
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %sub2, i64 %sub
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%sub = sub nsw i64 %mul, %conv1
%mul2 = mul nsw i64 %i.02, 3
%sub3 = add nsw i64 %mul2, -6
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %sub3, i64 %sub
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub3, i64 %sub
store i32 %conv, i32* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%conv3 = sext i32 %n to i64
%sub4 = sub nsw i64 %mul2, %conv3
%add = add nsw i64 %sub4, 1
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %add, i64 %sub
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add, i64 %sub
store i32 %conv, i32* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%conv = trunc i64 %i.02 to i32
%mul = mul nsw i64 %i.02, 3
%sub = add nsw i64 %mul, -6
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %sub
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub
store i32 %conv, i32* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%conv = trunc i64 %i.02 to i32
%mul = mul nsw i64 %i.02, 3
%sub = add nsw i64 %mul, -5
- %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %sub
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub
store i32 %conv, i32* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 50
%sub = sub nsw i64 3, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
%sub = sub nsw i64 2, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
%sub = sub nsw i64 6, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
%sub = sub nsw i64 18, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 16
%sub = sub nsw i64 22, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 13
%sub = sub nsw i64 22, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 12
%sub = sub nsw i64 18, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 100
%sub = sub nsw i64 22, %i.02
%mul = mul nsw i64 %i.02, 3
%sub1 = add nsw i64 %mul, -18
- %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
store i32 %conv, i32* %arrayidx3, align 4
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
%0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 100
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 2
%add = add nsw i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc, 10
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul5 = shl nsw i64 %j.02, 1
%add64 = or i64 %mul5, 1
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add64
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add64
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc9 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc9, 10
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = add nsw i64 %mul, -45
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
for.body4: ; preds = %for.body4.preheader, %for.body4
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 10
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 5
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, -11
%add = add nsw i64 %mul, 45
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.03, 1
%exitcond4 = icmp ne i64 %inc, 6
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc7, 11
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 10
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 10
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 5
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 10
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 10
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 10
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 6
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 11
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 11
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 11
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 5
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
%conv = trunc i64 %i.03 to i32
%mul = mul nsw i64 %i.03, 11
%sub = sub nsw i64 %mul, %j.02
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 45
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 11
br i1 %exitcond, label %for.body3, label %for.inc5
for.inc5: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 11
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 11
%inc6 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc6, 6
br i1 %exitcond5, label %for.cond1.preheader, label %for.end7
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%add13 = or i64 %mul, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add13
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add13
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 2
%add = add i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
%add23 = or i64 %mul1, 1
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add23
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add23
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 11
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 12
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 13
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 18
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 19
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 11
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 12
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 13
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 18
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, -6
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 19
%mul = shl nsw i64 %i.03, 1
%mul4 = shl nsw i64 %j.02, 2
%sub = sub nsw i64 %mul, %mul4
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%mul5 = mul nsw i64 %i.03, 6
%mul6 = shl nsw i64 %j.02, 3
%add = add nsw i64 %mul5, %mul6
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%mul = shl nsw i64 %i.03, 1
%mul4 = shl nsw i64 %j.02, 2
%sub = sub nsw i64 %mul, %mul4
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%mul5 = mul nsw i64 %i.03, 6
%mul6 = shl nsw i64 %j.02, 3
%add = add nsw i64 %mul5, %mul6
%add7 = or i64 %add, 1
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
%0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc10, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end11
%mul4 = shl nsw i64 %j.02, 2
%sub = sub nsw i64 %mul, %mul4
%add5 = or i64 %sub, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add5
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add5
store i32 %conv, i32* %arrayidx, align 4
%mul5 = mul nsw i64 %i.03, 6
%mul6 = shl nsw i64 %j.02, 3
%add7 = add nsw i64 %mul5, %mul6
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
%0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond6 = icmp ne i64 %inc10, 100
br i1 %exitcond6, label %for.cond1.preheader, label %for.end11
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %j.02, 1
%add = add nsw i64 %i.03, %mul
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul4 = shl nsw i64 %j.02, 1
%add5 = add nsw i64 %i.03, %mul4
%sub = add nsw i64 %add5, -1
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc7
for.inc7: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc8 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc8, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end9
%mul6 = mul nsw i64 %M, 9
%mul7 = mul nsw i64 %mul6, %N
%add8 = add nsw i64 %add, %mul7
- %arrayidx = getelementptr inbounds i32* %A, i64 %add8
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add8
store i32 %conv, i32* %arrayidx, align 4
%mul9 = mul nsw i64 %i.03, 15
%mul10 = mul nsw i64 %j.02, 20
%mul14 = mul nsw i64 %mul13, %M
%sub = sub nsw i64 %add12, %mul14
%add15 = add nsw i64 %sub, 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 %add15
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15
%0 = load i32* %arrayidx16, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc17
for.inc17: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc18 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc18, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end19
%mul6 = mul nsw i64 %M, 9
%mul7 = mul nsw i64 %mul6, %N
%add8 = add nsw i64 %add, %mul7
- %arrayidx = getelementptr inbounds i32* %A, i64 %add8
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add8
store i32 %conv, i32* %arrayidx, align 4
%mul9 = mul nsw i64 %i.03, 15
%mul10 = mul nsw i64 %j.02, 20
%mul14 = mul nsw i64 %mul13, %M
%sub = sub nsw i64 %add12, %mul14
%add15 = add nsw i64 %sub, 5
- %arrayidx16 = getelementptr inbounds i32* %A, i64 %add15
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15
%0 = load i32* %arrayidx16, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc17
for.inc17: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc18 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc18, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end19
%mul4 = shl nsw i64 %i.06, 1
%0 = mul nsw i64 %mul4, %n
%arrayidx.sum = add i64 %0, %mul
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %arrayidx.sum
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %arrayidx.sum
store i32 %conv, i32* %arrayidx5, align 4
%mul6 = mul nsw i64 %j.03, 6
%add7 = or i64 %mul6, 1
%mul7 = shl nsw i64 %i.06, 3
%1 = mul nsw i64 %mul7, %n
%arrayidx8.sum = add i64 %1, %add7
- %arrayidx9 = getelementptr inbounds i32* %A, i64 %arrayidx8.sum
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %arrayidx8.sum
%2 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %2, i32* %B.addr.12, align 4
%inc = add nsw i64 %j.03, 1
%exitcond = icmp ne i64 %inc, %n
br i1 %exitcond, label %for.body3, label %for.inc10.loopexit
for.inc10.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %n
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %n
br label %for.inc10
for.inc10: ; preds = %for.inc10.loopexit, %for.cond1.preheader
%idxprom5 = sext i32 %mul4 to i64
%6 = mul nsw i64 %idxprom5, %0
%arrayidx.sum = add i64 %6, %idxprom
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %arrayidx.sum
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %arrayidx.sum
%7 = trunc i64 %indvars.iv8 to i32
store i32 %7, i32* %arrayidx6, align 4
%8 = trunc i64 %indvars.iv to i32
%idxprom10 = sext i32 %mul9 to i64
%10 = mul nsw i64 %idxprom10, %0
%arrayidx11.sum = add i64 %10, %idxprom8
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %arrayidx11.sum
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum
%11 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %11, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
br i1 %exitcond, label %for.body3, label %for.inc13.loopexit
for.inc13.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %3
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %3
br label %for.inc13
for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader
%mul5 = shl nsw i32 %3, 2
%add = add nsw i32 %mul4, %mul5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 %i.06, i32* %arrayidx, align 4
%mul6 = shl nsw i32 %n, 3
%mul7 = mul nsw i32 %mul6, %i.06
%add9 = add nsw i32 %mul7, %mul8
%add10 = or i32 %add9, 1
%idxprom11 = sext i32 %add10 to i64
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %idxprom11
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %idxprom11
%5 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %5, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
br i1 %exitcond, label %for.body3, label %for.inc13.loopexit
for.inc13.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %2
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %2
br label %for.inc13
for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader
%idxprom5 = zext i32 %mul4 to i64
%6 = mul nsw i64 %idxprom5, %0
%arrayidx.sum = add i64 %6, %idxprom
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %arrayidx.sum
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %arrayidx.sum
%7 = trunc i64 %indvars.iv8 to i32
store i32 %7, i32* %arrayidx6, align 4
%8 = trunc i64 %indvars.iv to i32
%idxprom10 = zext i32 %mul9 to i64
%10 = mul nsw i64 %idxprom10, %0
%arrayidx11.sum = add i64 %10, %idxprom8
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %arrayidx11.sum
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum
%11 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %11, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
br i1 %exitcond, label %for.body3, label %for.inc13.loopexit
for.inc13.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.05, i64 %3
+ %scevgep = getelementptr i32, i32* %B.addr.05, i64 %3
br label %for.inc13
for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader
for.body3:
%j.02 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
%res.11 = phi float [ %res.03, %for.cond1.preheader ], [ %add.res.1, %for.body3 ]
- %arrayidx4 = getelementptr inbounds [40 x float]* %rr, i32 %j.02, i32 %j.02
+ %arrayidx4 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %j.02, i32 %j.02
%0 = load float* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds [40 x float]* %rr, i32 %i.04, i32 %j.02
+ %arrayidx6 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %i.04, i32 %j.02
%1 = load float* %arrayidx6, align 4
%add = fadd float %0, %1
%cmp7 = fcmp ogt float %add, %g
; DELIN: da analyze - anti [=|<]!
; DELIN: da analyze - none!
%i = phi i64 [ 0, %entry ], [ %i.inc, %for.body ]
- %a.addr = getelementptr [100 x [100 x i32]]* %a, i64 0, i64 %i, i64 %i
- %a.addr.2 = getelementptr [100 x [100 x i32]]* %a, i64 0, i64 %i, i32 5
+ %a.addr = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i64 %i
+ %a.addr.2 = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i32 5
%0 = load i32* %a.addr, align 4
%1 = add i32 %0, 1
store i32 %1, i32* %a.addr.2, align 4
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
- %arrayidx1 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1
%0 = load i32* %arrayidx1, align 4
ret i32 %0
}
; CHECK: da analyze - none!
; CHECK: da analyze - none!
- %arrayidx1 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1
%0 = load i32* %arrayidx1, align 4
ret i32 %0
}
for.body6: ; preds = %for.body6.preheader, %for.body6
%k.02 = phi i64 [ %inc, %for.body6 ], [ 0, %for.body6.preheader ]
- %arrayidx8 = getelementptr inbounds [100 x [100 x i64]]* %A, i64 %i.011, i64 %j.07, i64 %k.02
+ %arrayidx8 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %A, i64 %i.011, i64 %j.07, i64 %k.02
store i64 %i.011, i64* %arrayidx8, align 8
%inc = add nsw i64 %k.02, 1
%exitcond13 = icmp ne i64 %inc, %n
%add = add nsw i64 %k9.05, 1
%add13 = add nsw i64 %j.07, 2
%add14 = add nsw i64 %i.011, 3
- %arrayidx17 = getelementptr inbounds [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add
+ %arrayidx17 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add
%0 = load i64* %arrayidx17, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.24, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.24, i64 1
store i64 %0, i64* %B.addr.24, align 8
%inc19 = add nsw i64 %k9.05, 1
%exitcond = icmp ne i64 %inc19, %n
br i1 %exitcond, label %for.body12, label %for.inc21.loopexit
for.inc21.loopexit: ; preds = %for.body12
- %scevgep = getelementptr i64* %B.addr.18, i64 %n
+ %scevgep = getelementptr i64, i64* %B.addr.18, i64 %n
br label %for.inc21
for.inc21: ; preds = %for.inc21.loopexit, %for.cond10.loopexit
%add3547 = or i64 %mul, 1
%sub = add nsw i64 %k.037, -1
%sub36 = add nsw i64 %i.045, -3
- %arrayidx43 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %sub36, i64 %j.041, i64 2, i64 %sub, i64 %add3547, i64 %m.029, i64 %add34, i64 %add
+ %arrayidx43 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]], [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %sub36, i64 %j.041, i64 2, i64 %sub, i64 %add3547, i64 %m.029, i64 %add34, i64 %add
store i64 %i.045, i64* %arrayidx43, align 8
%add44 = add nsw i64 %t.03, 2
%add45 = add nsw i64 %n, 1
%sub47 = add nsw i64 %mul46, -1
%sub48 = sub nsw i64 1, %k.037
%add49 = add nsw i64 %i.045, 3
- %arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44
+ %arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]], [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44
%0 = load i64* %arrayidx57, align 8
- %incdec.ptr = getelementptr inbounds i64* %B.addr.112, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %B.addr.112, i64 1
store i64 %0, i64* %B.addr.112, align 8
%inc = add nsw i64 %t.03, 1
%exitcond = icmp ne i64 %inc, %n
br i1 %exitcond, label %for.body33, label %for.inc58.loopexit
for.inc58.loopexit: ; preds = %for.body33
- %scevgep = getelementptr i64* %B.addr.105, i64 %n
+ %scevgep = getelementptr i64, i64* %B.addr.105, i64 %n
br label %for.inc58
for.inc58: ; preds = %for.inc58.loopexit, %for.cond31.preheader
%conv2 = sext i8 %i.03 to i32
%conv3 = sext i8 %i.03 to i64
%add = add i64 %conv3, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
%idxprom4 = sext i8 %i.03 to i64
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i8 %i.03, 1
%conv = sext i8 %inc to i64
%conv2 = sext i16 %i.03 to i32
%conv3 = sext i16 %i.03 to i64
%add = add i64 %conv3, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
%idxprom4 = sext i16 %i.03 to i64
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i16 %i.03, 1
%conv = sext i16 %inc to i64
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%0 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %n
define void @p7(i32* %A, i32* %B, i8 signext %n) nounwind uwtable ssp {
entry:
%idxprom = sext i8 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
; CHECK: da analyze - none!
; CHECK: da analyze - none!
store i32 0, i32* %arrayidx, align 4
%conv = sext i8 %n to i64
%add = add i64 %conv, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
define void @p8(i32* %A, i32* %B, i16 signext %n) nounwind uwtable ssp {
entry:
%idxprom = sext i16 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
%conv = sext i16 %n to i64
%add = add i64 %conv, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
define void @p9(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp {
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
%add = add nsw i32 %n, 1
%idxprom1 = sext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
%0 = load i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
define void @p10(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp {
entry:
%idxprom = zext i32 %n to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
%add = add i32 %n, 1
%idxprom1 = zext i32 %add to i64
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
%0 = load i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
entry:
%idx.ext = zext i32 %size to i64
%add.ptr.sum = add i64 %idx.ext, -1
- %add.ptr1 = getelementptr inbounds %struct.S* %s, i64 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds %struct.S, %struct.S* %s, i64 %add.ptr.sum
%cmp1 = icmp eq i64 %add.ptr.sum, 0
br i1 %cmp1, label %while.end, label %while.body.preheader
while.body: ; preds = %while.body.preheader, %while.body
%i.02 = phi %struct.S* [ %incdec.ptr, %while.body ], [ %s, %while.body.preheader ]
- %0 = getelementptr inbounds %struct.S* %i.02, i64 1, i32 0
+ %0 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1, i32 0
%1 = load i32* %0, align 4
- %2 = getelementptr inbounds %struct.S* %i.02, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 0, i32 0
store i32 %1, i32* %2, align 4
- %incdec.ptr = getelementptr inbounds %struct.S* %i.02, i64 1
+ %incdec.ptr = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1
%cmp = icmp eq %struct.S* %incdec.ptr, %add.ptr1
br i1 %cmp, label %while.end.loopexit, label %while.body
%conv = trunc i64 %i.03 to i32
%add = add nsw i64 %i.03, %j.02
%add4 = add nsw i64 %i.03, 1
- %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %add4, i64 %add
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add4, i64 %add
store i32 %conv, i32* %arrayidx5, align 4
%add6 = add nsw i64 %i.03, %j.02
- %arrayidx8 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add6
+ %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6
%0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc10, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end11
%add = add nsw i64 %j.03, %k.02
%add7 = add nsw i64 %i.05, 1
%sub = sub nsw i64 %j.03, %i.05
- %arrayidx9 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub, i64 %add7, i64 %add
+ %arrayidx9 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub, i64 %add7, i64 %add
store i32 %conv, i32* %arrayidx9, align 4
%add10 = add nsw i64 %j.03, %k.02
%sub11 = sub nsw i64 %j.03, %i.05
- %arrayidx14 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10
+ %arrayidx14 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10
%0 = load i32* %arrayidx14, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.21, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.21, i64 1
store i32 %0, i32* %B.addr.21, align 4
%inc = add nsw i64 %k.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body6, label %for.inc15
for.inc15: ; preds = %for.body6
- %scevgep = getelementptr i32* %B.addr.14, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.14, i64 100
%inc16 = add nsw i64 %j.03, 1
%exitcond8 = icmp ne i64 %inc16, 100
br i1 %exitcond8, label %for.cond4.preheader, label %for.inc18
for.inc18: ; preds = %for.inc15
- %scevgep7 = getelementptr i32* %B.addr.06, i64 10000
+ %scevgep7 = getelementptr i32, i32* %B.addr.06, i64 10000
%inc19 = add nsw i64 %i.05, 1
%exitcond9 = icmp ne i64 %inc19, 100
br i1 %exitcond9, label %for.cond1.preheader, label %for.end20
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 1
%sub = add nsw i64 %i.03, -1
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %sub, i64 %mul
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub, i64 %mul
store i32 %conv, i32* %arrayidx4, align 4
%add = add nsw i64 %i.03, %j.02
%add5 = add nsw i64 %add, 110
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add5
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add5
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc8
for.inc8: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc9 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc9, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end10
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %j.02, 1
%add = add nsw i64 %mul, %i.03
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add
store i32 %conv, i32* %arrayidx4, align 4
%mul5 = shl nsw i64 %j.02, 1
%sub = sub nsw i64 %mul5, %i.03
%add6 = add nsw i64 %sub, 5
- %arrayidx8 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add6
+ %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6
%0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc9
for.inc9: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc10 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc10, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end11
%add = add nsw i64 %mul, %j.02
%add4 = add nsw i64 %add, 1
%add5 = add nsw i64 %i.03, 2
- %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %add5, i64 %add4
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add5, i64 %add4
store i32 %conv, i32* %arrayidx6, align 4
%mul7 = shl nsw i64 %i.03, 1
%add8 = add nsw i64 %mul7, %j.02
- %arrayidx10 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add8
+ %arrayidx10 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add8
%0 = load i32* %arrayidx10, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc11
for.inc11: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc12 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc12, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end13
%sub = sub nsw i64 22, %i.03
%mul4 = mul nsw i64 %i.03, 3
%sub5 = add nsw i64 %mul4, -18
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub5, i64 %sub, i64 %add
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub5, i64 %sub, i64 %add
store i32 %conv, i32* %arrayidx7, align 4
%mul8 = mul nsw i64 %i.03, 3
%add9 = add nsw i64 %mul8, %j.02
- %arrayidx12 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9
+ %arrayidx12 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9
%0 = load i32* %arrayidx12, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc13
for.inc13: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc14 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc14, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end15
%add = add nsw i64 %mul, %j.02
%add4 = add nsw i64 %add, 2
%add5 = add nsw i64 %i.03, 1
- %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %add5, i64 %add4
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add5, i64 %add4
store i32 %conv, i32* %arrayidx6, align 4
%mul7 = shl nsw i64 %i.03, 3
%add8 = add nsw i64 %mul7, %j.02
%mul9 = shl nsw i64 %i.03, 1
- %arrayidx11 = getelementptr inbounds [100 x i32]* %A, i64 %mul9, i64 %add8
+ %arrayidx11 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %mul9, i64 %add8
%0 = load i32* %arrayidx11, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc12
for.inc12: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc13 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc13, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end14
%add4 = add nsw i64 %add, 2
%mul5 = shl nsw i64 %i.03, 1
%add6 = add nsw i64 %mul5, 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %add6, i64 %add4
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add6, i64 %add4
store i32 %conv, i32* %arrayidx7, align 4
%mul8 = mul nsw i64 %i.03, 5
%add9 = add nsw i64 %mul8, %j.02
%mul10 = mul nsw i64 %i.03, -2
%add11 = add nsw i64 %mul10, 20
- %arrayidx13 = getelementptr inbounds [100 x i32]* %A, i64 %add11, i64 %add9
+ %arrayidx13 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add11, i64 %add9
%0 = load i32* %arrayidx13, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc14
for.inc14: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc15 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc15, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end16
%B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ]
%conv = trunc i64 %i.03 to i32
%add = add nsw i64 %j.02, 2
- %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 4, i64 %add
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 4, i64 %add
store i32 %conv, i32* %arrayidx4, align 4
%mul = mul nsw i64 %i.03, 5
%add5 = add nsw i64 %mul, %j.02
%mul6 = mul nsw i64 %i.03, -2
%add7 = add nsw i64 %mul6, 4
- %arrayidx9 = getelementptr inbounds [100 x i32]* %A, i64 %add7, i64 %add5
+ %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add7, i64 %add5
%0 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc10
for.inc10: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc11 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc11, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end12
%add4 = add nsw i64 %add, 2
%mul5 = shl nsw i64 %i.03, 1
%add6 = add nsw i64 %mul5, 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %add6, i64 %add4
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add6, i64 %add4
store i32 %conv, i32* %arrayidx7, align 4
- %arrayidx9 = getelementptr inbounds [100 x i32]* %A, i64 4, i64 %j.02
+ %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 4, i64 %j.02
%0 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
%exitcond = icmp ne i64 %inc, 100
br i1 %exitcond, label %for.body3, label %for.inc10
for.inc10: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.04, i64 100
+ %scevgep = getelementptr i32, i32* %B.addr.04, i64 100
%inc11 = add nsw i64 %i.03, 1
%exitcond5 = icmp ne i64 %inc11, 100
br i1 %exitcond5, label %for.cond1.preheader, label %for.end12
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %j.05, %k.03
%idxprom = sext i32 %n to i64
- %arrayidx11 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %idxprom, i64 %i.07, i64 %add
+ %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %idxprom, i64 %i.07, i64 %add
store i32 %conv, i32* %arrayidx11, align 4
%mul = shl nsw i64 %j.05, 1
%sub = sub nsw i64 %mul, %l.02
%add12 = add nsw i64 %i.07, 10
- %arrayidx15 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
+ %arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
%0 = load i32* %arrayidx15, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc16
for.inc16: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc17 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc17, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc19
for.inc19: ; preds = %for.inc16
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc20 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc20, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc22
for.inc22: ; preds = %for.inc19
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc23 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc23, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end24
%B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ]
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %j.05, %k.03
- %arrayidx11 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.07, i64 %i.07, i64 %add
+ %arrayidx11 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.07, i64 %i.07, i64 %add
store i32 %conv, i32* %arrayidx11, align 4
%mul = shl nsw i64 %j.05, 1
%sub = sub nsw i64 %mul, %l.02
%add12 = add nsw i64 %i.07, 10
- %arrayidx15 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
+ %arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
%0 = load i32* %arrayidx15, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc16
for.inc16: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc17 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc17, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc19
for.inc19: ; preds = %for.inc16
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc20 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc20, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc22
for.inc22: ; preds = %for.inc19
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc23 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc23, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end24
%B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ]
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %i.07, %k.03
- %arrayidx12 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add, i64 %l.02
+ %arrayidx12 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add, i64 %l.02
store i32 %conv, i32* %arrayidx12, align 4
%add13 = add nsw i64 %l.02, 10
%add14 = add nsw i64 %j.05, %k.03
%add15 = add nsw i64 %i.07, 10
- %arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13
+ %arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13
%0 = load i32* %arrayidx19, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc20
for.inc20: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc21 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc21, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc23
for.inc23: ; preds = %for.inc20
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc24 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc24, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc26
for.inc26: ; preds = %for.inc23
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc27 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc27, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end28
%conv = trunc i64 %i.07 to i32
%add = add nsw i64 %l.02, %k.03
%add10 = add nsw i64 %i.07, %k.03
- %arrayidx13 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add10, i64 %add
+ %arrayidx13 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add10, i64 %add
store i32 %conv, i32* %arrayidx13, align 4
%add14 = add nsw i64 %l.02, 10
%add15 = add nsw i64 %j.05, %k.03
%add16 = add nsw i64 %i.07, 10
- %arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14
+ %arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14
%0 = load i32* %arrayidx20, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
%exitcond = icmp ne i64 %inc, 50
br i1 %exitcond, label %for.body9, label %for.inc21
for.inc21: ; preds = %for.body9
- %scevgep = getelementptr i32* %B.addr.24, i64 50
+ %scevgep = getelementptr i32, i32* %B.addr.24, i64 50
%inc22 = add nsw i64 %k.03, 1
%exitcond10 = icmp ne i64 %inc22, 50
br i1 %exitcond10, label %for.cond7.preheader, label %for.inc24
for.inc24: ; preds = %for.inc21
- %scevgep9 = getelementptr i32* %B.addr.16, i64 2500
+ %scevgep9 = getelementptr i32, i32* %B.addr.16, i64 2500
%inc25 = add nsw i64 %j.05, 1
%exitcond12 = icmp ne i64 %inc25, 50
br i1 %exitcond12, label %for.cond4.preheader, label %for.inc27
for.inc27: ; preds = %for.inc24
- %scevgep11 = getelementptr i32* %B.addr.08, i64 125000
+ %scevgep11 = getelementptr i32, i32* %B.addr.08, i64 125000
%inc28 = add nsw i64 %i.07, 1
%exitcond13 = icmp ne i64 %inc28, 50
br i1 %exitcond13, label %for.cond1.preheader, label %for.end29
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%0 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%exitcond = icmp ne i64 %indvars.iv.next, %n
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv2 = trunc i64 %i.03 to i32
%add = add nsw i64 %i.03, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %i.03
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %i.03
%1 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %0
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.03
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.03
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%0 = add nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%2 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, 19
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 19
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, 19
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 6
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %mul1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 7
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %mul1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%add = add i64 %i.02, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 20
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %n, 1
%add1 = add i64 %i.03, %mul
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%conv = trunc i64 %i.02 to i32
%mul = mul i64 %i.02, %n
%add = add i64 %mul, 5
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = mul i64 %i.02, %n
%add2 = add i64 %mul1, 5
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add2
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 1000
%conv = trunc i64 %i.05 to i32
%mul = shl nsw i64 %i.05, 1
%add = add i64 %mul, %n1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul56 = add i64 %j.03, %n1
%add7 = mul i64 %mul56, 3
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
%0 = load i32* %arrayidx8, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc10 = add nsw i64 %j.03, 1
%exitcond7 = icmp ne i64 %inc10, %n2
%mul = shl nsw i64 %i.05, 1
%mul1 = mul i64 %n2, 5
%add = add i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
%mul6 = mul nsw i64 %j.03, 3
%mul7 = shl i64 %n2, 1
%add8 = add i64 %mul6, %mul7
- %arrayidx9 = getelementptr inbounds i32* %A, i64 %add8
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %add8
%0 = load i32* %arrayidx9, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc11 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc11, %n2
%conv = trunc i64 %i.05 to i32
%mul = shl nsw i64 %i.05, 1
%sub = sub i64 %mul, %n2
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul6 = shl i64 %n1, 1
%add = sub i64 %mul6, %j.03
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc9, %n2
%i.05 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%conv = trunc i64 %i.05 to i32
%add = sub i64 %n2, %i.05
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
%j.03 = phi i64 [ %inc8, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub5 = sub i64 %j.03, %n1
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %sub5
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub5
%0 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc8 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc8, %n2
%conv = trunc i64 %i.05 to i32
%mul = shl i64 %n1, 1
%add = sub i64 %mul, %i.05
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
%j.03 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%add6 = sub i64 %n1, %j.03
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc9, %n2
%i.05 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%conv = trunc i64 %i.05 to i32
%add = sub i64 %n2, %i.05
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%inc = add nsw i64 %i.05, 1
%exitcond = icmp ne i64 %inc, %n1
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%mul = shl i64 %n2, 1
%add6 = sub i64 %mul, %j.03
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
%exitcond6 = icmp ne i64 %inc9, %n2
%conv = trunc i64 %i.05 to i32
%sub = sub nsw i64 %j.03, %i.05
%add = add i64 %sub, %n2
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %n2, 1
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %mul
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %0, i32* %B.addr.12, align 4
%inc = add nsw i64 %j.03, 1
%exitcond = icmp ne i64 %inc, %n2
br i1 %exitcond, label %for.body3, label %for.inc5.loopexit
for.inc5.loopexit: ; preds = %for.body3
- %scevgep = getelementptr i32* %B.addr.06, i64 %n2
+ %scevgep = getelementptr i32, i32* %B.addr.06, i64 %n2
br label %for.inc5
for.inc5: ; preds = %for.inc5.loopexit, %for.cond1.preheader
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 1
%add = add i64 %mul, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul14 = add i64 %i.03, %n
%add3 = mul i64 %mul14, 3
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %add3
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %add3
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%mul = shl nsw i64 %i.03, 1
%mul1 = mul i64 %n, 5
%add = add i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul2 = mul nsw i64 %i.03, 3
%mul3 = shl i64 %n, 1
%add4 = add i64 %mul2, %mul3
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %add4
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %add4
%0 = load i32* %arrayidx5, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%conv = trunc i64 %i.03 to i32
%mul = shl nsw i64 %i.03, 1
%sub = sub i64 %mul, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %n, 1
%add = sub i64 %mul2, %i.03
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%mul = mul nsw i64 %i.03, -2
%add = add i64 %mul, %n
%add1 = add i64 %add, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add1
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add1
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %n, 1
%sub = sub i64 %i.03, %mul2
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%mul = mul nsw i64 %i.03, -2
%mul1 = mul i64 %n, 3
%add = add i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%add2 = sub i64 %n, %i.03
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %add2
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2
%0 = load i32* %arrayidx3, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%mul = mul nsw i64 %i.03, -2
%mul1 = shl i64 %n, 1
%sub = sub i64 %mul, %mul1
- %arrayidx = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%sub2 = sub nsw i64 0, %i.03
%sub3 = sub i64 %sub2, %n
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %sub3
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %sub3
%0 = load i32* %arrayidx4, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, %n
%add1 = add i64 %add, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add1
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add1
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 0, %i.03
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%mul = shl i64 %N, 2
%mul1 = mul i64 %mul, %i.03
%add = add i64 %mul1, %M
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %N, 2
%mul3 = mul i64 %mul2, %i.03
%mul4 = mul i64 %M, 3
%add5 = add i64 %mul3, %mul4
%add6 = add i64 %add5, 1
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
%0 = load i32* %arrayidx7, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%mul = shl i64 %N, 1
%mul1 = mul i64 %mul, %i.03
%add = add i64 %mul1, %M
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul2 = shl i64 %N, 1
%mul3 = mul i64 %mul2, %i.03
%0 = mul i64 %M, -3
%sub = add i64 %mul3, %0
%add5 = add i64 %sub, 2
- %arrayidx6 = getelementptr inbounds i32* %A, i64 %add5
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %add5
%1 = load i32* %arrayidx6, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%mul1 = mul i64 %i.03, %n
%sub = sub i64 1, %mul1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%add = add i64 %i.03, %n
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%add1 = add i64 %n, 1
%sub = sub i64 %add1, %i.03
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 6, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 3
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 6, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 4
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 -6, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 10
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, 3
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%0 = mul i64 %i.03, -3
%sub = add i64 %0, 5
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
%1 = load i32* %arrayidx2, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.02
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 5, %i.02
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 4
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 30
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 10
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 5
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 6
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
%mul = shl i64 %i.02, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 -10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 -10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
%mul = mul i64 %i.03, 3
- %arrayidx = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 10
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 10
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 30
%i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 10
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 5
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 6
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
%i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ]
%conv = trunc i64 %i.02 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 -10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 -10
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
%exitcond = icmp ne i64 %inc, 7
%i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ]
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ]
%conv = trunc i64 %i.03 to i32
- %arrayidx = getelementptr inbounds i32* %A, i64 10
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 10
store i32 %conv, i32* %arrayidx, align 4
%mul = mul i64 %i.03, 3
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
%0 = load i32* %arrayidx1, align 4
- %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
%exitcond = icmp ne i64 %inc, %n
define void @z0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp {
entry:
%add = add i64 %n, 1
- %arrayidx = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
; CHECK: da analyze - none!
%add1 = add i64 %n, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %add1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1
%0 = load i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
define void @z1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp {
entry:
- %arrayidx = getelementptr inbounds i32* %A, i64 %n
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %n
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
; CHECK: da analyze - none!
%add = add i64 %n, 1
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %add
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
%0 = load i32* %arrayidx1, align 4
store i32 %0, i32* %B, align 4
ret void
define void @z2(i32* %A, i32* %B, i64 %n, i64 %m) nounwind uwtable ssp {
entry:
- %arrayidx = getelementptr inbounds i32* %A, i64 %n
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %n
store i32 0, i32* %arrayidx, align 4
; CHECK: da analyze - none!
; CHECK: da analyze - confused!
; CHECK: da analyze - none!
- %arrayidx1 = getelementptr inbounds i32* %A, i64 %m
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %m
%0 = load i32* %arrayidx1, align 4
store i32 %0, i32* %B, align 4
ret void
invoke void @__dynamic_cast()
to label %bb1 unwind label %bb2
bb1:
- %Hidden = getelementptr inbounds i32* %v1, i64 1
+ %Hidden = getelementptr inbounds i32, i32* %v1, i64 1
ret void
bb2:
%lpad.loopexit80 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i32* %a, i64 %storemerge3
+ %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %storemerge3
%loadA = load i32* %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i32* %b, i64 %storemerge3
+ %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %storemerge3
%loadB = load i32* %arrayidxB, align 2
%mul = mul i32 %loadB, %loadA
%add = add nuw nsw i64 %storemerge3, 1
%a_float = bitcast i32* %a to float*
- %arrayidxA_plus_2 = getelementptr inbounds float* %a_float, i64 %add
+ %arrayidxA_plus_2 = getelementptr inbounds float, float* %a_float, i64 %add
%mul_float = sitofp i32 %mul to float
store float %mul_float, float* %arrayidxA_plus_2, align 2
; CHECK: Run-time memory checks:
; CHECK-NEXT: 0:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
; CHECK-NEXT: 1:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
@n = global i32 20, align 4
@B = common global i16* null, align 8
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16* %a, i64 %storemerge3
+ %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
%loadA = load i16* %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
+ %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
%loadB = load i16* %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
+ %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
%loadC = load i16* %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
%add = add nuw nsw i64 %storemerge3, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
+ %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
store i16 %mul1, i16* %arrayidxA_plus_2, align 2
%exitcond = icmp eq i64 %add, 20
; CHECK: Run-time memory checks:
; CHECK-NEXT: 0:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
; CHECK-NEXT: 1:
-; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
-; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
+; CHECK-NEXT: %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
+; CHECK-NEXT: %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
@n = global i32 20, align 4
@B = common global i16* null, align 8
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
- %arrayidxA = getelementptr inbounds i16* %a, i64 %storemerge3
+ %arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
%loadA = load i16* %arrayidxA, align 2
- %arrayidxB = getelementptr inbounds i16* %b, i64 %storemerge3
+ %arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
%loadB = load i16* %arrayidxB, align 2
- %arrayidxC = getelementptr inbounds i16* %c, i64 %storemerge3
+ %arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
%loadC = load i16* %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
%add = add nuw nsw i64 %storemerge3, 1
- %arrayidxA_plus_2 = getelementptr inbounds i16* %a, i64 %add
+ %arrayidxA_plus_2 = getelementptr inbounds i16, i16* %a, i64 %add
store i16 %mul1, i16* %arrayidxA_plus_2, align 2
%exitcond = icmp eq i64 %add, 20
for.body: ; preds = %for.body, %entry
%i.01 = phi i32 [ 0, %entry ], [ %tmp8.7, %for.body ]
- %arrayidx = getelementptr i32* %bufUInt, i32 %i.01
- %arrayidx5 = getelementptr i32* %pattern, i32 %i.01
+ %arrayidx = getelementptr i32, i32* %bufUInt, i32 %i.01
+ %arrayidx5 = getelementptr i32, i32* %pattern, i32 %i.01
%tmp6 = load i32* %arrayidx5, align 4
store i32 %tmp6, i32* %arrayidx, align 4
%tmp8.7 = add i32 %i.01, 8
bb: ; preds = %bb, %entry
%i.01.0 = phi i32 [ 100, %entry ], [ %tmp4, %bb ] ; <i32> [#uses=2]
- %tmp1 = getelementptr [101 x i32]* @array, i32 0, i32 %i.01.0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr [101 x i32], [101 x i32]* @array, i32 0, i32 %i.01.0 ; <i32*> [#uses=1]
store i32 %x, i32* %tmp1
%tmp4 = add i32 %i.01.0, -1 ; <i32> [#uses=2]
%tmp7 = icmp sgt i32 %tmp4, -1 ; <i1> [#uses=1]
load i32* %srcptr, align 4 ; <i32>:1 [#uses=2]
and i32 %1, 255 ; <i32>:2 [#uses=1]
and i32 %1, -256 ; <i32>:3 [#uses=1]
- getelementptr [256 x i8]* @lut, i32 0, i32 %2 ; <i8*>:4 [#uses=1]
+ getelementptr [256 x i8], [256 x i8]* @lut, i32 0, i32 %2 ; <i8*>:4 [#uses=1]
load i8* %4, align 1 ; <i8>:5 [#uses=1]
zext i8 %5 to i32 ; <i32>:6 [#uses=1]
or i32 %6, %3 ; <i32>:7 [#uses=1]
bb1: ; preds = %bb1, %bb1.thread
%indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=4]
%i.0.reg2mem.0 = sub i32 255, %indvar ; <i32> [#uses=2]
- %0 = getelementptr i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+ %0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=1]
- %2 = getelementptr i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+ %2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
store i32 %1, i32* %2, align 4
%3 = sub i32 254, %indvar ; <i32> [#uses=1]
%4 = icmp slt i32 %3, 0 ; <i1> [#uses=1]
define void @_Z3foov() nounwind {
entry:
%x = alloca %struct.NonPod, align 8 ; <%struct.NonPod*> [#uses=2]
- %0 = getelementptr %struct.NonPod* %x, i32 0, i32 0 ; <[2 x %struct.Foo]*> [#uses=1]
- %1 = getelementptr [2 x %struct.Foo]* %0, i32 1, i32 0 ; <%struct.Foo*> [#uses=1]
+ %0 = getelementptr %struct.NonPod, %struct.NonPod* %x, i32 0, i32 0 ; <[2 x %struct.Foo]*> [#uses=1]
+ %1 = getelementptr [2 x %struct.Foo], [2 x %struct.Foo]* %0, i32 1, i32 0 ; <%struct.Foo*> [#uses=1]
br label %bb1.i
bb1.i: ; preds = %bb2.i, %entry
%.0.i = phi %struct.Foo* [ %1, %entry ], [ %4, %bb2.i ] ; <%struct.Foo*> [#uses=2]
- %2 = getelementptr %struct.NonPod* %x, i32 0, i32 0, i32 0 ; <%struct.Foo*> [#uses=1]
+ %2 = getelementptr %struct.NonPod, %struct.NonPod* %x, i32 0, i32 0, i32 0 ; <%struct.Foo*> [#uses=1]
%3 = icmp eq %struct.Foo* %.0.i, %2 ; <i1> [#uses=1]
br i1 %3, label %_ZN6NonPodD1Ev.exit, label %bb2.i
bb2.i: ; preds = %bb1.i
- %4 = getelementptr %struct.Foo* %.0.i, i32 -1 ; <%struct.Foo*> [#uses=1]
+ %4 = getelementptr %struct.Foo, %struct.Foo* %.0.i, i32 -1 ; <%struct.Foo*> [#uses=1]
br label %bb1.i
_ZN6NonPodD1Ev.exit: ; preds = %bb1.i
for.body: ; preds = %for.cond
%idxprom = sext i32 %0 to i64
- %arrayidx = getelementptr inbounds [0 x i32]* getelementptr inbounds ([1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* getelementptr inbounds ([1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom
%1 = load i32* %arrayidx, align 1
store i32 %1, i32* @func_21_l_773, align 4
store i32 1, i32* @g_814, align 4
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 123, i32* %tmp
%tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
bb3:
%i.0 = phi i32 [ %7, %bb3 ], [ 0, %bb3.preheader ]
- getelementptr i32* %p, i32 %i.0
+ getelementptr i32, i32* %p, i32 %i.0
load i32* %3, align 4
add i32 %4, 1
- getelementptr i32* %p, i32 %i.0
+ getelementptr i32, i32* %p, i32 %i.0
store i32 %5, i32* %6, align 4
add i32 %i.0, 1
icmp slt i32 %7, %n
%7 = add i32 %x.06, %4 ; <i32> [#uses=1]
%8 = shl i32 %x.06, 1 ; <i32> [#uses=1]
%9 = add i32 %6, %8 ; <i32> [#uses=1]
- %10 = getelementptr i8* %r, i32 %9 ; <i8*> [#uses=1]
+ %10 = getelementptr i8, i8* %r, i32 %9 ; <i8*> [#uses=1]
%11 = load i8* %10, align 1 ; <i8> [#uses=1]
- %12 = getelementptr i8* %j, i32 %7 ; <i8*> [#uses=1]
+ %12 = getelementptr i8, i8* %j, i32 %7 ; <i8*> [#uses=1]
store i8 %11, i8* %12, align 1
%13 = add i32 %x.06, 1 ; <i32> [#uses=2]
br label %bb7
%x.12 = phi i32 [ %40, %bb15 ], [ 0, %bb.nph3 ] ; <i32> [#uses=5]
%29 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%30 = add i32 %29, %25 ; <i32> [#uses=1]
- %31 = getelementptr i8* %r, i32 %30 ; <i8*> [#uses=1]
+ %31 = getelementptr i8, i8* %r, i32 %30 ; <i8*> [#uses=1]
%32 = load i8* %31, align 1 ; <i8> [#uses=1]
%.sum = add i32 %26, %x.12 ; <i32> [#uses=1]
- %33 = getelementptr i8* %j, i32 %.sum ; <i8*> [#uses=1]
+ %33 = getelementptr i8, i8* %j, i32 %.sum ; <i8*> [#uses=1]
store i8 %32, i8* %33, align 1
%34 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%35 = or i32 %34, 2 ; <i32> [#uses=1]
%36 = add i32 %35, %25 ; <i32> [#uses=1]
- %37 = getelementptr i8* %r, i32 %36 ; <i8*> [#uses=1]
+ %37 = getelementptr i8, i8* %r, i32 %36 ; <i8*> [#uses=1]
%38 = load i8* %37, align 1 ; <i8> [#uses=1]
%.sum6 = add i32 %27, %x.12 ; <i32> [#uses=1]
- %39 = getelementptr i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
+ %39 = getelementptr i8, i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
store i8 %38, i8* %39, align 1
%40 = add i32 %x.12, 1 ; <i32> [#uses=2]
br label %bb15
%y.21 = phi i32 [ %57, %bb24 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%53 = mul i32 %y.21, %50 ; <i32> [#uses=1]
%.sum1 = add i32 %53, %51 ; <i32> [#uses=1]
- %54 = getelementptr i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
+ %54 = getelementptr i8, i8* %r, i32 %.sum1 ; <i8*> [#uses=1]
%55 = mul i32 %y.21, %w ; <i32> [#uses=1]
%.sum5 = add i32 %55, %.sum3 ; <i32> [#uses=1]
- %56 = getelementptr i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
+ %56 = getelementptr i8, i8* %j, i32 %.sum5 ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %56, i8* %54, i32 %w, i32 1, i1 false)
%57 = add i32 %y.21, 1 ; <i32> [#uses=2]
br label %bb24
bb26: ; preds = %bb24.bb26_crit_edge, %bb22
%59 = mul i32 %x, %w ; <i32> [#uses=1]
%.sum4 = add i32 %.sum3, %59 ; <i32> [#uses=1]
- %60 = getelementptr i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
+ %60 = getelementptr i8, i8* %j, i32 %.sum4 ; <i8*> [#uses=1]
%61 = mul i32 %x, %w ; <i32> [#uses=1]
%62 = sdiv i32 %61, 2 ; <i32> [#uses=1]
tail call void @llvm.memset.p0i8.i32(i8* %60, i8 -128, i32 %62, i32 1, i1 false)
bb30: ; preds = %bb31, %bb.nph11
%y.310 = phi i32 [ %70, %bb31 ], [ 0, %bb.nph11 ] ; <i32> [#uses=3]
%66 = mul i32 %y.310, %64 ; <i32> [#uses=1]
- %67 = getelementptr i8* %r, i32 %66 ; <i8*> [#uses=1]
+ %67 = getelementptr i8, i8* %r, i32 %66 ; <i8*> [#uses=1]
%68 = mul i32 %y.310, %w ; <i32> [#uses=1]
- %69 = getelementptr i8* %j, i32 %68 ; <i8*> [#uses=1]
+ %69 = getelementptr i8, i8* %j, i32 %68 ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %69, i8* %67, i32 %w, i32 1, i1 false)
%70 = add i32 %y.310, 1 ; <i32> [#uses=2]
br label %bb31
bb33: ; preds = %bb31.bb33_crit_edge, %bb29
%72 = mul i32 %x, %w ; <i32> [#uses=1]
- %73 = getelementptr i8* %j, i32 %72 ; <i8*> [#uses=1]
+ %73 = getelementptr i8, i8* %j, i32 %72 ; <i8*> [#uses=1]
%74 = mul i32 %x, %w ; <i32> [#uses=1]
%75 = sdiv i32 %74, 2 ; <i32> [#uses=1]
tail call void @llvm.memset.p0i8.i32(i8* %73, i8 -128, i32 %75, i32 1, i1 false)
%sum.04 = phi i32 [ 0, %entry ], [ %add2, %for.body ]
; CHECK: --> %sum.04{{ *}}Exits: 2450
%i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [50 x i32]* @arr1, i32 0, i32 %i.03
+ %arrayidx = getelementptr inbounds [50 x i32], [50 x i32]* @arr1, i32 0, i32 %i.03
%0 = load i32* %arrayidx, align 4
; CHECK: --> %0{{ *}}Exits: 50
- %arrayidx1 = getelementptr inbounds [50 x i32]* @arr2, i32 0, i32 %i.03
+ %arrayidx1 = getelementptr inbounds [50 x i32], [50 x i32]* @arr2, i32 0, i32 %i.03
%1 = load i32* %arrayidx1, align 4
; CHECK: --> %1{{ *}}Exits: 0
%add = add i32 %0, %sum.04
; CHECK: --> %sum.02{{ *}}Exits: 10
%n.01 = phi %struct.ListNode* [ bitcast ({ %struct.ListNode*, i32, [4 x i8] }* @node5 to %struct.ListNode*), %entry ], [ %1, %for.body ]
; CHECK: --> %n.01{{ *}}Exits: @node1
- %i = getelementptr inbounds %struct.ListNode* %n.01, i64 0, i32 1
+ %i = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 1
%0 = load i32* %i, align 4
%add = add nsw i32 %0, %sum.02
- %next = getelementptr inbounds %struct.ListNode* %n.01, i64 0, i32 0
+ %next = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 0
%1 = load %struct.ListNode** %next, align 8
; CHECK: --> %1{{ *}}Exits: 0
%cmp = icmp eq %struct.ListNode* %1, null
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32 addrspace(1)* %d, i64 %2 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32 addrspace(1)* %d, i64 %2 ; <i32*> [#uses=1]
store i32 %1, i32 addrspace(1)* %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
- %arrayidx = getelementptr i8 addrspace(1)* %a, i64 %indvar
+ %arrayidx = getelementptr i8, i8 addrspace(1)* %a, i64 %indvar
store i8 0, i8 addrspace(1)* %arrayidx, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %tmp
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32* %d, i64 %2 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32* %d, i64 %2 ; <i32*> [#uses=1]
store i32 %1, i32* %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
- %arrayidx = getelementptr i8* %a, i64 %indvar
+ %arrayidx = getelementptr i8, i8* %a, i64 %indvar
store i8 0, i8* %arrayidx, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %tmp
; min(N, i+3)
; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6
; CHECK-NEXT: --> (-1 + (-1 * ((-1 + (-1 * (sext i32 {3,+,1}<nw><%bb1> to i64))) smax (-1 + (-1 * (sext i32 %N to i64))))))
- %tmp11 = getelementptr inbounds i32* %A, i64 %tmp9
+ %tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
%tmp12 = load i32* %tmp11, align 4
%tmp13 = shl nsw i32 %tmp12, 1
%tmp14 = icmp sge i32 3, %i.0
; max(0, i - 3)
; CHECK: select i1 %tmp14, i64 0, i64 %tmp17
; CHECK-NEXT: --> (-3 + (3 smax {0,+,1}<nuw><nsw><%bb1>))
- %tmp21 = getelementptr inbounds i32* %A, i64 %tmp19
+ %tmp21 = getelementptr inbounds i32, i32* %A, i64 %tmp19
store i32 %tmp13, i32* %tmp21, align 4
%tmp23 = add nuw nsw i32 %i.0, 1
br label %bb1
; CHECK: --> {0,+,2}<nuw><nsw><%bb>
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
-; CHECK: %2 = getelementptr inbounds double* %d, i64 %1
+; CHECK: %2 = getelementptr inbounds double, double* %d, i64 %1
; CHECK: --> {%d,+,16}<nsw><%bb>
- %2 = getelementptr inbounds double* %d, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
%3 = load double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %5 = getelementptr inbounds double* %q, i64 %4 ; <double*> [#uses=1]
+ %5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
%6 = load double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]
-; CHECK: %9 = getelementptr inbounds double* %q, i64 %8
+; CHECK: %9 = getelementptr inbounds double, double* %q, i64 %8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %9 = getelementptr inbounds double* %q, i64 %8 ; <double*> [#uses=1]
+ %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
; Artificially repeat the above three instructions, this time using
; add nsw instead of or.
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
-; CHECK: %t9 = getelementptr inbounds double* %q, i64 %t8
+; CHECK: %t9 = getelementptr inbounds double, double* %q, i64 %t8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %t9 = getelementptr inbounds double* %q, i64 %t8 ; <double*> [#uses=1]
+ %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
%10 = load double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %15 = getelementptr inbounds double* %d, i64 %14 ; <double*> [#uses=1]
+ %15 = getelementptr inbounds double, double* %d, i64 %14 ; <double*> [#uses=1]
store double %13, double* %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1
; CHECK: --> {0,+,2}<nuw><nsw><%bb>
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
-; CHECK: %2 = getelementptr inbounds double* %d, i64 %1
+; CHECK: %2 = getelementptr inbounds double, double* %d, i64 %1
; CHECK: --> {%d,+,16}<nsw><%bb>
- %2 = getelementptr inbounds double* %d, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
%3 = load double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %5 = getelementptr inbounds double* %q, i64 %4 ; <double*> [#uses=1]
+ %5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
%6 = load double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%8 = sext i32 %7 to i64 ; <i64> [#uses=1]
-; CHECK: %9 = getelementptr inbounds double* %q, i64 %8
+; CHECK: %9 = getelementptr inbounds double, double* %q, i64 %8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %9 = getelementptr inbounds double* %q, i64 %8 ; <double*> [#uses=1]
+ %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
; Artificially repeat the above three instructions, this time using
; add nsw instead of or.
; CHECK: --> {1,+,2}<nuw><nsw><%bb>
%t8 = sext i32 %t7 to i64 ; <i64> [#uses=1]
-; CHECK: %t9 = getelementptr inbounds double* %q, i64 %t8
+; CHECK: %t9 = getelementptr inbounds double, double* %q, i64 %t8
; CHECK: {(8 + %q),+,16}<nsw><%bb>
- %t9 = getelementptr inbounds double* %q, i64 %t8 ; <double*> [#uses=1]
+ %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
%10 = load double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
%14 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %15 = getelementptr inbounds double* %d, i64 %14 ; <double*> [#uses=1]
+ %15 = getelementptr inbounds double, double* %d, i64 %14 ; <double*> [#uses=1]
store double %13, double* %15, align 8
%16 = add nsw i32 %i.01, 2 ; <i32> [#uses=2]
br label %bb1
; CHECK: %i.01
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb>
%tmp2 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp3 = getelementptr double* %p, i64 %tmp2 ; <double*> [#uses=1]
+ %tmp3 = getelementptr double, double* %p, i64 %tmp2 ; <double*> [#uses=1]
%tmp4 = load double* %tmp3, align 8 ; <double> [#uses=1]
%tmp5 = fmul double %tmp4, 9.200000e+00 ; <double> [#uses=1]
%tmp6 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr double* %p, i64 %tmp6 ; <double*> [#uses=1]
+ %tmp7 = getelementptr double, double* %p, i64 %tmp6 ; <double*> [#uses=1]
; CHECK: %tmp7
; CHECK-NEXT: --> {%p,+,8}<%bb>
store double %tmp5, double* %tmp7, align 8
%phitmp = sext i32 %tmp8 to i64 ; <i64> [#uses=1]
; CHECK: %phitmp
; CHECK-NEXT: --> {1,+,1}<nuw><nsw><%bb>
- %tmp9 = getelementptr double* %p, i64 %phitmp ; <double*> [#uses=1]
+ %tmp9 = getelementptr double, double* %p, i64 %phitmp ; <double*> [#uses=1]
; CHECK: %tmp9
; CHECK-NEXT: --> {(8 + %p),+,8}<%bb>
%tmp10 = load double* %tmp9, align 8 ; <double> [#uses=1]
; CHECK: %__first.addr.02.i.i
; CHECK-NEXT: --> {%begin,+,4}<nuw><%for.body.i.i>
store i32 0, i32* %__first.addr.02.i.i, align 4
- %ptrincdec.i.i = getelementptr inbounds i32* %__first.addr.02.i.i, i64 1
+ %ptrincdec.i.i = getelementptr inbounds i32, i32* %__first.addr.02.i.i, i64 1
; CHECK: %ptrincdec.i.i
; CHECK-NEXT: --> {(4 + %begin),+,4}<nuw><%for.body.i.i>
%cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
%tmp = add nsw i64 %indvar.i.i, 1
; CHECK: %tmp =
; CHECK: {1,+,1}<nuw><nsw><%for.body.i.i>
- %ptrincdec.i.i = getelementptr inbounds i32* %begin, i64 %tmp
+ %ptrincdec.i.i = getelementptr inbounds i32, i32* %begin, i64 %tmp
; CHECK: %ptrincdec.i.i =
; CHECK: {(4 + %begin),+,4}<nsw><%for.body.i.i>
- %__first.addr.08.i.i = getelementptr inbounds i32* %begin, i64 %indvar.i.i
+ %__first.addr.08.i.i = getelementptr inbounds i32, i32* %begin, i64 %indvar.i.i
; CHECK: %__first.addr.08.i.i
; CHECK: {%begin,+,4}<nsw><%for.body.i.i>
store i32 0, i32* %__first.addr.08.i.i, align 4
; CHECK: --> {(4 + %arg),+,4}<nuw><%bb1> Exits: (8 + %arg)<nsw>
define i32 @PR12375(i32* readnone %arg) {
bb:
- %tmp = getelementptr inbounds i32* %arg, i64 2
+ %tmp = getelementptr inbounds i32, i32* %arg, i64 2
br label %bb1
bb1: ; preds = %bb1, %bb
%tmp2 = phi i32* [ %arg, %bb ], [ %tmp5, %bb1 ]
%tmp3 = phi i32 [ 0, %bb ], [ %tmp4, %bb1 ]
%tmp4 = add nsw i32 %tmp3, 1
- %tmp5 = getelementptr inbounds i32* %tmp2, i64 1
+ %tmp5 = getelementptr inbounds i32, i32* %tmp2, i64 1
%tmp6 = icmp ult i32* %tmp5, %tmp
br i1 %tmp6, label %bb1, label %bb7
bb2: ; preds = %bb2, %bb
%tmp = phi i32* [ %arg, %bb ], [ %tmp4, %bb2 ]
%tmp3 = icmp ult i32* %tmp, %arg1
- %tmp4 = getelementptr inbounds i32* %tmp, i64 1
+ %tmp4 = getelementptr inbounds i32, i32* %tmp, i64 1
br i1 %tmp3, label %bb2, label %bb5
bb5: ; preds = %bb2
unreachable
_ZNK4llvm12AttributeSet3endEj.exit: ; preds = %for.end
- %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1
+ %second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507", %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1
%0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2
- %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1
+ %NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506", %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1
%1 = load i32* %NumAttrs.i.i.i, align 4, !tbaa !8
- %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1
+ %add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1
br i1 undef, label %return, label %for.body11
for.cond9: ; preds = %_ZNK4llvm9Attribute13getKindAsEnumEv.exit
]
_ZNK4llvm9Attribute13getKindAsEnumEv.exit: ; preds = %_ZNK4llvm9Attribute15isEnumAttributeEv.exit, %_ZNK4llvm9Attribute15isEnumAttributeEv.exit
- %incdec.ptr = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096, i32 1
+ %incdec.ptr = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096, i32 1
br i1 undef, label %for.cond9, label %return
cond.false21: ; preds = %_ZNK4llvm9Attribute15isEnumAttributeEv.exit, %for.body11
bb:
%i = phi i64 [ 0, %entry ], [ %i.next, %bb ]
- %pi = getelementptr double* %p, i64 %i
+ %pi = getelementptr double, double* %p, i64 %i
%i.next = add i64 %i, 1
- %pi.next = getelementptr double* %p, i64 %i.next
+ %pi.next = getelementptr double, double* %p, i64 %i.next
%x = load double* %pi
%y = load double* %pi.next
%z = fmul double %x, %y
%i.next = add i64 %i, 1
%e = add i64 %i, %j
- %pi.j = getelementptr double* %p, i64 %e
+ %pi.j = getelementptr double, double* %p, i64 %e
%f = add i64 %i.next, %j
- %pi.next.j = getelementptr double* %p, i64 %f
+ %pi.next.j = getelementptr double, double* %p, i64 %f
%x = load double* %pi.j
%y = load double* %pi.next.j
%z = fmul double %x, %y
%o = add i64 %j, 91
%g = add i64 %i, %o
- %pi.j.next = getelementptr double* %p, i64 %g
+ %pi.j.next = getelementptr double, double* %p, i64 %g
%a = load double* %pi.j.next
%b = fmul double %x, %a
store double %b, double* %pi.j.next
%i.next = add i64 %i, 1
%e = add i64 %i, %j
- %pi.j = getelementptr double* %p, i64 %e
+ %pi.j = getelementptr double, double* %p, i64 %e
%f = add i64 %i.next, %j
- %pi.next.j = getelementptr double* %p, i64 %f
+ %pi.next.j = getelementptr double, double* %p, i64 %f
%x = load double* %pi.j
%y = load double* %pi.next.j
%z = fmul double %x, %y
%o = add i64 %j, %n
%g = add i64 %i, %o
- %pi.j.next = getelementptr double* %p, i64 %g
+ %pi.j.next = getelementptr double, double* %p, i64 %g
%a = load double* %pi.j.next
%b = fmul double %x, %a
store double %b, double* %pi.j.next
; Alias-analysis test body, shown as a before/after diff of the
; explicit-pointee-type GEP migration ("getelementptr <ty>, <ty>* %p, ...").
; %A is an alloca of %struct.A whose first field is (per the bitcasts below)
; a %struct.B:
;   %B -> first field of %A, as %struct.B*
;   %Q -> %B re-viewed as %struct.A* (field 0 starts at offset 0, so this is
;         the same address as %A)
;   %Z -> field 1 computed through the cast pointer %Q
;   %C -> one whole %struct.B past %B
;   %Y -> field 1 computed directly from %A (same offset as %Z)
define void @foo() {
entry:
%A = alloca %struct.A
- %B = getelementptr %struct.A* %A, i32 0, i32 0
+ %B = getelementptr %struct.A, %struct.A* %A, i32 0, i32 0
%Q = bitcast %struct.B* %B to %struct.A*
- %Z = getelementptr %struct.A* %Q, i32 0, i32 1
- %C = getelementptr %struct.B* %B, i32 1
+ %Z = getelementptr %struct.A, %struct.A* %Q, i32 0, i32 1
+ %C = getelementptr %struct.B, %struct.B* %B, i32 1
%X = bitcast %struct.B* %C to i32*
- %Y = getelementptr %struct.A* %A, i32 0, i32 1
+ %Y = getelementptr %struct.A, %struct.A* %A, i32 0, i32 1
ret void
}
; Mirror of @foo with renamed values (%M/%N/%O/%P/%R/%W/%V), presumably so the
; analysis result can be compared across two textually distinct but
; structurally identical functions — TODO confirm against the test's RUN line.
;   %N -> first field of the %struct.A alloca %M, as %struct.B*
;   %O -> %N re-viewed as %struct.A* (same address as %M)
;   %P -> field 1 through the cast pointer %O
;   %R -> one whole %struct.B past %N
;   %V -> field 1 computed directly from %M (same offset as %P)
define void @bar() {
%M = alloca %struct.A
- %N = getelementptr %struct.A* %M, i32 0, i32 0
+ %N = getelementptr %struct.A, %struct.A* %M, i32 0, i32 0
%O = bitcast %struct.B* %N to %struct.A*
- %P = getelementptr %struct.A* %O, i32 0, i32 1
- %R = getelementptr %struct.B* %N, i32 1
+ %P = getelementptr %struct.A, %struct.A* %O, i32 0, i32 1
+ %R = getelementptr %struct.B, %struct.B* %N, i32 1
%W = bitcast %struct.B* %R to i32*
- %V = getelementptr %struct.A* %M, i32 0, i32 1
+ %V = getelementptr %struct.A, %struct.A* %M, i32 0, i32 1
ret void
}
for.body: ; preds = %entry, %for.body
%i = phi i64 [ %inc, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
%inc = add nsw i64 %i, 1 ; <i64> [#uses=2]
- %arrayidx = getelementptr inbounds i64* %p, i64 %inc
+ %arrayidx = getelementptr inbounds i64, i64* %p, i64 %inc
store i64 0, i64* %arrayidx
%tmp6 = load i64* %p ; <i64> [#uses=1]
%cmp = icmp slt i64 %inc, %tmp6 ; <i1> [#uses=1]
%t2 = ashr i64 %t1, 7 ; <i32> [#uses=1]
%s1 = shl i64 %i.01, 5 ; <i32> [#uses=1]
%s2 = ashr i64 %s1, 5 ; <i32> [#uses=1]
- %t3 = getelementptr i64* %x, i64 %i.01 ; <i64*> [#uses=1]
+ %t3 = getelementptr i64, i64* %x, i64 %i.01 ; <i64*> [#uses=1]
store i64 0, i64* %t3, align 1
%indvar.next = add i64 %i.01, 199 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar.next, %n ; <i1> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
; CHECK: %2
; CHECK-NEXT: --> {-128,+,1}<nsw><%bb1> Exits: 127
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
%4 = load double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
%0 = trunc i64 %i.0.reg2mem.0 to i7 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
%4 = load double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i7 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
%4 = load double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 128 ; <i1> [#uses=1]
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
%4 = load double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, 1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
%0 = trunc i64 %i.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
- %3 = getelementptr double* %x, i64 %2 ; <double*> [#uses=1]
+ %3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
%4 = load double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr double* %x, i64 %6 ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
store double %5, double* %7, align 8
%8 = add i64 %i.0.reg2mem.0, -1 ; <i64> [#uses=2]
%9 = icmp sgt i64 %8, 127 ; <i1> [#uses=1]
%tmp4 = mul i32 %tmp3, %i.02 ; <i32> [#uses=1]
%tmp5 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
%tmp6 = sext i32 %j.01 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr [32 x [256 x i32]]* @table, i64 0, i64 %tmp5, i64 %tmp6 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [32 x [256 x i32]], [32 x [256 x i32]]* @table, i64 0, i64 %tmp5, i64 %tmp6 ; <i32*> [#uses=1]
store i32 %tmp4, i32* %tmp7, align 4
%tmp8 = add i32 %j.01, 1 ; <i32> [#uses=2]
br label %bb2
for.body: ; preds = %for.body, %entry
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
- %arrayidx = getelementptr double* %p, i64 %i ; <double*> [#uses=2]
+ %arrayidx = getelementptr double, double* %p, i64 %i ; <double*> [#uses=2]
%t4 = load double* %arrayidx ; <double> [#uses=1]
%mul = fmul double %t4, 2.200000e+00 ; <double> [#uses=1]
store double %mul, double* %arrayidx
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 123, i32* %tmp
%tmp2 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb3
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [8 x i32]* @foo.a, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @foo.a, i64 0, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
%p.addr.05 = phi i16* [ %incdec.ptr, %for.body ], [ %p, %for.body.preheader ]
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %for.body.preheader ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
- %incdec.ptr = getelementptr inbounds i16* %p.addr.05, i32 1
+ %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i32 1
%0 = load i16* %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
br label %bb3
bb: ; preds = %bb3
- %tmp = getelementptr [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [1000 x i32], [1000 x i32]* @A, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 123, i32* %tmp
%tmp4 = mul i32 %i.0, 4 ; <i32> [#uses=1]
%tmp5 = or i32 %tmp4, 1
ret void
bb2.i: ; preds = %bb3.i
- %1 = getelementptr %struct.SHA_INFO* %sha_info, i64 0, i32 3
+ %1 = getelementptr %struct.SHA_INFO, %struct.SHA_INFO* %sha_info, i64 0, i32 3
%2 = bitcast [16 x i32]* %1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %buffer_addr.0.i, i64 64, i32 1, i1 false)
- %3 = getelementptr %struct.SHA_INFO* %sha_info, i64 0, i32 3, i64 0
+ %3 = getelementptr %struct.SHA_INFO, %struct.SHA_INFO* %sha_info, i64 0, i32 3, i64 0
%4 = bitcast i32* %3 to i8*
br label %codeRepl
byte_reverse.exit.i: ; preds = %codeRepl
call fastcc void @sha_transform(%struct.SHA_INFO* %sha_info) nounwind
- %5 = getelementptr i8* %buffer_addr.0.i, i64 64
+ %5 = getelementptr i8, i8* %buffer_addr.0.i, i64 64
%6 = add i32 %count_addr.0.i, -64
br label %bb3.i
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ] ; <i64> [#uses=4]
%s0 = shl i64 %indvar, 8 ; <i64> [#uses=1]
%indvar.i8 = ashr i64 %s0, 8 ; <i64> [#uses=1]
- %t0 = getelementptr double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
%t1 = load double* %t0 ; <double> [#uses=1]
%t2 = fmul double %t1, 1.000000e-01 ; <double> [#uses=1]
store double %t2, double* %t0
%hiPart.035 = phi i32 [ %tmp12, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=2]
%peakCount.034 = phi float [ %tmp19, %bb1 ], [ %tmp3, %bb.nph ] ; <float> [#uses=1]
%tmp6 = sext i32 %hiPart.035 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
+ %tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
%tmp8 = load float* %tmp7, align 4 ; <float> [#uses=1]
%tmp10 = fadd float %tmp8, %distERBhi.036 ; <float> [#uses=3]
%tmp12 = add i32 %hiPart.035, 1 ; <i32> [#uses=3]
%tmp15 = sext i32 %tmp12 to i64 ; <i64> [#uses=1]
- %tmp16 = getelementptr float* %peakWeight, i64 %tmp15 ; <float*> [#uses=1]
+ %tmp16 = getelementptr float, float* %peakWeight, i64 %tmp15 ; <float*> [#uses=1]
%tmp17 = load float* %tmp16, align 4 ; <float> [#uses=1]
%tmp19 = fadd float %tmp17, %peakCount.034 ; <float> [#uses=2]
br label %bb1
bb: ; preds = %bb4, %entry
%mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; <i8> [#uses=4]
zext i8 %mode.0 to i32 ; <i32>:1 [#uses=1]
- getelementptr [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
+ getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
load i32* %2, align 4 ; <i32>:3 [#uses=1]
icmp eq i32 %3, %0 ; <i1>:4 [#uses=1]
br i1 %4, label %bb1, label %bb2
%tmp = add i32 %j.0.i, 1 ; <i32> [#uses=5]
store i32 0, i32* %q, align 4
%tmp1 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp2 = getelementptr [9 x i32]* %a, i64 0, i64 %tmp1 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp1 ; <i32*> [#uses=1]
%tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
%tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
br i1 %tmp4, label %bb.i.bb7.i.backedge_crit_edge, label %bb1.i
bb1.i: ; preds = %bb.i
%tmp5 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp6 = sext i32 %tmp5 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr [17 x i32]* %b, i64 0, i64 %tmp6 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp6 ; <i32*> [#uses=1]
%tmp8 = load i32* %tmp7, align 4 ; <i32> [#uses=1]
%tmp9 = icmp eq i32 %tmp8, 0 ; <i1> [#uses=1]
br i1 %tmp9, label %bb1.i.bb7.i.backedge_crit_edge, label %bb2.i
bb2.i: ; preds = %bb1.i
%tmp10 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp11 = sext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp12 = getelementptr [15 x i32]* %c, i64 0, i64 %tmp11 ; <i32*> [#uses=1]
+ %tmp12 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp11 ; <i32*> [#uses=1]
%tmp13 = load i32* %tmp12, align 4 ; <i32> [#uses=1]
%tmp14 = icmp eq i32 %tmp13, 0 ; <i1> [#uses=1]
br i1 %tmp14, label %bb2.i.bb7.i.backedge_crit_edge, label %bb3.i
bb3.i: ; preds = %bb2.i
- %tmp15 = getelementptr [9 x i32]* %x1, i64 0, i64 1 ; <i32*> [#uses=1]
+ %tmp15 = getelementptr [9 x i32], [9 x i32]* %x1, i64 0, i64 1 ; <i32*> [#uses=1]
store i32 %tmp, i32* %tmp15, align 4
%tmp16 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp17 = getelementptr [9 x i32]* %a, i64 0, i64 %tmp16 ; <i32*> [#uses=1]
+ %tmp17 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp16 ; <i32*> [#uses=1]
store i32 0, i32* %tmp17, align 4
%tmp18 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp19 = sext i32 %tmp18 to i64 ; <i64> [#uses=1]
- %tmp20 = getelementptr [17 x i32]* %b, i64 0, i64 %tmp19 ; <i32*> [#uses=1]
+ %tmp20 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp19 ; <i32*> [#uses=1]
store i32 0, i32* %tmp20, align 4
%tmp21 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp22 = sext i32 %tmp21 to i64 ; <i64> [#uses=1]
- %tmp23 = getelementptr [15 x i32]* %c, i64 0, i64 %tmp22 ; <i32*> [#uses=1]
+ %tmp23 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp22 ; <i32*> [#uses=1]
store i32 0, i32* %tmp23, align 4
call void @Try(i32 2, i32* %q, i32* %b9, i32* %a10, i32* %c11, i32* %x1.sub) nounwind
%tmp24 = load i32* %q, align 4 ; <i32> [#uses=1]
bb5.i: ; preds = %bb3.i
%tmp26 = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %tmp27 = getelementptr [9 x i32]* %a, i64 0, i64 %tmp26 ; <i32*> [#uses=1]
+ %tmp27 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp26 ; <i32*> [#uses=1]
store i32 1, i32* %tmp27, align 4
%tmp28 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp29 = sext i32 %tmp28 to i64 ; <i64> [#uses=1]
- %tmp30 = getelementptr [17 x i32]* %b, i64 0, i64 %tmp29 ; <i32*> [#uses=1]
+ %tmp30 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp29 ; <i32*> [#uses=1]
store i32 1, i32* %tmp30, align 4
%tmp31 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp32 = sext i32 %tmp31 to i64 ; <i64> [#uses=1]
- %tmp33 = getelementptr [15 x i32]* %c, i64 0, i64 %tmp32 ; <i32*> [#uses=1]
+ %tmp33 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp32 ; <i32*> [#uses=1]
store i32 1, i32* %tmp33, align 4
br label %bb7.i.backedge
entry:
; CHECK-LABEL: Function: foo1
%0 = load float* %c, align 4, !alias.scope !9
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !6
%1 = load float* %c, align 4, !alias.scope !5
- %arrayidx.i2 = getelementptr inbounds float* %a, i64 15
+ %arrayidx.i2 = getelementptr inbounds float, float* %a, i64 15
store float %1, float* %arrayidx.i2, align 4, !noalias !6
%2 = load float* %c, align 4, !alias.scope !6
- %arrayidx.i3 = getelementptr inbounds float* %a, i64 16
+ %arrayidx.i3 = getelementptr inbounds float, float* %a, i64 16
store float %2, float* %arrayidx.i3, align 4, !noalias !5
ret void
entry:
; CHECK-LABEL: Function: foo1
%0 = load float* %c, align 4, !alias.scope !1
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !1
%1 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
entry:
; CHECK-LABEL: Function: foo2
%0 = load float* %c, align 4, !alias.scope !0
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !alias.scope !5, !noalias !4
- %arrayidx1.i = getelementptr inbounds float* %b, i64 8
+ %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !5
%1 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
; CHECK: define void @vrlh(
; CHECK: for.end:
-; CHECK: %arrayidx31 = getelementptr inbounds %union.vector_t* %t, i64 0, i32 0, i64 1
+; CHECK: %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1
; CHECK: %tmp32 = load i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]]
define void @vrlh(%union.vector_t* %va, %union.vector_t* %vb, %union.vector_t* %vd) nounwind {
%sub = sub nsw i32 7, %i.01
%idxprom = sext i32 %sub to i64
%half = bitcast %union.vector_t* %vb to [8 x i16]*
- %arrayidx = getelementptr inbounds [8 x i16]* %half, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [8 x i16], [8 x i16]* %half, i64 0, i64 %idxprom
%tmp4 = load i16* %arrayidx, align 2, !tbaa !0
%conv = zext i16 %tmp4 to i32
%and = and i32 %conv, 15
%sub6 = sub nsw i32 7, %i.01
%idxprom7 = sext i32 %sub6 to i64
%half9 = bitcast %union.vector_t* %va to [8 x i16]*
- %arrayidx10 = getelementptr inbounds [8 x i16]* %half9, i64 0, i64 %idxprom7
+ %arrayidx10 = getelementptr inbounds [8 x i16], [8 x i16]* %half9, i64 0, i64 %idxprom7
%tmp11 = load i16* %arrayidx10, align 2, !tbaa !0
%conv12 = zext i16 %tmp11 to i32
%shl = shl i32 %conv12, %and
%sub15 = sub nsw i32 7, %i.01
%idxprom16 = sext i32 %sub15 to i64
%half18 = bitcast %union.vector_t* %va to [8 x i16]*
- %arrayidx19 = getelementptr inbounds [8 x i16]* %half18, i64 0, i64 %idxprom16
+ %arrayidx19 = getelementptr inbounds [8 x i16], [8 x i16]* %half18, i64 0, i64 %idxprom16
%tmp20 = load i16* %arrayidx19, align 2, !tbaa !0
%conv21 = zext i16 %tmp20 to i32
%sub23 = sub nsw i32 16, %and
%sub26 = sub nsw i32 7, %i.01
%idxprom27 = sext i32 %sub26 to i64
%half28 = bitcast %union.vector_t* %t to [8 x i16]*
- %arrayidx29 = getelementptr inbounds [8 x i16]* %half28, i64 0, i64 %idxprom27
+ %arrayidx29 = getelementptr inbounds [8 x i16], [8 x i16]* %half28, i64 0, i64 %idxprom27
store i16 %conv24, i16* %arrayidx29, align 2, !tbaa !0
%inc = add nsw i32 %i.01, 1
%cmp = icmp slt i32 %inc, 8
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %arrayidx31 = getelementptr inbounds %union.vector_t* %t, i64 0, i32 0, i64 1
+ %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1
%tmp32 = load i64* %arrayidx31, align 8, !tbaa !3
- %arrayidx35 = getelementptr inbounds %union.vector_t* %vd, i64 0, i32 0, i64 1
+ %arrayidx35 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 1
store i64 %tmp32, i64* %arrayidx35, align 8, !tbaa !3
- %arrayidx37 = getelementptr inbounds %union.vector_t* %t, i64 0, i32 0, i64 0
+ %arrayidx37 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 0
%tmp38 = load i64* %arrayidx37, align 8, !tbaa !3
- %arrayidx41 = getelementptr inbounds %union.vector_t* %vd, i64 0, i32 0, i64 0
+ %arrayidx41 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 0
store i64 %tmp38, i64* %arrayidx41, align 8, !tbaa !3
ret void
}
define i32 @test0(%struct.X* %a) nounwind {
entry:
- %i = getelementptr inbounds %struct.X* %a, i64 0, i32 0
+ %i = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0
store i32 0, i32* %i, align 4, !tbaa !4
br label %for.body
for.body: ; preds = %entry, %for.body
%i2.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %f = getelementptr inbounds %struct.X* %a, i64 %i2.01, i32 1
+ %f = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i2.01, i32 1
%tmp6 = load float* %f, align 4, !tbaa !5
%mul = fmul float %tmp6, 0x40019999A0000000
store float %mul, float* %f, align 4, !tbaa !5
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %i9 = getelementptr inbounds %struct.X* %a, i64 0, i32 0
+ %i9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0
%tmp10 = load i32* %i9, align 4, !tbaa !4
ret i32 %tmp10
}
define float @test1(%struct.X* %a) nounwind {
entry:
- %f = getelementptr inbounds %struct.X* %a, i64 0, i32 1
+ %f = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 1
store float 0x3FD3333340000000, float* %f, align 4, !tbaa !5
br label %for.body
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %i5 = getelementptr inbounds %struct.X* %a, i64 %i.01, i32 0
+ %i5 = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i.01, i32 0
%tmp6 = load i32* %i5, align 4, !tbaa !4
%mul = mul nsw i32 %tmp6, 3
store i32 %mul, i32* %i5, align 4, !tbaa !4
br i1 %cmp, label %for.body, label %for.end
for.end: ; preds = %for.body
- %f9 = getelementptr inbounds %struct.X* %a, i64 0, i32 1
+ %f9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 1
%tmp10 = load float* %f9, align 4, !tbaa !5
ret float %tmp10
}
for.body: ; preds = %entry, %for.body
%i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
%tmp3 = load double** @P, !tbaa !1
- %scevgep = getelementptr double* %tmp3, i64 %i.07
+ %scevgep = getelementptr double, double* %tmp3, i64 %i.07
%tmp4 = load double* %scevgep, !tbaa !2
%mul = fmul double %tmp4, 2.300000e+00
store double %mul, double* %scevgep, !tbaa !2
%0 = bitcast i8* %call to %struct.Foo*
store %struct.Foo* %0, %struct.Foo** %f, align 8, !tbaa !4
%1 = load %struct.Foo** %f, align 8, !tbaa !4
- %i = getelementptr inbounds %struct.Foo* %1, i32 0, i32 0
+ %i = getelementptr inbounds %struct.Foo, %struct.Foo* %1, i32 0, i32 0
store i64 1, i64* %i, align 8, !tbaa !6
store i32 0, i32* %i1, align 4, !tbaa !0
br label %for.cond
%7 = phi %struct.Bar* [ %6, %new.notnull ], [ null, %for.body ]
store %struct.Bar* %7, %struct.Bar** %b, align 8, !tbaa !4
%8 = load %struct.Bar** %b, align 8, !tbaa !4
- %p = getelementptr inbounds %struct.Bar* %8, i32 0, i32 0
+ %p = getelementptr inbounds %struct.Bar, %struct.Bar* %8, i32 0, i32 0
store i8* null, i8** %p, align 8, !tbaa !9
%9 = load %struct.Foo** %f, align 8, !tbaa !4
%10 = bitcast %struct.Foo* %9 to i8*
%13 = load i32* %i1, align 4, !tbaa !0
%conv = sext i32 %13 to i64
%14 = load %struct.Foo** %f, align 8, !tbaa !4
- %i5 = getelementptr inbounds %struct.Foo* %14, i32 0, i32 0
+ %i5 = getelementptr inbounds %struct.Foo, %struct.Foo* %14, i32 0, i32 0
store i64 %conv, i64* %i5, align 8, !tbaa !6
br label %for.inc
for.end:
%16 = load %struct.Foo** %f, align 8, !tbaa !4
- %i6 = getelementptr inbounds %struct.Foo* %16, i32 0, i32 0
+ %i6 = getelementptr inbounds %struct.Foo, %struct.Foo* %16, i32 0, i32 0
%17 = load i64* %i6, align 8, !tbaa !6
ret i64 %17
}
entry:
store i64 0, i64* %x, !tbaa !4
%0 = bitcast i64* %x to i8*
- %1 = getelementptr i8* %0, i64 1
+ %1 = getelementptr i8, i8* %0, i64 1
store i8 1, i8* %1, !tbaa !5
%tmp3 = load i64* %x, !tbaa !4
ret i64 %tmp3
%0 = load i32** %s.addr, align 8, !tbaa !0
store i32 1, i32* %0, align 4, !tbaa !6
%1 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %1, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 1
store i32 4, i32* %f32, align 4, !tbaa !8
%2 = load i32** %s.addr, align 8, !tbaa !0
%3 = load i32* %2, align 4, !tbaa !6
%0 = load i32** %s.addr, align 8, !tbaa !0
store i32 1, i32* %0, align 4, !tbaa !6
%1 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f16 = getelementptr inbounds %struct.StructA* %1, i32 0, i32 0
+ %f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !11
%2 = load i32** %s.addr, align 8, !tbaa !0
%3 = load i32* %2, align 4, !tbaa !6
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
%1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %1, i32 0, i32 1
- %f321 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 1
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !12
%2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
+ %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
%3 = load i32* %f322, align 4, !tbaa !8
ret i32 %3
}
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
%1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %1, i32 0, i32 1
- %f16 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 0
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
+ %f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !14
%2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
%3 = load i32* %f321, align 4, !tbaa !8
ret i32 %3
}
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
%1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructB* %1, i32 0, i32 2
+ %f321 = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 2
store i32 4, i32* %f321, align 4, !tbaa !15
%2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
+ %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
%3 = load i32* %f322, align 4, !tbaa !8
ret i32 %3
}
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
%1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %1, i32 0, i32 1
- %f32_2 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 3
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
+ %f32_2 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 3
store i32 4, i32* %f32_2, align 4, !tbaa !16
%2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
%3 = load i32* %f321, align 4, !tbaa !8
ret i32 %3
}
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
%1 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructS* %1, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !17
%2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
+ %f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
%3 = load i32* %f322, align 4, !tbaa !8
ret i32 %3
}
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructA* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
%1 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f16 = getelementptr inbounds %struct.StructS* %1, i32 0, i32 0
+ %f16 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !19
%2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructA* %2, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
%3 = load i32* %f321, align 4, !tbaa !8
ret i32 %3
}
store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructS* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !17
%1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructS2* %1, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !20
%2 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f322 = getelementptr inbounds %struct.StructS* %2, i32 0, i32 1
+ %f322 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1
%3 = load i32* %f322, align 4, !tbaa !17
ret i32 %3
}
store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f32 = getelementptr inbounds %struct.StructS* %0, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !17
%1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0
- %f16 = getelementptr inbounds %struct.StructS2* %1, i32 0, i32 0
+ %f16 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !22
%2 = load %struct.StructS** %S.addr, align 8, !tbaa !0
- %f321 = getelementptr inbounds %struct.StructS* %2, i32 0, i32 1
+ %f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1
%3 = load i32* %f321, align 4, !tbaa !17
ret i32 %3
}
store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructC** %C.addr, align 8, !tbaa !0
- %b = getelementptr inbounds %struct.StructC* %0, i32 0, i32 1
- %a = getelementptr inbounds %struct.StructB* %b, i32 0, i32 1
- %f32 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 1
+ %b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %b, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !23
%1 = load %struct.StructD** %D.addr, align 8, !tbaa !0
- %b1 = getelementptr inbounds %struct.StructD* %1, i32 0, i32 1
- %a2 = getelementptr inbounds %struct.StructB* %b1, i32 0, i32 1
- %f323 = getelementptr inbounds %struct.StructA* %a2, i32 0, i32 1
+ %b1 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1
+ %a2 = getelementptr inbounds %struct.StructB, %struct.StructB* %b1, i32 0, i32 1
+ %f323 = getelementptr inbounds %struct.StructA, %struct.StructA* %a2, i32 0, i32 1
store i32 4, i32* %f323, align 4, !tbaa !25
%2 = load %struct.StructC** %C.addr, align 8, !tbaa !0
- %b4 = getelementptr inbounds %struct.StructC* %2, i32 0, i32 1
- %a5 = getelementptr inbounds %struct.StructB* %b4, i32 0, i32 1
- %f326 = getelementptr inbounds %struct.StructA* %a5, i32 0, i32 1
+ %b4 = getelementptr inbounds %struct.StructC, %struct.StructC* %2, i32 0, i32 1
+ %a5 = getelementptr inbounds %struct.StructB, %struct.StructB* %b4, i32 0, i32 1
+ %f326 = getelementptr inbounds %struct.StructA, %struct.StructA* %a5, i32 0, i32 1
%3 = load i32* %f326, align 4, !tbaa !23
ret i32 %3
}
store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
%0 = load %struct.StructC** %C.addr, align 8, !tbaa !0
- %b = getelementptr inbounds %struct.StructC* %0, i32 0, i32 1
+ %b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1
store %struct.StructB* %b, %struct.StructB** %b1, align 8, !tbaa !0
%1 = load %struct.StructD** %D.addr, align 8, !tbaa !0
- %b3 = getelementptr inbounds %struct.StructD* %1, i32 0, i32 1
+ %b3 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1
store %struct.StructB* %b3, %struct.StructB** %b2, align 8, !tbaa !0
%2 = load %struct.StructB** %b1, align 8, !tbaa !0
- %a = getelementptr inbounds %struct.StructB* %2, i32 0, i32 1
- %f32 = getelementptr inbounds %struct.StructA* %a, i32 0, i32 1
+ %a = getelementptr inbounds %struct.StructB, %struct.StructB* %2, i32 0, i32 1
+ %f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !12
%3 = load %struct.StructB** %b2, align 8, !tbaa !0
- %a4 = getelementptr inbounds %struct.StructB* %3, i32 0, i32 1
- %f325 = getelementptr inbounds %struct.StructA* %a4, i32 0, i32 1
+ %a4 = getelementptr inbounds %struct.StructB, %struct.StructB* %3, i32 0, i32 1
+ %f325 = getelementptr inbounds %struct.StructA, %struct.StructA* %a4, i32 0, i32 1
store i32 4, i32* %f325, align 4, !tbaa !12
%4 = load %struct.StructB** %b1, align 8, !tbaa !0
- %a6 = getelementptr inbounds %struct.StructB* %4, i32 0, i32 1
- %f327 = getelementptr inbounds %struct.StructA* %a6, i32 0, i32 1
+ %a6 = getelementptr inbounds %struct.StructB, %struct.StructB* %4, i32 0, i32 1
+ %f327 = getelementptr inbounds %struct.StructA, %struct.StructA* %a6, i32 0, i32 1
%5 = load i32* %f327, align 4, !tbaa !12
ret i32 %5
}
; CHECK: %relocate
; CHECK-NOT: %nparam
entry:
- %globalptr = getelementptr inbounds [6 x i8]* @globalstr, i32 0, i32 0
+ %globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
%load1 = load i8* %globalptr
%alloca = alloca i1
%load2 = load i1* %alloca
%tok = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
%relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %tok, i32 4, i32 4)
%load4 = load i32 addrspace(1)* %relocate
- %nparam = getelementptr i32 addrspace(1)* %dparam, i32 5
+ %nparam = getelementptr i32, i32 addrspace(1)* %dparam, i32 5
%load5 = load i32 addrspace(1)* %nparam
ret void
}
@search = external global %CHESS_POSITION ; <%CHESS_POSITION*> [#uses=2]
define void @Evaluate() {
- %reg1321 = getelementptr %CHESS_POSITION* @search, i64 0, i32 1 ; <i32*> [#uses=1]
+ %reg1321 = getelementptr %CHESS_POSITION, %CHESS_POSITION* @search, i64 0, i32 1 ; <i32*> [#uses=1]
%reg114 = load i32* %reg1321 ; <i32> [#uses=0]
- %reg1801 = getelementptr %CHESS_POSITION* @search, i64 0, i32 0 ; <i32*> [#uses=1]
+ %reg1801 = getelementptr %CHESS_POSITION, %CHESS_POSITION* @search, i64 0, i32 0 ; <i32*> [#uses=1]
%reg182 = load i32* %reg1801 ; <i32> [#uses=0]
ret void
}
; RUN: verify-uselistorder %s
define i32* @t1({ float, i32 }* %X) {
- %W = getelementptr { float, i32 }* %X, i32 20, i32 1 ; <i32*> [#uses=0]
- %X.upgrd.1 = getelementptr { float, i32 }* %X, i64 20, i32 1 ; <i32*> [#uses=0]
- %Y = getelementptr { float, i32 }* %X, i64 20, i32 1 ; <i32*> [#uses=1]
- %Z = getelementptr { float, i32 }* %X, i64 20, i32 1 ; <i32*> [#uses=0]
+ %W = getelementptr { float, i32 }, { float, i32 }* %X, i32 20, i32 1 ; <i32*> [#uses=0]
+ %X.upgrd.1 = getelementptr { float, i32 }, { float, i32 }* %X, i64 20, i32 1 ; <i32*> [#uses=0]
+ %Y = getelementptr { float, i32 }, { float, i32 }* %X, i64 20, i32 1 ; <i32*> [#uses=1]
+ %Z = getelementptr { float, i32 }, { float, i32 }* %X, i64 20, i32 1 ; <i32*> [#uses=0]
ret i32* %Y
}
ret void
loop: ; preds = %loop
- %tmp.4.i9 = getelementptr i32* null, i32 %tmp.5.i10 ; <i32*> [#uses=1]
+ %tmp.4.i9 = getelementptr i32, i32* null, i32 %tmp.5.i10 ; <i32*> [#uses=1]
%tmp.5.i10 = load i32* %tmp.4.i9 ; <i32> [#uses=1]
br label %loop
}
define i32 @main(i32 %argc, i8** %argv) {
entry:
- %tmp65 = getelementptr i8** %argv, i32 1 ; <i8**> [#uses=1]
+ %tmp65 = getelementptr i8*, i8** %argv, i32 1 ; <i8**> [#uses=1]
%tmp66 = load i8** %tmp65 ; <i8*> [#uses=0]
br i1 icmp ne (i32 sub (i32 ptrtoint (i8* getelementptr ([4 x i8]* @str, i32 0, i64 1) to i32), i32 ptrtoint ([4 x i8]* @str to i32)), i32 1), label %exit_1, label %exit_2
}
define i64* @gep_nw(i64* %p, i64 %x) {
-; CHECK: %z = getelementptr inbounds i64* %p, i64 %x
- %z = getelementptr inbounds i64* %p, i64 %x
+; CHECK: %z = getelementptr inbounds i64, i64* %p, i64 %x
+ %z = getelementptr inbounds i64, i64* %p, i64 %x
ret i64* %z
}
define i64* @gep_plain(i64* %p, i64 %x) {
-; CHECK: %z = getelementptr i64* %p, i64 %x
- %z = getelementptr i64* %p, i64 %x
+; CHECK: %z = getelementptr i64, i64* %p, i64 %x
+ %z = getelementptr i64, i64* %p, i64 %x
ret i64* %z
}
; See if i92 indices work too.
define i32 *@test({i32, i32}* %t, i92 %n) {
; CHECK: @test
-; CHECK: %B = getelementptr { i32, i32 }* %t, i92 %n, i32 0
- %B = getelementptr {i32, i32}* %t, i92 %n, i32 0
+; CHECK: %B = getelementptr { i32, i32 }, { i32, i32 }* %t, i92 %n, i32 0
+ %B = getelementptr {i32, i32}, {i32, i32}* %t, i92 %n, i32 0
ret i32* %B
}
; Verify that struct GEP works with a vector of pointers.
define <2 x i32*> @test7(<2 x {i32, i32}*> %a) {
- %w = getelementptr <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
+ %w = getelementptr {i32, i32}, <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
ret <2 x i32*> %w
}
; Verify that array GEP works with a vector of pointers.
define <2 x i8*> @test8(<2 x [2 x i8]*> %a) {
- %w = getelementptr <2 x [2 x i8]*> %a, <2 x i32> <i32 0, i32 0>, <2 x i8> <i8 0, i8 1>
+ %w = getelementptr [2 x i8], <2 x [2 x i8]*> %a, <2 x i32> <i32 0, i32 0>, <2 x i8> <i8 0, i8 1>
ret <2 x i8*> %w
}
define i32* @foo(%ST* %s) {
entry:
- %reg = getelementptr %ST* %s, i32 1, i64 2, i32 1, i32 5, i32 13
+ %reg = getelementptr %ST, %ST* %s, i32 1, i64 2, i32 1, i32 5, i32 13
ret i32* %reg
}
; CHECK: getelementptr index type missmatch
define i32 @test(i32* %a) {
- %w = getelementptr i32* %a, <2 x i32> <i32 5, i32 9>
+ %w = getelementptr i32, i32* %a, <2 x i32> <i32 5, i32 9>
ret i32 %w
}
; CHECK: getelementptr index type missmatch
define <2 x i32> @test(<2 x i32*> %a) {
- %w = getelementptr <2 x i32*> %a, i32 2
+ %w = getelementptr i32, <2 x i32*> %a, i32 2
ret <2 x i32> %w
}
; CHECK: getelementptr index type missmatch
define <4 x i32> @test(<4 x i32>* %a) {
- %w = getelementptr <4 x i32>* %a, <2 x i32> <i32 5, i32 9>
+ %w = getelementptr <4 x i32>, <4 x i32>* %a, <2 x i32> <i32 5, i32 9>
ret i32 %w
}
; CHECK: invalid getelementptr indices
define <2 x i32*> @test7(<2 x {i32, i32}*> %a) {
- %w = getelementptr <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> <i32 0, i32 1>
+ %w = getelementptr {i32, i32}, <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> <i32 0, i32 1>
ret <2 x i32*> %w
}
--- /dev/null
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: <stdin>:4:22: error: explicit pointee type doesn't match operand's pointee type
+define void @test(i32* %t) {
+ %x = getelementptr i16, i32* %t, i32 0
+ ret void
+}
--- /dev/null
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: <stdin>:4:27: error: expected comma after getelementptr's type
+define void @test(i32* %t) {
+ %x = getelementptr i32* %t, i32 0
+ ret void
+}
+
inttoptr i8 1 to i8*
; CHECK-NEXT: bitcast i32 1 to <2 x i16>
bitcast i32 1 to <2 x i16>
- ; CHECK-NEXT: getelementptr i32* @X, i32 0
- getelementptr i32* @X, i32 0
- ; CHECK-NEXT: getelementptr inbounds i32* @X, i32 0
- getelementptr inbounds i32* @X, i32 0
+ ; CHECK-NEXT: getelementptr i32, i32* @X, i32 0
+ getelementptr i32, i32* @X, i32 0
+ ; CHECK-NEXT: getelementptr inbounds i32, i32* @X, i32 0
+ getelementptr inbounds i32, i32* @X, i32 0
; CHECK: select i1 true, i32 1, i32 0
select i1 true ,i32 1, i32 0
; CHECK-NEXT: icmp eq i32 1, 0
; CHECK: INST_RET {{.*}}op0=1
define i1 @test_load(i32 %a, {i32, i32}* %ptr) nounwind {
entry:
- %0 = getelementptr inbounds {i32, i32}* %ptr, i32 %a, i32 0
+ %0 = getelementptr inbounds {i32, i32}, {i32, i32}* %ptr, i32 %a, i32 0
%1 = load i32* %0
%2 = icmp eq i32 %1, %a
ret i1 %2
define void @getelementptr({i8, i8}* %s, <4 x i8*> %ptrs, <4 x i64> %offsets ){
entry:
-; CHECK: %res1 = getelementptr { i8, i8 }* %s, i32 1, i32 1
- %res1 = getelementptr {i8, i8}* %s, i32 1, i32 1
+; CHECK: %res1 = getelementptr { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+ %res1 = getelementptr {i8, i8}, {i8, i8}* %s, i32 1, i32 1
-; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }* %s, i32 1, i32 1
- %res2 = getelementptr inbounds {i8, i8}* %s, i32 1, i32 1
+; CHECK-NEXT: %res2 = getelementptr inbounds { i8, i8 }, { i8, i8 }* %s, i32 1, i32 1
+ %res2 = getelementptr inbounds {i8, i8}, {i8, i8}* %s, i32 1, i32 1
-; CHECK-NEXT: %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
- %res3 = getelementptr <4 x i8*> %ptrs, <4 x i64> %offsets
+; CHECK-NEXT: %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
+ %res3 = getelementptr i8, <4 x i8*> %ptrs, <4 x i64> %offsets
ret void
}
entry:
%0 = bitcast i128* %ptr to fp128*
%1 = load fp128* %0, align 16
- %add.ptr = getelementptr inbounds i128* %ptr, i64 1
+ %add.ptr = getelementptr inbounds i128, i128* %ptr, i64 1
%2 = bitcast i128* %add.ptr to fp128*
store fp128 %1, fp128* %2, align 16
ret void
%mul = fmul fast double %1, %0
%2 = load double* %y, align 8
%mul7 = fmul fast double %2, %0
- %arrayidx.1 = getelementptr inbounds double* %c, i64 1
+ %arrayidx.1 = getelementptr inbounds double, double* %c, i64 1
%3 = load double* %arrayidx.1, align 8
- %arrayidx2.1 = getelementptr inbounds double* %x, i64 1
+ %arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1
%4 = load double* %arrayidx2.1, align 8
%mul.1 = fmul fast double %4, %3
%add.1 = fadd fast double %mul.1, %mul
- %arrayidx6.1 = getelementptr inbounds double* %y, i64 1
+ %arrayidx6.1 = getelementptr inbounds double, double* %y, i64 1
%5 = load double* %arrayidx6.1, align 8
%mul7.1 = fmul fast double %5, %3
%add8.1 = fadd fast double %mul7.1, %mul7
- %arrayidx.2 = getelementptr inbounds double* %c, i64 2
+ %arrayidx.2 = getelementptr inbounds double, double* %c, i64 2
%6 = load double* %arrayidx.2, align 8
- %arrayidx2.2 = getelementptr inbounds double* %x, i64 2
+ %arrayidx2.2 = getelementptr inbounds double, double* %x, i64 2
%7 = load double* %arrayidx2.2, align 8
%mul.2 = fmul fast double %7, %6
%add.2 = fadd fast double %mul.2, %add.1
- %arrayidx6.2 = getelementptr inbounds double* %y, i64 2
+ %arrayidx6.2 = getelementptr inbounds double, double* %y, i64 2
%8 = load double* %arrayidx6.2, align 8
%mul7.2 = fmul fast double %8, %6
%add8.2 = fadd fast double %mul7.2, %add8.1
- %arrayidx.3 = getelementptr inbounds double* %c, i64 3
+ %arrayidx.3 = getelementptr inbounds double, double* %c, i64 3
%9 = load double* %arrayidx.3, align 8
- %arrayidx2.3 = getelementptr inbounds double* %x, i64 3
+ %arrayidx2.3 = getelementptr inbounds double, double* %x, i64 3
%10 = load double* %arrayidx2.3, align 8
%mul.3 = fmul fast double %10, %9
%add.3 = fadd fast double %mul.3, %add.2
- %arrayidx6.3 = getelementptr inbounds double* %y, i64 3
+ %arrayidx6.3 = getelementptr inbounds double, double* %y, i64 3
%11 = load double* %arrayidx6.3, align 8
%mul7.3 = fmul fast double %11, %9
%add8.3 = fadd fast double %mul7.3, %add8.2
- %arrayidx.4 = getelementptr inbounds double* %c, i64 4
+ %arrayidx.4 = getelementptr inbounds double, double* %c, i64 4
%12 = load double* %arrayidx.4, align 8
- %arrayidx2.4 = getelementptr inbounds double* %x, i64 4
+ %arrayidx2.4 = getelementptr inbounds double, double* %x, i64 4
%13 = load double* %arrayidx2.4, align 8
%mul.4 = fmul fast double %13, %12
%add.4 = fadd fast double %mul.4, %add.3
- %arrayidx6.4 = getelementptr inbounds double* %y, i64 4
+ %arrayidx6.4 = getelementptr inbounds double, double* %y, i64 4
%14 = load double* %arrayidx6.4, align 8
%mul7.4 = fmul fast double %14, %12
%add8.4 = fadd fast double %mul7.4, %add8.3
- %arrayidx.5 = getelementptr inbounds double* %c, i64 5
+ %arrayidx.5 = getelementptr inbounds double, double* %c, i64 5
%15 = load double* %arrayidx.5, align 8
- %arrayidx2.5 = getelementptr inbounds double* %x, i64 5
+ %arrayidx2.5 = getelementptr inbounds double, double* %x, i64 5
%16 = load double* %arrayidx2.5, align 8
%mul.5 = fmul fast double %16, %15
%add.5 = fadd fast double %mul.5, %add.4
- %arrayidx6.5 = getelementptr inbounds double* %y, i64 5
+ %arrayidx6.5 = getelementptr inbounds double, double* %y, i64 5
%17 = load double* %arrayidx6.5, align 8
%mul7.5 = fmul fast double %17, %15
%add8.5 = fadd fast double %mul7.5, %add8.4
- %arrayidx.6 = getelementptr inbounds double* %c, i64 6
+ %arrayidx.6 = getelementptr inbounds double, double* %c, i64 6
%18 = load double* %arrayidx.6, align 8
- %arrayidx2.6 = getelementptr inbounds double* %x, i64 6
+ %arrayidx2.6 = getelementptr inbounds double, double* %x, i64 6
%19 = load double* %arrayidx2.6, align 8
%mul.6 = fmul fast double %19, %18
%add.6 = fadd fast double %mul.6, %add.5
- %arrayidx6.6 = getelementptr inbounds double* %y, i64 6
+ %arrayidx6.6 = getelementptr inbounds double, double* %y, i64 6
%20 = load double* %arrayidx6.6, align 8
%mul7.6 = fmul fast double %20, %18
%add8.6 = fadd fast double %mul7.6, %add8.5
- %arrayidx.7 = getelementptr inbounds double* %c, i64 7
+ %arrayidx.7 = getelementptr inbounds double, double* %c, i64 7
%21 = load double* %arrayidx.7, align 8
- %arrayidx2.7 = getelementptr inbounds double* %x, i64 7
+ %arrayidx2.7 = getelementptr inbounds double, double* %x, i64 7
%22 = load double* %arrayidx2.7, align 8
%mul.7 = fmul fast double %22, %21
%add.7 = fadd fast double %mul.7, %add.6
- %arrayidx6.7 = getelementptr inbounds double* %y, i64 7
+ %arrayidx6.7 = getelementptr inbounds double, double* %y, i64 7
%23 = load double* %arrayidx6.7, align 8
%mul7.7 = fmul fast double %23, %21
%add8.7 = fadd fast double %mul7.7, %add8.6
%0 = load i32* %c, align 4
; CHECK-NOT: mov w{{[0-9]*}}, w0
%add = add nsw i32 %0, %acc
- %arrayidx1 = getelementptr inbounds i32* %c, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 1
%1 = load i32* %arrayidx1, align 4
%add2 = add nsw i32 %add, %1
ret i32 %add2
define void @test_csr(%pl* nocapture readnone %this, %rs* nocapture %r) align 2 {
;CHECK-NOT: stp {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %x.i = getelementptr inbounds %rs* %r, i64 0, i32 7, i32 0
- %y.i = getelementptr inbounds %rs* %r, i64 0, i32 7, i32 1
- %z.i = getelementptr inbounds %rs* %r, i64 0, i32 7, i32 2
- %x.i61 = getelementptr inbounds %rs* %r, i64 0, i32 8, i32 0
- %y.i62 = getelementptr inbounds %rs* %r, i64 0, i32 8, i32 1
- %z.i63 = getelementptr inbounds %rs* %r, i64 0, i32 8, i32 2
- %x.i58 = getelementptr inbounds %rs* %r, i64 0, i32 9, i32 0
- %y.i59 = getelementptr inbounds %rs* %r, i64 0, i32 9, i32 1
- %z.i60 = getelementptr inbounds %rs* %r, i64 0, i32 9, i32 2
- %na = getelementptr inbounds %rs* %r, i64 0, i32 0
+ %x.i = getelementptr inbounds %rs, %rs* %r, i64 0, i32 7, i32 0
+ %y.i = getelementptr inbounds %rs, %rs* %r, i64 0, i32 7, i32 1
+ %z.i = getelementptr inbounds %rs, %rs* %r, i64 0, i32 7, i32 2
+ %x.i61 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 8, i32 0
+ %y.i62 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 8, i32 1
+ %z.i63 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 8, i32 2
+ %x.i58 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 0
+ %y.i59 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 1
+ %z.i60 = getelementptr inbounds %rs, %rs* %r, i64 0, i32 9, i32 2
+ %na = getelementptr inbounds %rs, %rs* %r, i64 0, i32 0
%0 = bitcast double* %x.i to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 72, i32 8, i1 false)
%1 = load i32* %na, align 4
br i1 %cmp70, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
- %fn = getelementptr inbounds %rs* %r, i64 0, i32 4
+ %fn = getelementptr inbounds %rs, %rs* %r, i64 0, i32 4
%2 = load %v** %fn, align 8
- %fs = getelementptr inbounds %rs* %r, i64 0, i32 5
+ %fs = getelementptr inbounds %rs, %rs* %r, i64 0, i32 5
%3 = load %v** %fs, align 8
%4 = sext i32 %1 to i64
br label %for.body
%7 = phi <2 x double> [ zeroinitializer, %for.body.lr.ph ], [ %22, %for.body ]
%8 = phi <2 x double> [ zeroinitializer, %for.body.lr.ph ], [ %26, %for.body ]
%9 = phi <2 x double> [ zeroinitializer, %for.body.lr.ph ], [ %28, %for.body ]
- %x.i54 = getelementptr inbounds %v* %2, i64 %indvars.iv, i32 0
- %x1.i = getelementptr inbounds %v* %3, i64 %indvars.iv, i32 0
- %y.i56 = getelementptr inbounds %v* %2, i64 %indvars.iv, i32 1
+ %x.i54 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 0
+ %x1.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 0
+ %y.i56 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 1
%10 = bitcast double* %x.i54 to <2 x double>*
%11 = load <2 x double>* %10, align 8
- %y2.i = getelementptr inbounds %v* %3, i64 %indvars.iv, i32 1
+ %y2.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 1
%12 = bitcast double* %x1.i to <2 x double>*
%13 = load <2 x double>* %12, align 8
%14 = fadd fast <2 x double> %13, %11
- %z.i57 = getelementptr inbounds %v* %2, i64 %indvars.iv, i32 2
+ %z.i57 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 2
%15 = load double* %z.i57, align 8
- %z4.i = getelementptr inbounds %v* %3, i64 %indvars.iv, i32 2
+ %z4.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 2
%16 = load double* %z4.i, align 8
%add5.i = fadd fast double %16, %15
%17 = fadd fast <2 x double> %6, %11
%and = and i64 %conv, -8
%conv2 = trunc i64 %and to i32
%add.ptr.sum = add nsw i64 %and, -4
- %add.ptr3 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+ %add.ptr3 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%size4 = bitcast i8* %add.ptr3 to i32*
store i32 %conv2, i32* %size4, align 4
%add.ptr.sum9 = add nsw i64 %and, -4
- %add.ptr5 = getelementptr inbounds i8* %0, i64 %add.ptr.sum9
+ %add.ptr5 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum9
%size6 = bitcast i8* %add.ptr5 to i32*
store i32 %conv2, i32* %size6, align 4
ret i8* %0
br label %for.body, !dbg !39
for.body: ; preds = %for.body, %entry
- %arrayidx5 = getelementptr inbounds i32* null, i64 1, !dbg !43
+ %arrayidx5 = getelementptr inbounds i32, i32* null, i64 1, !dbg !43
%0 = load i32* null, align 4, !dbg !45, !tbaa !46
%s1 = sub nsw i32 0, %0, !dbg !50
%n1 = sext i32 %s1 to i64, !dbg !50
- %arrayidx21 = getelementptr inbounds i32* null, i64 3, !dbg !51
+ %arrayidx21 = getelementptr inbounds i32, i32* null, i64 3, !dbg !51
%add53 = add nsw i64 %n1, 0, !dbg !52
%add55 = add nsw i64 %n1, 0, !dbg !53
%mul63 = mul nsw i64 %add53, -20995, !dbg !54
define void @f1(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
%0 = load double* %p, align 8
- %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
%1 = load double* %arrayidx1, align 8
- %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
%2 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
%3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
%4 = load double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
%mul8 = fmul fast double %2, %3
%add9 = fadd fast double %mul8, %sub
store double %add9, double* %q, align 8
- %arrayidx11 = getelementptr inbounds double* %p, i64 5
+ %arrayidx11 = getelementptr inbounds double, double* %p, i64 5
%5 = load double* %arrayidx11, align 8
- %arrayidx12 = getelementptr inbounds double* %p, i64 6
+ %arrayidx12 = getelementptr inbounds double, double* %p, i64 6
%6 = load double* %arrayidx12, align 8
- %arrayidx13 = getelementptr inbounds double* %p, i64 7
+ %arrayidx13 = getelementptr inbounds double, double* %p, i64 7
%7 = load double* %arrayidx13, align 8
%mul15 = fmul fast double %6, %7
%mul16 = fmul fast double %0, %5
%add17 = fadd fast double %mul16, %mul15
%mul18 = fmul fast double %5, %6
%add19 = fadd fast double %mul18, %add17
- %arrayidx20 = getelementptr inbounds double* %q, i64 1
+ %arrayidx20 = getelementptr inbounds double, double* %q, i64 1
store double %add19, double* %arrayidx20, align 8
ret void
}
define void @f2(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
%0 = load double* %p, align 8
- %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
%1 = load double* %arrayidx1, align 8
- %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
%2 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
%3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
%4 = load double* %arrayidx4, align 8
- %arrayidx5 = getelementptr inbounds double* %p, i64 5
+ %arrayidx5 = getelementptr inbounds double, double* %p, i64 5
%5 = load double* %arrayidx5, align 8
- %arrayidx6 = getelementptr inbounds double* %p, i64 6
+ %arrayidx6 = getelementptr inbounds double, double* %p, i64 6
%6 = load double* %arrayidx6, align 8
- %arrayidx7 = getelementptr inbounds double* %p, i64 7
+ %arrayidx7 = getelementptr inbounds double, double* %p, i64 7
%7 = load double* %arrayidx7, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %7
%mul16 = fmul fast double %2, %3
%add17 = fadd fast double %mul16, %sub
store double %add17, double* %q, align 8
- %arrayidx19 = getelementptr inbounds double* %q, i64 1
+ %arrayidx19 = getelementptr inbounds double, double* %q, i64 1
store double %add15, double* %arrayidx19, align 8
ret void
}
define void @f3(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
%0 = load double* %p, align 8
- %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
%1 = load double* %arrayidx1, align 8
- %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
%2 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
%3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
%4 = load double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
define void @f4(float* nocapture readonly %p, float* nocapture %q) #0 {
entry:
%0 = load float* %p, align 4
- %arrayidx1 = getelementptr inbounds float* %p, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %p, i64 1
%1 = load float* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds float* %p, i64 2
+ %arrayidx2 = getelementptr inbounds float, float* %p, i64 2
%2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %p, i64 3
+ %arrayidx3 = getelementptr inbounds float, float* %p, i64 3
%3 = load float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %p, i64 4
+ %arrayidx4 = getelementptr inbounds float, float* %p, i64 4
%4 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %p, i64 5
+ %arrayidx5 = getelementptr inbounds float, float* %p, i64 5
%5 = load float* %arrayidx5, align 4
- %arrayidx6 = getelementptr inbounds float* %p, i64 6
+ %arrayidx6 = getelementptr inbounds float, float* %p, i64 6
%6 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %p, i64 7
+ %arrayidx7 = getelementptr inbounds float, float* %p, i64 7
%7 = load float* %arrayidx7, align 4
%mul = fmul fast float %0, %1
%add = fadd fast float %mul, %7
%mul16 = fmul fast float %2, %3
%add17 = fadd fast float %mul16, %sub
store float %add17, float* %q, align 4
- %arrayidx19 = getelementptr inbounds float* %q, i64 1
+ %arrayidx19 = getelementptr inbounds float, float* %q, i64 1
store float %add15, float* %arrayidx19, align 4
ret void
}
define void @f5(float* nocapture readonly %p, float* nocapture %q) #0 {
entry:
%0 = load float* %p, align 4
- %arrayidx1 = getelementptr inbounds float* %p, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %p, i64 1
%1 = load float* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds float* %p, i64 2
+ %arrayidx2 = getelementptr inbounds float, float* %p, i64 2
%2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %p, i64 3
+ %arrayidx3 = getelementptr inbounds float, float* %p, i64 3
%3 = load float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %p, i64 4
+ %arrayidx4 = getelementptr inbounds float, float* %p, i64 4
%4 = load float* %arrayidx4, align 4
%mul = fmul fast float %0, %1
%add = fadd fast float %mul, %4
define void @f6(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
%0 = load double* %p, align 8
- %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
%1 = load double* %arrayidx1, align 8
- %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
%2 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
%3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
%4 = load double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
define void @f7(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
%0 = load double* %p, align 8
- %arrayidx1 = getelementptr inbounds double* %p, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %p, i64 1
%1 = load double* %arrayidx1, align 8
- %arrayidx2 = getelementptr inbounds double* %p, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %p, i64 2
%2 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %p, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %p, i64 3
%3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %p, i64 4
+ %arrayidx4 = getelementptr inbounds double, double* %p, i64 4
%4 = load double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
invoke.cont291:
%idxprom.i.i.i605 = sext i32 %0 to i64
- %arrayidx.i.i.i607 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i605
+ %arrayidx.i.i.i607 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i605
%idxprom.i.i.i596 = sext i32 %0 to i64
- %arrayidx.i.i.i598 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i596
+ %arrayidx.i.i.i598 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i596
br label %if.end356
if.else313:
invoke.cont326:
%idxprom.i.i.i587 = sext i32 %0 to i64
- %arrayidx.i.i.i589 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i587
+ %arrayidx.i.i.i589 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i587
%sub329 = fsub fast double undef, undef
br label %invoke.cont334
invoke.cont342:
%idxprom.i.i.i578 = sext i32 %0 to i64
- %arrayidx.i.i.i580 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i578
+ %arrayidx.i.i.i580 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i578
br label %if.end356
invoke.cont353:
%idxprom.i.i.i572 = sext i32 %0 to i64
- %arrayidx.i.i.i574 = getelementptr inbounds double* undef, i64 %idxprom.i.i.i572
+ %arrayidx.i.i.i574 = getelementptr inbounds double, double* undef, i64 %idxprom.i.i.i572
br label %if.end356
if.end356:
; CHECK-NEXT: ret
%add = add nsw i32 %i, 1
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i32* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%add1 = add nsw i32 %i, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i32* %a, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %idxprom2
%1 = load i32* %arrayidx3, align 4
%add4 = add nsw i32 %1, %0
%idxprom5 = sext i32 %i to i64
- %arrayidx6 = getelementptr inbounds i32* %a, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i64 %idxprom5
store i32 %add4, i32* %arrayidx6, align 4
ret void
}
; Check that when two complex GEPs are used in two basic blocks, LLVM can
; elimilate the common subexpression for the second use.
define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
- %liberties = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
+ %liberties = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
%1 = load i32* %liberties, align 4
%cmp = icmp eq i32 %1, %lib
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %origin = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
+ %origin = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
%2 = load i32* %origin, align 4
store i32 %2, i32* %adj, align 4
br label %if.end
; CHECK-UseAA-LABEL: @test_GEP_CSE(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = bitcast [240 x %struct]* %string to i8*
; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8* [[PTR0]], i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23052
+; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, i8* [[PTR0]], i64 [[IDX]]
+; CHECK-UseAA: getelementptr i8, i8* [[PTR1]], i64 23052
; CHECK-UseAA: bitcast
; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23048
+; CHECK-UseAA: getelementptr i8, i8* [[PTR1]], i64 23048
; CHECK-UseAA: bitcast
%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
; calculation and code gen can generate a better addressing mode for the second
; use.
define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
- %1 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
+ %1 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
%2 = load i32* %1, align 4
- %3 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
+ %3 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
%4 = load i32* %3, align 4
%5 = icmp eq i32 %2, %4
br i1 %5, label %if.true, label %exit
; CHECK-UseAA-LABEL: test_GEP_across_BB(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
-; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 528
-; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: getelementptr i8, i8* [[PTR0]], i64 528
+; CHECK-UseAA: getelementptr i8, i8* [[PTR0]], i64 532
; CHECK-UseAA: if.true:
-; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* [[PTR0]], i64 532
; CHECK-UseAA: exit:
-; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 528
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* [[PTR0]], i64 528
%struct.S = type { float, double }
@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
entry:
%add = add nsw i32 %i, 5
%idxprom = sext i32 %add to i64
- %p = getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ %p = getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
ret double* %p
}
; CHECK-NoAA-LABEL: @test-struct_1(
; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, 88
; CHECK-UseAA-LABEL: @test-struct_1(
-; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 88
+; CHECK-UseAA: getelementptr i8, i8* %{{[a-zA-Z0-9]+}}, i64 88
%struct3 = type { i64, i32 }
%struct2 = type { %struct3, i32 }
define %struct2* @test-struct_2(%struct0* %ptr, i64 %idx) {
entry:
%arrayidx = add nsw i64 %idx, -2
- %ptr2 = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+ %ptr2 = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
ret %struct2* %ptr2
}
; CHECK-NoAA-LABEL: @test-struct_2(
; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, -40
; CHECK-UseAA-LABEL: @test-struct_2(
-; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 -40
+; CHECK-UseAA: getelementptr i8, i8* %{{[a-zA-Z0-9]+}}, i64 -40
; Test that when a index is added from two constant, SeparateConstOffsetFromGEP
; pass does not generate incorrect result.
define void @test_const_add([3 x i32]* %in) {
%inc = add nsw i32 2, 1
%idxprom = sext i32 %inc to i64
- %arrayidx = getelementptr [3 x i32]* %in, i64 %idxprom, i64 2
+ %arrayidx = getelementptr [3 x i32], [3 x i32]* %in, i64 %idxprom, i64 2
store i32 0, i32* %arrayidx, align 4
ret void
}
define void @new_position(i32 %pos) {
entry:
%idxprom = sext i32 %pos to i64
- %arrayidx = getelementptr inbounds [400 x i8]* @board, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [400 x i8], [400 x i8]* @board, i64 0, i64 %idxprom
%tmp = load i8* %arrayidx, align 1
%.off = add i8 %tmp, -1
%switch = icmp ult i8 %.off, 2
if.then: ; preds = %entry
%tmp1 = load i32* @next_string, align 4
- %arrayidx8 = getelementptr inbounds [400 x i32]* @string_number, i64 0, i64 %idxprom
+ %arrayidx8 = getelementptr inbounds [400 x i32], [400 x i32]* @string_number, i64 0, i64 %idxprom
store i32 %tmp1, i32* %arrayidx8, align 4
br label %if.end
; instruction that can handle that.
; CHECK: stur x0, [sp, #20]
%a = alloca [49 x i32], align 4
- %p32 = getelementptr inbounds [49 x i32]* %a, i64 0, i64 2
+ %p32 = getelementptr inbounds [49 x i32], [49 x i32]* %a, i64 0, i64 2
%p = bitcast i32* %p32 to i64*
store i64 %val, i64* %p, align 8
ret void
; CHECK: ldp d{{[0-9]+}}, d{{[0-9]+}}
%ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
%0 = bitcast %0* %self to i8*
- %add.ptr = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr10.0 = bitcast i8* %add.ptr to double*
%tmp11 = load double* %add.ptr10.0, align 8
%add.ptr.sum = add i64 %ivar, 8
- %add.ptr10.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+ %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%1 = bitcast i8* %add.ptr10.1 to double*
%tmp12 = load double* %1, align 8
%add.ptr.sum17 = add i64 %ivar, 16
- %add.ptr4.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum17
+ %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17
%add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
%tmp = load double* %add.ptr4.1.0, align 8
%add.ptr4.1.sum = add i64 %ivar, 24
- %add.ptr4.1.1 = getelementptr inbounds i8* %0, i64 %add.ptr4.1.sum
+ %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum
%2 = bitcast i8* %add.ptr4.1.1 to double*
%tmp5 = load double* %2, align 8
%insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
unreachable
cond.false45: ; preds = %for.body14
- %add.ptr = getelementptr inbounds i8* %path, i64 %conv30
+ %add.ptr = getelementptr inbounds i8, i8* %path, i64 %conv30
unreachable
if.end56: ; preds = %for.cond10, %entry
%0 = va_arg i8** %args, i32
store i32 %0, i32* %vc, align 4
%ap.cur = load i8** %args
- %1 = getelementptr i8* %ap.cur, i32 15
+ %1 = getelementptr i8, i8* %ap.cur, i32 15
%2 = ptrtoint i8* %1 to i64
%3 = and i64 %2, -16
%ap.align = inttoptr i64 %3 to i8*
- %ap.next = getelementptr i8* %ap.align, i32 16
+ %ap.next = getelementptr i8, i8* %ap.align, i32 16
store i8* %ap.next, i8** %args
%4 = bitcast i8* %ap.align to %struct.s41*
%5 = bitcast %struct.s41* %vs to i8*
; FAST: ldr w[[B:[0-9]+]], [x2]
; FAST: add w[[C:[0-9]+]], w[[A]], w0
; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
- %i1 = getelementptr inbounds %struct.s42* %s1, i64 0, i32 0
+ %i1 = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 0
%0 = load i32* %i1, align 4, !tbaa !0
- %i2 = getelementptr inbounds %struct.s42* %s2, i64 0, i32 0
+ %i2 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 0
%1 = load i32* %i2, align 4, !tbaa !0
- %s = getelementptr inbounds %struct.s42* %s1, i64 0, i32 1
+ %s = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 1
%2 = load i16* %s, align 2, !tbaa !3
%conv = sext i16 %2 to i32
- %s5 = getelementptr inbounds %struct.s42* %s2, i64 0, i32 1
+ %s5 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 1
%3 = load i16* %s5, align 2, !tbaa !3
%conv6 = sext i16 %3 to i32
%add = add i32 %0, %i
; FAST: ldr w[[B:[0-9]+]], [x2]
; FAST: add w[[C:[0-9]+]], w[[A]], w0
; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
- %i1 = getelementptr inbounds %struct.s43* %s1, i64 0, i32 0
+ %i1 = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 0
%0 = load i32* %i1, align 4, !tbaa !0
- %i2 = getelementptr inbounds %struct.s43* %s2, i64 0, i32 0
+ %i2 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 0
%1 = load i32* %i2, align 4, !tbaa !0
- %s = getelementptr inbounds %struct.s43* %s1, i64 0, i32 1
+ %s = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 1
%2 = load i16* %s, align 2, !tbaa !3
%conv = sext i16 %2 to i32
- %s5 = getelementptr inbounds %struct.s43* %s2, i64 0, i32 1
+ %s5 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 1
%3 = load i16* %s5, align 2, !tbaa !3
%conv6 = sext i16 %3 to i32
%add = add i32 %0, %i
entry:
%idxprom = sext i32 %i1 to i64
%0 = load i8** @block, align 8
- %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
+ %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
%1 = load i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
- %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
%2 = load i8* %arrayidx2, align 1
%cmp = icmp eq i8 %1, %2
br i1 %cmp, label %if.end, label %if.then
%inc = add nsw i32 %i1, 1
%inc9 = add nsw i32 %i2, 1
%idxprom10 = sext i32 %inc to i64
- %arrayidx11 = getelementptr inbounds i8* %0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10
%3 = load i8* %arrayidx11, align 1
%idxprom12 = sext i32 %inc9 to i64
- %arrayidx13 = getelementptr inbounds i8* %0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12
%4 = load i8* %arrayidx13, align 1
%cmp16 = icmp eq i8 %3, %4
br i1 %cmp16, label %if.end23, label %if.then18
%inc24 = add nsw i32 %i1, 2
%inc25 = add nsw i32 %i2, 2
%idxprom26 = sext i32 %inc24 to i64
- %arrayidx27 = getelementptr inbounds i8* %0, i64 %idxprom26
+ %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26
%5 = load i8* %arrayidx27, align 1
%idxprom28 = sext i32 %inc25 to i64
- %arrayidx29 = getelementptr inbounds i8* %0, i64 %idxprom28
+ %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28
%6 = load i8* %arrayidx29, align 1
%cmp32 = icmp eq i8 %5, %6
br i1 %cmp32, label %return, label %if.then34
entry:
%idxprom = sext i32 %i1 to i64
%0 = load i8** @block, align 8
- %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
+ %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
%1 = load i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
- %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
%2 = load i8* %arrayidx2, align 1
%cmp = icmp eq i8 %1, %2
br i1 %cmp, label %if.end, label %if.then
%inc = add nsw i32 %i1, 1
%inc9 = add nsw i32 %i2, 1
%idxprom10 = sext i32 %inc to i64
- %arrayidx11 = getelementptr inbounds i8* %0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10
%3 = load i8* %arrayidx11, align 1
%idxprom12 = sext i32 %inc9 to i64
- %arrayidx13 = getelementptr inbounds i8* %0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12
%4 = load i8* %arrayidx13, align 1
%cmp16 = icmp eq i8 %3, %4
br i1 %cmp16, label %if.end23, label %if.then18
%inc24 = add nsw i32 %i1, 2
%inc25 = add nsw i32 %i2, 2
%idxprom26 = sext i32 %inc24 to i64
- %arrayidx27 = getelementptr inbounds i8* %0, i64 %idxprom26
+ %arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26
%5 = load i8* %arrayidx27, align 1
%idxprom28 = sext i32 %inc25 to i64
- %arrayidx29 = getelementptr inbounds i8* %0, i64 %idxprom28
+ %arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28
%6 = load i8* %arrayidx29, align 1
%cmp32 = icmp eq i8 %5, %6
br i1 %cmp32, label %return, label %if.then34
if.then: ; preds = %entry
%idxprom = zext i8 %c to i64
- %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom
%0 = load volatile i32* %arrayidx, align 4
%1 = load volatile i32* %arrayidx, align 4
%add3 = add nsw i32 %1, %0
if.then: ; preds = %entry
%idxprom = zext i8 %c to i64
- %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom
%0 = load volatile i32* %arrayidx, align 4
%1 = load volatile i32* %arrayidx, align 4
%add3 = add nsw i32 %1, %0
entry:
%idxprom = sext i32 %i1 to i64
%tmp = load i8** @block, align 8
- %arrayidx = getelementptr inbounds i8* %tmp, i64 %idxprom
+ %arrayidx = getelementptr inbounds i8, i8* %tmp, i64 %idxprom
%tmp1 = load i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
- %arrayidx2 = getelementptr inbounds i8* %tmp, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom1
%tmp2 = load i8* %arrayidx2, align 1
%cmp = icmp eq i8 %tmp1, %tmp2
br i1 %cmp, label %if.end, label %if.then
%inc = add nsw i32 %i1, 1
%inc10 = add nsw i32 %i2, 1
%idxprom11 = sext i32 %inc to i64
- %arrayidx12 = getelementptr inbounds i8* %tmp, i64 %idxprom11
+ %arrayidx12 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom11
%tmp3 = load i8* %arrayidx12, align 1
%idxprom13 = sext i32 %inc10 to i64
- %arrayidx14 = getelementptr inbounds i8* %tmp, i64 %idxprom13
+ %arrayidx14 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom13
%tmp4 = load i8* %arrayidx14, align 1
%cmp17 = icmp eq i8 %tmp3, %tmp4
br i1 %cmp17, label %if.end25, label %if.then19
%inc26 = add nsw i32 %i1, 2
%inc27 = add nsw i32 %i2, 2
%idxprom28 = sext i32 %inc26 to i64
- %arrayidx29 = getelementptr inbounds i8* %tmp, i64 %idxprom28
+ %arrayidx29 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom28
%tmp5 = load i8* %arrayidx29, align 1
%idxprom30 = sext i32 %inc27 to i64
- %arrayidx31 = getelementptr inbounds i8* %tmp, i64 %idxprom30
+ %arrayidx31 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom30
%tmp6 = load i8* %arrayidx31, align 1
%cmp34 = icmp eq i8 %tmp5, %tmp6
br i1 %cmp34, label %return, label %if.then36
; CHECK: ldr xzr, [x{{[0-9]+}}, #8]
; CHECK: ret
define void @t1() {
- %incdec.ptr = getelementptr inbounds i64* @object, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 1
%tmp = load volatile i64* %incdec.ptr, align 8
ret void
}
; CHECK: [[ADDREG]]]
; CHECK: ret
define void @t2() {
- %incdec.ptr = getelementptr inbounds i64* @object, i64 -33
+ %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 -33
%tmp = load volatile i64* %incdec.ptr, align 8
ret void
}
; CHECK: ldr xzr, [x{{[0-9]+}}, #32760]
; CHECK: ret
define void @t3() {
- %incdec.ptr = getelementptr inbounds i64* @object, i64 4095
+ %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4095
%tmp = load volatile i64* %incdec.ptr, align 8
ret void
}
; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
; CHECK: ret
define void @t4() {
- %incdec.ptr = getelementptr inbounds i64* @object, i64 4096
+ %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4096
%tmp = load volatile i64* %incdec.ptr, align 8
ret void
}
; CHECK: ldr xzr, [x{{[0-9]+}}, x{{[0-9]+}}, lsl #3]
; CHECK: ret
define void @t5(i64 %a) {
- %incdec.ptr = getelementptr inbounds i64* @object, i64 %a
+ %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
%tmp = load volatile i64* %incdec.ptr, align 8
ret void
}
; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
; CHECK: ret
define void @t6(i64 %a) {
- %tmp1 = getelementptr inbounds i64* @object, i64 %a
- %incdec.ptr = getelementptr inbounds i64* %tmp1, i64 4096
+ %tmp1 = getelementptr inbounds i64, i64* @object, i64 %a
+ %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
%tmp = load volatile i64* %incdec.ptr, align 8
ret void
}
define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_8:
- %ptr_unsigned = getelementptr i8* %p, i32 4095
+ %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
%val_unsigned = load atomic i8* %ptr_unsigned monotonic, align 1
; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]
- %ptr_regoff = getelementptr i8* %p, i32 %off32
+ %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
%val_regoff = load atomic i8* %ptr_regoff unordered, align 1
%tot1 = add i8 %val_unsigned, %val_regoff
; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]
- %ptr_unscaled = getelementptr i8* %p, i32 -256
+ %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
%val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1
%tot2 = add i8 %tot1, %val_unscaled
; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+ %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
%val_random = load atomic i8* %ptr_random unordered, align 1
%tot3 = add i8 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_16:
- %ptr_unsigned = getelementptr i16* %p, i32 4095
+ %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
%val_unsigned = load atomic i16* %ptr_unsigned monotonic, align 2
; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]
- %ptr_regoff = getelementptr i16* %p, i32 %off32
+ %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
%val_regoff = load atomic i16* %ptr_regoff unordered, align 2
%tot1 = add i16 %val_unsigned, %val_regoff
; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]
- %ptr_unscaled = getelementptr i16* %p, i32 -128
+ %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
%val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2
%tot2 = add i16 %tot1, %val_unscaled
; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+ %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
%val_random = load atomic i16* %ptr_random unordered, align 2
%tot3 = add i16 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_32:
- %ptr_unsigned = getelementptr i32* %p, i32 4095
+ %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
%val_unsigned = load atomic i32* %ptr_unsigned monotonic, align 4
; CHECK: ldr {{w[0-9]+}}, [x0, #16380]
- %ptr_regoff = getelementptr i32* %p, i32 %off32
+ %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
%val_regoff = load atomic i32* %ptr_regoff unordered, align 4
%tot1 = add i32 %val_unsigned, %val_regoff
; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]
- %ptr_unscaled = getelementptr i32* %p, i32 -64
+ %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
%val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4
%tot2 = add i32 %tot1, %val_unscaled
; CHECK: ldur {{w[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+ %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
%val_random = load atomic i32* %ptr_random unordered, align 4
%tot3 = add i32 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_64:
- %ptr_unsigned = getelementptr i64* %p, i32 4095
+ %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
%val_unsigned = load atomic i64* %ptr_unsigned monotonic, align 8
; CHECK: ldr {{x[0-9]+}}, [x0, #32760]
- %ptr_regoff = getelementptr i64* %p, i32 %off32
+ %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
%val_regoff = load atomic i64* %ptr_regoff unordered, align 8
%tot1 = add i64 %val_unsigned, %val_regoff
; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]
- %ptr_unscaled = getelementptr i64* %p, i32 -32
+ %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
%val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8
%tot2 = add i64 %tot1, %val_unscaled
; CHECK: ldur {{x[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+ %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
%val_random = load atomic i64* %ptr_random unordered, align 8
%tot3 = add i64 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
; CHECK-LABEL: atomic_store_relaxed_8:
- %ptr_unsigned = getelementptr i8* %p, i32 4095
+ %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
; CHECK: strb {{w[0-9]+}}, [x0, #4095]
- %ptr_regoff = getelementptr i8* %p, i32 %off32
+ %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
store atomic i8 %val, i8* %ptr_regoff unordered, align 1
; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]
- %ptr_unscaled = getelementptr i8* %p, i32 -256
+ %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
; CHECK: sturb {{w[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+ %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
store atomic i8 %val, i8* %ptr_random unordered, align 1
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]]
define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
; CHECK-LABEL: atomic_store_relaxed_16:
- %ptr_unsigned = getelementptr i16* %p, i32 4095
+ %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
; CHECK: strh {{w[0-9]+}}, [x0, #8190]
- %ptr_regoff = getelementptr i16* %p, i32 %off32
+ %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
store atomic i16 %val, i16* %ptr_regoff unordered, align 2
; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]
- %ptr_unscaled = getelementptr i16* %p, i32 -128
+ %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
; CHECK: sturh {{w[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+ %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
store atomic i16 %val, i16* %ptr_random unordered, align 2
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]]
define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
; CHECK-LABEL: atomic_store_relaxed_32:
- %ptr_unsigned = getelementptr i32* %p, i32 4095
+ %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
; CHECK: str {{w[0-9]+}}, [x0, #16380]
- %ptr_regoff = getelementptr i32* %p, i32 %off32
+ %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
store atomic i32 %val, i32* %ptr_regoff unordered, align 4
; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]
- %ptr_unscaled = getelementptr i32* %p, i32 -64
+ %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
; CHECK: stur {{w[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+ %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
store atomic i32 %val, i32* %ptr_random unordered, align 4
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: str {{w[0-9]+}}, [x[[ADDR]]]
define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
; CHECK-LABEL: atomic_store_relaxed_64:
- %ptr_unsigned = getelementptr i64* %p, i32 4095
+ %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
; CHECK: str {{x[0-9]+}}, [x0, #32760]
- %ptr_regoff = getelementptr i64* %p, i32 %off32
+ %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
store atomic i64 %val, i64* %ptr_regoff unordered, align 8
; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]
- %ptr_unscaled = getelementptr i64* %p, i32 -32
+ %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
; CHECK: stur {{x[0-9]+}}, [x0, #-256]
- %ptr_random = getelementptr i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+ %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
store atomic i64 %val, i64* %ptr_random unordered, align 8
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: str {{x[0-9]+}}, [x[[ADDR]]]
define { i64, i1 } @foo(i64* , %Sstruct* , i1, i64) {
entry:
%.sroa.0 = alloca i72, align 16
- %.count.value = getelementptr inbounds %Sstruct* %1, i64 0, i32 0, i32 0
+ %.count.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 0, i32 0
%4 = load i64* %.count.value, align 8
- %.repeatedValue.value = getelementptr inbounds %Sstruct* %1, i64 0, i32 1, i32 0
+ %.repeatedValue.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 1, i32 0
%5 = load i32* %.repeatedValue.value, align 8
%6 = icmp eq i64 %4, 0
br label %7
%vl = alloca %struct.__va_list, align 8
%vl1 = bitcast %struct.__va_list* %vl to i8*
call void @llvm.va_start(i8* %vl1)
- %vr_offs_p = getelementptr inbounds %struct.__va_list* %vl, i64 0, i32 4
+ %vr_offs_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 4
%vr_offs = load i32* %vr_offs_p, align 4
%0 = icmp sgt i32 %vr_offs, -1
br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
vaarg.in_reg: ; preds = %vaarg.maybe_reg
- %reg_top_p = getelementptr inbounds %struct.__va_list* %vl, i64 0, i32 2
+ %reg_top_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 2
%reg_top = load i8** %reg_top_p, align 8
%1 = sext i32 %vr_offs to i64
- %2 = getelementptr i8* %reg_top, i64 %1
+ %2 = getelementptr i8, i8* %reg_top, i64 %1
%3 = ptrtoint i8* %2 to i64
%align_be = add i64 %3, 8
%4 = inttoptr i64 %align_be to i8*
br label %vaarg.end
vaarg.on_stack: ; preds = %vaarg.maybe_reg, %entry
- %stack_p = getelementptr inbounds %struct.__va_list* %vl, i64 0, i32 0
+ %stack_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 0
%stack = load i8** %stack_p, align 8
- %new_stack = getelementptr i8* %stack, i64 8
+ %new_stack = getelementptr i8, i8* %stack, i64 8
store i8* %new_stack, i8** %stack_p, align 8
br label %vaarg.end
define void @foo() nounwind ssp {
entry:
%buffer = alloca [33554432 x i8], align 1
- %arraydecay = getelementptr inbounds [33554432 x i8]* %buffer, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [33554432 x i8], [33554432 x i8]* %buffer, i64 0, i64 0
call void @doit(i8* %arraydecay) nounwind
ret void
}
%tmp = bitcast %struct.X* %x to i32*
%tmp1 = load i32* %tmp, align 4
- %b = getelementptr inbounds %struct.Y* %y, i64 0, i32 1
+ %b = getelementptr inbounds %struct.Y, %struct.Y* %y, i64 0, i32 1
%bf.clear = lshr i32 %tmp1, 3
%bf.clear.lobit = and i32 %bf.clear, 1
%frombool = trunc i32 %bf.clear.lobit to i8
%tmp = bitcast %struct.Z* %x to i64*
%tmp1 = load i64* %tmp, align 4
- %b = getelementptr inbounds %struct.A* %y, i64 0, i32 0
+ %b = getelementptr inbounds %struct.A, %struct.A* %y, i64 0, i32 0
%bf.clear = lshr i64 %tmp1, 3
%bf.clear.lobit = and i64 %bf.clear, 1
store i64 %bf.clear.lobit, i64* %b, align 8
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %arrayidx3 = getelementptr inbounds [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift
+ %arrayidx3 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift
%0 = load i8* %arrayidx3, align 1
%conv = zext i8 %0 to i32
br label %return
; CHECK-NOT: and
; CHECK-NOT: ubfm
%idxprom10 = and i64 %x.sroa.3.0.extract.shift, 65535
- %arrayidx11 = getelementptr inbounds [65536 x i8]* @first_ones, i64 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom10
%1 = load i8* %arrayidx11, align 1
%conv12 = zext i8 %1 to i32
%add = add nsw i32 %conv12, 16
; CHECK-NOT: and
; CHECK-NOT: ubfm
%idxprom20 = and i64 %x.sroa.1.0.extract.shift, 65535
- %arrayidx21 = getelementptr inbounds [65536 x i8]* @first_ones, i64 0, i64 %idxprom20
+ %arrayidx21 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom20
%2 = load i8* %arrayidx21, align 1
%conv22 = zext i8 %2 to i32
%add23 = add nsw i32 %conv22, 32
entry:
%shr = lshr i64 %x, 4
%and = and i64 %shr, 15
- %arrayidx = getelementptr inbounds [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and
+ %arrayidx = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and
%0 = load i64* %arrayidx, align 8
ret i64 %0
}
entry:
%idxprom = sext i32 %i1 to i64
%0 = load i8** @block, align 8
- %arrayidx = getelementptr inbounds i8* %0, i64 %idxprom
+ %arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
%1 = load i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
- %arrayidx2 = getelementptr inbounds i8* %0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
%2 = load i8* %arrayidx2, align 1
%cmp = icmp eq i8 %1, %2
br i1 %cmp, label %return, label %if.then
%i.092 = phi i64 [ 1, %entry ], [ %inc53, %for.inc ]
%numLeft.091 = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ]
%2 = load i8** @mazeRoute, align 8, !tbaa !3
- %arrayidx = getelementptr inbounds i8* %2, i64 %i.092
+ %arrayidx = getelementptr inbounds i8, i8* %2, i64 %i.092
%3 = load i8* %arrayidx, align 1, !tbaa !1
%tobool = icmp eq i8 %3, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
%4 = load i64** @TOP, align 8, !tbaa !3
- %arrayidx1 = getelementptr inbounds i64* %4, i64 %i.092
+ %arrayidx1 = getelementptr inbounds i64, i64* %4, i64 %i.092
%5 = load i64* %arrayidx1, align 8, !tbaa !0
%6 = load i64** @netsAssign, align 8, !tbaa !3
- %arrayidx2 = getelementptr inbounds i64* %6, i64 %5
+ %arrayidx2 = getelementptr inbounds i64, i64* %6, i64 %5
%7 = load i64* %arrayidx2, align 8, !tbaa !0
%8 = load i64** @BOT, align 8, !tbaa !3
- %arrayidx3 = getelementptr inbounds i64* %8, i64 %i.092
+ %arrayidx3 = getelementptr inbounds i64, i64* %8, i64 %i.092
%9 = load i64* %arrayidx3, align 8, !tbaa !0
- %arrayidx4 = getelementptr inbounds i64* %6, i64 %9
+ %arrayidx4 = getelementptr inbounds i64, i64* %6, i64 %9
%10 = load i64* %arrayidx4, align 8, !tbaa !0
%cmp5 = icmp ugt i64 %i.092, 1
%cmp6 = icmp ugt i64 %10, 1
if.then9: ; preds = %land.lhs.true7
%12 = load i8** @mazeRoute, align 8, !tbaa !3
- %arrayidx10 = getelementptr inbounds i8* %12, i64 %i.092
+ %arrayidx10 = getelementptr inbounds i8, i8* %12, i64 %i.092
store i8 0, i8* %arrayidx10, align 1, !tbaa !1
%13 = load i64** @TOP, align 8, !tbaa !3
- %arrayidx11 = getelementptr inbounds i64* %13, i64 %i.092
+ %arrayidx11 = getelementptr inbounds i64, i64* %13, i64 %i.092
%14 = load i64* %arrayidx11, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %14)
%15 = load i64** @BOT, align 8, !tbaa !3
- %arrayidx12 = getelementptr inbounds i64* %15, i64 %i.092
+ %arrayidx12 = getelementptr inbounds i64, i64* %15, i64 %i.092
%16 = load i64* %arrayidx12, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %16)
br label %for.inc
if.then20: ; preds = %land.lhs.true16
%19 = load i8** @mazeRoute, align 8, !tbaa !3
- %arrayidx21 = getelementptr inbounds i8* %19, i64 %i.092
+ %arrayidx21 = getelementptr inbounds i8, i8* %19, i64 %i.092
store i8 0, i8* %arrayidx21, align 1, !tbaa !1
%20 = load i64** @TOP, align 8, !tbaa !3
- %arrayidx22 = getelementptr inbounds i64* %20, i64 %i.092
+ %arrayidx22 = getelementptr inbounds i64, i64* %20, i64 %i.092
%21 = load i64* %arrayidx22, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %21)
%22 = load i64** @BOT, align 8, !tbaa !3
- %arrayidx23 = getelementptr inbounds i64* %22, i64 %i.092
+ %arrayidx23 = getelementptr inbounds i64, i64* %22, i64 %i.092
%23 = load i64* %arrayidx23, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %23)
br label %for.inc
if.then32: ; preds = %land.lhs.true28
%25 = load i8** @mazeRoute, align 8, !tbaa !3
- %arrayidx33 = getelementptr inbounds i8* %25, i64 %i.092
+ %arrayidx33 = getelementptr inbounds i8, i8* %25, i64 %i.092
store i8 0, i8* %arrayidx33, align 1, !tbaa !1
%26 = load i64** @TOP, align 8, !tbaa !3
- %arrayidx34 = getelementptr inbounds i64* %26, i64 %i.092
+ %arrayidx34 = getelementptr inbounds i64, i64* %26, i64 %i.092
%27 = load i64* %arrayidx34, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %27)
%28 = load i64** @BOT, align 8, !tbaa !3
- %arrayidx35 = getelementptr inbounds i64* %28, i64 %i.092
+ %arrayidx35 = getelementptr inbounds i64, i64* %28, i64 %i.092
%29 = load i64* %arrayidx35, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %29)
br label %for.inc
if.then44: ; preds = %land.lhs.true40
%32 = load i8** @mazeRoute, align 8, !tbaa !3
- %arrayidx45 = getelementptr inbounds i8* %32, i64 %i.092
+ %arrayidx45 = getelementptr inbounds i8, i8* %32, i64 %i.092
store i8 0, i8* %arrayidx45, align 1, !tbaa !1
%33 = load i64** @TOP, align 8, !tbaa !3
- %arrayidx46 = getelementptr inbounds i64* %33, i64 %i.092
+ %arrayidx46 = getelementptr inbounds i64, i64* %33, i64 %i.092
%34 = load i64* %arrayidx46, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %34)
%35 = load i64** @BOT, align 8, !tbaa !3
- %arrayidx47 = getelementptr inbounds i64* %35, i64 %i.092
+ %arrayidx47 = getelementptr inbounds i64, i64* %35, i64 %i.092
%36 = load i64* %arrayidx47, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %36)
br label %for.inc
sw.bb.i.i:
%ref.tr.i.i = phi %str1* [ %0, %sw.bb.i.i ], [ undef, %entry ]
- %operands.i.i = getelementptr inbounds %str1* %ref.tr.i.i, i64 0, i32 0, i32 2
+ %operands.i.i = getelementptr inbounds %str1, %str1* %ref.tr.i.i, i64 0, i32 0, i32 2
%arrayidx.i.i = bitcast i32* %operands.i.i to %str1**
%0 = load %str1** %arrayidx.i.i, align 8
- %code1.i.i.phi.trans.insert = getelementptr inbounds %str1* %0, i64 0, i32 0, i32 0, i64 16
+ %code1.i.i.phi.trans.insert = getelementptr inbounds %str1, %str1* %0, i64 0, i32 0, i32 0, i64 16
br label %sw.bb.i.i
}
tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"()
%tmp2 = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x28}"()
- %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3
+ %pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3
%tmp3 = load %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8
%tobool.i269 = icmp eq %"class.H4ISP::H4ISPCameraManager"* %tmp3, null
br i1 %tobool.i269, label %if.then83, label %end
%src = alloca { double, double }, align 8
%dst = alloca { double, double }, align 8
- %src.realp = getelementptr inbounds { double, double }* %src, i32 0, i32 0
+ %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
%src.real = load double* %src.realp
- %src.imagp = getelementptr inbounds { double, double }* %src, i32 0, i32 1
+ %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
%src.imag = load double* %src.imagp
- %dst.realp = getelementptr inbounds { double, double }* %dst, i32 0, i32 0
- %dst.imagp = getelementptr inbounds { double, double }* %dst, i32 0, i32 1
+ %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
+ %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1
store double %src.real, double* %dst.realp
store double %src.imag, double* %dst.imagp
ret void
; CHECK-NEXT: ldp w9, w10, [x8, #4]
; CHECK: ldr w8, [x8, #12]
%at = inttoptr i64 68141056 to %T*
- %o1 = getelementptr %T* %at, i32 0, i32 1
+ %o1 = getelementptr %T, %T* %at, i32 0, i32 1
%t1 = load i32* %o1
- %o2 = getelementptr %T* %at, i32 0, i32 2
+ %o2 = getelementptr %T, %T* %at, i32 0, i32 2
%t2 = load i32* %o2
%a1 = add i32 %t1, %t2
- %o3 = getelementptr %T* %at, i32 0, i32 3
+ %o3 = getelementptr %T, %T* %at, i32 0, i32 3
%t3 = load i32* %o3
%a2 = add i32 %a1, %t3
ret i32 %a2
%s2 = sub nsw i32 %s, %size
%s3 = sub nsw i32 %sub, %s2
store i32 %s3, i32* %offset, align 4
- %add.ptr = getelementptr inbounds i8* %base, i32 %sub
+ %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
br label %return
return:
if.end:
%sub = sub nsw i32 %0, 1
store i32 %sub, i32* %offset, align 4
- %add.ptr = getelementptr inbounds i8* %base, i32 %sub
+ %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
br label %return
return:
; CHECK-NOT: str
define void @test(%"struct.SU"* nocapture %su) {
entry:
- %r1 = getelementptr inbounds %"struct.SU"* %su, i64 1, i32 5
+ %r1 = getelementptr inbounds %"struct.SU", %"struct.SU"* %su, i64 1, i32 5
%r2 = bitcast %"struct.BO"* %r1 to i48*
%r3 = load i48* %r2, align 8
%r4 = and i48 %r3, -4294967296
; CHECK: ret
define void @test(%class.Complex* nocapture %out, i64 %out_start) {
entry:
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
+ %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%0 = bitcast %class.Complex* %arrayidx to i64*
%1 = load i64* %0, align 4
%t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
%t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
%3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
%add = add i64 %out_start, 8
- %arrayidx2 = getelementptr inbounds %class.Complex* %out, i64 %add
- %i.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 0
+ %arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
%4 = load float* %i.i, align 4
%add.i = fadd float %4, %2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
- %r.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 1
+ %r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
%5 = load float* %r.i, align 4
%add5.i = fadd float %5, %3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
; CHECK: ret
define void @test_int(%class.Complex_int* nocapture %out, i64 %out_start) {
entry:
- %arrayidx = getelementptr inbounds %class.Complex_int* %out, i64 %out_start
+ %arrayidx = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %out_start
%0 = bitcast %class.Complex_int* %arrayidx to i64*
%1 = load i64* %0, align 4
%t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
%t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
%3 = bitcast i32 %t0.sroa.2.0.extract.trunc to i32
%add = add i64 %out_start, 8
- %arrayidx2 = getelementptr inbounds %class.Complex_int* %out, i64 %add
- %i.i = getelementptr inbounds %class.Complex_int* %arrayidx2, i64 0, i32 0
+ %arrayidx2 = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 0
%4 = load i32* %i.i, align 4
%add.i = add i32 %4, %2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x i32> undef, i32 %add.i, i32 0
- %r.i = getelementptr inbounds %class.Complex_int* %arrayidx2, i64 0, i32 1
+ %r.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 1
%5 = load i32* %r.i, align 4
%add5.i = add i32 %5, %3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x i32> %retval.sroa.0.0.vec.insert.i, i32 %add5.i, i32 1
; CHECK: ret
define void @test_long(%class.Complex_long* nocapture %out, i64 %out_start) {
entry:
- %arrayidx = getelementptr inbounds %class.Complex_long* %out, i64 %out_start
+ %arrayidx = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %out_start
%0 = bitcast %class.Complex_long* %arrayidx to i128*
%1 = load i128* %0, align 4
%t0.sroa.0.0.extract.trunc = trunc i128 %1 to i64
%t0.sroa.2.0.extract.trunc = trunc i128 %t0.sroa.2.0.extract.shift to i64
%3 = bitcast i64 %t0.sroa.2.0.extract.trunc to i64
%add = add i64 %out_start, 8
- %arrayidx2 = getelementptr inbounds %class.Complex_long* %out, i64 %add
- %i.i = getelementptr inbounds %class.Complex_long* %arrayidx2, i32 0, i32 0
+ %arrayidx2 = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 0
%4 = load i64* %i.i, align 4
%add.i = add i64 %4, %2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x i64> undef, i64 %add.i, i32 0
- %r.i = getelementptr inbounds %class.Complex_long* %arrayidx2, i32 0, i32 1
+ %r.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 1
%5 = load i64* %r.i, align 4
%add5.i = add i64 %5, %3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x i64> %retval.sroa.0.0.vec.insert.i, i64 %add5.i, i32 1
%min.0 = phi i32 [ 0, %entry ], [ %min.1, %do.cond ]
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
%p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
- %incdec.ptr = getelementptr inbounds i32* %p.addr.0, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
%0 = load i32* %p.addr.0, align 4
%cmp = icmp sgt i32 %0, %max.0
br i1 %cmp, label %do.cond, label %if.else
is_sbox.exit155: ; preds = %if.then.i146, %for.body
%seg_offset.0.i151 = phi i32 [ %add9.i145, %if.then.i146 ], [ undef, %for.body ]
%idxprom15.i152 = sext i32 %seg_offset.0.i151 to i64
- %arrayidx18.i154 = getelementptr inbounds i32* null, i64 %idxprom15.i152
+ %arrayidx18.i154 = getelementptr inbounds i32, i32* null, i64 %idxprom15.i152
%x1 = load i32* %arrayidx18.i154, align 4
br i1 undef, label %for.body51, label %for.body
@var_default = external global [2 x i32]
define i32 @test_default_align() {
- %addr = getelementptr [2 x i32]* @var_default, i32 0, i32 0
+ %addr = getelementptr [2 x i32], [2 x i32]* @var_default, i32 0, i32 0
%val = load i32* %addr
ret i32 %val
; CHECK-LABEL: test_default_align:
; CHECK: ldrsw x0, [x[[REG1]], w0, sxtw #2]
; CHECK: ret
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds [0 x i32]* @array, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @array, i64 0, i64 %idxprom
%tmp1 = load i32* %arrayidx, align 4
%conv = sext i32 %tmp1 to i64
ret i64 %conv
@arr_var = extern_weak global [10 x i32]
define i32* @bar() {
- %addr = getelementptr [10 x i32]* @arr_var, i32 0, i32 5
+ %addr = getelementptr [10 x i32], [10 x i32]* @arr_var, i32 0, i32 5
; CHECK: adrp x[[ARR_VAR_HI:[0-9]+]], :got:arr_var
; CHECK: ldr [[ARR_VAR:x[0-9]+]], [x[[ARR_VAR_HI]], :got_lo12:arr_var]
; CHECK: add x0, [[ARR_VAR]], #20
; CHECK: movk x[[REG]], #0x73ce, lsl #16
; CHECK: movk x[[REG]], #0x2ff2
%0 = load i8** @pd2, align 8
- %arrayidx = getelementptr inbounds i8* %0, i64 12345678901234
+ %arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234
%1 = load i8* %arrayidx, align 1
ret i8 %1
}
; CHECK: mov [[REG:x[0-9]+]], sp
; CHECK-NEXT: add x0, [[REG]], #8
%E = alloca %struct.S2Ty, align 4
- %B = getelementptr inbounds %struct.S2Ty* %E, i32 0, i32 1
+ %B = getelementptr inbounds %struct.S2Ty, %struct.S2Ty* %E, i32 0, i32 1
call void @takeS1(%struct.S1Ty* %B)
ret void
}
store i32 %target, i32* %target.addr, align 4
%0 = load i32* %target.addr, align 4
%idxprom = zext i32 %0 to i64
- %arrayidx = getelementptr inbounds [2 x i8*]* @fn.table, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @fn.table, i32 0, i64 %idxprom
%1 = load i8** %arrayidx, align 8
br label %indirectgoto
; ARM64: ldrb [[BYTE:w[0-9]+]], [x[[ADDR]]]
; ARM64: strb [[BYTE]], [x0]
%array = alloca i8, i32 8192
- %elem = getelementptr i8* %array, i32 8000
+ %elem = getelementptr i8, i8* %array, i32 8000
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %elem, i64 1, i32 1, i1 false)
ret void
}
; CHECK-LABEL: t2:
; CHECK: ldur w0, [x0, #-4]
; CHECK: ret
- %0 = getelementptr i32 *%ptr, i32 -1
+ %0 = getelementptr i32, i32 *%ptr, i32 -1
%1 = load i32* %0, align 4
ret i32 %1
}
; CHECK-LABEL: t3:
; CHECK: ldur w0, [x0, #-256]
; CHECK: ret
- %0 = getelementptr i32 *%ptr, i32 -64
+ %0 = getelementptr i32, i32 *%ptr, i32 -64
%1 = load i32* %0, align 4
ret i32 %1
}
; CHECK-LABEL: t4:
; CHECK: stur wzr, [x0, #-4]
; CHECK: ret
- %0 = getelementptr i32 *%ptr, i32 -1
+ %0 = getelementptr i32, i32 *%ptr, i32 -1
store i32 0, i32* %0, align 4
ret void
}
; CHECK-LABEL: t5:
; CHECK: stur wzr, [x0, #-256]
; CHECK: ret
- %0 = getelementptr i32 *%ptr, i32 -64
+ %0 = getelementptr i32, i32 *%ptr, i32 -64
store i32 0, i32* %0, align 4
ret void
}
; CHECK-LABEL: _gep_promotion:
; CHECK: ldrb {{[a-z][0-9]+}}, {{\[[a-z][0-9]+\]}}
- %arrayidx = getelementptr inbounds i8* %0, i8 %add
+ %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
%1 = load i8* %arrayidx, align 1
ret i8 %1
; CHECK: ret
%ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
%0 = bitcast %0* %self to i8*
- %add.ptr = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr10.0 = bitcast i8* %add.ptr to double*
%tmp11 = load double* %add.ptr10.0, align 8
%add.ptr.sum = add i64 %ivar, 8
- %add.ptr10.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+ %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%1 = bitcast i8* %add.ptr10.1 to double*
%tmp12 = load double* %1, align 8
%add.ptr.sum17 = add i64 %ivar, 16
- %add.ptr4.1 = getelementptr inbounds i8* %0, i64 %add.ptr.sum17
+ %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17
%add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
%tmp = load double* %add.ptr4.1.0, align 8
%add.ptr4.1.sum = add i64 %ivar, 24
- %add.ptr4.1.1 = getelementptr inbounds i8* %0, i64 %add.ptr4.1.sum
+ %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum
%2 = bitcast i8* %add.ptr4.1.1 to double*
%tmp5 = load double* %2, align 8
%insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
; CHECK: ret
%ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
%0 = bitcast %0* %self to i8*
- %add.ptr = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr10.0 = bitcast i8* %add.ptr to double*
%tmp11 = load double* %add.ptr10.0, align 8
- %add.ptr10.1 = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
%1 = bitcast i8* %add.ptr10.1 to double*
%tmp12 = load double* %1, align 8
- %add.ptr4.1 = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
%tmp = load double* %add.ptr4.1.0, align 8
- %add.ptr4.1.1 = getelementptr inbounds i8* %0, i64 %ivar
+ %add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
%2 = bitcast i8* %add.ptr4.1.1 to double*
%tmp5 = load double* %2, align 8
%insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
- %arrayidx86 = getelementptr inbounds %struct.a* %ctx, i64 0, i64 %idxprom83
+ %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
%result = load i16* %arrayidx86, align 2
ret i16 %result
}
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
- %arrayidx86 = getelementptr inbounds %struct.b* %ctx, i64 0, i64 %idxprom83
+ %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
%result = load i32* %arrayidx86, align 4
ret i32 %result
}
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
- %arrayidx86 = getelementptr inbounds %struct.c* %ctx, i64 0, i64 %idxprom83
+ %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
%result = load i64* %arrayidx86, align 8
ret i64 %result
}
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
- %arrayidx86 = getelementptr inbounds %struct.a* %ctx, i64 0, i64 %idxprom83
+ %arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
store i16 %val, i16* %arrayidx86, align 8
ret void
}
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
- %arrayidx86 = getelementptr inbounds %struct.b* %ctx, i64 0, i64 %idxprom83
+ %arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
store i32 %val, i32* %arrayidx86, align 8
ret void
}
%shr81 = lshr i32 %xor72, 9
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
- %arrayidx86 = getelementptr inbounds %struct.c* %ctx, i64 0, i64 %idxprom83
+ %arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
store i64 %val, i64* %arrayidx86, align 8
ret void
}
; CHECK: str x{{[0-9+]}}, [x{{[0-9+]}}], #8
; CHECK: ret
%tmp = load i64** %out, align 8
- %incdec.ptr = getelementptr inbounds i64* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %tmp, i64 1
store i64 %spacing, i64* %tmp, align 4
store i64* %incdec.ptr, i64** %out, align 8
ret void
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
%tmp = load i32** %out, align 8
- %incdec.ptr = getelementptr inbounds i32* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %tmp, i64 1
store i32 %spacing, i32* %tmp, align 4
store i32* %incdec.ptr, i32** %out, align 8
ret void
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
%tmp = load i16** %out, align 8
- %incdec.ptr = getelementptr inbounds i16* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i16, i16* %tmp, i64 1
store i16 %spacing, i16* %tmp, align 4
store i16* %incdec.ptr, i16** %out, align 8
ret void
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #1
; CHECK: ret
%tmp = load i8** %out, align 8
- %incdec.ptr = getelementptr inbounds i8* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %tmp, i64 1
store i8 %spacing, i8* %tmp, align 4
store i8* %incdec.ptr, i8** %out, align 8
ret void
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
%tmp = load i32** %out, align 8
- %incdec.ptr = getelementptr inbounds i32* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %tmp, i64 1
%trunc = trunc i64 %spacing to i32
store i32 %trunc, i32* %tmp, align 4
store i32* %incdec.ptr, i32** %out, align 8
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
%tmp = load i16** %out, align 8
- %incdec.ptr = getelementptr inbounds i16* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i16, i16* %tmp, i64 1
%trunc = trunc i64 %spacing to i16
store i16 %trunc, i16* %tmp, align 4
store i16* %incdec.ptr, i16** %out, align 8
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #1
; CHECK: ret
%tmp = load i8** %out, align 8
- %incdec.ptr = getelementptr inbounds i8* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %tmp, i64 1
%trunc = trunc i64 %spacing to i8
store i8 %trunc, i8* %tmp, align 4
store i8* %incdec.ptr, i8** %out, align 8
; CHECK: str s{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
%tmp = load float** %out, align 8
- %incdec.ptr = getelementptr inbounds float* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds float, float* %tmp, i64 1
store float %spacing, float* %tmp, align 4
store float* %incdec.ptr, float** %out, align 8
ret void
; CHECK: str d{{[0-9+]}}, [x{{[0-9+]}}], #8
; CHECK: ret
%tmp = load double** %out, align 8
- %incdec.ptr = getelementptr inbounds double* %tmp, i64 1
+ %incdec.ptr = getelementptr inbounds double, double* %tmp, i64 1
store double %spacing, double* %tmp, align 4
store double* %incdec.ptr, double** %out, align 8
ret void
; CHECK-NEXT: str d0, [x0, #32]!
; CHECK-NEXT: ret
%tmp = load double** %out, align 8
- %ptr = getelementptr inbounds double* %tmp, i64 4
+ %ptr = getelementptr inbounds double, double* %tmp, i64 4
store double %spacing, double* %ptr, align 4
ret double *%ptr
}
; CHECK-NEXT: str s0, [x0, #12]!
; CHECK-NEXT: ret
%tmp = load float** %out, align 8
- %ptr = getelementptr inbounds float* %tmp, i64 3
+ %ptr = getelementptr inbounds float, float* %tmp, i64 3
store float %spacing, float* %ptr, align 4
ret float *%ptr
}
; CHECK-NEXT: str x1, [x0, #16]!
; CHECK-NEXT: ret
%tmp = load i64** %out, align 8
- %ptr = getelementptr inbounds i64* %tmp, i64 2
+ %ptr = getelementptr inbounds i64, i64* %tmp, i64 2
store i64 %spacing, i64* %ptr, align 4
ret i64 *%ptr
}
; CHECK-NEXT: str w1, [x0, #8]!
; CHECK-NEXT: ret
%tmp = load i32** %out, align 8
- %ptr = getelementptr inbounds i32* %tmp, i64 2
+ %ptr = getelementptr inbounds i32, i32* %tmp, i64 2
store i32 %spacing, i32* %ptr, align 4
ret i32 *%ptr
}
; CHECK-NEXT: strh w1, [x0, #4]!
; CHECK-NEXT: ret
%tmp = load i16** %out, align 8
- %ptr = getelementptr inbounds i16* %tmp, i64 2
+ %ptr = getelementptr inbounds i16, i16* %tmp, i64 2
store i16 %spacing, i16* %ptr, align 4
ret i16 *%ptr
}
; CHECK-NEXT: strb w1, [x0, #2]!
; CHECK-NEXT: ret
%tmp = load i8** %out, align 8
- %ptr = getelementptr inbounds i8* %tmp, i64 2
+ %ptr = getelementptr inbounds i8, i8* %tmp, i64 2
store i8 %spacing, i8* %ptr, align 4
ret i8 *%ptr
}
; CHECK-NEXT: str w1, [x0, #8]!
; CHECK-NEXT: ret
%tmp = load i32** %out, align 8
- %ptr = getelementptr inbounds i32* %tmp, i64 2
+ %ptr = getelementptr inbounds i32, i32* %tmp, i64 2
%trunc = trunc i64 %spacing to i32
store i32 %trunc, i32* %ptr, align 4
ret i32 *%ptr
; CHECK-NEXT: strh w1, [x0, #4]!
; CHECK-NEXT: ret
%tmp = load i16** %out, align 8
- %ptr = getelementptr inbounds i16* %tmp, i64 2
+ %ptr = getelementptr inbounds i16, i16* %tmp, i64 2
%trunc = trunc i64 %spacing to i16
store i16 %trunc, i16* %ptr, align 4
ret i16 *%ptr
; CHECK-NEXT: strb w1, [x0, #2]!
; CHECK-NEXT: ret
%tmp = load i8** %out, align 8
- %ptr = getelementptr inbounds i8* %tmp, i64 2
+ %ptr = getelementptr inbounds i8, i8* %tmp, i64 2
%trunc = trunc i64 %spacing to i8
store i8 %trunc, i8* %ptr, align 4
ret i8 *%ptr
; CHECK: ldr d0, [x0, #8]!
; CHECK: str d0, [x1]
; CHECK: ret
- %ptr = getelementptr inbounds double* %src, i64 1
+ %ptr = getelementptr inbounds double, double* %src, i64 1
%tmp = load double* %ptr, align 4
store double %tmp, double* %out, align 4
ret double* %ptr
; CHECK: ldr s0, [x0, #4]!
; CHECK: str s0, [x1]
; CHECK: ret
- %ptr = getelementptr inbounds float* %src, i64 1
+ %ptr = getelementptr inbounds float, float* %src, i64 1
%tmp = load float* %ptr, align 4
store float %tmp, float* %out, align 4
ret float* %ptr
; CHECK: ldr x[[REG:[0-9]+]], [x0, #8]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i64* %src, i64 1
+ %ptr = getelementptr inbounds i64, i64* %src, i64 1
%tmp = load i64* %ptr, align 4
store i64 %tmp, i64* %out, align 4
ret i64* %ptr
; CHECK: ldr w[[REG:[0-9]+]], [x0, #4]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i32* %src, i64 1
+ %ptr = getelementptr inbounds i32, i32* %src, i64 1
%tmp = load i32* %ptr, align 4
store i32 %tmp, i32* %out, align 4
ret i32* %ptr
; CHECK: ldrh w[[REG:[0-9]+]], [x0, #2]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i16* %src, i64 1
+ %ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16* %ptr, align 4
%ext = zext i16 %tmp to i32
store i32 %ext, i32* %out, align 4
; CHECK: ldrh w[[REG:[0-9]+]], [x0, #2]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i16* %src, i64 1
+ %ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16* %ptr, align 4
%ext = zext i16 %tmp to i64
store i64 %ext, i64* %out, align 4
; CHECK: ldrb w[[REG:[0-9]+]], [x0, #1]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i8* %src, i64 1
+ %ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8* %ptr, align 4
%ext = zext i8 %tmp to i32
store i32 %ext, i32* %out, align 4
; CHECK: ldrb w[[REG:[0-9]+]], [x0, #1]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i8* %src, i64 1
+ %ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8* %ptr, align 4
%ext = zext i8 %tmp to i64
store i64 %ext, i64* %out, align 4
; CHECK: ldrsw x[[REG:[0-9]+]], [x0, #4]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i32* %src, i64 1
+ %ptr = getelementptr inbounds i32, i32* %src, i64 1
%tmp = load i32* %ptr, align 4
%ext = sext i32 %tmp to i64
store i64 %ext, i64* %out, align 8
; CHECK: ldrsh w[[REG:[0-9]+]], [x0, #2]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i16* %src, i64 1
+ %ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16* %ptr, align 4
%ext = sext i16 %tmp to i32
store i32 %ext, i32* %out, align 4
; CHECK: ldrsh x[[REG:[0-9]+]], [x0, #2]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i16* %src, i64 1
+ %ptr = getelementptr inbounds i16, i16* %src, i64 1
%tmp = load i16* %ptr, align 4
%ext = sext i16 %tmp to i64
store i64 %ext, i64* %out, align 4
; CHECK: ldrsb w[[REG:[0-9]+]], [x0, #1]!
; CHECK: str w[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i8* %src, i64 1
+ %ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8* %ptr, align 4
%ext = sext i8 %tmp to i32
store i32 %ext, i32* %out, align 4
; CHECK: ldrsb x[[REG:[0-9]+]], [x0, #1]!
; CHECK: str x[[REG]], [x1]
; CHECK: ret
- %ptr = getelementptr inbounds i8* %src, i64 1
+ %ptr = getelementptr inbounds i8, i8* %src, i64 1
%tmp = load i8* %ptr, align 4
%ext = sext i8 %tmp to i64
store i64 %ext, i64* %out, align 4
; ret
%paddr = bitcast i64* %addr to i64**
store i64* %addr, i64** %paddr
- %newaddr = getelementptr i64* %addr, i32 1
+ %newaddr = getelementptr i64, i64* %addr, i32 1
ret i64* %newaddr
}
; Function Attrs: nounwind ssp
define void @f(double* %P1) #0 {
entry:
- %arrayidx4 = getelementptr inbounds double* %P1, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %P1, i64 1
%0 = load double* %arrayidx4, align 8, !tbaa !1
%1 = load double* %P1, align 8, !tbaa !1
%2 = insertelement <2 x double> undef, double %0, i32 0
define <8 x i8> @test_v8i8_pre_load(<8 x i8>* %addr) {
; CHECK-LABEL: test_v8i8_pre_load:
; CHECK: ldr d0, [x0, #40]!
- %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
%val = load <8 x i8>* %newaddr, align 8
store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
ret <8 x i8> %val
define <8 x i8> @test_v8i8_post_load(<8 x i8>* %addr) {
; CHECK-LABEL: test_v8i8_post_load:
; CHECK: ldr d0, [x0], #40
- %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
%val = load <8 x i8>* %addr, align 8
store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
ret <8 x i8> %val
define void @test_v8i8_pre_store(<8 x i8> %in, <8 x i8>* %addr) {
; CHECK-LABEL: test_v8i8_pre_store:
; CHECK: str d0, [x0, #40]!
- %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
store <8 x i8> %in, <8 x i8>* %newaddr, align 8
store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
ret void
define void @test_v8i8_post_store(<8 x i8> %in, <8 x i8>* %addr) {
; CHECK-LABEL: test_v8i8_post_store:
; CHECK: str d0, [x0], #40
- %newaddr = getelementptr <8 x i8>* %addr, i32 5
+ %newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
store <8 x i8> %in, <8 x i8>* %addr, align 8
store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
ret void
define <4 x i16> @test_v4i16_pre_load(<4 x i16>* %addr) {
; CHECK-LABEL: test_v4i16_pre_load:
; CHECK: ldr d0, [x0, #40]!
- %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
%val = load <4 x i16>* %newaddr, align 8
store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
ret <4 x i16> %val
define <4 x i16> @test_v4i16_post_load(<4 x i16>* %addr) {
; CHECK-LABEL: test_v4i16_post_load:
; CHECK: ldr d0, [x0], #40
- %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
%val = load <4 x i16>* %addr, align 8
store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
ret <4 x i16> %val
define void @test_v4i16_pre_store(<4 x i16> %in, <4 x i16>* %addr) {
; CHECK-LABEL: test_v4i16_pre_store:
; CHECK: str d0, [x0, #40]!
- %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
store <4 x i16> %in, <4 x i16>* %newaddr, align 8
store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
ret void
define void @test_v4i16_post_store(<4 x i16> %in, <4 x i16>* %addr) {
; CHECK-LABEL: test_v4i16_post_store:
; CHECK: str d0, [x0], #40
- %newaddr = getelementptr <4 x i16>* %addr, i32 5
+ %newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
store <4 x i16> %in, <4 x i16>* %addr, align 8
store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
ret void
define <2 x i32> @test_v2i32_pre_load(<2 x i32>* %addr) {
; CHECK-LABEL: test_v2i32_pre_load:
; CHECK: ldr d0, [x0, #40]!
- %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
%val = load <2 x i32>* %newaddr, align 8
store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
ret <2 x i32> %val
define <2 x i32> @test_v2i32_post_load(<2 x i32>* %addr) {
; CHECK-LABEL: test_v2i32_post_load:
; CHECK: ldr d0, [x0], #40
- %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
%val = load <2 x i32>* %addr, align 8
store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
ret <2 x i32> %val
define void @test_v2i32_pre_store(<2 x i32> %in, <2 x i32>* %addr) {
; CHECK-LABEL: test_v2i32_pre_store:
; CHECK: str d0, [x0, #40]!
- %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
store <2 x i32> %in, <2 x i32>* %newaddr, align 8
store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
ret void
define void @test_v2i32_post_store(<2 x i32> %in, <2 x i32>* %addr) {
; CHECK-LABEL: test_v2i32_post_store:
; CHECK: str d0, [x0], #40
- %newaddr = getelementptr <2 x i32>* %addr, i32 5
+ %newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
store <2 x i32> %in, <2 x i32>* %addr, align 8
store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
ret void
define <2 x float> @test_v2f32_pre_load(<2 x float>* %addr) {
; CHECK-LABEL: test_v2f32_pre_load:
; CHECK: ldr d0, [x0, #40]!
- %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
%val = load <2 x float>* %newaddr, align 8
store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
ret <2 x float> %val
define <2 x float> @test_v2f32_post_load(<2 x float>* %addr) {
; CHECK-LABEL: test_v2f32_post_load:
; CHECK: ldr d0, [x0], #40
- %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
%val = load <2 x float>* %addr, align 8
store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
ret <2 x float> %val
define void @test_v2f32_pre_store(<2 x float> %in, <2 x float>* %addr) {
; CHECK-LABEL: test_v2f32_pre_store:
; CHECK: str d0, [x0, #40]!
- %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
store <2 x float> %in, <2 x float>* %newaddr, align 8
store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
ret void
define void @test_v2f32_post_store(<2 x float> %in, <2 x float>* %addr) {
; CHECK-LABEL: test_v2f32_post_store:
; CHECK: str d0, [x0], #40
- %newaddr = getelementptr <2 x float>* %addr, i32 5
+ %newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
store <2 x float> %in, <2 x float>* %addr, align 8
store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
ret void
define <1 x i64> @test_v1i64_pre_load(<1 x i64>* %addr) {
; CHECK-LABEL: test_v1i64_pre_load:
; CHECK: ldr d0, [x0, #40]!
- %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
%val = load <1 x i64>* %newaddr, align 8
store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
ret <1 x i64> %val
define <1 x i64> @test_v1i64_post_load(<1 x i64>* %addr) {
; CHECK-LABEL: test_v1i64_post_load:
; CHECK: ldr d0, [x0], #40
- %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
%val = load <1 x i64>* %addr, align 8
store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
ret <1 x i64> %val
define void @test_v1i64_pre_store(<1 x i64> %in, <1 x i64>* %addr) {
; CHECK-LABEL: test_v1i64_pre_store:
; CHECK: str d0, [x0, #40]!
- %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
store <1 x i64> %in, <1 x i64>* %newaddr, align 8
store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
ret void
define void @test_v1i64_post_store(<1 x i64> %in, <1 x i64>* %addr) {
; CHECK-LABEL: test_v1i64_post_store:
; CHECK: str d0, [x0], #40
- %newaddr = getelementptr <1 x i64>* %addr, i32 5
+ %newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
store <1 x i64> %in, <1 x i64>* %addr, align 8
store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
ret void
define <16 x i8> @test_v16i8_pre_load(<16 x i8>* %addr) {
; CHECK-LABEL: test_v16i8_pre_load:
; CHECK: ldr q0, [x0, #80]!
- %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
%val = load <16 x i8>* %newaddr, align 8
store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
ret <16 x i8> %val
define <16 x i8> @test_v16i8_post_load(<16 x i8>* %addr) {
; CHECK-LABEL: test_v16i8_post_load:
; CHECK: ldr q0, [x0], #80
- %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
%val = load <16 x i8>* %addr, align 8
store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
ret <16 x i8> %val
define void @test_v16i8_pre_store(<16 x i8> %in, <16 x i8>* %addr) {
; CHECK-LABEL: test_v16i8_pre_store:
; CHECK: str q0, [x0, #80]!
- %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
store <16 x i8> %in, <16 x i8>* %newaddr, align 8
store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
ret void
define void @test_v16i8_post_store(<16 x i8> %in, <16 x i8>* %addr) {
; CHECK-LABEL: test_v16i8_post_store:
; CHECK: str q0, [x0], #80
- %newaddr = getelementptr <16 x i8>* %addr, i32 5
+ %newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
store <16 x i8> %in, <16 x i8>* %addr, align 8
store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
ret void
define <8 x i16> @test_v8i16_pre_load(<8 x i16>* %addr) {
; CHECK-LABEL: test_v8i16_pre_load:
; CHECK: ldr q0, [x0, #80]!
- %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
%val = load <8 x i16>* %newaddr, align 8
store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
ret <8 x i16> %val
define <8 x i16> @test_v8i16_post_load(<8 x i16>* %addr) {
; CHECK-LABEL: test_v8i16_post_load:
; CHECK: ldr q0, [x0], #80
- %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
%val = load <8 x i16>* %addr, align 8
store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
ret <8 x i16> %val
define void @test_v8i16_pre_store(<8 x i16> %in, <8 x i16>* %addr) {
; CHECK-LABEL: test_v8i16_pre_store:
; CHECK: str q0, [x0, #80]!
- %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
store <8 x i16> %in, <8 x i16>* %newaddr, align 8
store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
ret void
define void @test_v8i16_post_store(<8 x i16> %in, <8 x i16>* %addr) {
; CHECK-LABEL: test_v8i16_post_store:
; CHECK: str q0, [x0], #80
- %newaddr = getelementptr <8 x i16>* %addr, i32 5
+ %newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
store <8 x i16> %in, <8 x i16>* %addr, align 8
store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
ret void
define <4 x i32> @test_v4i32_pre_load(<4 x i32>* %addr) {
; CHECK-LABEL: test_v4i32_pre_load:
; CHECK: ldr q0, [x0, #80]!
- %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
%val = load <4 x i32>* %newaddr, align 8
store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
ret <4 x i32> %val
define <4 x i32> @test_v4i32_post_load(<4 x i32>* %addr) {
; CHECK-LABEL: test_v4i32_post_load:
; CHECK: ldr q0, [x0], #80
- %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
%val = load <4 x i32>* %addr, align 8
store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
ret <4 x i32> %val
define void @test_v4i32_pre_store(<4 x i32> %in, <4 x i32>* %addr) {
; CHECK-LABEL: test_v4i32_pre_store:
; CHECK: str q0, [x0, #80]!
- %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
store <4 x i32> %in, <4 x i32>* %newaddr, align 8
store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
ret void
define void @test_v4i32_post_store(<4 x i32> %in, <4 x i32>* %addr) {
; CHECK-LABEL: test_v4i32_post_store:
; CHECK: str q0, [x0], #80
- %newaddr = getelementptr <4 x i32>* %addr, i32 5
+ %newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
store <4 x i32> %in, <4 x i32>* %addr, align 8
store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
ret void
define <4 x float> @test_v4f32_pre_load(<4 x float>* %addr) {
; CHECK-LABEL: test_v4f32_pre_load:
; CHECK: ldr q0, [x0, #80]!
- %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
%val = load <4 x float>* %newaddr, align 8
store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
ret <4 x float> %val
define <4 x float> @test_v4f32_post_load(<4 x float>* %addr) {
; CHECK-LABEL: test_v4f32_post_load:
; CHECK: ldr q0, [x0], #80
- %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
%val = load <4 x float>* %addr, align 8
store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
ret <4 x float> %val
define void @test_v4f32_pre_store(<4 x float> %in, <4 x float>* %addr) {
; CHECK-LABEL: test_v4f32_pre_store:
; CHECK: str q0, [x0, #80]!
- %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
store <4 x float> %in, <4 x float>* %newaddr, align 8
store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
ret void
define void @test_v4f32_post_store(<4 x float> %in, <4 x float>* %addr) {
; CHECK-LABEL: test_v4f32_post_store:
; CHECK: str q0, [x0], #80
- %newaddr = getelementptr <4 x float>* %addr, i32 5
+ %newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
store <4 x float> %in, <4 x float>* %addr, align 8
store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
ret void
define <2 x i64> @test_v2i64_pre_load(<2 x i64>* %addr) {
; CHECK-LABEL: test_v2i64_pre_load:
; CHECK: ldr q0, [x0, #80]!
- %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
%val = load <2 x i64>* %newaddr, align 8
store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
ret <2 x i64> %val
define <2 x i64> @test_v2i64_post_load(<2 x i64>* %addr) {
; CHECK-LABEL: test_v2i64_post_load:
; CHECK: ldr q0, [x0], #80
- %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
%val = load <2 x i64>* %addr, align 8
store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
ret <2 x i64> %val
define void @test_v2i64_pre_store(<2 x i64> %in, <2 x i64>* %addr) {
; CHECK-LABEL: test_v2i64_pre_store:
; CHECK: str q0, [x0, #80]!
- %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
store <2 x i64> %in, <2 x i64>* %newaddr, align 8
store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
ret void
define void @test_v2i64_post_store(<2 x i64> %in, <2 x i64>* %addr) {
; CHECK-LABEL: test_v2i64_post_store:
; CHECK: str q0, [x0], #80
- %newaddr = getelementptr <2 x i64>* %addr, i32 5
+ %newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
store <2 x i64> %in, <2 x i64>* %addr, align 8
store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
ret void
define <2 x double> @test_v2f64_pre_load(<2 x double>* %addr) {
; CHECK-LABEL: test_v2f64_pre_load:
; CHECK: ldr q0, [x0, #80]!
- %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
%val = load <2 x double>* %newaddr, align 8
store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
ret <2 x double> %val
define <2 x double> @test_v2f64_post_load(<2 x double>* %addr) {
; CHECK-LABEL: test_v2f64_post_load:
; CHECK: ldr q0, [x0], #80
- %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
%val = load <2 x double>* %addr, align 8
store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
ret <2 x double> %val
define void @test_v2f64_pre_store(<2 x double> %in, <2 x double>* %addr) {
; CHECK-LABEL: test_v2f64_pre_store:
; CHECK: str q0, [x0, #80]!
- %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
store <2 x double> %in, <2 x double>* %newaddr, align 8
store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
ret void
define void @test_v2f64_post_store(<2 x double> %in, <2 x double>* %addr) {
; CHECK-LABEL: test_v2f64_post_store:
; CHECK: str q0, [x0], #80
- %newaddr = getelementptr <2 x double>* %addr, i32 5
+ %newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
store <2 x double> %in, <2 x double>* %addr, align 8
store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
ret void
%elt = extractelement <16 x i8> %in, i32 3
store i8 %elt, i8* %addr
- %newaddr = getelementptr i8* %addr, i32 1
+ %newaddr = getelementptr i8, i8* %addr, i32 1
ret i8* %newaddr
}
%elt = extractelement <16 x i8> %in, i32 3
store i8 %elt, i8* %addr
- %newaddr = getelementptr i8* %addr, i32 2
+ %newaddr = getelementptr i8, i8* %addr, i32 2
ret i8* %newaddr
}
%elt = extractelement <8 x i16> %in, i32 3
store i16 %elt, i16* %addr
- %newaddr = getelementptr i16* %addr, i32 1
+ %newaddr = getelementptr i16, i16* %addr, i32 1
ret i16* %newaddr
}
%elt = extractelement <8 x i16> %in, i32 3
store i16 %elt, i16* %addr
- %newaddr = getelementptr i16* %addr, i32 2
+ %newaddr = getelementptr i16, i16* %addr, i32 2
ret i16* %newaddr
}
%elt = extractelement <4 x i32> %in, i32 3
store i32 %elt, i32* %addr
- %newaddr = getelementptr i32* %addr, i32 1
+ %newaddr = getelementptr i32, i32* %addr, i32 1
ret i32* %newaddr
}
%elt = extractelement <4 x i32> %in, i32 3
store i32 %elt, i32* %addr
- %newaddr = getelementptr i32* %addr, i32 2
+ %newaddr = getelementptr i32, i32* %addr, i32 2
ret i32* %newaddr
}
%elt = extractelement <4 x float> %in, i32 3
store float %elt, float* %addr
- %newaddr = getelementptr float* %addr, i32 1
+ %newaddr = getelementptr float, float* %addr, i32 1
ret float* %newaddr
}
%elt = extractelement <4 x float> %in, i32 3
store float %elt, float* %addr
- %newaddr = getelementptr float* %addr, i32 2
+ %newaddr = getelementptr float, float* %addr, i32 2
ret float* %newaddr
}
%elt = extractelement <2 x i64> %in, i64 1
store i64 %elt, i64* %addr
- %newaddr = getelementptr i64* %addr, i64 1
+ %newaddr = getelementptr i64, i64* %addr, i64 1
ret i64* %newaddr
}
%elt = extractelement <2 x i64> %in, i64 1
store i64 %elt, i64* %addr
- %newaddr = getelementptr i64* %addr, i64 2
+ %newaddr = getelementptr i64, i64* %addr, i64 2
ret i64* %newaddr
}
%elt = extractelement <2 x double> %in, i32 1
store double %elt, double* %addr
- %newaddr = getelementptr double* %addr, i32 1
+ %newaddr = getelementptr double, double* %addr, i32 1
ret double* %newaddr
}
%elt = extractelement <2 x double> %in, i32 1
store double %elt, double* %addr
- %newaddr = getelementptr double* %addr, i32 2
+ %newaddr = getelementptr double, double* %addr, i32 2
ret double* %newaddr
}
%elt = extractelement <8 x i8> %in, i32 3
store i8 %elt, i8* %addr
- %newaddr = getelementptr i8* %addr, i32 1
+ %newaddr = getelementptr i8, i8* %addr, i32 1
ret i8* %newaddr
}
%elt = extractelement <8 x i8> %in, i32 3
store i8 %elt, i8* %addr
- %newaddr = getelementptr i8* %addr, i32 2
+ %newaddr = getelementptr i8, i8* %addr, i32 2
ret i8* %newaddr
}
%elt = extractelement <4 x i16> %in, i32 3
store i16 %elt, i16* %addr
- %newaddr = getelementptr i16* %addr, i32 1
+ %newaddr = getelementptr i16, i16* %addr, i32 1
ret i16* %newaddr
}
%elt = extractelement <4 x i16> %in, i32 3
store i16 %elt, i16* %addr
- %newaddr = getelementptr i16* %addr, i32 2
+ %newaddr = getelementptr i16, i16* %addr, i32 2
ret i16* %newaddr
}
%elt = extractelement <2 x i32> %in, i32 1
store i32 %elt, i32* %addr
- %newaddr = getelementptr i32* %addr, i32 1
+ %newaddr = getelementptr i32, i32* %addr, i32 1
ret i32* %newaddr
}
%elt = extractelement <2 x i32> %in, i32 1
store i32 %elt, i32* %addr
- %newaddr = getelementptr i32* %addr, i32 2
+ %newaddr = getelementptr i32, i32* %addr, i32 2
ret i32* %newaddr
}
%elt = extractelement <2 x float> %in, i32 1
store float %elt, float* %addr
- %newaddr = getelementptr float* %addr, i32 1
+ %newaddr = getelementptr float, float* %addr, i32 1
ret float* %newaddr
}
%elt = extractelement <2 x float> %in, i32 1
store float %elt, float* %addr
- %newaddr = getelementptr float* %addr, i32 2
+ %newaddr = getelementptr float, float* %addr, i32 2
ret float* %newaddr
}
;CHECK-LABEL: test_v16i8_post_imm_ld2:
;CHECK: ld2.16b { v0, v1 }, [x0], #32
%ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
;CHECK-LABEL: test_v16i8_post_reg_ld2:
;CHECK: ld2.16b { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
;CHECK-LABEL: test_v8i8_post_imm_ld2:
;CHECK: ld2.8b { v0, v1 }, [x0], #16
%ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 16
+ %tmp = getelementptr i8, i8* %A, i32 16
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld2
}
;CHECK-LABEL: test_v8i8_post_reg_ld2:
;CHECK: ld2.8b { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld2
}
;CHECK-LABEL: test_v8i16_post_imm_ld2:
;CHECK: ld2.8h { v0, v1 }, [x0], #32
%ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld2
}
;CHECK-LABEL: test_v8i16_post_reg_ld2:
;CHECK: ld2.8h { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld2
}
;CHECK-LABEL: test_v4i16_post_imm_ld2:
;CHECK: ld2.4h { v0, v1 }, [x0], #16
%ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 8
+ %tmp = getelementptr i16, i16* %A, i32 8
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld2
}
;CHECK-LABEL: test_v4i16_post_reg_ld2:
;CHECK: ld2.4h { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld2
}
;CHECK-LABEL: test_v4i32_post_imm_ld2:
;CHECK: ld2.4s { v0, v1 }, [x0], #32
%ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld2
}
;CHECK-LABEL: test_v4i32_post_reg_ld2:
;CHECK: ld2.4s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld2
}
;CHECK-LABEL: test_v2i32_post_imm_ld2:
;CHECK: ld2.2s { v0, v1 }, [x0], #16
%ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld2
}
;CHECK-LABEL: test_v2i32_post_reg_ld2:
;CHECK: ld2.2s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld2
}
;CHECK-LABEL: test_v2i64_post_imm_ld2:
;CHECK: ld2.2d { v0, v1 }, [x0], #32
%ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld2
}
;CHECK-LABEL: test_v2i64_post_reg_ld2:
;CHECK: ld2.2d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld2
}
;CHECK-LABEL: test_v1i64_post_imm_ld2:
;CHECK: ld1.1d { v0, v1 }, [x0], #16
%ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 2
+ %tmp = getelementptr i64, i64* %A, i32 2
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld2
}
;CHECK-LABEL: test_v1i64_post_reg_ld2:
;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld2
}
;CHECK-LABEL: test_v4f32_post_imm_ld2:
;CHECK: ld2.4s { v0, v1 }, [x0], #32
%ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld2
}
;CHECK-LABEL: test_v4f32_post_reg_ld2:
;CHECK: ld2.4s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld2
}
;CHECK-LABEL: test_v2f32_post_imm_ld2:
;CHECK: ld2.2s { v0, v1 }, [x0], #16
%ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld2
}
;CHECK-LABEL: test_v2f32_post_reg_ld2:
;CHECK: ld2.2s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld2
}
;CHECK-LABEL: test_v2f64_post_imm_ld2:
;CHECK: ld2.2d { v0, v1 }, [x0], #32
%ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld2
}
;CHECK-LABEL: test_v2f64_post_reg_ld2:
;CHECK: ld2.2d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld2
}
;CHECK-LABEL: test_v1f64_post_imm_ld2:
;CHECK: ld1.1d { v0, v1 }, [x0], #16
%ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 2
+ %tmp = getelementptr double, double* %A, i32 2
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld2
}
;CHECK-LABEL: test_v1f64_post_reg_ld2:
;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld2
}
;CHECK-LABEL: test_v16i8_post_imm_ld3:
;CHECK: ld3.16b { v0, v1, v2 }, [x0], #48
%ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 48
+ %tmp = getelementptr i8, i8* %A, i32 48
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
}
;CHECK-LABEL: test_v16i8_post_reg_ld3:
;CHECK: ld3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
}
;CHECK-LABEL: test_v8i8_post_imm_ld3:
;CHECK: ld3.8b { v0, v1, v2 }, [x0], #24
%ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 24
+ %tmp = getelementptr i8, i8* %A, i32 24
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
}
;CHECK-LABEL: test_v8i8_post_reg_ld3:
;CHECK: ld3.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
}
;CHECK-LABEL: test_v8i16_post_imm_ld3:
;CHECK: ld3.8h { v0, v1, v2 }, [x0], #48
%ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 24
+ %tmp = getelementptr i16, i16* %A, i32 24
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
}
;CHECK-LABEL: test_v8i16_post_reg_ld3:
;CHECK: ld3.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
}
;CHECK-LABEL: test_v4i16_post_imm_ld3:
;CHECK: ld3.4h { v0, v1, v2 }, [x0], #24
%ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 12
+ %tmp = getelementptr i16, i16* %A, i32 12
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
}
;CHECK-LABEL: test_v4i16_post_reg_ld3:
;CHECK: ld3.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
}
;CHECK-LABEL: test_v4i32_post_imm_ld3:
;CHECK: ld3.4s { v0, v1, v2 }, [x0], #48
%ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 12
+ %tmp = getelementptr i32, i32* %A, i32 12
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
}
;CHECK-LABEL: test_v4i32_post_reg_ld3:
;CHECK: ld3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
}
;CHECK-LABEL: test_v2i32_post_imm_ld3:
;CHECK: ld3.2s { v0, v1, v2 }, [x0], #24
%ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 6
+ %tmp = getelementptr i32, i32* %A, i32 6
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
}
;CHECK-LABEL: test_v2i32_post_reg_ld3:
;CHECK: ld3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
}
;CHECK-LABEL: test_v2i64_post_imm_ld3:
;CHECK: ld3.2d { v0, v1, v2 }, [x0], #48
%ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 6
+ %tmp = getelementptr i64, i64* %A, i32 6
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
}
;CHECK-LABEL: test_v2i64_post_reg_ld3:
;CHECK: ld3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
}
;CHECK-LABEL: test_v1i64_post_imm_ld3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
%ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 3
+ %tmp = getelementptr i64, i64* %A, i32 3
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
}
;CHECK-LABEL: test_v1i64_post_reg_ld3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
}
;CHECK-LABEL: test_v4f32_post_imm_ld3:
;CHECK: ld3.4s { v0, v1, v2 }, [x0], #48
%ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 12
+ %tmp = getelementptr float, float* %A, i32 12
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld3
}
;CHECK-LABEL: test_v4f32_post_reg_ld3:
;CHECK: ld3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld3
}
;CHECK-LABEL: test_v2f32_post_imm_ld3:
;CHECK: ld3.2s { v0, v1, v2 }, [x0], #24
%ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 6
+ %tmp = getelementptr float, float* %A, i32 6
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld3
}
;CHECK-LABEL: test_v2f32_post_reg_ld3:
;CHECK: ld3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld3
}
;CHECK-LABEL: test_v2f64_post_imm_ld3:
;CHECK: ld3.2d { v0, v1, v2 }, [x0], #48
%ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 6
+ %tmp = getelementptr double, double* %A, i32 6
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld3
}
;CHECK-LABEL: test_v2f64_post_reg_ld3:
;CHECK: ld3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld3
}
;CHECK-LABEL: test_v1f64_post_imm_ld3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
%ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 3
+ %tmp = getelementptr double, double* %A, i32 3
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld3
}
;CHECK-LABEL: test_v1f64_post_reg_ld3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld3
}
;CHECK-LABEL: test_v16i8_post_imm_ld4:
;CHECK: ld4.16b { v0, v1, v2, v3 }, [x0], #64
%ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 64
+ %tmp = getelementptr i8, i8* %A, i32 64
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
}
;CHECK-LABEL: test_v16i8_post_reg_ld4:
;CHECK: ld4.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
}
;CHECK-LABEL: test_v8i8_post_imm_ld4:
;CHECK: ld4.8b { v0, v1, v2, v3 }, [x0], #32
%ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
}
;CHECK-LABEL: test_v8i8_post_reg_ld4:
;CHECK: ld4.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
}
;CHECK-LABEL: test_v8i16_post_imm_ld4:
;CHECK: ld4.8h { v0, v1, v2, v3 }, [x0], #64
%ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 32
+ %tmp = getelementptr i16, i16* %A, i32 32
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
}
;CHECK-LABEL: test_v8i16_post_reg_ld4:
;CHECK: ld4.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
}
;CHECK-LABEL: test_v4i16_post_imm_ld4:
;CHECK: ld4.4h { v0, v1, v2, v3 }, [x0], #32
%ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
}
;CHECK-LABEL: test_v4i16_post_reg_ld4:
;CHECK: ld4.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
}
;CHECK-LABEL: test_v4i32_post_imm_ld4:
;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], #64
%ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 16
+ %tmp = getelementptr i32, i32* %A, i32 16
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
}
;CHECK-LABEL: test_v4i32_post_reg_ld4:
;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
}
;CHECK-LABEL: test_v2i32_post_imm_ld4:
;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], #32
%ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
}
;CHECK-LABEL: test_v2i32_post_reg_ld4:
;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
}
;CHECK-LABEL: test_v2i64_post_imm_ld4:
;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], #64
%ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 8
+ %tmp = getelementptr i64, i64* %A, i32 8
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
}
;CHECK-LABEL: test_v2i64_post_reg_ld4:
;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
}
;CHECK-LABEL: test_v1i64_post_imm_ld4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
%ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
}
;CHECK-LABEL: test_v1i64_post_reg_ld4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
}
;CHECK-LABEL: test_v4f32_post_imm_ld4:
;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], #64
%ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 16
+ %tmp = getelementptr float, float* %A, i32 16
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
}
;CHECK-LABEL: test_v4f32_post_reg_ld4:
;CHECK: ld4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
}
;CHECK-LABEL: test_v2f32_post_imm_ld4:
;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], #32
%ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
}
;CHECK-LABEL: test_v2f32_post_reg_ld4:
;CHECK: ld4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
}
;CHECK-LABEL: test_v2f64_post_imm_ld4:
;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], #64
%ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 8
+ %tmp = getelementptr double, double* %A, i32 8
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
}
;CHECK-LABEL: test_v2f64_post_reg_ld4:
;CHECK: ld4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
}
;CHECK-LABEL: test_v1f64_post_imm_ld4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
%ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
}
;CHECK-LABEL: test_v1f64_post_reg_ld4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
}
;CHECK-LABEL: test_v16i8_post_imm_ld1x2:
;CHECK: ld1.16b { v0, v1 }, [x0], #32
%ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld1x2
}
;CHECK-LABEL: test_v16i8_post_reg_ld1x2:
;CHECK: ld1.16b { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x2.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld1x2
}
;CHECK-LABEL: test_v8i8_post_imm_ld1x2:
;CHECK: ld1.8b { v0, v1 }, [x0], #16
%ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 16
+ %tmp = getelementptr i8, i8* %A, i32 16
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld1x2
}
;CHECK-LABEL: test_v8i8_post_reg_ld1x2:
;CHECK: ld1.8b { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x2.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld1x2
}
;CHECK-LABEL: test_v8i16_post_imm_ld1x2:
;CHECK: ld1.8h { v0, v1 }, [x0], #32
%ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld1x2
}
;CHECK-LABEL: test_v8i16_post_reg_ld1x2:
;CHECK: ld1.8h { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x2.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld1x2
}
;CHECK-LABEL: test_v4i16_post_imm_ld1x2:
;CHECK: ld1.4h { v0, v1 }, [x0], #16
%ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 8
+ %tmp = getelementptr i16, i16* %A, i32 8
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld1x2
}
;CHECK-LABEL: test_v4i16_post_reg_ld1x2:
;CHECK: ld1.4h { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x2.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld1x2
}
;CHECK-LABEL: test_v4i32_post_imm_ld1x2:
;CHECK: ld1.4s { v0, v1 }, [x0], #32
%ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld1x2
}
;CHECK-LABEL: test_v4i32_post_reg_ld1x2:
;CHECK: ld1.4s { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x2.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld1x2
}
;CHECK-LABEL: test_v2i32_post_imm_ld1x2:
;CHECK: ld1.2s { v0, v1 }, [x0], #16
%ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld1x2
}
;CHECK-LABEL: test_v2i32_post_reg_ld1x2:
;CHECK: ld1.2s { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x2.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld1x2
}
;CHECK-LABEL: test_v2i64_post_imm_ld1x2:
;CHECK: ld1.2d { v0, v1 }, [x0], #32
%ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld1x2
}
;CHECK-LABEL: test_v2i64_post_reg_ld1x2:
;CHECK: ld1.2d { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x2.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld1x2
}
;CHECK-LABEL: test_v1i64_post_imm_ld1x2:
;CHECK: ld1.1d { v0, v1 }, [x0], #16
%ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 2
+ %tmp = getelementptr i64, i64* %A, i32 2
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld1x2
}
;CHECK-LABEL: test_v1i64_post_reg_ld1x2:
;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x2.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld1x2
}
;CHECK-LABEL: test_v4f32_post_imm_ld1x2:
;CHECK: ld1.4s { v0, v1 }, [x0], #32
%ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld1x2
}
;CHECK-LABEL: test_v4f32_post_reg_ld1x2:
;CHECK: ld1.4s { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x2.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld1x2
}
;CHECK-LABEL: test_v2f32_post_imm_ld1x2:
;CHECK: ld1.2s { v0, v1 }, [x0], #16
%ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld1x2
}
;CHECK-LABEL: test_v2f32_post_reg_ld1x2:
;CHECK: ld1.2s { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x2.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld1x2
}
;CHECK-LABEL: test_v2f64_post_imm_ld1x2:
;CHECK: ld1.2d { v0, v1 }, [x0], #32
%ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld1x2
}
;CHECK-LABEL: test_v2f64_post_reg_ld1x2:
;CHECK: ld1.2d { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x2.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld1x2
}
;CHECK-LABEL: test_v1f64_post_imm_ld1x2:
;CHECK: ld1.1d { v0, v1 }, [x0], #16
%ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 2
+ %tmp = getelementptr double, double* %A, i32 2
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld1x2
}
;CHECK-LABEL: test_v1f64_post_reg_ld1x2:
;CHECK: ld1.1d { v0, v1 }, [x0], x{{[0-9]+}}
%ld1x2 = tail call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x2.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld1x2
}
;CHECK-LABEL: test_v16i8_post_imm_ld1x3:
;CHECK: ld1.16b { v0, v1, v2 }, [x0], #48
%ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 48
+ %tmp = getelementptr i8, i8* %A, i32 48
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld1x3
}
;CHECK-LABEL: test_v16i8_post_reg_ld1x3:
;CHECK: ld1.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x3.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld1x3
}
;CHECK-LABEL: test_v8i8_post_imm_ld1x3:
;CHECK: ld1.8b { v0, v1, v2 }, [x0], #24
%ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 24
+ %tmp = getelementptr i8, i8* %A, i32 24
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld1x3
}
;CHECK-LABEL: test_v8i8_post_reg_ld1x3:
;CHECK: ld1.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x3.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld1x3
}
;CHECK-LABEL: test_v8i16_post_imm_ld1x3:
;CHECK: ld1.8h { v0, v1, v2 }, [x0], #48
%ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 24
+ %tmp = getelementptr i16, i16* %A, i32 24
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld1x3
}
;CHECK-LABEL: test_v8i16_post_reg_ld1x3:
;CHECK: ld1.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x3.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld1x3
}
;CHECK-LABEL: test_v4i16_post_imm_ld1x3:
;CHECK: ld1.4h { v0, v1, v2 }, [x0], #24
%ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 12
+ %tmp = getelementptr i16, i16* %A, i32 12
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld1x3
}
;CHECK-LABEL: test_v4i16_post_reg_ld1x3:
;CHECK: ld1.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x3.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld1x3
}
;CHECK-LABEL: test_v4i32_post_imm_ld1x3:
;CHECK: ld1.4s { v0, v1, v2 }, [x0], #48
%ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 12
+ %tmp = getelementptr i32, i32* %A, i32 12
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld1x3
}
;CHECK-LABEL: test_v4i32_post_reg_ld1x3:
;CHECK: ld1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x3.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld1x3
}
;CHECK-LABEL: test_v2i32_post_imm_ld1x3:
;CHECK: ld1.2s { v0, v1, v2 }, [x0], #24
%ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 6
+ %tmp = getelementptr i32, i32* %A, i32 6
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld1x3
}
;CHECK-LABEL: test_v2i32_post_reg_ld1x3:
;CHECK: ld1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x3.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld1x3
}
;CHECK-LABEL: test_v2i64_post_imm_ld1x3:
;CHECK: ld1.2d { v0, v1, v2 }, [x0], #48
%ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 6
+ %tmp = getelementptr i64, i64* %A, i32 6
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld1x3
}
;CHECK-LABEL: test_v2i64_post_reg_ld1x3:
;CHECK: ld1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x3.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld1x3
}
;CHECK-LABEL: test_v1i64_post_imm_ld1x3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
%ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 3
+ %tmp = getelementptr i64, i64* %A, i32 3
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld1x3
}
;CHECK-LABEL: test_v1i64_post_reg_ld1x3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x3.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld1x3
}
;CHECK-LABEL: test_v4f32_post_imm_ld1x3:
;CHECK: ld1.4s { v0, v1, v2 }, [x0], #48
%ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 12
+ %tmp = getelementptr float, float* %A, i32 12
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld1x3
}
;CHECK-LABEL: test_v4f32_post_reg_ld1x3:
;CHECK: ld1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x3.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld1x3
}
;CHECK-LABEL: test_v2f32_post_imm_ld1x3:
;CHECK: ld1.2s { v0, v1, v2 }, [x0], #24
%ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 6
+ %tmp = getelementptr float, float* %A, i32 6
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld1x3
}
;CHECK-LABEL: test_v2f32_post_reg_ld1x3:
;CHECK: ld1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x3.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld1x3
}
;CHECK-LABEL: test_v2f64_post_imm_ld1x3:
;CHECK: ld1.2d { v0, v1, v2 }, [x0], #48
%ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 6
+ %tmp = getelementptr double, double* %A, i32 6
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld1x3
}
;CHECK-LABEL: test_v2f64_post_reg_ld1x3:
;CHECK: ld1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x3.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld1x3
}
;CHECK-LABEL: test_v1f64_post_imm_ld1x3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], #24
%ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 3
+ %tmp = getelementptr double, double* %A, i32 3
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld1x3
}
;CHECK-LABEL: test_v1f64_post_reg_ld1x3:
;CHECK: ld1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld1x3 = tail call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x3.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld1x3
}
;CHECK-LABEL: test_v16i8_post_imm_ld1x4:
;CHECK: ld1.16b { v0, v1, v2, v3 }, [x0], #64
%ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 64
+ %tmp = getelementptr i8, i8* %A, i32 64
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld1x4
}
;CHECK-LABEL: test_v16i8_post_reg_ld1x4:
;CHECK: ld1.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld1x4.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld1x4
}
;CHECK-LABEL: test_v8i8_post_imm_ld1x4:
;CHECK: ld1.8b { v0, v1, v2, v3 }, [x0], #32
%ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld1x4
}
;CHECK-LABEL: test_v8i8_post_reg_ld1x4:
;CHECK: ld1.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld1x4.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld1x4
}
;CHECK-LABEL: test_v8i16_post_imm_ld1x4:
;CHECK: ld1.8h { v0, v1, v2, v3 }, [x0], #64
%ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 32
+ %tmp = getelementptr i16, i16* %A, i32 32
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld1x4
}
;CHECK-LABEL: test_v8i16_post_reg_ld1x4:
;CHECK: ld1.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld1x4.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld1x4
}
;CHECK-LABEL: test_v4i16_post_imm_ld1x4:
;CHECK: ld1.4h { v0, v1, v2, v3 }, [x0], #32
%ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld1x4
}
;CHECK-LABEL: test_v4i16_post_reg_ld1x4:
;CHECK: ld1.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld1x4.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld1x4
}
;CHECK-LABEL: test_v4i32_post_imm_ld1x4:
;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], #64
%ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 16
+ %tmp = getelementptr i32, i32* %A, i32 16
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld1x4
}
;CHECK-LABEL: test_v4i32_post_reg_ld1x4:
;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld1x4.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld1x4
}
;CHECK-LABEL: test_v2i32_post_imm_ld1x4:
;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], #32
%ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld1x4
}
;CHECK-LABEL: test_v2i32_post_reg_ld1x4:
;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld1x4.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld1x4
}
;CHECK-LABEL: test_v2i64_post_imm_ld1x4:
;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], #64
%ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 8
+ %tmp = getelementptr i64, i64* %A, i32 8
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld1x4
}
;CHECK-LABEL: test_v2i64_post_reg_ld1x4:
;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld1x4.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld1x4
}
;CHECK-LABEL: test_v1i64_post_imm_ld1x4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
%ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld1x4
}
;CHECK-LABEL: test_v1i64_post_reg_ld1x4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld1x4.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld1x4
}
;CHECK-LABEL: test_v4f32_post_imm_ld1x4:
;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], #64
%ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 16
+ %tmp = getelementptr float, float* %A, i32 16
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld1x4
}
;CHECK-LABEL: test_v4f32_post_reg_ld1x4:
;CHECK: ld1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld1x4.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld1x4
}
;CHECK-LABEL: test_v2f32_post_imm_ld1x4:
;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], #32
%ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld1x4
}
;CHECK-LABEL: test_v2f32_post_reg_ld1x4:
;CHECK: ld1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld1x4.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld1x4
}
;CHECK-LABEL: test_v2f64_post_imm_ld1x4:
;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], #64
%ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 8
+ %tmp = getelementptr double, double* %A, i32 8
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld1x4
}
;CHECK-LABEL: test_v2f64_post_reg_ld1x4:
;CHECK: ld1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld1x4.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld1x4
}
;CHECK-LABEL: test_v1f64_post_imm_ld1x4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], #32
%ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld1x4
}
;CHECK-LABEL: test_v1f64_post_reg_ld1x4:
;CHECK: ld1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld1x4 = tail call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld1x4.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld1x4
}
;CHECK-LABEL: test_v16i8_post_imm_ld2r:
;CHECK: ld2r.16b { v0, v1 }, [x0], #2
%ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 2
+ %tmp = getelementptr i8, i8* %A, i32 2
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
;CHECK-LABEL: test_v16i8_post_reg_ld2r:
;CHECK: ld2r.16b { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2r.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
;CHECK-LABEL: test_v8i8_post_imm_ld2r:
;CHECK: ld2r.8b { v0, v1 }, [x0], #2
%ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 2
+ %tmp = getelementptr i8, i8* %A, i32 2
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld2
}
;CHECK-LABEL: test_v8i8_post_reg_ld2r:
;CHECK: ld2r.8b { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2r.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld2
}
;CHECK-LABEL: test_v8i16_post_imm_ld2r:
;CHECK: ld2r.8h { v0, v1 }, [x0], #4
%ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 2
+ %tmp = getelementptr i16, i16* %A, i32 2
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld2
}
;CHECK-LABEL: test_v8i16_post_reg_ld2r:
;CHECK: ld2r.8h { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2r.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld2
}
;CHECK-LABEL: test_v4i16_post_imm_ld2r:
;CHECK: ld2r.4h { v0, v1 }, [x0], #4
%ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 2
+ %tmp = getelementptr i16, i16* %A, i32 2
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld2
}
;CHECK-LABEL: test_v4i16_post_reg_ld2r:
;CHECK: ld2r.4h { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2r.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld2
}
;CHECK-LABEL: test_v4i32_post_imm_ld2r:
;CHECK: ld2r.4s { v0, v1 }, [x0], #8
%ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 2
+ %tmp = getelementptr i32, i32* %A, i32 2
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld2
}
;CHECK-LABEL: test_v4i32_post_reg_ld2r:
;CHECK: ld2r.4s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2r.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld2
}
;CHECK-LABEL: test_v2i32_post_imm_ld2r:
;CHECK: ld2r.2s { v0, v1 }, [x0], #8
%ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 2
+ %tmp = getelementptr i32, i32* %A, i32 2
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld2
}
;CHECK-LABEL: test_v2i32_post_reg_ld2r:
;CHECK: ld2r.2s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2r.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld2
}
;CHECK-LABEL: test_v2i64_post_imm_ld2r:
;CHECK: ld2r.2d { v0, v1 }, [x0], #16
%ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 2
+ %tmp = getelementptr i64, i64* %A, i32 2
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld2
}
;CHECK-LABEL: test_v2i64_post_reg_ld2r:
;CHECK: ld2r.2d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2r.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld2
}
;CHECK-LABEL: test_v1i64_post_imm_ld2r:
;CHECK: ld2r.1d { v0, v1 }, [x0], #16
%ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 2
+ %tmp = getelementptr i64, i64* %A, i32 2
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld2
}
;CHECK-LABEL: test_v1i64_post_reg_ld2r:
;CHECK: ld2r.1d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2r.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld2
}
;CHECK-LABEL: test_v4f32_post_imm_ld2r:
;CHECK: ld2r.4s { v0, v1 }, [x0], #8
%ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 2
+ %tmp = getelementptr float, float* %A, i32 2
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld2
}
;CHECK-LABEL: test_v4f32_post_reg_ld2r:
;CHECK: ld2r.4s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2r.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld2
}
;CHECK-LABEL: test_v2f32_post_imm_ld2r:
;CHECK: ld2r.2s { v0, v1 }, [x0], #8
%ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 2
+ %tmp = getelementptr float, float* %A, i32 2
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld2
}
;CHECK-LABEL: test_v2f32_post_reg_ld2r:
;CHECK: ld2r.2s { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2r.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld2
}
;CHECK-LABEL: test_v2f64_post_imm_ld2r:
;CHECK: ld2r.2d { v0, v1 }, [x0], #16
%ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 2
+ %tmp = getelementptr double, double* %A, i32 2
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld2
}
;CHECK-LABEL: test_v2f64_post_reg_ld2r:
;CHECK: ld2r.2d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2r.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld2
}
;CHECK-LABEL: test_v1f64_post_imm_ld2r:
;CHECK: ld2r.1d { v0, v1 }, [x0], #16
%ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 2
+ %tmp = getelementptr double, double* %A, i32 2
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld2
}
;CHECK-LABEL: test_v1f64_post_reg_ld2r:
;CHECK: ld2r.1d { v0, v1 }, [x0], x{{[0-9]+}}
%ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2r.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld2
}
;CHECK-LABEL: test_v16i8_post_imm_ld3r:
;CHECK: ld3r.16b { v0, v1, v2 }, [x0], #3
%ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 3
+ %tmp = getelementptr i8, i8* %A, i32 3
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
}
;CHECK-LABEL: test_v16i8_post_reg_ld3r:
;CHECK: ld3r.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3r.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
}
;CHECK-LABEL: test_v8i8_post_imm_ld3r:
;CHECK: ld3r.8b { v0, v1, v2 }, [x0], #3
%ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 3
+ %tmp = getelementptr i8, i8* %A, i32 3
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
}
;CHECK-LABEL: test_v8i8_post_reg_ld3r:
;CHECK: ld3r.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3r.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
}
;CHECK-LABEL: test_v8i16_post_imm_ld3r:
;CHECK: ld3r.8h { v0, v1, v2 }, [x0], #6
%ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 3
+ %tmp = getelementptr i16, i16* %A, i32 3
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
}
;CHECK-LABEL: test_v8i16_post_reg_ld3r:
;CHECK: ld3r.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3r.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
}
;CHECK-LABEL: test_v4i16_post_imm_ld3r:
;CHECK: ld3r.4h { v0, v1, v2 }, [x0], #6
%ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 3
+ %tmp = getelementptr i16, i16* %A, i32 3
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
}
;CHECK-LABEL: test_v4i16_post_reg_ld3r:
;CHECK: ld3r.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3r.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
}
;CHECK-LABEL: test_v4i32_post_imm_ld3r:
;CHECK: ld3r.4s { v0, v1, v2 }, [x0], #12
%ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 3
+ %tmp = getelementptr i32, i32* %A, i32 3
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
}
;CHECK-LABEL: test_v4i32_post_reg_ld3r:
;CHECK: ld3r.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3r.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
}
;CHECK-LABEL: test_v2i32_post_imm_ld3r:
;CHECK: ld3r.2s { v0, v1, v2 }, [x0], #12
%ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 3
+ %tmp = getelementptr i32, i32* %A, i32 3
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
}
;CHECK-LABEL: test_v2i32_post_reg_ld3r:
;CHECK: ld3r.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3r.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
}
;CHECK-LABEL: test_v2i64_post_imm_ld3r:
;CHECK: ld3r.2d { v0, v1, v2 }, [x0], #24
%ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 3
+ %tmp = getelementptr i64, i64* %A, i32 3
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
}
;CHECK-LABEL: test_v2i64_post_reg_ld3r:
;CHECK: ld3r.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3r.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
}
;CHECK-LABEL: test_v1i64_post_imm_ld3r:
;CHECK: ld3r.1d { v0, v1, v2 }, [x0], #24
%ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 3
+ %tmp = getelementptr i64, i64* %A, i32 3
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
}
;CHECK-LABEL: test_v1i64_post_reg_ld3r:
;CHECK: ld3r.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3r.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
}
;CHECK-LABEL: test_v4f32_post_imm_ld3r:
;CHECK: ld3r.4s { v0, v1, v2 }, [x0], #12
%ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 3
+ %tmp = getelementptr float, float* %A, i32 3
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld3
}
;CHECK-LABEL: test_v4f32_post_reg_ld3r:
;CHECK: ld3r.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3r.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld3
}
;CHECK-LABEL: test_v2f32_post_imm_ld3r:
;CHECK: ld3r.2s { v0, v1, v2 }, [x0], #12
%ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 3
+ %tmp = getelementptr float, float* %A, i32 3
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld3
}
;CHECK-LABEL: test_v2f32_post_reg_ld3r:
;CHECK: ld3r.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3r.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld3
}
;CHECK-LABEL: test_v2f64_post_imm_ld3r:
;CHECK: ld3r.2d { v0, v1, v2 }, [x0], #24
%ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 3
+ %tmp = getelementptr double, double* %A, i32 3
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld3
}
;CHECK-LABEL: test_v2f64_post_reg_ld3r:
;CHECK: ld3r.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3r.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld3
}
;CHECK-LABEL: test_v1f64_post_imm_ld3r:
;CHECK: ld3r.1d { v0, v1, v2 }, [x0], #24
%ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 3
+ %tmp = getelementptr double, double* %A, i32 3
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld3
}
;CHECK-LABEL: test_v1f64_post_reg_ld3r:
;CHECK: ld3r.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
%ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3r.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld3
}
;CHECK-LABEL: test_v16i8_post_imm_ld4r:
;CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0], #4
%ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 4
+ %tmp = getelementptr i8, i8* %A, i32 4
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
}
;CHECK-LABEL: test_v16i8_post_reg_ld4r:
;CHECK: ld4r.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4r.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
}
;CHECK-LABEL: test_v8i8_post_imm_ld4r:
;CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0], #4
%ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 4
+ %tmp = getelementptr i8, i8* %A, i32 4
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
}
;CHECK-LABEL: test_v8i8_post_reg_ld4r:
;CHECK: ld4r.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4r.v8i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
}
;CHECK-LABEL: test_v8i16_post_imm_ld4r:
;CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0], #8
%ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 4
+ %tmp = getelementptr i16, i16* %A, i32 4
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
}
;CHECK-LABEL: test_v8i16_post_reg_ld4r:
;CHECK: ld4r.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4r.v8i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
}
;CHECK-LABEL: test_v4i16_post_imm_ld4r:
;CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0], #8
%ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i32 4
+ %tmp = getelementptr i16, i16* %A, i32 4
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
}
;CHECK-LABEL: test_v4i16_post_reg_ld4r:
;CHECK: ld4r.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4r.v4i16.p0i16(i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
}
;CHECK-LABEL: test_v4i32_post_imm_ld4r:
;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], #16
%ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
}
;CHECK-LABEL: test_v4i32_post_reg_ld4r:
;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4r.v4i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
}
;CHECK-LABEL: test_v2i32_post_imm_ld4r:
;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], #16
%ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
}
;CHECK-LABEL: test_v2i32_post_reg_ld4r:
;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4r.v2i32.p0i32(i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
}
;CHECK-LABEL: test_v2i64_post_imm_ld4r:
;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], #32
%ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
}
;CHECK-LABEL: test_v2i64_post_reg_ld4r:
;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4r.v2i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
}
;CHECK-LABEL: test_v1i64_post_imm_ld4r:
;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], #32
%ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
}
;CHECK-LABEL: test_v1i64_post_reg_ld4r:
;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4r.v1i64.p0i64(i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
}
;CHECK-LABEL: test_v4f32_post_imm_ld4r:
;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], #16
%ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
}
;CHECK-LABEL: test_v4f32_post_reg_ld4r:
;CHECK: ld4r.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4r.v4f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
}
;CHECK-LABEL: test_v2f32_post_imm_ld4r:
;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], #16
%ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
}
;CHECK-LABEL: test_v2f32_post_reg_ld4r:
;CHECK: ld4r.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4r.v2f32.p0f32(float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
}
;CHECK-LABEL: test_v2f64_post_imm_ld4r:
;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], #32
%ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
}
;CHECK-LABEL: test_v2f64_post_reg_ld4r:
;CHECK: ld4r.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4r.v2f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
}
;CHECK-LABEL: test_v1f64_post_imm_ld4r:
;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], #32
%ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
}
;CHECK-LABEL: test_v1f64_post_reg_ld4r:
;CHECK: ld4r.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
%ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4r.v1f64.p0f64(double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
}
;CHECK-LABEL: test_v16i8_post_imm_ld2lane:
;CHECK: ld2.b { v0, v1 }[0], [x0], #2
%ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 2
+ %tmp = getelementptr i8, i8* %A, i32 2
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
;CHECK-LABEL: test_v16i8_post_reg_ld2lane:
;CHECK: ld2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
;CHECK-LABEL: test_v8i8_post_imm_ld2lane:
;CHECK: ld2.b { v0, v1 }[0], [x0], #2
%ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 2
+ %tmp = getelementptr i8, i8* %A, i32 2
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld2
}
;CHECK-LABEL: test_v8i8_post_reg_ld2lane:
;CHECK: ld2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8> } %ld2
}
;CHECK-LABEL: test_v8i16_post_imm_ld2lane:
;CHECK: ld2.h { v0, v1 }[0], [x0], #4
%ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 2
+ %tmp = getelementptr i16, i16* %A, i32 2
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld2
}
;CHECK-LABEL: test_v8i16_post_reg_ld2lane:
;CHECK: ld2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16> } %ld2
}
;CHECK-LABEL: test_v4i16_post_imm_ld2lane:
;CHECK: ld2.h { v0, v1 }[0], [x0], #4
%ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 2
+ %tmp = getelementptr i16, i16* %A, i32 2
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld2
}
;CHECK-LABEL: test_v4i16_post_reg_ld2lane:
;CHECK: ld2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16> } %ld2
}
;CHECK-LABEL: test_v4i32_post_imm_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], #8
%ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 2
+ %tmp = getelementptr i32, i32* %A, i32 2
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld2
}
;CHECK-LABEL: test_v4i32_post_reg_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32> } %ld2
}
;CHECK-LABEL: test_v2i32_post_imm_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], #8
%ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 2
+ %tmp = getelementptr i32, i32* %A, i32 2
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld2
}
;CHECK-LABEL: test_v2i32_post_reg_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32> } %ld2
}
;CHECK-LABEL: test_v2i64_post_imm_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], #16
%ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i32 2
+ %tmp = getelementptr i64, i64* %A, i32 2
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld2
}
;CHECK-LABEL: test_v2i64_post_reg_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64> } %ld2
}
;CHECK-LABEL: test_v1i64_post_imm_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], #16
%ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i32 2
+ %tmp = getelementptr i64, i64* %A, i32 2
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld2
}
;CHECK-LABEL: test_v1i64_post_reg_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64> } %ld2
}
;CHECK-LABEL: test_v4f32_post_imm_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], #8
%ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 2
+ %tmp = getelementptr float, float* %A, i32 2
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld2
}
;CHECK-LABEL: test_v4f32_post_reg_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <4 x float>, <4 x float> } @llvm.aarch64.neon.ld2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float> } %ld2
}
;CHECK-LABEL: test_v2f32_post_imm_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], #8
%ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 2
+ %tmp = getelementptr float, float* %A, i32 2
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld2
}
;CHECK-LABEL: test_v2f32_post_reg_ld2lane:
;CHECK: ld2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <2 x float>, <2 x float> } @llvm.aarch64.neon.ld2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float> } %ld2
}
;CHECK-LABEL: test_v2f64_post_imm_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], #16
%ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i32 2
+ %tmp = getelementptr double, double* %A, i32 2
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld2
}
;CHECK-LABEL: test_v2f64_post_reg_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <2 x double>, <2 x double> } @llvm.aarch64.neon.ld2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double> } %ld2
}
;CHECK-LABEL: test_v1f64_post_imm_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], #16
%ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i32 2
+ %tmp = getelementptr double, double* %A, i32 2
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld2
}
;CHECK-LABEL: test_v1f64_post_reg_ld2lane:
;CHECK: ld2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
%ld2 = call { <1 x double>, <1 x double> } @llvm.aarch64.neon.ld2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double> } %ld2
}
;CHECK-LABEL: test_v16i8_post_imm_ld3lane:
;CHECK: ld3.b { v0, v1, v2 }[0], [x0], #3
%ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 3
+ %tmp = getelementptr i8, i8* %A, i32 3
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
}
;CHECK-LABEL: test_v16i8_post_reg_ld3lane:
;CHECK: ld3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8> } %ld3
}
;CHECK-LABEL: test_v8i8_post_imm_ld3lane:
;CHECK: ld3.b { v0, v1, v2 }[0], [x0], #3
%ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 3
+ %tmp = getelementptr i8, i8* %A, i32 3
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
}
;CHECK-LABEL: test_v8i8_post_reg_ld3lane:
;CHECK: ld3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8> } %ld3
}
;CHECK-LABEL: test_v8i16_post_imm_ld3lane:
;CHECK: ld3.h { v0, v1, v2 }[0], [x0], #6
%ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 3
+ %tmp = getelementptr i16, i16* %A, i32 3
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
}
;CHECK-LABEL: test_v8i16_post_reg_ld3lane:
;CHECK: ld3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16> } %ld3
}
;CHECK-LABEL: test_v4i16_post_imm_ld3lane:
;CHECK: ld3.h { v0, v1, v2 }[0], [x0], #6
%ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 3
+ %tmp = getelementptr i16, i16* %A, i32 3
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
}
;CHECK-LABEL: test_v4i16_post_reg_ld3lane:
;CHECK: ld3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16> } %ld3
}
;CHECK-LABEL: test_v4i32_post_imm_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
%ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 3
+ %tmp = getelementptr i32, i32* %A, i32 3
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
}
;CHECK-LABEL: test_v4i32_post_reg_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32> } %ld3
}
;CHECK-LABEL: test_v2i32_post_imm_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
%ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 3
+ %tmp = getelementptr i32, i32* %A, i32 3
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
}
;CHECK-LABEL: test_v2i32_post_reg_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32> } %ld3
}
;CHECK-LABEL: test_v2i64_post_imm_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
%ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i32 3
+ %tmp = getelementptr i64, i64* %A, i32 3
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
}
;CHECK-LABEL: test_v2i64_post_reg_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64> } %ld3
}
;CHECK-LABEL: test_v1i64_post_imm_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
%ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i32 3
+ %tmp = getelementptr i64, i64* %A, i32 3
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
}
;CHECK-LABEL: test_v1i64_post_reg_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64> } %ld3
}
;CHECK-LABEL: test_v4f32_post_imm_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
%ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 3
+ %tmp = getelementptr float, float* %A, i32 3
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld3
}
;CHECK-LABEL: test_v4f32_post_reg_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float> } %ld3
}
;CHECK-LABEL: test_v2f32_post_imm_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], #12
%ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 3
+ %tmp = getelementptr float, float* %A, i32 3
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld3
}
;CHECK-LABEL: test_v2f32_post_reg_ld3lane:
;CHECK: ld3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float> } %ld3
}
;CHECK-LABEL: test_v2f64_post_imm_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
%ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i32 3
+ %tmp = getelementptr double, double* %A, i32 3
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld3
}
;CHECK-LABEL: test_v2f64_post_reg_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double> } %ld3
}
;CHECK-LABEL: test_v1f64_post_imm_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], #24
%ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i32 3
+ %tmp = getelementptr double, double* %A, i32 3
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld3
}
;CHECK-LABEL: test_v1f64_post_reg_ld3lane:
;CHECK: ld3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
%ld3 = call { <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double> } %ld3
}
;CHECK-LABEL: test_v16i8_post_imm_ld4lane:
;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], #4
%ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 4
+ %tmp = getelementptr i8, i8* %A, i32 4
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
}
;CHECK-LABEL: test_v16i8_post_reg_ld4lane:
;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld4
}
;CHECK-LABEL: test_v8i8_post_imm_ld4lane:
;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], #4
%ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 4
+ %tmp = getelementptr i8, i8* %A, i32 4
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
}
;CHECK-LABEL: test_v8i8_post_reg_ld4lane:
;CHECK: ld4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
store i8* %tmp, i8** %ptr
ret { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %ld4
}
;CHECK-LABEL: test_v8i16_post_imm_ld4lane:
;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], #8
%ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 4
+ %tmp = getelementptr i16, i16* %A, i32 4
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
}
;CHECK-LABEL: test_v8i16_post_reg_ld4lane:
;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } @llvm.aarch64.neon.ld4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <8 x i16>, <8 x i16>, <8 x i16>, <8 x i16> } %ld4
}
;CHECK-LABEL: test_v4i16_post_imm_ld4lane:
;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], #8
%ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 4
+ %tmp = getelementptr i16, i16* %A, i32 4
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
}
;CHECK-LABEL: test_v4i16_post_reg_ld4lane:
;CHECK: ld4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
store i16* %tmp, i16** %ptr
ret { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %ld4
}
;CHECK-LABEL: test_v4i32_post_imm_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
%ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
}
;CHECK-LABEL: test_v4i32_post_reg_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %ld4
}
;CHECK-LABEL: test_v2i32_post_imm_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
%ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
}
;CHECK-LABEL: test_v2i32_post_reg_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
store i32* %tmp, i32** %ptr
ret { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> } %ld4
}
;CHECK-LABEL: test_v2i64_post_imm_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
%ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
}
;CHECK-LABEL: test_v2i64_post_reg_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.aarch64.neon.ld4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } %ld4
}
;CHECK-LABEL: test_v1i64_post_imm_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
%ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i32 4
+ %tmp = getelementptr i64, i64* %A, i32 4
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
}
;CHECK-LABEL: test_v1i64_post_reg_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } @llvm.aarch64.neon.ld4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
store i64* %tmp, i64** %ptr
ret { <1 x i64>, <1 x i64>, <1 x i64>, <1 x i64> } %ld4
}
;CHECK-LABEL: test_v4f32_post_imm_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
%ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
}
;CHECK-LABEL: test_v4f32_post_reg_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <4 x float>, <4 x float>, <4 x float>, <4 x float> } %ld4
}
;CHECK-LABEL: test_v2f32_post_imm_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], #16
%ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
}
;CHECK-LABEL: test_v2f32_post_reg_ld4lane:
;CHECK: ld4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <2 x float>, <2 x float>, <2 x float>, <2 x float> } @llvm.aarch64.neon.ld4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
store float* %tmp, float** %ptr
ret { <2 x float>, <2 x float>, <2 x float>, <2 x float> } %ld4
}
;CHECK-LABEL: test_v2f64_post_imm_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
%ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
}
;CHECK-LABEL: test_v2f64_post_reg_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <2 x double>, <2 x double>, <2 x double>, <2 x double> } @llvm.aarch64.neon.ld4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <2 x double>, <2 x double>, <2 x double>, <2 x double> } %ld4
}
;CHECK-LABEL: test_v1f64_post_imm_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], #32
%ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i32 4
+ %tmp = getelementptr double, double* %A, i32 4
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
}
;CHECK-LABEL: test_v1f64_post_reg_ld4lane:
;CHECK: ld4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
%ld4 = call { <1 x double>, <1 x double>, <1 x double>, <1 x double> } @llvm.aarch64.neon.ld4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
store double* %tmp, double** %ptr
ret { <1 x double>, <1 x double>, <1 x double>, <1 x double> } %ld4
}
;CHECK-LABEL: test_v16i8_post_imm_st2:
;CHECK: st2.16b { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st2:
;CHECK: st2.16b { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st2:
;CHECK: st2.8b { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i32 16
+ %tmp = getelementptr i8, i8* %A, i32 16
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st2:
;CHECK: st2.8b { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st2:
;CHECK: st2.8h { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st2:
;CHECK: st2.8h { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st2:
;CHECK: st2.4h { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i32 8
+ %tmp = getelementptr i16, i16* %A, i32 8
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st2:
;CHECK: st2.4h { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st2:
;CHECK: st2.4s { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st2:
;CHECK: st2.4s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st2:
;CHECK: st2.2s { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st2:
;CHECK: st2.2s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st2:
;CHECK: st2.2d { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 4
+ %tmp = getelementptr i64, i64* %A, i64 4
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st2:
;CHECK: st2.2d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st2:
;CHECK: st1.1d { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 2
+ %tmp = getelementptr i64, i64* %A, i64 2
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st2:
;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st2:
;CHECK: st2.4s { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st2:
;CHECK: st2.4s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st2:
;CHECK: st2.2s { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st2:
;CHECK: st2.2s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st2:
;CHECK: st2.2d { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 4
+ %tmp = getelementptr double, double* %A, i64 4
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st2:
;CHECK: st2.2d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st2:
;CHECK: st1.1d { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 2
+ %tmp = getelementptr double, double* %A, i64 2
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st2:
;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st3:
;CHECK: st3.16b { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i32 48
+ %tmp = getelementptr i8, i8* %A, i32 48
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st3:
;CHECK: st3.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st3:
;CHECK: st3.8b { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i32 24
+ %tmp = getelementptr i8, i8* %A, i32 24
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st3:
;CHECK: st3.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st3:
;CHECK: st3.8h { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i32 24
+ %tmp = getelementptr i16, i16* %A, i32 24
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st3:
;CHECK: st3.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st3:
;CHECK: st3.4h { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i32 12
+ %tmp = getelementptr i16, i16* %A, i32 12
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st3:
;CHECK: st3.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st3:
;CHECK: st3.4s { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i32 12
+ %tmp = getelementptr i32, i32* %A, i32 12
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st3:
;CHECK: st3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st3:
;CHECK: st3.2s { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i32 6
+ %tmp = getelementptr i32, i32* %A, i32 6
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st3:
;CHECK: st3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st3:
;CHECK: st3.2d { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 6
+ %tmp = getelementptr i64, i64* %A, i64 6
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st3:
;CHECK: st3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 3
+ %tmp = getelementptr i64, i64* %A, i64 3
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st3:
;CHECK: st3.4s { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i32 12
+ %tmp = getelementptr float, float* %A, i32 12
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st3:
;CHECK: st3.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st3:
;CHECK: st3.2s { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i32 6
+ %tmp = getelementptr float, float* %A, i32 6
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st3:
;CHECK: st3.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st3:
;CHECK: st3.2d { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 6
+ %tmp = getelementptr double, double* %A, i64 6
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st3:
;CHECK: st3.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 3
+ %tmp = getelementptr double, double* %A, i64 3
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st4:
;CHECK: st4.16b { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i32 64
+ %tmp = getelementptr i8, i8* %A, i32 64
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st4:
;CHECK: st4.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st4:
;CHECK: st4.8b { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st4:
;CHECK: st4.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st4:
;CHECK: st4.8h { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i32 32
+ %tmp = getelementptr i16, i16* %A, i32 32
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st4:
;CHECK: st4.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st4:
;CHECK: st4.4h { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st4:
;CHECK: st4.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st4:
;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i32 16
+ %tmp = getelementptr i32, i32* %A, i32 16
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st4:
;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st4:
;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st4:
;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st4:
;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 8
+ %tmp = getelementptr i64, i64* %A, i64 8
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st4:
;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 4
+ %tmp = getelementptr i64, i64* %A, i64 4
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st4:
;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i32 16
+ %tmp = getelementptr float, float* %A, i32 16
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st4:
;CHECK: st4.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st4:
;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st4:
;CHECK: st4.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st4:
;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 8
+ %tmp = getelementptr double, double* %A, i64 8
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st4:
;CHECK: st4.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 4
+ %tmp = getelementptr double, double* %A, i64 4
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st1x2:
;CHECK: st1.16b { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st1x2:
;CHECK: st1.16b { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st1x2:
;CHECK: st1.8b { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i32 16
+ %tmp = getelementptr i8, i8* %A, i32 16
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st1x2:
;CHECK: st1.8b { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st1x2:
;CHECK: st1.8h { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st1x2:
;CHECK: st1.8h { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st1x2:
;CHECK: st1.4h { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i32 8
+ %tmp = getelementptr i16, i16* %A, i32 8
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st1x2:
;CHECK: st1.4h { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st1x2:
;CHECK: st1.4s { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st1x2:
;CHECK: st1.4s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st1x2:
;CHECK: st1.2s { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st1x2:
;CHECK: st1.2s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st1x2:
;CHECK: st1.2d { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 4
+ %tmp = getelementptr i64, i64* %A, i64 4
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st1x2:
;CHECK: st1.2d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st1x2:
;CHECK: st1.1d { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 2
+ %tmp = getelementptr i64, i64* %A, i64 2
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st1x2:
;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st1x2:
;CHECK: st1.4s { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st1x2:
;CHECK: st1.4s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v4f32.p0f32(<4 x float> %B, <4 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st1x2:
;CHECK: st1.2s { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st1x2:
;CHECK: st1.2s { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v2f32.p0f32(<2 x float> %B, <2 x float> %C, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st1x2:
;CHECK: st1.2d { v0, v1 }, [x0], #32
call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 4
+ %tmp = getelementptr double, double* %A, i64 4
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st1x2:
;CHECK: st1.2d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v2f64.p0f64(<2 x double> %B, <2 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st1x2:
;CHECK: st1.1d { v0, v1 }, [x0], #16
call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 2
+ %tmp = getelementptr double, double* %A, i64 2
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st1x2:
;CHECK: st1.1d { v0, v1 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x2.v1f64.p0f64(<1 x double> %B, <1 x double> %C, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st1x3:
;CHECK: st1.16b { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i32 48
+ %tmp = getelementptr i8, i8* %A, i32 48
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st1x3:
;CHECK: st1.16b { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st1x3:
;CHECK: st1.8b { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i32 24
+ %tmp = getelementptr i8, i8* %A, i32 24
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st1x3:
;CHECK: st1.8b { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st1x3:
;CHECK: st1.8h { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i32 24
+ %tmp = getelementptr i16, i16* %A, i32 24
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st1x3:
;CHECK: st1.8h { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st1x3:
;CHECK: st1.4h { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i32 12
+ %tmp = getelementptr i16, i16* %A, i32 12
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st1x3:
;CHECK: st1.4h { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st1x3:
;CHECK: st1.4s { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i32 12
+ %tmp = getelementptr i32, i32* %A, i32 12
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st1x3:
;CHECK: st1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st1x3:
;CHECK: st1.2s { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i32 6
+ %tmp = getelementptr i32, i32* %A, i32 6
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st1x3:
;CHECK: st1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st1x3:
;CHECK: st1.2d { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 6
+ %tmp = getelementptr i64, i64* %A, i64 6
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st1x3:
;CHECK: st1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st1x3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 3
+ %tmp = getelementptr i64, i64* %A, i64 3
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st1x3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st1x3:
;CHECK: st1.4s { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i32 12
+ %tmp = getelementptr float, float* %A, i32 12
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st1x3:
;CHECK: st1.4s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st1x3:
;CHECK: st1.2s { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i32 6
+ %tmp = getelementptr float, float* %A, i32 6
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st1x3:
;CHECK: st1.2s { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st1x3:
;CHECK: st1.2d { v0, v1, v2 }, [x0], #48
call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 6
+ %tmp = getelementptr double, double* %A, i64 6
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st1x3:
;CHECK: st1.2d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st1x3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], #24
call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 3
+ %tmp = getelementptr double, double* %A, i64 3
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st1x3:
;CHECK: st1.1d { v0, v1, v2 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x3.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st1x4:
;CHECK: st1.16b { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i32 64
+ %tmp = getelementptr i8, i8* %A, i32 64
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st1x4:
;CHECK: st1.16b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st1x4:
;CHECK: st1.8b { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st1x4:
;CHECK: st1.8b { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st1x4:
;CHECK: st1.8h { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i32 32
+ %tmp = getelementptr i16, i16* %A, i32 32
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st1x4:
;CHECK: st1.8h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st1x4:
;CHECK: st1.4h { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i32 16
+ %tmp = getelementptr i16, i16* %A, i32 16
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st1x4:
;CHECK: st1.4h { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st1x4:
;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i32 16
+ %tmp = getelementptr i32, i32* %A, i32 16
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st1x4:
;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st1x4:
;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i32 8
+ %tmp = getelementptr i32, i32* %A, i32 8
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st1x4:
;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st1x4:
;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 8
+ %tmp = getelementptr i64, i64* %A, i64 8
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st1x4:
;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st1x4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 4
+ %tmp = getelementptr i64, i64* %A, i64 4
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st1x4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st1x4:
;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i32 16
+ %tmp = getelementptr float, float* %A, i32 16
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st1x4:
;CHECK: st1.4s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st1x4:
;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i32 8
+ %tmp = getelementptr float, float* %A, i32 8
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st1x4:
;CHECK: st1.2s { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st1x4:
;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], #64
call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 8
+ %tmp = getelementptr double, double* %A, i64 8
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st1x4:
;CHECK: st1.2d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st1x4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], #32
call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 4
+ %tmp = getelementptr double, double* %A, i64 4
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st1x4:
;CHECK: st1.1d { v0, v1, v2, v3 }, [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st1x4.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
; Post-indexed st2lanelane with an immediate offset: store two v16i8 lanes,
; then return the pointer advanced by 2 bytes (the write-back value).
; The diff pair below is the mechanical getelementptr migration: the `+` line
; carries the explicit pointee type (i8) that the `-` line left implicit.
; NOTE(review): unlike the surrounding tests, this function has no
; ;CHECK-LABEL/;CHECK lines — presumably intentional (IR-only coverage),
; but worth confirming against the RUN line at the top of the file.
define i8* @test_v16i8_post_imm_st2lanelane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C) {
  call void @llvm.aarch64.neon.st2lanelane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i64 1, i8* %A)
-  %tmp = getelementptr i8* %A, i32 2
+  %tmp = getelementptr i8, i8* %A, i32 2
  ret i8* %tmp
}
; Register-offset variant of the test above: same st2lanelane store, but the
; returned pointer is advanced by the runtime increment %inc instead of a
; constant, exercising the register write-back form.
; The `-`/`+` pair is the same explicit-pointee-type getelementptr migration.
; NOTE(review): also lacks ;CHECK lines — see the companion imm test; confirm
; this is deliberate.
define i8* @test_v16i8_post_reg_st2lanelane(i8* %A, i8** %ptr, <16 x i8> %B, <16 x i8> %C, i64 %inc) {
  call void @llvm.aarch64.neon.st2lanelane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i64 1, i8* %A)
-  %tmp = getelementptr i8* %A, i64 %inc
+  %tmp = getelementptr i8, i8* %A, i64 %inc
  ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st2lane:
;CHECK: st2.b { v0, v1 }[0], [x0], #2
call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 2
+ %tmp = getelementptr i8, i8* %A, i32 2
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st2lane:
;CHECK: st2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st2lane:
;CHECK: st2.b { v0, v1 }[0], [x0], #2
call void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 2
+ %tmp = getelementptr i8, i8* %A, i32 2
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st2lane:
;CHECK: st2.b { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st2lane:
;CHECK: st2.h { v0, v1 }[0], [x0], #4
call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 2
+ %tmp = getelementptr i16, i16* %A, i32 2
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st2lane:
;CHECK: st2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st2lane:
;CHECK: st2.h { v0, v1 }[0], [x0], #4
call void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 2
+ %tmp = getelementptr i16, i16* %A, i32 2
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st2lane:
;CHECK: st2.h { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], #8
call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 2
+ %tmp = getelementptr i32, i32* %A, i32 2
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], #8
call void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 2
+ %tmp = getelementptr i32, i32* %A, i32 2
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], #16
call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 2
+ %tmp = getelementptr i64, i64* %A, i64 2
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], #16
call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 2
+ %tmp = getelementptr i64, i64* %A, i64 2
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], #8
call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 2
+ %tmp = getelementptr float, float* %A, i32 2
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], #8
call void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 2
+ %tmp = getelementptr float, float* %A, i32 2
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st2lane:
;CHECK: st2.s { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], #16
call void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 2
+ %tmp = getelementptr double, double* %A, i64 2
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], #16
call void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 2
+ %tmp = getelementptr double, double* %A, i64 2
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st2lane:
;CHECK: st2.d { v0, v1 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st2lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st3lane:
;CHECK: st3.b { v0, v1, v2 }[0], [x0], #3
call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 3
+ %tmp = getelementptr i8, i8* %A, i32 3
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st3lane:
;CHECK: st3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st3lane:
;CHECK: st3.b { v0, v1, v2 }[0], [x0], #3
call void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 3
+ %tmp = getelementptr i8, i8* %A, i32 3
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st3lane:
;CHECK: st3.b { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st3lane:
;CHECK: st3.h { v0, v1, v2 }[0], [x0], #6
call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 3
+ %tmp = getelementptr i16, i16* %A, i32 3
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st3lane:
;CHECK: st3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st3lane:
;CHECK: st3.h { v0, v1, v2 }[0], [x0], #6
call void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 3
+ %tmp = getelementptr i16, i16* %A, i32 3
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st3lane:
;CHECK: st3.h { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 3
+ %tmp = getelementptr i32, i32* %A, i32 3
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
call void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 3
+ %tmp = getelementptr i32, i32* %A, i32 3
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 3
+ %tmp = getelementptr i64, i64* %A, i64 3
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 3
+ %tmp = getelementptr i64, i64* %A, i64 3
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 3
+ %tmp = getelementptr float, float* %A, i32 3
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], #12
call void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 3
+ %tmp = getelementptr float, float* %A, i32 3
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st3lane:
;CHECK: st3.s { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
call void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 3
+ %tmp = getelementptr double, double* %A, i64 3
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], #24
call void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 3
+ %tmp = getelementptr double, double* %A, i64 3
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st3lane:
;CHECK: st3.d { v0, v1, v2 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st3lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v16i8_post_imm_st4lane:
;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], #4
call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 4
+ %tmp = getelementptr i8, i8* %A, i32 4
ret i8* %tmp
}
;CHECK-LABEL: test_v16i8_post_reg_st4lane:
;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v16i8.p0i8(<16 x i8> %B, <16 x i8> %C, <16 x i8> %D, <16 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_imm_st4lane:
;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], #4
call void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i32 4
+ %tmp = getelementptr i8, i8* %A, i32 4
ret i8* %tmp
}
;CHECK-LABEL: test_v8i8_post_reg_st4lane:
;CHECK: st4.b { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v8i8.p0i8(<8 x i8> %B, <8 x i8> %C, <8 x i8> %D, <8 x i8> %E, i64 0, i8* %A)
- %tmp = getelementptr i8* %A, i64 %inc
+ %tmp = getelementptr i8, i8* %A, i64 %inc
ret i8* %tmp
}
;CHECK-LABEL: test_v8i16_post_imm_st4lane:
;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], #8
call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 4
+ %tmp = getelementptr i16, i16* %A, i32 4
ret i16* %tmp
}
;CHECK-LABEL: test_v8i16_post_reg_st4lane:
;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v8i16.p0i16(<8 x i16> %B, <8 x i16> %C, <8 x i16> %D, <8 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_imm_st4lane:
;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], #8
call void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i32 4
+ %tmp = getelementptr i16, i16* %A, i32 4
ret i16* %tmp
}
;CHECK-LABEL: test_v4i16_post_reg_st4lane:
;CHECK: st4.h { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v4i16.p0i16(<4 x i16> %B, <4 x i16> %C, <4 x i16> %D, <4 x i16> %E, i64 0, i16* %A)
- %tmp = getelementptr i16* %A, i64 %inc
+ %tmp = getelementptr i16, i16* %A, i64 %inc
ret i16* %tmp
}
;CHECK-LABEL: test_v4i32_post_imm_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
ret i32* %tmp
}
;CHECK-LABEL: test_v4i32_post_reg_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v4i32.p0i32(<4 x i32> %B, <4 x i32> %C, <4 x i32> %D, <4 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_imm_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
call void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i32 4
+ %tmp = getelementptr i32, i32* %A, i32 4
ret i32* %tmp
}
;CHECK-LABEL: test_v2i32_post_reg_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v2i32.p0i32(<2 x i32> %B, <2 x i32> %C, <2 x i32> %D, <2 x i32> %E, i64 0, i32* %A)
- %tmp = getelementptr i32* %A, i64 %inc
+ %tmp = getelementptr i32, i32* %A, i64 %inc
ret i32* %tmp
}
;CHECK-LABEL: test_v2i64_post_imm_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 4
+ %tmp = getelementptr i64, i64* %A, i64 4
ret i64* %tmp
}
;CHECK-LABEL: test_v2i64_post_reg_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v2i64.p0i64(<2 x i64> %B, <2 x i64> %C, <2 x i64> %D, <2 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_imm_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 4
+ %tmp = getelementptr i64, i64* %A, i64 4
ret i64* %tmp
}
;CHECK-LABEL: test_v1i64_post_reg_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> %B, <1 x i64> %C, <1 x i64> %D, <1 x i64> %E, i64 0, i64* %A)
- %tmp = getelementptr i64* %A, i64 %inc
+ %tmp = getelementptr i64, i64* %A, i64 %inc
ret i64* %tmp
}
;CHECK-LABEL: test_v4f32_post_imm_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
ret float* %tmp
}
;CHECK-LABEL: test_v4f32_post_reg_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v4f32.p0f32(<4 x float> %B, <4 x float> %C, <4 x float> %D, <4 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_imm_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], #16
call void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i32 4
+ %tmp = getelementptr float, float* %A, i32 4
ret float* %tmp
}
;CHECK-LABEL: test_v2f32_post_reg_st4lane:
;CHECK: st4.s { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v2f32.p0f32(<2 x float> %B, <2 x float> %C, <2 x float> %D, <2 x float> %E, i64 0, float* %A)
- %tmp = getelementptr float* %A, i64 %inc
+ %tmp = getelementptr float, float* %A, i64 %inc
ret float* %tmp
}
;CHECK-LABEL: test_v2f64_post_imm_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
call void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 4
+ %tmp = getelementptr double, double* %A, i64 4
ret double* %tmp
}
;CHECK-LABEL: test_v2f64_post_reg_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v2f64.p0f64(<2 x double> %B, <2 x double> %C, <2 x double> %D, <2 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_imm_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], #32
call void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 4
+ %tmp = getelementptr double, double* %A, i64 4
ret double* %tmp
}
;CHECK-LABEL: test_v1f64_post_reg_st4lane:
;CHECK: st4.d { v0, v1, v2, v3 }[0], [x0], x{{[0-9]+}}
call void @llvm.aarch64.neon.st4lane.v1f64.p0f64(<1 x double> %B, <1 x double> %C, <1 x double> %D, <1 x double> %E, i64 0, double* %A)
- %tmp = getelementptr double* %A, i64 %inc
+ %tmp = getelementptr double, double* %A, i64 %inc
ret double* %tmp
}
%tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
%tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
%tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
- %tmp18 = getelementptr i8* %bar, i64 1
+ %tmp18 = getelementptr i8, i8* %bar, i64 1
store i8* %tmp18, i8** %ptr
ret <16 x i8> %tmp17
}
%tmp15 = insertelement <16 x i8> %tmp14, i8 %tmp1, i32 13
%tmp16 = insertelement <16 x i8> %tmp15, i8 %tmp1, i32 14
%tmp17 = insertelement <16 x i8> %tmp16, i8 %tmp1, i32 15
- %tmp18 = getelementptr i8* %bar, i64 %inc
+ %tmp18 = getelementptr i8, i8* %bar, i64 %inc
store i8* %tmp18, i8** %ptr
ret <16 x i8> %tmp17
}
%tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
%tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
%tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
- %tmp10 = getelementptr i8* %bar, i64 1
+ %tmp10 = getelementptr i8, i8* %bar, i64 1
store i8* %tmp10, i8** %ptr
ret <8 x i8> %tmp9
}
%tmp7 = insertelement <8 x i8> %tmp6, i8 %tmp1, i32 5
%tmp8 = insertelement <8 x i8> %tmp7, i8 %tmp1, i32 6
%tmp9 = insertelement <8 x i8> %tmp8, i8 %tmp1, i32 7
- %tmp10 = getelementptr i8* %bar, i64 %inc
+ %tmp10 = getelementptr i8, i8* %bar, i64 %inc
store i8* %tmp10, i8** %ptr
ret <8 x i8> %tmp9
}
%tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
%tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
%tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
- %tmp10 = getelementptr i16* %bar, i64 1
+ %tmp10 = getelementptr i16, i16* %bar, i64 1
store i16* %tmp10, i16** %ptr
ret <8 x i16> %tmp9
}
%tmp7 = insertelement <8 x i16> %tmp6, i16 %tmp1, i32 5
%tmp8 = insertelement <8 x i16> %tmp7, i16 %tmp1, i32 6
%tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 7
- %tmp10 = getelementptr i16* %bar, i64 %inc
+ %tmp10 = getelementptr i16, i16* %bar, i64 %inc
store i16* %tmp10, i16** %ptr
ret <8 x i16> %tmp9
}
%tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
%tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
- %tmp6 = getelementptr i16* %bar, i64 1
+ %tmp6 = getelementptr i16, i16* %bar, i64 1
store i16* %tmp6, i16** %ptr
ret <4 x i16> %tmp5
}
%tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
%tmp5 = insertelement <4 x i16> %tmp4, i16 %tmp1, i32 3
- %tmp6 = getelementptr i16* %bar, i64 %inc
+ %tmp6 = getelementptr i16, i16* %bar, i64 %inc
store i16* %tmp6, i16** %ptr
ret <4 x i16> %tmp5
}
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
%tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
- %tmp6 = getelementptr i32* %bar, i64 1
+ %tmp6 = getelementptr i32, i32* %bar, i64 1
store i32* %tmp6, i32** %ptr
ret <4 x i32> %tmp5
}
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
%tmp5 = insertelement <4 x i32> %tmp4, i32 %tmp1, i32 3
- %tmp6 = getelementptr i32* %bar, i64 %inc
+ %tmp6 = getelementptr i32, i32* %bar, i64 %inc
store i32* %tmp6, i32** %ptr
ret <4 x i32> %tmp5
}
%tmp1 = load i32* %bar
%tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
- %tmp4 = getelementptr i32* %bar, i64 1
+ %tmp4 = getelementptr i32, i32* %bar, i64 1
store i32* %tmp4, i32** %ptr
ret <2 x i32> %tmp3
}
%tmp1 = load i32* %bar
%tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
- %tmp4 = getelementptr i32* %bar, i64 %inc
+ %tmp4 = getelementptr i32, i32* %bar, i64 %inc
store i32* %tmp4, i32** %ptr
ret <2 x i32> %tmp3
}
%tmp1 = load i64* %bar
%tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
%tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
- %tmp4 = getelementptr i64* %bar, i64 1
+ %tmp4 = getelementptr i64, i64* %bar, i64 1
store i64* %tmp4, i64** %ptr
ret <2 x i64> %tmp3
}
%tmp1 = load i64* %bar
%tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
%tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
- %tmp4 = getelementptr i64* %bar, i64 %inc
+ %tmp4 = getelementptr i64, i64* %bar, i64 %inc
store i64* %tmp4, i64** %ptr
ret <2 x i64> %tmp3
}
%tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
%tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
%tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 3
- %tmp6 = getelementptr float* %bar, i64 1
+ %tmp6 = getelementptr float, float* %bar, i64 1
store float* %tmp6, float** %ptr
ret <4 x float> %tmp5
}
%tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
%tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
%tmp5 = insertelement <4 x float> %tmp4, float %tmp1, i32 3
- %tmp6 = getelementptr float* %bar, i64 %inc
+ %tmp6 = getelementptr float, float* %bar, i64 %inc
store float* %tmp6, float** %ptr
ret <4 x float> %tmp5
}
%tmp1 = load float* %bar
%tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
%tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
- %tmp4 = getelementptr float* %bar, i64 1
+ %tmp4 = getelementptr float, float* %bar, i64 1
store float* %tmp4, float** %ptr
ret <2 x float> %tmp3
}
%tmp1 = load float* %bar
%tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
%tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
- %tmp4 = getelementptr float* %bar, i64 %inc
+ %tmp4 = getelementptr float, float* %bar, i64 %inc
store float* %tmp4, float** %ptr
ret <2 x float> %tmp3
}
%tmp1 = load double* %bar
%tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
%tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
- %tmp4 = getelementptr double* %bar, i64 1
+ %tmp4 = getelementptr double, double* %bar, i64 1
store double* %tmp4, double** %ptr
ret <2 x double> %tmp3
}
%tmp1 = load double* %bar
%tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
%tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
- %tmp4 = getelementptr double* %bar, i64 %inc
+ %tmp4 = getelementptr double, double* %bar, i64 %inc
store double* %tmp4, double** %ptr
ret <2 x double> %tmp3
}
; CHECK: ld1.b { v0 }[1], [x0], #1
%tmp1 = load i8* %bar
%tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
- %tmp3 = getelementptr i8* %bar, i64 1
+ %tmp3 = getelementptr i8, i8* %bar, i64 1
store i8* %tmp3, i8** %ptr
ret <16 x i8> %tmp2
}
; CHECK: ld1.b { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i8* %bar
%tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
- %tmp3 = getelementptr i8* %bar, i64 %inc
+ %tmp3 = getelementptr i8, i8* %bar, i64 %inc
store i8* %tmp3, i8** %ptr
ret <16 x i8> %tmp2
}
; CHECK: ld1.b { v0 }[1], [x0], #1
%tmp1 = load i8* %bar
%tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
- %tmp3 = getelementptr i8* %bar, i64 1
+ %tmp3 = getelementptr i8, i8* %bar, i64 1
store i8* %tmp3, i8** %ptr
ret <8 x i8> %tmp2
}
; CHECK: ld1.b { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i8* %bar
%tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
- %tmp3 = getelementptr i8* %bar, i64 %inc
+ %tmp3 = getelementptr i8, i8* %bar, i64 %inc
store i8* %tmp3, i8** %ptr
ret <8 x i8> %tmp2
}
; CHECK: ld1.h { v0 }[1], [x0], #2
%tmp1 = load i16* %bar
%tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
- %tmp3 = getelementptr i16* %bar, i64 1
+ %tmp3 = getelementptr i16, i16* %bar, i64 1
store i16* %tmp3, i16** %ptr
ret <8 x i16> %tmp2
}
; CHECK: ld1.h { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i16* %bar
%tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
- %tmp3 = getelementptr i16* %bar, i64 %inc
+ %tmp3 = getelementptr i16, i16* %bar, i64 %inc
store i16* %tmp3, i16** %ptr
ret <8 x i16> %tmp2
}
; CHECK: ld1.h { v0 }[1], [x0], #2
%tmp1 = load i16* %bar
%tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
- %tmp3 = getelementptr i16* %bar, i64 1
+ %tmp3 = getelementptr i16, i16* %bar, i64 1
store i16* %tmp3, i16** %ptr
ret <4 x i16> %tmp2
}
; CHECK: ld1.h { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i16* %bar
%tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
- %tmp3 = getelementptr i16* %bar, i64 %inc
+ %tmp3 = getelementptr i16, i16* %bar, i64 %inc
store i16* %tmp3, i16** %ptr
ret <4 x i16> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], #4
%tmp1 = load i32* %bar
%tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
- %tmp3 = getelementptr i32* %bar, i64 1
+ %tmp3 = getelementptr i32, i32* %bar, i64 1
store i32* %tmp3, i32** %ptr
ret <4 x i32> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i32* %bar
%tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
- %tmp3 = getelementptr i32* %bar, i64 %inc
+ %tmp3 = getelementptr i32, i32* %bar, i64 %inc
store i32* %tmp3, i32** %ptr
ret <4 x i32> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], #4
%tmp1 = load i32* %bar
%tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
- %tmp3 = getelementptr i32* %bar, i64 1
+ %tmp3 = getelementptr i32, i32* %bar, i64 1
store i32* %tmp3, i32** %ptr
ret <2 x i32> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i32* %bar
%tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
- %tmp3 = getelementptr i32* %bar, i64 %inc
+ %tmp3 = getelementptr i32, i32* %bar, i64 %inc
store i32* %tmp3, i32** %ptr
ret <2 x i32> %tmp2
}
; CHECK: ld1.d { v0 }[1], [x0], #8
%tmp1 = load i64* %bar
%tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
- %tmp3 = getelementptr i64* %bar, i64 1
+ %tmp3 = getelementptr i64, i64* %bar, i64 1
store i64* %tmp3, i64** %ptr
ret <2 x i64> %tmp2
}
; CHECK: ld1.d { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load i64* %bar
%tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
- %tmp3 = getelementptr i64* %bar, i64 %inc
+ %tmp3 = getelementptr i64, i64* %bar, i64 %inc
store i64* %tmp3, i64** %ptr
ret <2 x i64> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], #4
%tmp1 = load float* %bar
%tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
- %tmp3 = getelementptr float* %bar, i64 1
+ %tmp3 = getelementptr float, float* %bar, i64 1
store float* %tmp3, float** %ptr
ret <4 x float> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load float* %bar
%tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
- %tmp3 = getelementptr float* %bar, i64 %inc
+ %tmp3 = getelementptr float, float* %bar, i64 %inc
store float* %tmp3, float** %ptr
ret <4 x float> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], #4
%tmp1 = load float* %bar
%tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
- %tmp3 = getelementptr float* %bar, i64 1
+ %tmp3 = getelementptr float, float* %bar, i64 1
store float* %tmp3, float** %ptr
ret <2 x float> %tmp2
}
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load float* %bar
%tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
- %tmp3 = getelementptr float* %bar, i64 %inc
+ %tmp3 = getelementptr float, float* %bar, i64 %inc
store float* %tmp3, float** %ptr
ret <2 x float> %tmp2
}
; CHECK: ld1.d { v0 }[1], [x0], #8
%tmp1 = load double* %bar
%tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
- %tmp3 = getelementptr double* %bar, i64 1
+ %tmp3 = getelementptr double, double* %bar, i64 1
store double* %tmp3, double** %ptr
ret <2 x double> %tmp2
}
; CHECK: ld1.d { v0 }[1], [x0], x{{[0-9]+}}
%tmp1 = load double* %bar
%tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
- %tmp3 = getelementptr double* %bar, i64 %inc
+ %tmp3 = getelementptr double, double* %bar, i64 %inc
store double* %tmp3, double** %ptr
ret <2 x double> %tmp2
}
\ No newline at end of file
; CHECK-LABEL: t10:
%data = alloca <2 x float>, align 8
%a = alloca [2 x float], align 4
- %arraydecay = getelementptr inbounds [2 x float]* %a, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [2 x float], [2 x float]* %a, i32 0, i32 0
%0 = load <2 x float>* %data, align 8
call void asm sideeffect "ldr ${1:q}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
; CHECK: add {{x[0-9]+}}, [[TMP1]], #3344
store volatile i8* %var1, i8** @addr
- %var1plus2 = getelementptr i8* %var1, i32 2
+ %var1plus2 = getelementptr i8, i8* %var1, i32 2
store volatile i8* %var1plus2, i8** @addr
; CHECK: add [[TMP:x[0-9]+]], sp, #4095, lsl #12
; CHECK: add {{x[0-9]+}}, [[TMP1]], #3328
store volatile i8* %var2, i8** @addr
- %var2plus2 = getelementptr i8* %var2, i32 2
+ %var2plus2 = getelementptr i8, i8* %var2, i32 2
store volatile i8* %var2plus2, i8** @addr
store volatile i8* %var3, i8** @addr
- %var3plus2 = getelementptr i8* %var3, i32 2
+ %var3plus2 = getelementptr i8, i8* %var3, i32 2
store volatile i8* %var3plus2, i8** @addr
; CHECK: add sp, sp, #4095, lsl #12
; CHECK: ldp
define i32 @ldp_int(i32* %p) nounwind {
%tmp = load i32* %p, align 4
- %add.ptr = getelementptr inbounds i32* %p, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32* %add.ptr, align 4
%add = add nsw i32 %tmp1, %tmp
ret i32 %add
; CHECK: ldpsw
define i64 @ldp_sext_int(i32* %p) nounwind {
%tmp = load i32* %p, align 4
- %add.ptr = getelementptr inbounds i32* %p, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
%tmp1 = load i32* %add.ptr, align 4
%sexttmp = sext i32 %tmp to i64
%sexttmp1 = sext i32 %tmp1 to i64
; CHECK: ldp
define i64 @ldp_long(i64* %p) nounwind {
%tmp = load i64* %p, align 8
- %add.ptr = getelementptr inbounds i64* %p, i64 1
+ %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
%tmp1 = load i64* %add.ptr, align 8
%add = add nsw i64 %tmp1, %tmp
ret i64 %add
; CHECK: ldp
define float @ldp_float(float* %p) nounwind {
%tmp = load float* %p, align 4
- %add.ptr = getelementptr inbounds float* %p, i64 1
+ %add.ptr = getelementptr inbounds float, float* %p, i64 1
%tmp1 = load float* %add.ptr, align 4
%add = fadd float %tmp, %tmp1
ret float %add
; CHECK: ldp
define double @ldp_double(double* %p) nounwind {
%tmp = load double* %p, align 8
- %add.ptr = getelementptr inbounds double* %p, i64 1
+ %add.ptr = getelementptr inbounds double, double* %p, i64 1
%tmp1 = load double* %add.ptr, align 8
%add = fadd double %tmp, %tmp1
ret double %add
; LDUR_CHK: ldp [[DST1:w[0-9]+]], [[DST2:w[0-9]+]], [x0, #-8]
; LDUR_CHK-NEXT: add w{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i32* %a, i32 -1
+ %p1 = getelementptr inbounds i32, i32* %a, i32 -1
%tmp1 = load i32* %p1, align 2
- %p2 = getelementptr inbounds i32* %a, i32 -2
+ %p2 = getelementptr inbounds i32, i32* %a, i32 -2
%tmp2 = load i32* %p2, align 2
%tmp3 = add i32 %tmp1, %tmp2
ret i32 %tmp3
; LDUR_CHK: ldpsw [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-8]
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i32* %a, i32 -1
+ %p1 = getelementptr inbounds i32, i32* %a, i32 -1
%tmp1 = load i32* %p1, align 2
- %p2 = getelementptr inbounds i32* %a, i32 -2
+ %p2 = getelementptr inbounds i32, i32* %a, i32 -2
%tmp2 = load i32* %p2, align 2
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-16]
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i64* %a, i64 -1
+ %p1 = getelementptr inbounds i64, i64* %a, i64 -1
%tmp1 = load i64* %p1, align 2
- %p2 = getelementptr inbounds i64* %a, i64 -2
+ %p2 = getelementptr inbounds i64, i64* %a, i64 -2
%tmp2 = load i64* %p2, align 2
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
; LDUR_CHK: ldp [[DST1:s[0-9]+]], [[DST2:s[0-9]+]], [x0, #-8]
; LDUR_CHK-NEXT: add s{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds float* %a, i64 -1
+ %p1 = getelementptr inbounds float, float* %a, i64 -1
%tmp1 = load float* %p1, align 2
- %p2 = getelementptr inbounds float* %a, i64 -2
+ %p2 = getelementptr inbounds float, float* %a, i64 -2
%tmp2 = load float* %p2, align 2
%tmp3 = fadd float %tmp1, %tmp2
ret float %tmp3
; LDUR_CHK: ldp [[DST1:d[0-9]+]], [[DST2:d[0-9]+]], [x0, #-16]
; LDUR_CHK-NEXT: add d{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds double* %a, i64 -1
+ %p1 = getelementptr inbounds double, double* %a, i64 -1
%tmp1 = load double* %p1, align 2
- %p2 = getelementptr inbounds double* %a, i64 -2
+ %p2 = getelementptr inbounds double, double* %a, i64 -2
%tmp2 = load double* %p2, align 2
%tmp3 = fadd double %tmp1, %tmp2
ret double %tmp3
; LDUR_CHK: ldp [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i64* %a, i64 -31
+ %p1 = getelementptr inbounds i64, i64* %a, i64 -31
%tmp1 = load i64* %p1, align 2
- %p2 = getelementptr inbounds i64* %a, i64 -32
+ %p2 = getelementptr inbounds i64, i64* %a, i64 -32
%tmp2 = load i64* %p2, align 2
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
; LDUR_CHK: ldpsw [[DST1:x[0-9]+]], [[DST2:x[0-9]+]], [x0, #-256]
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i32* %a, i64 -63
+ %p1 = getelementptr inbounds i32, i32* %a, i64 -63
%tmp1 = load i32* %p1, align 2
- %p2 = getelementptr inbounds i32* %a, i64 -64
+ %p2 = getelementptr inbounds i32, i32* %a, i64 -64
%tmp2 = load i32* %p2, align 2
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
; are used---just check that there isn't an ldp before the add
; LDUR_CHK: add
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i64* %a, i64 -32
+ %p1 = getelementptr inbounds i64, i64* %a, i64 -32
%tmp1 = load i64* %p1, align 2
- %p2 = getelementptr inbounds i64* %a, i64 -33
+ %p2 = getelementptr inbounds i64, i64* %a, i64 -33
%tmp2 = load i64* %p2, align 2
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
; are used---just check that there isn't an ldp before the add
; LDUR_CHK: add
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i32* %a, i64 -64
+ %p1 = getelementptr inbounds i32, i32* %a, i64 -64
%tmp1 = load i32* %p1, align 2
- %p2 = getelementptr inbounds i32* %a, i64 -65
+ %p2 = getelementptr inbounds i32, i32* %a, i64 -65
%tmp2 = load i32* %p2, align 2
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
; LDUR_CHK-NEXT: ldur
; LDUR_CHK-NEXT: add
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i64* %a, i64 -18
+ %p1 = getelementptr inbounds i64, i64* %a, i64 -18
%bp1 = bitcast i64* %p1 to i8*
- %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
+ %bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
%dp1 = bitcast i8* %bp1p1 to i64*
%tmp1 = load i64* %dp1, align 1
- %p2 = getelementptr inbounds i64* %a, i64 -17
+ %p2 = getelementptr inbounds i64, i64* %a, i64 -17
%bp2 = bitcast i64* %p2 to i8*
- %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
+ %bp2p1 = getelementptr inbounds i8, i8* %bp2, i64 1
%dp2 = bitcast i8* %bp2p1 to i64*
%tmp2 = load i64* %dp2, align 1
; LDUR_CHK-NEXT: ldursw
; LDUR_CHK-NEXT: add
; LDUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i32* %a, i64 -18
+ %p1 = getelementptr inbounds i32, i32* %a, i64 -18
%bp1 = bitcast i32* %p1 to i8*
- %bp1p1 = getelementptr inbounds i8* %bp1, i64 1
+ %bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
%dp1 = bitcast i8* %bp1p1 to i32*
%tmp1 = load i32* %dp1, align 1
- %p2 = getelementptr inbounds i32* %a, i64 -17
+ %p2 = getelementptr inbounds i32, i32* %a, i64 -17
%bp2 = bitcast i32* %p2 to i8*
- %bp2p1 = getelementptr inbounds i8* %bp2, i64 1
+ %bp2p1 = getelementptr inbounds i8, i8* %bp2, i64 1
%dp2 = bitcast i8* %bp2p1 to i32*
%tmp2 = load i32* %dp2, align 1
; CHECK: f0:
; CHECK: ldur x0, [x0, #-8]
; CHECK-NEXT: ret
- %tmp = getelementptr inbounds i64* %p, i64 -1
+ %tmp = getelementptr inbounds i64, i64* %p, i64 -1
%ret = load i64* %tmp, align 2
ret i64 %ret
}
; CHECK: f1:
; CHECK: ldur w0, [x0, #-4]
; CHECK-NEXT: ret
- %tmp = getelementptr inbounds i32* %p, i64 -1
+ %tmp = getelementptr inbounds i32, i32* %p, i64 -1
%ret = load i32* %tmp, align 2
ret i32 %ret
}
; CHECK: f2:
; CHECK: ldurh w0, [x0, #-2]
; CHECK-NEXT: ret
- %tmp = getelementptr inbounds i16* %p, i64 -1
+ %tmp = getelementptr inbounds i16, i16* %p, i64 -1
%ret = load i16* %tmp, align 2
ret i16 %ret
}
; CHECK: f3:
; CHECK: ldurb w0, [x0, #-1]
; CHECK-NEXT: ret
- %tmp = getelementptr inbounds i8* %p, i64 -1
+ %tmp = getelementptr inbounds i8, i8* %p, i64 -1
%ret = load i8* %tmp, align 2
ret i8 %ret
}
; CHECK-LABEL: zext32:
; CHECK: ldur w0, [x0, #-12]
; CHECK-NEXT: ret
- %p = getelementptr inbounds i8* %a, i64 -12
+ %p = getelementptr inbounds i8, i8* %a, i64 -12
%tmp1 = bitcast i8* %p to i32*
%tmp2 = load i32* %tmp1, align 4
%ret = zext i32 %tmp2 to i64
; CHECK-LABEL: zext16:
; CHECK: ldurh w0, [x0, #-12]
; CHECK-NEXT: ret
- %p = getelementptr inbounds i8* %a, i64 -12
+ %p = getelementptr inbounds i8, i8* %a, i64 -12
%tmp1 = bitcast i8* %p to i16*
%tmp2 = load i16* %tmp1, align 2
%ret = zext i16 %tmp2 to i64
; CHECK-LABEL: zext8:
; CHECK: ldurb w0, [x0, #-12]
; CHECK-NEXT: ret
- %p = getelementptr inbounds i8* %a, i64 -12
+ %p = getelementptr inbounds i8, i8* %a, i64 -12
%tmp2 = load i8* %p, align 1
%ret = zext i8 %tmp2 to i64
; CHECK: stp xzr, xzr, [sp, #16]
; CHECK: str xzr, [sp, #8]
%buf = alloca [26 x i8], align 1
- %0 = getelementptr inbounds [26 x i8]* %buf, i32 0, i32 0
+ %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false)
call void @something(i8* %0) nounwind
ret void
for.body: ; preds = %for.cond
%3 = load i32* %i, align 4
%idxprom = sext i32 %3 to i64
- %arrayidx = getelementptr inbounds [8 x i32]* %x, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
%4 = load i32* %arrayidx, align 4
%add = add nsw i32 %4, 1
store i32 %add, i32* %xx, align 4
store i32 %add3, i32* %xx, align 4
%8 = load i32* %i, align 4
%idxprom4 = sext i32 %8 to i64
- %arrayidx5 = getelementptr inbounds [8 x i32]* %y, i32 0, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
%9 = load i32* %arrayidx5, align 4
%10 = load i32* %yy, align 4
%mul = mul nsw i32 %10, %9
; Nothing explicit to check other than llc not crashing.
define { <16 x i8>, <16 x i8> } @test_v16i8_post_imm_ld2(i8* %A, i8** %ptr) {
%ld2 = tail call { <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld2.v16i8.p0i8(i8* %A)
- %tmp = getelementptr i8* %A, i32 32
+ %tmp = getelementptr i8, i8* %A, i32 32
store i8* %tmp, i8** %ptr
ret { <16 x i8>, <16 x i8> } %ld2
}
%3 = load i32* %yy, align 4
%4 = load i32* %i, align 4
%idxprom = sext i32 %4 to i64
- %arrayidx = getelementptr inbounds [8 x i32]* %x, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
%5 = load i32* %arrayidx, align 4
%add = add nsw i32 %5, 1
store i32 %add, i32* %xx, align 4
store i32 %add3, i32* %xx, align 4
%9 = load i32* %i, align 4
%idxprom4 = sext i32 %9 to i64
- %arrayidx5 = getelementptr inbounds [8 x i32]* %y, i32 0, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
%10 = load i32* %arrayidx5, align 4
%add4 = add nsw i32 %9, %add
%add = add nsw i32 %tmp1, %i
%idxprom = sext i32 %add to i64
%tmp2 = load i32** @a, align 8, !tbaa !3
- %arrayidx = getelementptr inbounds i32* %tmp2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %tmp2, i64 %idxprom
%tmp3 = bitcast i32* %arrayidx to i8*
; CHECK: prfm pldl1strm
call void @llvm.prefetch(i8* %tmp3, i32 0, i32 0, i32 1)
%tmp4 = load i32** @a, align 8, !tbaa !3
- %arrayidx3 = getelementptr inbounds i32* %tmp4, i64 %idxprom
+ %arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom
%tmp5 = bitcast i32* %arrayidx3 to i8*
; CHECK: prfm pldl3keep
call void @llvm.prefetch(i8* %tmp5, i32 0, i32 1, i32 1)
%tmp6 = load i32** @a, align 8, !tbaa !3
- %arrayidx6 = getelementptr inbounds i32* %tmp6, i64 %idxprom
+ %arrayidx6 = getelementptr inbounds i32, i32* %tmp6, i64 %idxprom
%tmp7 = bitcast i32* %arrayidx6 to i8*
; CHECK: prfm pldl2keep
call void @llvm.prefetch(i8* %tmp7, i32 0, i32 2, i32 1)
%tmp8 = load i32** @a, align 8, !tbaa !3
- %arrayidx9 = getelementptr inbounds i32* %tmp8, i64 %idxprom
+ %arrayidx9 = getelementptr inbounds i32, i32* %tmp8, i64 %idxprom
%tmp9 = bitcast i32* %arrayidx9 to i8*
; CHECK: prfm pldl1keep
call void @llvm.prefetch(i8* %tmp9, i32 0, i32 3, i32 1)
%tmp10 = load i32** @a, align 8, !tbaa !3
- %arrayidx12 = getelementptr inbounds i32* %tmp10, i64 %idxprom
+ %arrayidx12 = getelementptr inbounds i32, i32* %tmp10, i64 %idxprom
%tmp11 = bitcast i32* %arrayidx12 to i8*
; CHECK: prfm plil1strm
call void @llvm.prefetch(i8* %tmp11, i32 0, i32 0, i32 0)
%tmp12 = load i32** @a, align 8, !tbaa !3
- %arrayidx15 = getelementptr inbounds i32* %tmp12, i64 %idxprom
+ %arrayidx15 = getelementptr inbounds i32, i32* %tmp12, i64 %idxprom
%tmp13 = bitcast i32* %arrayidx3 to i8*
; CHECK: prfm plil3keep
call void @llvm.prefetch(i8* %tmp13, i32 0, i32 1, i32 0)
%tmp14 = load i32** @a, align 8, !tbaa !3
- %arrayidx18 = getelementptr inbounds i32* %tmp14, i64 %idxprom
+ %arrayidx18 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom
%tmp15 = bitcast i32* %arrayidx6 to i8*
; CHECK: prfm plil2keep
call void @llvm.prefetch(i8* %tmp15, i32 0, i32 2, i32 0)
%tmp16 = load i32** @a, align 8, !tbaa !3
- %arrayidx21 = getelementptr inbounds i32* %tmp16, i64 %idxprom
+ %arrayidx21 = getelementptr inbounds i32, i32* %tmp16, i64 %idxprom
%tmp17 = bitcast i32* %arrayidx9 to i8*
; CHECK: prfm plil1keep
call void @llvm.prefetch(i8* %tmp17, i32 0, i32 3, i32 0)
%tmp18 = load i32** @a, align 8, !tbaa !3
- %arrayidx24 = getelementptr inbounds i32* %tmp18, i64 %idxprom
+ %arrayidx24 = getelementptr inbounds i32, i32* %tmp18, i64 %idxprom
%tmp19 = bitcast i32* %arrayidx12 to i8*
; CHECK: prfm pstl1strm
call void @llvm.prefetch(i8* %tmp19, i32 1, i32 0, i32 1)
%tmp20 = load i32** @a, align 8, !tbaa !3
- %arrayidx27 = getelementptr inbounds i32* %tmp20, i64 %idxprom
+ %arrayidx27 = getelementptr inbounds i32, i32* %tmp20, i64 %idxprom
%tmp21 = bitcast i32* %arrayidx15 to i8*
; CHECK: prfm pstl3keep
call void @llvm.prefetch(i8* %tmp21, i32 1, i32 1, i32 1)
%tmp22 = load i32** @a, align 8, !tbaa !3
- %arrayidx30 = getelementptr inbounds i32* %tmp22, i64 %idxprom
+ %arrayidx30 = getelementptr inbounds i32, i32* %tmp22, i64 %idxprom
%tmp23 = bitcast i32* %arrayidx18 to i8*
; CHECK: prfm pstl2keep
call void @llvm.prefetch(i8* %tmp23, i32 1, i32 2, i32 1)
%tmp24 = load i32** @a, align 8, !tbaa !3
- %arrayidx33 = getelementptr inbounds i32* %tmp24, i64 %idxprom
+ %arrayidx33 = getelementptr inbounds i32, i32* %tmp24, i64 %idxprom
%tmp25 = bitcast i32* %arrayidx21 to i8*
; CHECK: prfm pstl1keep
; CHECK: lsl [[REG:x[0-9]+]], x1, #1
; CHECK: ldrb w0, [x0, [[REG]]]
; CHECK: ret
- %tmp1 = getelementptr inbounds i16* %a, i64 %b
+ %tmp1 = getelementptr inbounds i16, i16* %a, i64 %b
%tmp2 = load i16* %tmp1
%tmp3 = trunc i16 %tmp2 to i8
ret i8 %tmp3
%off32.sext.tmp = shl i64 %offset, 32
%off32.sext = ashr i64 %off32.sext.tmp, 32
- %addr8_sxtw = getelementptr i8* %base, i64 %off32.sext
+ %addr8_sxtw = getelementptr i8, i8* %base, i64 %off32.sext
%val8_sxtw = load volatile i8* %addr8_sxtw
%val32_signed = sext i8 %val8_sxtw to i32
store volatile i32 %val32_signed, i32* @var_32bit
%0 = bitcast float* %source to <4 x float>*
%tmp2 = load <4 x float>* %0, align 4
%tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
- %arrayidx8 = getelementptr inbounds <4 x float>* %dest, i32 11
+ %arrayidx8 = getelementptr inbounds <4 x float>, <4 x float>* %dest, i32 11
store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4
ret void
}
; CHECK-NOT: phi
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
%tmp = add nsw i64 %indvars.iv, -1
- %arrayidx = getelementptr inbounds double* %b, i64 %tmp
+ %arrayidx = getelementptr inbounds double, double* %b, i64 %tmp
%tmp1 = load double* %arrayidx, align 8
; The induction variable should carry the scaling factor: 1 * 8 = 8.
; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8
%indvars.iv.next = add i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
+ %arrayidx2 = getelementptr inbounds double, double* %c, i64 %indvars.iv.next
%tmp2 = load double* %arrayidx2, align 8
%mul = fmul double %tmp1, %tmp2
- %arrayidx4 = getelementptr inbounds double* %a, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %a, i64 %indvars.iv
store double %mul, double* %arrayidx4, align 8
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; Comparison should be 19 * 8 = 152.
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 1
+ %addr = getelementptr i8, i8* %sp0, i64 1
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 1
+ %addr = getelementptr i16, i16* %sp0, i64 1
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], x[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 1
+ %addr = getelementptr i64, i64* %sp0, i64 1
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 %offset
+ %addr = getelementptr i8, i8* %sp0, i64 %offset
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 %offset
+ %addr = getelementptr i16, i16* %sp0, i64 %offset
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], s[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 %offset
+ %addr = getelementptr i32, i32* %sp0, i64 %offset
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:s[0-9]+]], x[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 %offset
+ %addr = getelementptr i64, i64* %sp0, i64 %offset
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 1
+ %addr = getelementptr i8, i8* %sp0, i64 1
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 1
+ %addr = getelementptr i16, i16* %sp0, i64 1
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 1
+ %addr = getelementptr i64, i64* %sp0, i64 1
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 %offset
+ %addr = getelementptr i8, i8* %sp0, i64 %offset
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 %offset
+ %addr = getelementptr i16, i16* %sp0, i64 %offset
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 %offset
+ %addr = getelementptr i32, i32* %sp0, i64 %offset
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: ucvtf [[REG:d[0-9]+]], d[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 %offset
+ %addr = getelementptr i64, i64* %sp0, i64 %offset
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-A57-NEXT: scvtf [[REG:s[0-9]+]], w[[REGNUM]]
; CHECK-A57-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 1
+ %addr = getelementptr i8, i8* %sp0, i64 1
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 1
+ %addr = getelementptr i16, i16* %sp0, i64 1
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: scvtf [[REG:s[0-9]+]], x[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 1
+ %addr = getelementptr i64, i64* %sp0, i64 1
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-A57-NEXT: scvtf [[REG:s[0-9]+]], w[[REGNUM]]
; CHECK-A57-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 %offset
+ %addr = getelementptr i8, i8* %sp0, i64 %offset
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 %offset
+ %addr = getelementptr i16, i16* %sp0, i64 %offset
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: scvtf [[REG:s[0-9]+]], s[[SEXTREG]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 %offset
+ %addr = getelementptr i32, i32* %sp0, i64 %offset
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: scvtf [[REG:s[0-9]+]], x[[REGNUM]]
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 %offset
+ %addr = getelementptr i64, i64* %sp0, i64 %offset
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 1
+ %addr = getelementptr i8, i8* %sp0, i64 1
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-A57-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
; CHECK-A57-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 1
+ %addr = getelementptr i16, i16* %sp0, i64 1
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 1
+ %addr = getelementptr i64, i64* %sp0, i64 1
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i8* %sp0, i64 %offset
+ %addr = getelementptr i8, i8* %sp0, i64 %offset
%pix_sp0.0.copyload = load i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-A57-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
; CHECK-A57-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i16* %sp0, i64 %offset
+ %addr = getelementptr i16, i16* %sp0, i64 %offset
%pix_sp0.0.copyload = load i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 %offset
+ %addr = getelementptr i32, i32* %sp0, i64 %offset
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: scvtf [[REG:d[0-9]+]], d[[SEXTREG]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i64* %sp0, i64 %offset
+ %addr = getelementptr i64, i64* %sp0, i64 %offset
%pix_sp0.0.copyload = load i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
; CHECK-NEXT: scvtf [[REG:d[0-9]+]], w[[REGNUM]]
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
%stack = alloca [128 x i32], align 4
%0 = bitcast [128 x i32]* %stack to i8*
%idxprom = sext i32 %a to i64
- %arrayidx = getelementptr inbounds [128 x i32]* %stack, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %stack, i64 0, i64 %idxprom
store i32 %b, i32* %arrayidx, align 4
%1 = load volatile i32* @bar, align 4
%2 = load volatile i32* @bar, align 4
%19 = load volatile i32* @bar, align 4
%20 = load volatile i32* @bar, align 4
%idxprom1 = sext i32 %c to i64
- %arrayidx2 = getelementptr inbounds [128 x i32]* %stack, i64 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [128 x i32], [128 x i32]* %stack, i64 0, i64 %idxprom1
%21 = load i32* %arrayidx2, align 4
%factor = mul i32 %h, -2
%factor67 = mul i32 %g, -2
; CHECK-LABEL: st1lane_ro_16b
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.b { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i8* %D, i64 %offset
+ %ptr = getelementptr i8, i8* %D, i64 %offset
%tmp = extractelement <16 x i8> %A, i32 1
store i8 %tmp, i8* %ptr
ret void
; CHECK-LABEL: st1lane0_ro_16b
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.b { v0 }[0], [x[[XREG]]]
- %ptr = getelementptr i8* %D, i64 %offset
+ %ptr = getelementptr i8, i8* %D, i64 %offset
%tmp = extractelement <16 x i8> %A, i32 0
store i8 %tmp, i8* %ptr
ret void
; CHECK-LABEL: st1lane_ro_8h
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.h { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i16* %D, i64 %offset
+ %ptr = getelementptr i16, i16* %D, i64 %offset
%tmp = extractelement <8 x i16> %A, i32 1
store i16 %tmp, i16* %ptr
ret void
define void @st1lane0_ro_8h(<8 x i16> %A, i16* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_8h
; CHECK: str h0, [x0, x1, lsl #1]
- %ptr = getelementptr i16* %D, i64 %offset
+ %ptr = getelementptr i16, i16* %D, i64 %offset
%tmp = extractelement <8 x i16> %A, i32 0
store i16 %tmp, i16* %ptr
ret void
; CHECK-LABEL: st1lane_ro_4s
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.s { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i32* %D, i64 %offset
+ %ptr = getelementptr i32, i32* %D, i64 %offset
%tmp = extractelement <4 x i32> %A, i32 1
store i32 %tmp, i32* %ptr
ret void
define void @st1lane0_ro_4s(<4 x i32> %A, i32* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_4s
; CHECK: str s0, [x0, x1, lsl #2]
- %ptr = getelementptr i32* %D, i64 %offset
+ %ptr = getelementptr i32, i32* %D, i64 %offset
%tmp = extractelement <4 x i32> %A, i32 0
store i32 %tmp, i32* %ptr
ret void
; CHECK-LABEL: st1lane_ro_4s_float
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.s { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr float* %D, i64 %offset
+ %ptr = getelementptr float, float* %D, i64 %offset
%tmp = extractelement <4 x float> %A, i32 1
store float %tmp, float* %ptr
ret void
define void @st1lane0_ro_4s_float(<4 x float> %A, float* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_4s_float
; CHECK: str s0, [x0, x1, lsl #2]
- %ptr = getelementptr float* %D, i64 %offset
+ %ptr = getelementptr float, float* %D, i64 %offset
%tmp = extractelement <4 x float> %A, i32 0
store float %tmp, float* %ptr
ret void
; CHECK-LABEL: st1lane_ro_2d
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.d { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i64* %D, i64 %offset
+ %ptr = getelementptr i64, i64* %D, i64 %offset
%tmp = extractelement <2 x i64> %A, i32 1
store i64 %tmp, i64* %ptr
ret void
define void @st1lane0_ro_2d(<2 x i64> %A, i64* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_2d
; CHECK: str d0, [x0, x1, lsl #3]
- %ptr = getelementptr i64* %D, i64 %offset
+ %ptr = getelementptr i64, i64* %D, i64 %offset
%tmp = extractelement <2 x i64> %A, i32 0
store i64 %tmp, i64* %ptr
ret void
; CHECK-LABEL: st1lane_ro_2d_double
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.d { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr double* %D, i64 %offset
+ %ptr = getelementptr double, double* %D, i64 %offset
%tmp = extractelement <2 x double> %A, i32 1
store double %tmp, double* %ptr
ret void
define void @st1lane0_ro_2d_double(<2 x double> %A, double* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_2d_double
; CHECK: str d0, [x0, x1, lsl #3]
- %ptr = getelementptr double* %D, i64 %offset
+ %ptr = getelementptr double, double* %D, i64 %offset
%tmp = extractelement <2 x double> %A, i32 0
store double %tmp, double* %ptr
ret void
; CHECK-LABEL: st1lane_ro_8b
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.b { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i8* %D, i64 %offset
+ %ptr = getelementptr i8, i8* %D, i64 %offset
%tmp = extractelement <8 x i8> %A, i32 1
store i8 %tmp, i8* %ptr
ret void
; CHECK-LABEL: st1lane0_ro_8b
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.b { v0 }[0], [x[[XREG]]]
- %ptr = getelementptr i8* %D, i64 %offset
+ %ptr = getelementptr i8, i8* %D, i64 %offset
%tmp = extractelement <8 x i8> %A, i32 0
store i8 %tmp, i8* %ptr
ret void
; CHECK-LABEL: st1lane_ro_4h
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.h { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i16* %D, i64 %offset
+ %ptr = getelementptr i16, i16* %D, i64 %offset
%tmp = extractelement <4 x i16> %A, i32 1
store i16 %tmp, i16* %ptr
ret void
define void @st1lane0_ro_4h(<4 x i16> %A, i16* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_4h
; CHECK: str h0, [x0, x1, lsl #1]
- %ptr = getelementptr i16* %D, i64 %offset
+ %ptr = getelementptr i16, i16* %D, i64 %offset
%tmp = extractelement <4 x i16> %A, i32 0
store i16 %tmp, i16* %ptr
ret void
; CHECK-LABEL: st1lane_ro_2s
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.s { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr i32* %D, i64 %offset
+ %ptr = getelementptr i32, i32* %D, i64 %offset
%tmp = extractelement <2 x i32> %A, i32 1
store i32 %tmp, i32* %ptr
ret void
define void @st1lane0_ro_2s(<2 x i32> %A, i32* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_2s
; CHECK: str s0, [x0, x1, lsl #2]
- %ptr = getelementptr i32* %D, i64 %offset
+ %ptr = getelementptr i32, i32* %D, i64 %offset
%tmp = extractelement <2 x i32> %A, i32 0
store i32 %tmp, i32* %ptr
ret void
; CHECK-LABEL: st1lane_ro_2s_float
; CHECK: add x[[XREG:[0-9]+]], x0, x1
; CHECK: st1.s { v0 }[1], [x[[XREG]]]
- %ptr = getelementptr float* %D, i64 %offset
+ %ptr = getelementptr float, float* %D, i64 %offset
%tmp = extractelement <2 x float> %A, i32 1
store float %tmp, float* %ptr
ret void
define void @st1lane0_ro_2s_float(<2 x float> %A, float* %D, i64 %offset) {
; CHECK-LABEL: st1lane0_ro_2s_float
; CHECK: str s0, [x0, x1, lsl #2]
- %ptr = getelementptr float* %D, i64 %offset
+ %ptr = getelementptr float, float* %D, i64 %offset
%tmp = extractelement <2 x float> %A, i32 0
store float %tmp, float* %ptr
ret void
; CHECK: stp w0, w1, [x2]
define void @stp_int(i32 %a, i32 %b, i32* nocapture %p) nounwind {
store i32 %a, i32* %p, align 4
- %add.ptr = getelementptr inbounds i32* %p, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
store i32 %b, i32* %add.ptr, align 4
ret void
}
; CHECK: stp x0, x1, [x2]
define void @stp_long(i64 %a, i64 %b, i64* nocapture %p) nounwind {
store i64 %a, i64* %p, align 8
- %add.ptr = getelementptr inbounds i64* %p, i64 1
+ %add.ptr = getelementptr inbounds i64, i64* %p, i64 1
store i64 %b, i64* %add.ptr, align 8
ret void
}
; CHECK: stp s0, s1, [x0]
define void @stp_float(float %a, float %b, float* nocapture %p) nounwind {
store float %a, float* %p, align 4
- %add.ptr = getelementptr inbounds float* %p, i64 1
+ %add.ptr = getelementptr inbounds float, float* %p, i64 1
store float %b, float* %add.ptr, align 4
ret void
}
; CHECK: stp d0, d1, [x0]
define void @stp_double(double %a, double %b, double* nocapture %p) nounwind {
store double %a, double* %p, align 8
- %add.ptr = getelementptr inbounds double* %p, i64 1
+ %add.ptr = getelementptr inbounds double, double* %p, i64 1
store double %b, double* %add.ptr, align 8
ret void
}
; STUR_CHK: stur_int
; STUR_CHK: stp w{{[0-9]+}}, {{w[0-9]+}}, [x{{[0-9]+}}, #-8]
; STUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i32* %p, i32 -1
+ %p1 = getelementptr inbounds i32, i32* %p, i32 -1
store i32 %a, i32* %p1, align 2
- %p2 = getelementptr inbounds i32* %p, i32 -2
+ %p2 = getelementptr inbounds i32, i32* %p, i32 -2
store i32 %b, i32* %p2, align 2
ret void
}
; STUR_CHK: stur_long
; STUR_CHK: stp x{{[0-9]+}}, {{x[0-9]+}}, [x{{[0-9]+}}, #-16]
; STUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds i64* %p, i32 -1
+ %p1 = getelementptr inbounds i64, i64* %p, i32 -1
store i64 %a, i64* %p1, align 2
- %p2 = getelementptr inbounds i64* %p, i32 -2
+ %p2 = getelementptr inbounds i64, i64* %p, i32 -2
store i64 %b, i64* %p2, align 2
ret void
}
; STUR_CHK: stur_float
; STUR_CHK: stp s{{[0-9]+}}, {{s[0-9]+}}, [x{{[0-9]+}}, #-8]
; STUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds float* %p, i32 -1
+ %p1 = getelementptr inbounds float, float* %p, i32 -1
store float %a, float* %p1, align 2
- %p2 = getelementptr inbounds float* %p, i32 -2
+ %p2 = getelementptr inbounds float, float* %p, i32 -2
store float %b, float* %p2, align 2
ret void
}
; STUR_CHK: stur_double
; STUR_CHK: stp d{{[0-9]+}}, {{d[0-9]+}}, [x{{[0-9]+}}, #-16]
; STUR_CHK-NEXT: ret
- %p1 = getelementptr inbounds double* %p, i32 -1
+ %p1 = getelementptr inbounds double, double* %p, i32 -1
store double %a, double* %p1, align 2
- %p2 = getelementptr inbounds double* %p, i32 -2
+ %p2 = getelementptr inbounds double, double* %p, i32 -2
store double %b, double* %p2, align 2
ret void
}
; CHECK: stur w1, [x0, #-4]
; CHECK-NEXT: ret
%tmp1 = trunc i64 %val to i32
- %ptr = getelementptr inbounds i32* %p, i64 -1
+ %ptr = getelementptr inbounds i32, i32* %p, i64 -1
store i32 %tmp1, i32* %ptr, align 4
ret void
}
; CHECK: sturh w1, [x0, #-2]
; CHECK-NEXT: ret
%tmp1 = trunc i64 %val to i16
- %ptr = getelementptr inbounds i16* %p, i64 -1
+ %ptr = getelementptr inbounds i16, i16* %p, i64 -1
store i16 %tmp1, i16* %ptr, align 2
ret void
}
; CHECK: sturb w1, [x0, #-1]
; CHECK-NEXT: ret
%tmp1 = trunc i64 %val to i8
- %ptr = getelementptr inbounds i8* %p, i64 -1
+ %ptr = getelementptr inbounds i8, i8* %p, i64 -1
store i8 %tmp1, i8* %ptr, align 1
ret void
}
; CHECK: sturh w1, [x0, #-2]
; CHECK-NEXT: ret
%tmp1 = trunc i32 %val to i16
- %ptr = getelementptr inbounds i16* %p, i32 -1
+ %ptr = getelementptr inbounds i16, i16* %p, i32 -1
store i16 %tmp1, i16* %ptr, align 2
ret void
}
; CHECK: sturb w1, [x0, #-1]
; CHECK-NEXT: ret
%tmp1 = trunc i32 %val to i8
- %ptr = getelementptr inbounds i8* %p, i32 -1
+ %ptr = getelementptr inbounds i8, i8* %p, i32 -1
store i8 %tmp1, i8* %ptr, align 1
ret void
}
; CHECK: stur xzr, [x0, #12]
; CHECK-NEXT: stur xzr, [x0, #4]
; CHECK-NEXT: ret
- %B = getelementptr inbounds %struct.X* %p, i64 0, i32 1
+ %B = getelementptr inbounds %struct.X, %struct.X* %p, i64 0, i32 1
%val = bitcast i64* %B to i8*
call void @llvm.memset.p0i8.i64(i8* %val, i8 0, i64 16, i32 1, i1 false)
ret void
; CHECK: b {{_?B_ctor_base}}
%0 = bitcast %struct.C* %this to %struct.A*
%call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
- %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
%call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
ret %struct.C* %this
}
; CHECK-NOT: b {{_?B_ctor_base_nothisret}}
%0 = bitcast %struct.C* %this to %struct.A*
%call = tail call %struct.A* @A_ctor_base_nothisret(%struct.A* %0)
- %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
%call2 = tail call %struct.B* @B_ctor_base_nothisret(%struct.B* %1, i32 %x)
ret %struct.C* %this
}
; CHECK: bl {{_?B_ctor_complete}}
; CHECK-NOT: mov x0, {{x[0-9]+}}
; CHECK: b {{_?B_ctor_complete}}
- %b = getelementptr inbounds %struct.D* %this, i32 0, i32 0
+ %b = getelementptr inbounds %struct.D, %struct.D* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
ret %struct.D* %this
entry:
; CHECK-LABEL: E_ctor_base:
; CHECK-NOT: b {{_?B_ctor_complete}}
- %b = getelementptr inbounds %struct.E* %this, i32 0, i32 0
+ %b = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
- %b2 = getelementptr inbounds %struct.E* %this, i32 0, i32 1
+ %b2 = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 1
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
ret %struct.E* %this
}
; CHECK: ldr {{w[0-9]+}}, [x[[REG:[0-9]+]], #4]
; CHECK: str {{w[0-9]+}}, [x[[REG]], #8]
%0 = load i32** @a, align 8, !tbaa !1
- %arrayidx = getelementptr inbounds i32* %0, i64 2
+ %arrayidx = getelementptr inbounds i32, i32* %0, i64 2
store i32 %i, i32* %arrayidx, align 4, !tbaa !5
- %arrayidx1 = getelementptr inbounds i32* %0, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %0, i64 1
%1 = load i32* %arrayidx1, align 4, !tbaa !5
%add = add nsw i32 %k, %i
store i32 %add, i32* @m, align 4, !tbaa !5
%.pre37 = load i32** @zptr32, align 8
%dec = add nsw i32 %arg, -1
%idxprom8 = sext i32 %dec to i64
- %arrayidx9 = getelementptr inbounds i32* %.pre37, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds i32, i32* %.pre37, i64 %idxprom8
%tmp = trunc i64 %var to i32
store i32 %tmp, i32* %arrayidx9, align 4
ret void
%.pre37 = load i16** @zptr16, align 8
%dec = add nsw i32 %arg, -1
%idxprom8 = sext i32 %dec to i64
- %arrayidx9 = getelementptr inbounds i16* %.pre37, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds i16, i16* %.pre37, i64 %idxprom8
%tmp = trunc i64 %var to i16
store i16 %tmp, i16* %arrayidx9, align 4
ret void
%.pre37 = load i8** @zptr8, align 8
%dec = add nsw i32 %arg, -1
%idxprom8 = sext i32 %dec to i64
- %arrayidx9 = getelementptr inbounds i8* %.pre37, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds i8, i8* %.pre37, i64 %idxprom8
%tmp = trunc i64 %var to i8
store i8 %tmp, i8* %arrayidx9, align 4
ret void
; CHECK: ldr x[[REG:[0-9]+]], [x0]
; CHECK: str q0, [x[[REG]]]
%tmp1 = load %type1** %argtable, align 8
- %tmp2 = getelementptr inbounds %type1* %tmp1, i64 0, i32 0
+ %tmp2 = getelementptr inbounds %type1, %type1* %tmp1, i64 0, i32 0
store <16 x i8> zeroinitializer, <16 x i8>* %tmp2, align 16
ret void
}
; CHECK: ldr x[[REG:[0-9]+]], [x0]
; CHECK: str d0, [x[[REG]]]
%tmp1 = load %type2** %argtable, align 8
- %tmp2 = getelementptr inbounds %type2* %tmp1, i64 0, i32 0
+ %tmp2 = getelementptr inbounds %type2, %type2* %tmp1, i64 0, i32 0
store <8 x i8> zeroinitializer, <8 x i8>* %tmp2, align 8
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <2 x i64>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i64 %offset
%tmp = load <2 x i64>* %arrayidx, align 16
%tmp1 = load <2 x i64>** @globalArray64x2, align 8
- %arrayidx1 = getelementptr inbounds <2 x i64>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %tmp1, i64 %offset
store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
- %arrayidx = getelementptr inbounds <2 x i64>* %array, i64 3
+ %arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %array, i64 3
%tmp = load <2 x i64>* %arrayidx, align 16
%tmp1 = load <2 x i64>** @globalArray64x2, align 8
- %arrayidx1 = getelementptr inbounds <2 x i64>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %tmp1, i64 5
store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <4 x i32>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %array, i64 %offset
%tmp = load <4 x i32>* %arrayidx, align 16
%tmp1 = load <4 x i32>** @globalArray32x4, align 8
- %arrayidx1 = getelementptr inbounds <4 x i32>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %tmp1, i64 %offset
store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
- %arrayidx = getelementptr inbounds <4 x i32>* %array, i64 3
+ %arrayidx = getelementptr inbounds <4 x i32>, <4 x i32>* %array, i64 3
%tmp = load <4 x i32>* %arrayidx, align 16
%tmp1 = load <4 x i32>** @globalArray32x4, align 8
- %arrayidx1 = getelementptr inbounds <4 x i32>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <4 x i32>, <4 x i32>* %tmp1, i64 5
store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <8 x i16>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <8 x i16>, <8 x i16>* %array, i64 %offset
%tmp = load <8 x i16>* %arrayidx, align 16
%tmp1 = load <8 x i16>** @globalArray16x8, align 8
- %arrayidx1 = getelementptr inbounds <8 x i16>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <8 x i16>, <8 x i16>* %tmp1, i64 %offset
store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
- %arrayidx = getelementptr inbounds <8 x i16>* %array, i64 3
+ %arrayidx = getelementptr inbounds <8 x i16>, <8 x i16>* %array, i64 3
%tmp = load <8 x i16>* %arrayidx, align 16
%tmp1 = load <8 x i16>** @globalArray16x8, align 8
- %arrayidx1 = getelementptr inbounds <8 x i16>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <8 x i16>, <8 x i16>* %tmp1, i64 5
store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <16 x i8>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <16 x i8>, <16 x i8>* %array, i64 %offset
%tmp = load <16 x i8>* %arrayidx, align 16
%tmp1 = load <16 x i8>** @globalArray8x16, align 8
- %arrayidx1 = getelementptr inbounds <16 x i8>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <16 x i8>, <16 x i8>* %tmp1, i64 %offset
store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:q[0-9]+]], [x0, #48]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #80]
- %arrayidx = getelementptr inbounds <16 x i8>* %array, i64 3
+ %arrayidx = getelementptr inbounds <16 x i8>, <16 x i8>* %array, i64 3
%tmp = load <16 x i8>* %arrayidx, align 16
%tmp1 = load <16 x i8>** @globalArray8x16, align 8
- %arrayidx1 = getelementptr inbounds <16 x i8>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <16 x i8>, <16 x i8>* %tmp1, i64 5
store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <1 x i64>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <1 x i64>, <1 x i64>* %array, i64 %offset
%tmp = load <1 x i64>* %arrayidx, align 8
%tmp1 = load <1 x i64>** @globalArray64x1, align 8
- %arrayidx1 = getelementptr inbounds <1 x i64>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <1 x i64>, <1 x i64>* %tmp1, i64 %offset
store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, #24]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
- %arrayidx = getelementptr inbounds <1 x i64>* %array, i64 3
+ %arrayidx = getelementptr inbounds <1 x i64>, <1 x i64>* %array, i64 3
%tmp = load <1 x i64>* %arrayidx, align 8
%tmp1 = load <1 x i64>** @globalArray64x1, align 8
- %arrayidx1 = getelementptr inbounds <1 x i64>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <1 x i64>, <1 x i64>* %tmp1, i64 5
store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <2 x i32>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <2 x i32>, <2 x i32>* %array, i64 %offset
%tmp = load <2 x i32>* %arrayidx, align 8
%tmp1 = load <2 x i32>** @globalArray32x2, align 8
- %arrayidx1 = getelementptr inbounds <2 x i32>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %tmp1, i64 %offset
store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, #24]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
- %arrayidx = getelementptr inbounds <2 x i32>* %array, i64 3
+ %arrayidx = getelementptr inbounds <2 x i32>, <2 x i32>* %array, i64 3
%tmp = load <2 x i32>* %arrayidx, align 8
%tmp1 = load <2 x i32>** @globalArray32x2, align 8
- %arrayidx1 = getelementptr inbounds <2 x i32>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <2 x i32>, <2 x i32>* %tmp1, i64 5
store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <4 x i16>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <4 x i16>, <4 x i16>* %array, i64 %offset
%tmp = load <4 x i16>* %arrayidx, align 8
%tmp1 = load <4 x i16>** @globalArray16x4, align 8
- %arrayidx1 = getelementptr inbounds <4 x i16>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <4 x i16>, <4 x i16>* %tmp1, i64 %offset
store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, #24]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], #40]
- %arrayidx = getelementptr inbounds <4 x i16>* %array, i64 3
+ %arrayidx = getelementptr inbounds <4 x i16>, <4 x i16>* %array, i64 3
%tmp = load <4 x i16>* %arrayidx, align 8
%tmp1 = load <4 x i16>** @globalArray16x4, align 8
- %arrayidx1 = getelementptr inbounds <4 x i16>* %tmp1, i64 5
+ %arrayidx1 = getelementptr inbounds <4 x i16>, <4 x i16>* %tmp1, i64 5
store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
ret void
}
; CHECK: ldr [[DEST:d[0-9]+]], [x0, [[SHIFTEDOFFSET]]]
; CHECK: ldr [[BASE:x[0-9]+]],
; CHECK: str [[DEST]], {{\[}}[[BASE]], [[SHIFTEDOFFSET]]]
- %arrayidx = getelementptr inbounds <8 x i8>* %array, i64 %offset
+ %arrayidx = getelementptr inbounds <8 x i8>, <8 x i8>* %array, i64 %offset
%tmp = load <8 x i8>* %arrayidx, align 8
%tmp1 = load <8 x i8>** @globalArray8x8, align 8
- %arrayidx1 = getelementptr inbounds <8 x i8>* %tmp1, i64 %offset
+ %arrayidx1 = getelementptr inbounds <8 x i8>, <8 x i8>* %tmp1, i64 %offset
store <8 x i8> %tmp, <8 x i8>* %arrayidx1, align 8
ret void
}
; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
; CHECK-NEXT: mul.8b v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i8* %sp0, i64 1
+ %addr = getelementptr i8, i8* %sp0, i64 1
%pix_sp0.0.copyload = load i8* %addr, align 1
%vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <8 x i8> %vec, %vec
; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, #1]
; CHECK-NEXT: mul.16b v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i8* %sp0, i64 1
+ %addr = getelementptr i8, i8* %sp0, i64 1
%pix_sp0.0.copyload = load i8* %addr, align 1
%vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <16 x i8> %vec, %vec
; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
; CHECK-NEXT: mul.4h v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i16* %sp0, i64 1
+ %addr = getelementptr i16, i16* %sp0, i64 1
%pix_sp0.0.copyload = load i16* %addr, align 1
%vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <4 x i16> %vec, %vec
; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, #2]
; CHECK-NEXT: mul.8h v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i16* %sp0, i64 1
+ %addr = getelementptr i16, i16* %sp0, i64 1
%pix_sp0.0.copyload = load i16* %addr, align 1
%vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <8 x i16> %vec, %vec
; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
; CHECK-NEXT: mul.2s v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <2 x i32> %vec, %vec
; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, #4]
; CHECK-NEXT: mul.4s v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i32* %sp0, i64 1
+ %addr = getelementptr i32, i32* %sp0, i64 1
%pix_sp0.0.copyload = load i32* %addr, align 1
%vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <4 x i32> %vec, %vec
; CHECK-LABEL: fct22:
; CHECK: ldr d0, [x0, #8]
entry:
- %addr = getelementptr i64* %sp0, i64 1
+ %addr = getelementptr i64, i64* %sp0, i64 1
%pix_sp0.0.copyload = load i64* %addr, align 1
%vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
ret <1 x i64> %vec
; CHECK-LABEL: fct23:
; CHECK: ldr d[[REGNUM:[0-9]+]], [x0, #8]
entry:
- %addr = getelementptr i64* %sp0, i64 1
+ %addr = getelementptr i64, i64* %sp0, i64 1
%pix_sp0.0.copyload = load i64* %addr, align 1
%vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
ret <2 x i64> %vec
; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
; CHECK-NEXT: mul.8b v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i8* %sp0, i64 %offset
+ %addr = getelementptr i8, i8* %sp0, i64 %offset
%pix_sp0.0.copyload = load i8* %addr, align 1
%vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <8 x i8> %vec, %vec
; CHECK: ldr b[[REGNUM:[0-9]+]], [x0, x1]
; CHECK-NEXT: mul.16b v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i8* %sp0, i64 %offset
+ %addr = getelementptr i8, i8* %sp0, i64 %offset
%pix_sp0.0.copyload = load i8* %addr, align 1
%vec = insertelement <16 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <16 x i8> %vec, %vec
; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
; CHECK-NEXT: mul.4h v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i16* %sp0, i64 %offset
+ %addr = getelementptr i16, i16* %sp0, i64 %offset
%pix_sp0.0.copyload = load i16* %addr, align 1
%vec = insertelement <4 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <4 x i16> %vec, %vec
; CHECK: ldr h[[REGNUM:[0-9]+]], [x0, x1, lsl #1]
; CHECK-NEXT: mul.8h v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i16* %sp0, i64 %offset
+ %addr = getelementptr i16, i16* %sp0, i64 %offset
%pix_sp0.0.copyload = load i16* %addr, align 1
%vec = insertelement <8 x i16> undef, i16 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <8 x i16> %vec, %vec
; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
; CHECK-NEXT: mul.2s v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i32* %sp0, i64 %offset
+ %addr = getelementptr i32, i32* %sp0, i64 %offset
%pix_sp0.0.copyload = load i32* %addr, align 1
%vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <2 x i32> %vec, %vec
; CHECK: ldr s[[REGNUM:[0-9]+]], [x0, x1, lsl #2]
; CHECK-NEXT: mul.4s v0, v[[REGNUM]], v[[REGNUM]]
entry:
- %addr = getelementptr i32* %sp0, i64 %offset
+ %addr = getelementptr i32, i32* %sp0, i64 %offset
%pix_sp0.0.copyload = load i32* %addr, align 1
%vec = insertelement <4 x i32> undef, i32 %pix_sp0.0.copyload, i32 0
%vmull.i = mul <4 x i32> %vec, %vec
; CHECK-LABEL: fct30:
; CHECK: ldr d0, [x0, x1, lsl #3]
entry:
- %addr = getelementptr i64* %sp0, i64 %offset
+ %addr = getelementptr i64, i64* %sp0, i64 %offset
%pix_sp0.0.copyload = load i64* %addr, align 1
%vec = insertelement <1 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
ret <1 x i64> %vec
; CHECK-LABEL: fct31:
; CHECK: ldr d0, [x0, x1, lsl #3]
entry:
- %addr = getelementptr i64* %sp0, i64 %offset
+ %addr = getelementptr i64, i64* %sp0, i64 %offset
%pix_sp0.0.copyload = load i64* %addr, align 1
%vec = insertelement <2 x i64> undef, i64 %pix_sp0.0.copyload, i32 0
ret <2 x i64> %vec
; CHECK-NEXT: stur [[VAL2]], {{\[}}sp, #216]
entry:
%Control_Points = alloca [16 x [3 x double]], align 8
- %arraydecay5.3.1 = getelementptr inbounds [16 x [3 x double]]* %Control_Points, i64 0, i64 9, i64 0
+ %arraydecay5.3.1 = getelementptr inbounds [16 x [3 x double]], [16 x [3 x double]]* %Control_Points, i64 0, i64 9, i64 0
%tmp14 = bitcast double* %arraydecay5.3.1 to i8*
- %arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct* %Shape, i64 0, i32 12, i64 1, i64 3, i64 0
+ %arraydecay11.3.1 = getelementptr inbounds %struct.Bicubic_Patch_Struct, %struct.Bicubic_Patch_Struct* %Shape, i64 0, i32 12, i64 1, i64 3, i64 0
%tmp15 = bitcast double* %arraydecay11.3.1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp14, i8* %tmp15, i64 24, i32 1, i1 false)
ret void
; CHECK: ldp
; CHECK-NEXT: add
; CHECK-NEXT: ret
- %add.ptr = getelementptr inbounds i64* %bar, i64 1
+ %add.ptr = getelementptr inbounds i64, i64* %bar, i64 1
%tmp = load i64* %add.ptr, align 8
- %add.ptr1 = getelementptr inbounds i64* %bar, i64 2
+ %add.ptr1 = getelementptr inbounds i64, i64* %bar, i64 2
%tmp1 = load i64* %add.ptr1, align 8
%add = add nsw i64 %tmp1, %tmp
ret i64 %add
; CHECK-NEXT: ldr
; CHECK-NEXT: add
; CHECK-NEXT: ret
- %add.ptr = getelementptr inbounds i64* %bar, i64 1
+ %add.ptr = getelementptr inbounds i64, i64* %bar, i64 1
%tmp = load volatile i64* %add.ptr, align 8
- %add.ptr1 = getelementptr inbounds i64* %bar, i64 2
+ %add.ptr1 = getelementptr inbounds i64, i64* %bar, i64 2
%tmp1 = load volatile i64* %add.ptr1, align 8
%add = add nsw i64 %tmp1, %tmp
ret i64 %add
; CHECK-LABEL: test_zextloadi1_unscaled:
; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
- %addr = getelementptr i1* %base, i32 -7
+ %addr = getelementptr i1, i1* %base, i32 -7
%val = load i1* %addr, align 1
%extended = zext i1 %val to i32
; CHECK-LABEL: test_zextloadi8_unscaled:
; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-7]
- %addr = getelementptr i8* %base, i32 -7
+ %addr = getelementptr i8, i8* %base, i32 -7
%val = load i8* %addr, align 1
%extended = zext i8 %val to i32
; CHECK-LABEL: test_zextloadi16_unscaled:
; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-14]
- %addr = getelementptr i16* %base, i32 -7
+ %addr = getelementptr i16, i16* %base, i32 -7
%val = load i16* %addr, align 2
%extended = zext i16 %val to i32
br label %end
else:
%tmp3 = call i8* @llvm.returnaddress(i32 0)
- %ptr = getelementptr inbounds i8* %tmp3, i64 -16
+ %ptr = getelementptr inbounds i8, i8* %tmp3, i64 -16
%ld = load i8* %ptr, align 4
%tmp2 = inttoptr i8 %ld to i8*
br label %end
; CHECK: ldrsh
; CHECK-NEXT: cmn
entry:
- %addr = getelementptr inbounds i16* %ptr1, i16 0
+ %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
%val = load i16* %addr, align 2
%cmp = icmp eq i16 %val, -1
br i1 %cmp, label %if, label %if.then
; CHECK: ldrsh
; CHECK-NEXT: cmn
entry:
- %addr = getelementptr inbounds i16* %ptr1, i16 0
+ %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
%val = load i16* %addr, align 2
%cmp = icmp sge i16 %val, -1
br i1 %cmp, label %if, label %if.then
; CHECK: ldrsh
; CHECK-NEXT: cmn
entry:
- %addr = getelementptr inbounds i16* %ptr1, i16 0
+ %addr = getelementptr inbounds i16, i16* %ptr1, i16 0
%val = load i16* %addr, align 2
%cmp = icmp uge i16 %val, -1
br i1 %cmp, label %if, label %if.then
; no checks for this case, it just should be processed without errors
define void @combine_non_adjacent_cmp_br(%struct.Struct* nocapture readonly %hdCall) #0 {
entry:
- %size = getelementptr inbounds %struct.Struct* %hdCall, i64 0, i32 0
+ %size = getelementptr inbounds %struct.Struct, %struct.Struct* %hdCall, i64 0, i32 0
%0 = load i64* %size, align 8
br label %land.rhs
br i1 %cmp, label %land.lhs.true, label %if.end
land.lhs.true: ; preds = %entry
- %arrayidx = getelementptr inbounds i8** %argv, i64 1
+ %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1
%0 = load i8** %arrayidx, align 8
%cmp1 = icmp eq i8* %0, null
br i1 %cmp1, label %if.end, label %return
%src = alloca { double, double }, align 8
%dst = alloca { double, double }, align 8
- %src.realp = getelementptr inbounds { double, double }* %src, i32 0, i32 0
+ %src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
%src.real = load double* %src.realp
- %src.imagp = getelementptr inbounds { double, double }* %src, i32 0, i32 1
+ %src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
%src.imag = load double* %src.imagp
- %dst.realp = getelementptr inbounds { double, double }* %dst, i32 0, i32 0
- %dst.imagp = getelementptr inbounds { double, double }* %dst, i32 0, i32 1
+ %dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
+ %dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1
store double %src.real, double* %dst.realp
store double %src.imag, double* %dst.imagp
ret void
for.body4.us:
%indvars.iv = phi i64 [ 0, %for.body4.lr.ph.us ], [ %indvars.iv.next, %for.body4.us ]
- %arrayidx6.us = getelementptr inbounds [8 x i8]* %a, i64 %indvars.iv26, i64 %indvars.iv
+ %arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %indvars.iv26, i64 %indvars.iv
%0 = load i8* %arrayidx6.us, align 1
%idxprom7.us = zext i8 %0 to i64
- %arrayidx8.us = getelementptr inbounds i8* %box, i64 %idxprom7.us
+ %arrayidx8.us = getelementptr inbounds i8, i8* %box, i64 %idxprom7.us
%1 = load i8* %arrayidx8.us, align 1
store i8 %1, i8* %arrayidx6.us, align 1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@arr_var = extern_weak global [10 x i32]
define i32* @bar() {
- %addr = getelementptr [10 x i32]* @arr_var, i32 0, i32 5
+ %addr = getelementptr [10 x i32], [10 x i32]* @arr_var, i32 0, i32 5
; CHECK: adrp x[[ADDRHI:[0-9]+]], :got:arr_var
; CHECK-NEXT: ret
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
%tmp = load i16* %arrayidx, align 2
%tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
ret float %tmp1
; CHECK-NEXT: ret
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
%tmp = load i16* %arrayidx, align 2
%conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
ret double %conv
; CHECK-NEXT: fcvt s0, [[HREG]]
; CHECK-NEXT: ret
- %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
%tmp = load i16* %arrayidx, align 2
%tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
ret float %tmp1
; CHECK-NEXT: fcvt d0, [[HREG]]
; CHECK-NEXT: ret
- %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
%tmp = load i16* %arrayidx, align 2
%conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
ret double %conv
; CHECK-NEXT: fcvt s0, [[HREG]]
; CHECK-NEXT: ret
- %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
%tmp = load i16* %arrayidx, align 2
%tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
ret float %tmp1
; CHECK-NEXT: fcvt d0, [[HREG]]
; CHECK-NEXT: ret
- %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
%tmp = load i16* %arrayidx, align 2
%conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
ret double %conv
; CHECK-NEXT: fcvt s0, [[HREG]]
; CHECK-NEXT: ret
- %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
%tmp = load i16* %arrayidx, align 2
%tmp1 = tail call float @llvm.convert.from.fp16.f32(i16 %tmp)
ret float %tmp1
; CHECK-NEXT: fcvt d0, [[HREG]]
; CHECK-NEXT: ret
- %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
%tmp = load i16* %arrayidx, align 2
%conv = tail call double @llvm.convert.from.fp16.f64(i16 %tmp)
ret double %conv
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
%conv = fptrunc double %val to float
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds i16* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %idxprom
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
; CHECK-NEXT: ret
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
- %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
%conv = fptrunc double %val to float
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
- %arrayidx = getelementptr inbounds i16* %a, i64 %i
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 %i
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
; CHECK-NEXT: ret
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
- %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
%conv = fptrunc double %val to float
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
- %arrayidx = getelementptr inbounds i16* %a, i64 10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 10
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
; CHECK-NEXT: ret
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %val)
- %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
%conv = fptrunc double %val to float
%tmp = tail call i16 @llvm.convert.to.fp16.f32(float %conv)
- %arrayidx = getelementptr inbounds i16* %a, i64 -10
+ %arrayidx = getelementptr inbounds i16, i16* %a, i64 -10
store i16 %tmp, i16* %arrayidx, align 2
ret void
}
define double* @test_struct(%struct.foo* %f) {
; CHECK-LABEL: test_struct
; CHECK: add x0, x0, #24
- %1 = getelementptr inbounds %struct.foo* %f, i64 0, i32 3
+ %1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i64 0, i32 3
ret double* %1
}
; CHECK-LABEL: test_array1
; CHECK: orr [[REG:x[0-9]+]], xzr, #0x4
; CHECK-NEXT: madd x0, x1, [[REG]], x0
- %1 = getelementptr inbounds i32* %a, i64 %i
+ %1 = getelementptr inbounds i32, i32* %a, i64 %i
ret i32* %1
}
define i32* @test_array2(i32* %a) {
; CHECK-LABEL: test_array2
; CHECK: add x0, x0, #16
- %1 = getelementptr inbounds i32* %a, i64 4
+ %1 = getelementptr inbounds i32, i32* %a, i64 4
ret i32* %1
}
define i32* @test_array3(i32* %a) {
; CHECK-LABEL: test_array3
; CHECK: add x0, x0, #1, lsl #12
- %1 = getelementptr inbounds i32* %a, i64 1024
+ %1 = getelementptr inbounds i32, i32* %a, i64 1024
ret i32* %1
}
; CHECK-LABEL: test_array4
; CHECK: movz [[REG:x[0-9]+]], #0x1008
; CHECK-NEXT: add x0, x0, [[REG]]
- %1 = getelementptr inbounds i32* %a, i64 1026
+ %1 = getelementptr inbounds i32, i32* %a, i64 1026
ret i32* %1
}
; CHECK: sxtw [[REG1:x[0-9]+]], w1
; CHECK-NEXT: orr [[REG2:x[0-9]+]], xzr, #0x4
; CHECK-NEXT: madd {{x[0-9]+}}, [[REG1]], [[REG2]], x0
- %1 = getelementptr inbounds i32* %a, i32 %i
+ %1 = getelementptr inbounds i32, i32* %a, i32 %i
ret i32* %1
}
; with memcpy.
define void @take_struct(%myStruct* byval %structval) {
; CHECK-LABEL: take_struct:
- %addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
- %addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
+ %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
+ %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
%val0 = load volatile i32* %addr0
; Some weird move means x0 is used for one access
define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %structval) {
; CHECK-LABEL: check_byval_align:
- %addr0 = getelementptr %myStruct* %structval, i64 0, i32 2
- %addr1 = getelementptr %myStruct* %structval, i64 0, i32 0
+ %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2
+ %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0
%val0 = load volatile i32* %addr0
; Some weird move means x0 is used for one access
; if LLVM does it to %myStruct too. So this is the simplest check
define void @return_large_struct(%myStruct* sret %retval) {
; CHECK-LABEL: return_large_struct:
- %addr0 = getelementptr %myStruct* %retval, i64 0, i32 0
- %addr1 = getelementptr %myStruct* %retval, i64 0, i32 1
- %addr2 = getelementptr %myStruct* %retval, i64 0, i32 2
+ %addr0 = getelementptr %myStruct, %myStruct* %retval, i64 0, i32 0
+ %addr1 = getelementptr %myStruct, %myStruct* %retval, i64 0, i32 1
+ %addr2 = getelementptr %myStruct, %myStruct* %retval, i64 0, i32 2
store i64 42, i64* %addr0
store i8 2, i8* %addr1
i32* %var6, %myStruct* byval %struct, i32* byval %stacked,
double %notstacked) {
; CHECK-LABEL: struct_on_stack:
- %addr = getelementptr %myStruct* %struct, i64 0, i32 0
+ %addr = getelementptr %myStruct, %myStruct* %struct, i64 0, i32 0
%val64 = load volatile i64* %addr
store volatile i64 %val64, i64* @var64
; Currently nothing on local stack, so struct should be at sp
;CHECK-APPLE-IOS: add x8, x8, __MergedGlobals_x@PAGEOFF
;CHECK-APPLE-IOS: adrp x9, __MergedGlobals_y@PAGE
;CHECK-APPLE-IOS: add x9, x9, __MergedGlobals_y@PAGEOFF
- %x3 = getelementptr inbounds [1000 x i32]* @x, i32 0, i64 3
- %y3 = getelementptr inbounds [1000 x i32]* @y, i32 0, i64 3
+ %x3 = getelementptr inbounds [1000 x i32], [1000 x i32]* @x, i32 0, i64 3
+ %y3 = getelementptr inbounds [1000 x i32], [1000 x i32]* @y, i32 0, i64 3
store i32 %a1, i32* %x3, align 4
store i32 %a2, i32* %y3, align 4
store i32 %a3, i32* @z, align 4
; CHECK-LABEL: check_size:
%starti = ptrtoint %struct* @var to i64
- %endp = getelementptr %struct* @var, i64 1
+ %endp = getelementptr %struct, %struct* @var, i64 1
%endi = ptrtoint %struct* %endp to i64
%diff = sub i64 %endi, %starti
; CHECK-LABEL: check_field:
%starti = ptrtoint %struct* @var to i64
- %endp = getelementptr %struct* @var, i64 0, i32 1
+ %endp = getelementptr %struct, %struct* @var, i64 0, i32 1
%endi = ptrtoint i128* %endp to i64
%diff = sub i64 %endi, %starti
call void @llvm.aarch64.dmb(i32 15); CHECK: dmb sy
- %d1 = getelementptr i32* %d, i64 1
+ %d1 = getelementptr i32, i32* %d, i64 1
store i32 %b, i32* %d1 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
ret void
call void @llvm.aarch64.dsb(i32 15); CHECK: dsb sy
- %d1 = getelementptr i32* %d, i64 1
+ %d1 = getelementptr i32, i32* %d, i64 1
store i32 %b, i32* %d1 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
ret void
call void @llvm.aarch64.isb(i32 15); CHECK: isb
- %d1 = getelementptr i32* %d, i64 1
+ %d1 = getelementptr i32, i32* %d, i64 1
store i32 %b, i32* %d1 ; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, #4]
ret void
; CHECK-LABEL: load-pre-indexed-word
; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
%add = load i32* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
tail call void @bar_word(%s.word* %c, i32 %add)
ret void
}
; CHECK-LABEL: store-pre-indexed-word
; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1, i32 0
store i32 %val, i32* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.word* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.word, %struct.word* %ptr, i64 0, i32 1
tail call void @bar_word(%s.word* %c, i32 %val)
ret void
}
; CHECK-LABEL: load-pre-indexed-doubleword
; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
%add = load i64* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
tail call void @bar_doubleword(%s.doubleword* %c, i64 %add)
ret void
}
; CHECK-LABEL: store-pre-indexed-doubleword
; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1, i32 0
store i64 %val, i64* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.doubleword* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.doubleword, %struct.doubleword* %ptr, i64 0, i32 1
tail call void @bar_doubleword(%s.doubleword* %c, i64 %val)
ret void
}
; CHECK-LABEL: load-pre-indexed-quadword
; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1, i32 0
%add = load fp128* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1
tail call void @bar_quadword(%s.quadword* %c, fp128 %add)
ret void
}
; CHECK-LABEL: store-pre-indexed-quadword
; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1, i32 0
store fp128 %val, fp128* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.quadword* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.quadword, %struct.quadword* %ptr, i64 0, i32 1
tail call void @bar_quadword(%s.quadword* %c, fp128 %val)
ret void
}
; CHECK-LABEL: load-pre-indexed-float
; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1, i32 0
%add = load float* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1
tail call void @bar_float(%s.float* %c, float %add)
ret void
}
; CHECK-LABEL: store-pre-indexed-float
; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1, i32 0
store float %val, float* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.float* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.float, %struct.float* %ptr, i64 0, i32 1
tail call void @bar_float(%s.float* %c, float %val)
ret void
}
; CHECK-LABEL: load-pre-indexed-double
; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1, i32 0
%add = load double* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1
tail call void @bar_double(%s.double* %c, double %add)
ret void
}
; CHECK-LABEL: store-pre-indexed-double
; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}, #32]!
entry:
- %a = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1, i32 0
+ %a = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1, i32 0
store double %val, double* %a, align 4
br label %bar
bar:
- %c = getelementptr inbounds %struct.double* %ptr, i64 0, i32 1
+ %c = getelementptr inbounds %struct.double, %struct.double* %ptr, i64 0, i32 1
tail call void @bar_double(%s.double* %c, double %val)
ret void
}
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.i32** %this
- %gep1 = getelementptr inbounds %pre.struct.i32* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.i32* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.i64** %this
- %gep1 = getelementptr inbounds %pre.struct.i64* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.i64* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.i128** %this
- %gep1 = getelementptr inbounds %pre.struct.i128* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.i128* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.float** %this
- %gep1 = getelementptr inbounds %pre.struct.float* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.float* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.double** %this
- %gep1 = getelementptr inbounds %pre.struct.double* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.double* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.i32** %this
- %gep1 = getelementptr inbounds %pre.struct.i32* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.i32* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi i32* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.i64** %this
- %gep1 = getelementptr inbounds %pre.struct.i64* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.i64* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi i64* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.i128** %this
- %gep1 = getelementptr inbounds %pre.struct.i128* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.i128* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi <2 x i64>* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.float** %this
- %gep1 = getelementptr inbounds %pre.struct.float* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.float* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi float* [ %gep1, %if.then ], [ %gep2, %if.end ]
br i1 %cond, label %if.then, label %if.end
if.then:
%load1 = load %pre.struct.double** %this
- %gep1 = getelementptr inbounds %pre.struct.double* %load1, i64 0, i32 1
+ %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
br label %return
if.end:
- %gep2 = getelementptr inbounds %pre.struct.double* %load2, i64 0, i32 2
+ %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
br label %return
return:
%retptr = phi double* [ %gep1, %if.then ], [ %gep2, %if.end ]
; CHECK-LABEL: load-post-indexed-word
; CHECK: ldr w{{[0-9]+}}, [x{{[0-9]+}}], #16
entry:
- %gep1 = getelementptr i32* %array, i64 2
+ %gep1 = getelementptr i32, i32* %array, i64 2
br label %body
body:
%iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr i32* %iv2, i64 -1
+ %gep2 = getelementptr i32, i32* %iv2, i64 -1
%load = load i32* %gep2
call void @use-word(i32 %load)
%load2 = load i32* %iv2
call void @use-word(i32 %load2)
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr i32* %iv2, i64 4
+ %gep3 = getelementptr i32, i32* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: load-post-indexed-doubleword
; CHECK: ldr x{{[0-9]+}}, [x{{[0-9]+}}], #32
entry:
- %gep1 = getelementptr i64* %array, i64 2
+ %gep1 = getelementptr i64, i64* %array, i64 2
br label %body
body:
%iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr i64* %iv2, i64 -1
+ %gep2 = getelementptr i64, i64* %iv2, i64 -1
%load = load i64* %gep2
call void @use-doubleword(i64 %load)
%load2 = load i64* %iv2
call void @use-doubleword(i64 %load2)
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr i64* %iv2, i64 4
+ %gep3 = getelementptr i64, i64* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: load-post-indexed-quadword
; CHECK: ldr q{{[0-9]+}}, [x{{[0-9]+}}], #64
entry:
- %gep1 = getelementptr <2 x i64>* %array, i64 2
+ %gep1 = getelementptr <2 x i64>, <2 x i64>* %array, i64 2
br label %body
body:
%iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr <2 x i64>* %iv2, i64 -1
+ %gep2 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 -1
%load = load <2 x i64>* %gep2
call void @use-quadword(<2 x i64> %load)
%load2 = load <2 x i64>* %iv2
call void @use-quadword(<2 x i64> %load2)
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr <2 x i64>* %iv2, i64 4
+ %gep3 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: load-post-indexed-float
; CHECK: ldr s{{[0-9]+}}, [x{{[0-9]+}}], #16
entry:
- %gep1 = getelementptr float* %array, i64 2
+ %gep1 = getelementptr float, float* %array, i64 2
br label %body
body:
%iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr float* %iv2, i64 -1
+ %gep2 = getelementptr float, float* %iv2, i64 -1
%load = load float* %gep2
call void @use-float(float %load)
%load2 = load float* %iv2
call void @use-float(float %load2)
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr float* %iv2, i64 4
+ %gep3 = getelementptr float, float* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: load-post-indexed-double
; CHECK: ldr d{{[0-9]+}}, [x{{[0-9]+}}], #32
entry:
- %gep1 = getelementptr double* %array, i64 2
+ %gep1 = getelementptr double, double* %array, i64 2
br label %body
body:
%iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr double* %iv2, i64 -1
+ %gep2 = getelementptr double, double* %iv2, i64 -1
%load = load double* %gep2
call void @use-double(double %load)
%load2 = load double* %iv2
call void @use-double(double %load2)
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr double* %iv2, i64 4
+ %gep3 = getelementptr double, double* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: store-post-indexed-word
; CHECK: str w{{[0-9]+}}, [x{{[0-9]+}}], #16
entry:
- %gep1 = getelementptr i32* %array, i64 2
+ %gep1 = getelementptr i32, i32* %array, i64 2
br label %body
body:
%iv2 = phi i32* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr i32* %iv2, i64 -1
+ %gep2 = getelementptr i32, i32* %iv2, i64 -1
%load = load i32* %gep2
call void @use-word(i32 %load)
store i32 %val, i32* %iv2
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr i32* %iv2, i64 4
+ %gep3 = getelementptr i32, i32* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: store-post-indexed-doubleword
; CHECK: str x{{[0-9]+}}, [x{{[0-9]+}}], #32
entry:
- %gep1 = getelementptr i64* %array, i64 2
+ %gep1 = getelementptr i64, i64* %array, i64 2
br label %body
body:
%iv2 = phi i64* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr i64* %iv2, i64 -1
+ %gep2 = getelementptr i64, i64* %iv2, i64 -1
%load = load i64* %gep2
call void @use-doubleword(i64 %load)
store i64 %val, i64* %iv2
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr i64* %iv2, i64 4
+ %gep3 = getelementptr i64, i64* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: store-post-indexed-quadword
; CHECK: str q{{[0-9]+}}, [x{{[0-9]+}}], #64
entry:
- %gep1 = getelementptr <2 x i64>* %array, i64 2
+ %gep1 = getelementptr <2 x i64>, <2 x i64>* %array, i64 2
br label %body
body:
%iv2 = phi <2 x i64>* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr <2 x i64>* %iv2, i64 -1
+ %gep2 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 -1
%load = load <2 x i64>* %gep2
call void @use-quadword(<2 x i64> %load)
store <2 x i64> %val, <2 x i64>* %iv2
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr <2 x i64>* %iv2, i64 4
+ %gep3 = getelementptr <2 x i64>, <2 x i64>* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: store-post-indexed-float
; CHECK: str s{{[0-9]+}}, [x{{[0-9]+}}], #16
entry:
- %gep1 = getelementptr float* %array, i64 2
+ %gep1 = getelementptr float, float* %array, i64 2
br label %body
body:
%iv2 = phi float* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr float* %iv2, i64 -1
+ %gep2 = getelementptr float, float* %iv2, i64 -1
%load = load float* %gep2
call void @use-float(float %load)
store float %val, float* %iv2
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr float* %iv2, i64 4
+ %gep3 = getelementptr float, float* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
; CHECK-LABEL: store-post-indexed-double
; CHECK: str d{{[0-9]+}}, [x{{[0-9]+}}], #32
entry:
- %gep1 = getelementptr double* %array, i64 2
+ %gep1 = getelementptr double, double* %array, i64 2
br label %body
body:
%iv2 = phi double* [ %gep3, %body ], [ %gep1, %entry ]
%iv = phi i64 [ %iv.next, %body ], [ %count, %entry ]
- %gep2 = getelementptr double* %iv2, i64 -1
+ %gep2 = getelementptr double, double* %iv2, i64 -1
%load = load double* %gep2
call void @use-double(double %load)
store double %val, double* %iv2
%iv.next = add i64 %iv, -4
- %gep3 = getelementptr double* %iv2, i64 4
+ %gep3 = getelementptr double, double* %iv2, i64 4
%cond = icmp eq i64 %iv.next, 0
br i1 %cond, label %exit, label %body
%phi1 = phi i32* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi i32* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
- %gep1 = getelementptr i32* %phi1, i64 -1
+ %gep1 = getelementptr i32, i32* %phi1, i64 -1
%load1 = load i32* %gep1
- %gep2 = getelementptr i32* %phi2, i64 -1
+ %gep2 = getelementptr i32, i32* %phi2, i64 -1
store i32 %load1, i32* %gep2
%load2 = load i32* %phi1
store i32 %load2, i32* %phi2
%dec.i = add nsw i64 %i, -1
- %gep3 = getelementptr i32* %phi2, i64 -2
- %gep4 = getelementptr i32* %phi1, i64 -2
+ %gep3 = getelementptr i32, i32* %phi2, i64 -2
+ %gep4 = getelementptr i32, i32* %phi1, i64 -2
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
%phi1 = phi i64* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi i64* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
- %gep1 = getelementptr i64* %phi1, i64 -1
+ %gep1 = getelementptr i64, i64* %phi1, i64 -1
%load1 = load i64* %gep1
- %gep2 = getelementptr i64* %phi2, i64 -1
+ %gep2 = getelementptr i64, i64* %phi2, i64 -1
store i64 %load1, i64* %gep2
%load2 = load i64* %phi1
store i64 %load2, i64* %phi2
%dec.i = add nsw i64 %i, -1
- %gep3 = getelementptr i64* %phi2, i64 -2
- %gep4 = getelementptr i64* %phi1, i64 -2
+ %gep3 = getelementptr i64, i64* %phi2, i64 -2
+ %gep4 = getelementptr i64, i64* %phi1, i64 -2
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
%phi1 = phi <2 x i64>* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi <2 x i64>* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
- %gep1 = getelementptr <2 x i64>* %phi1, i64 -1
+ %gep1 = getelementptr <2 x i64>, <2 x i64>* %phi1, i64 -1
%load1 = load <2 x i64>* %gep1
- %gep2 = getelementptr <2 x i64>* %phi2, i64 -1
+ %gep2 = getelementptr <2 x i64>, <2 x i64>* %phi2, i64 -1
store <2 x i64> %load1, <2 x i64>* %gep2
%load2 = load <2 x i64>* %phi1
store <2 x i64> %load2, <2 x i64>* %phi2
%dec.i = add nsw i64 %i, -1
- %gep3 = getelementptr <2 x i64>* %phi2, i64 -2
- %gep4 = getelementptr <2 x i64>* %phi1, i64 -2
+ %gep3 = getelementptr <2 x i64>, <2 x i64>* %phi2, i64 -2
+ %gep4 = getelementptr <2 x i64>, <2 x i64>* %phi1, i64 -2
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
%phi1 = phi float* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi float* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
- %gep1 = getelementptr float* %phi1, i64 -1
+ %gep1 = getelementptr float, float* %phi1, i64 -1
%load1 = load float* %gep1
- %gep2 = getelementptr float* %phi2, i64 -1
+ %gep2 = getelementptr float, float* %phi2, i64 -1
store float %load1, float* %gep2
%load2 = load float* %phi1
store float %load2, float* %phi2
%dec.i = add nsw i64 %i, -1
- %gep3 = getelementptr float* %phi2, i64 -2
- %gep4 = getelementptr float* %phi1, i64 -2
+ %gep3 = getelementptr float, float* %phi2, i64 -2
+ %gep4 = getelementptr float, float* %phi1, i64 -2
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
%phi1 = phi double* [ %gep4, %for.body ], [ %b, %0 ]
%phi2 = phi double* [ %gep3, %for.body ], [ %a, %0 ]
%i = phi i64 [ %dec.i, %for.body], [ %count, %0 ]
- %gep1 = getelementptr double* %phi1, i64 -1
+ %gep1 = getelementptr double, double* %phi1, i64 -1
%load1 = load double* %gep1
- %gep2 = getelementptr double* %phi2, i64 -1
+ %gep2 = getelementptr double, double* %phi2, i64 -1
store double %load1, double* %gep2
%load2 = load double* %phi1
store double %load2, double* %phi2
%dec.i = add nsw i64 %i, -1
- %gep3 = getelementptr double* %phi2, i64 -2
- %gep4 = getelementptr double* %phi1, i64 -2
+ %gep3 = getelementptr double, double* %phi2, i64 -2
+ %gep4 = getelementptr double, double* %phi1, i64 -2
%cond = icmp sgt i64 %dec.i, 0
br i1 %cond, label %for.body, label %end
end:
define void @ldst_8bit(i8* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_8bit:
- %addr8_sxtw = getelementptr i8* %base, i32 %off32
+ %addr8_sxtw = getelementptr i8, i8* %base, i32 %off32
%val8_sxtw = load volatile i8* %addr8_sxtw
%val32_signed = sext i8 %val8_sxtw to i32
store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{[wx][0-9]+}}, sxtw]
- %addr_lsl = getelementptr i8* %base, i64 %off64
+ %addr_lsl = getelementptr i8, i8* %base, i64 %off64
%val8_lsl = load volatile i8* %addr_lsl
%val32_unsigned = zext i8 %val8_lsl to i32
store volatile i32 %val32_unsigned, i32* @var_32bit
define void @ldst_16bit(i16* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_16bit:
- %addr8_sxtwN = getelementptr i16* %base, i32 %off32
+ %addr8_sxtwN = getelementptr i16, i16* %base, i32 %off32
%val8_sxtwN = load volatile i16* %addr8_sxtwN
%val32_signed = sext i16 %val8_sxtwN to i32
store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #1]
- %addr_lslN = getelementptr i16* %base, i64 %off64
+ %addr_lslN = getelementptr i16, i16* %base, i64 %off64
%val8_lslN = load volatile i16* %addr_lslN
%val32_unsigned = zext i16 %val8_lslN to i32
store volatile i32 %val32_unsigned, i32* @var_32bit
define void @ldst_32bit(i32* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_32bit:
- %addr_sxtwN = getelementptr i32* %base, i32 %off32
+ %addr_sxtwN = getelementptr i32, i32* %base, i32 %off32
%val_sxtwN = load volatile i32* %addr_sxtwN
store volatile i32 %val_sxtwN, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
- %addr_lslN = getelementptr i32* %base, i64 %off64
+ %addr_lslN = getelementptr i32, i32* %base, i64 %off64
%val_lslN = load volatile i32* %addr_lslN
store volatile i32 %val_lslN, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
define void @ldst_64bit(i64* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_64bit:
- %addr_sxtwN = getelementptr i64* %base, i32 %off32
+ %addr_sxtwN = getelementptr i64, i64* %base, i32 %off32
%val_sxtwN = load volatile i64* %addr_sxtwN
store volatile i64 %val_sxtwN, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
- %addr_lslN = getelementptr i64* %base, i64 %off64
+ %addr_lslN = getelementptr i64, i64* %base, i64 %off64
%val_lslN = load volatile i64* %addr_lslN
store volatile i64 %val_lslN, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
define void @ldst_float(float* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_float:
- %addr_sxtwN = getelementptr float* %base, i32 %off32
+ %addr_sxtwN = getelementptr float, float* %base, i32 %off32
%val_sxtwN = load volatile float* %addr_sxtwN
store volatile float %val_sxtwN, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #2]
; CHECK-NOFP-NOT: ldr {{s[0-9]+}},
- %addr_lslN = getelementptr float* %base, i64 %off64
+ %addr_lslN = getelementptr float, float* %base, i64 %off64
%val_lslN = load volatile float* %addr_lslN
store volatile float %val_lslN, float* @var_float
; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #2]
define void @ldst_double(double* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_double:
- %addr_sxtwN = getelementptr double* %base, i32 %off32
+ %addr_sxtwN = getelementptr double, double* %base, i32 %off32
%val_sxtwN = load volatile double* %addr_sxtwN
store volatile double %val_sxtwN, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #3]
; CHECK-NOFP-NOT: ldr {{d[0-9]+}},
- %addr_lslN = getelementptr double* %base, i64 %off64
+ %addr_lslN = getelementptr double, double* %base, i64 %off64
%val_lslN = load volatile double* %addr_lslN
store volatile double %val_lslN, double* @var_double
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #3]
define void @ldst_128bit(fp128* %base, i32 %off32, i64 %off64) minsize {
; CHECK-LABEL: ldst_128bit:
- %addr_sxtwN = getelementptr fp128* %base, i32 %off32
+ %addr_sxtwN = getelementptr fp128, fp128* %base, i32 %off32
%val_sxtwN = load volatile fp128* %addr_sxtwN
store volatile fp128 %val_sxtwN, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
; CHECK-NOFP-NOT: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{[xw][0-9]+}}, sxtw #4]
- %addr_lslN = getelementptr fp128* %base, i64 %off64
+ %addr_lslN = getelementptr fp128, fp128* %base, i64 %off64
%val_lslN = load volatile fp128* %addr_lslN
store volatile fp128 %val_lslN, fp128* %base
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, {{x[0-9]+}}, lsl #4]
%addr_8bit = load i8** @varptr
; match a sign-extending load 8-bit -> 32-bit
- %addr_sext32 = getelementptr i8* %addr_8bit, i64 -256
+ %addr_sext32 = getelementptr i8, i8* %addr_8bit, i64 -256
%val8_sext32 = load volatile i8* %addr_sext32
%val32_signed = sext i8 %val8_sext32 to i32
store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: ldursb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
; match a zero-extending load volatile 8-bit -> 32-bit
- %addr_zext32 = getelementptr i8* %addr_8bit, i64 -12
+ %addr_zext32 = getelementptr i8, i8* %addr_8bit, i64 -12
%val8_zext32 = load volatile i8* %addr_zext32
%val32_unsigned = zext i8 %val8_zext32 to i32
store volatile i32 %val32_unsigned, i32* @var_32bit
; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-12]
; match an any-extending load volatile 8-bit -> 32-bit
- %addr_anyext = getelementptr i8* %addr_8bit, i64 -1
+ %addr_anyext = getelementptr i8, i8* %addr_8bit, i64 -1
%val8_anyext = load volatile i8* %addr_anyext
%newval8 = add i8 %val8_anyext, 1
store volatile i8 %newval8, i8* @var_8bit
; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
; match a sign-extending load volatile 8-bit -> 64-bit
- %addr_sext64 = getelementptr i8* %addr_8bit, i64 -5
+ %addr_sext64 = getelementptr i8, i8* %addr_8bit, i64 -5
%val8_sext64 = load volatile i8* %addr_sext64
%val64_signed = sext i8 %val8_sext64 to i64
store volatile i64 %val64_signed, i64* @var_64bit
; match a zero-extending load volatile 8-bit -> 64-bit.
; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
; of x0 so it's identical to load volatileing to 32-bits.
- %addr_zext64 = getelementptr i8* %addr_8bit, i64 -9
+ %addr_zext64 = getelementptr i8, i8* %addr_8bit, i64 -9
%val8_zext64 = load volatile i8* %addr_zext64
%val64_unsigned = zext i8 %val8_zext64 to i64
store volatile i64 %val64_unsigned, i64* @var_64bit
; CHECK: ldurb {{w[0-9]+}}, [{{x[0-9]+}}, #-9]
; truncating store volatile 32-bits to 8-bits
- %addr_trunc32 = getelementptr i8* %addr_8bit, i64 -256
+ %addr_trunc32 = getelementptr i8, i8* %addr_8bit, i64 -256
%val32 = load volatile i32* @var_32bit
%val8_trunc32 = trunc i32 %val32 to i8
store volatile i8 %val8_trunc32, i8* %addr_trunc32
; CHECK: sturb {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
; truncating store volatile 64-bits to 8-bits
- %addr_trunc64 = getelementptr i8* %addr_8bit, i64 -1
+ %addr_trunc64 = getelementptr i8, i8* %addr_8bit, i64 -1
%val64 = load volatile i64* @var_64bit
%val8_trunc64 = trunc i64 %val64 to i8
store volatile i8 %val8_trunc64, i8* %addr_trunc64
%addr_8bit = load i8** @varptr
; match a sign-extending load 16-bit -> 32-bit
- %addr8_sext32 = getelementptr i8* %addr_8bit, i64 -256
+ %addr8_sext32 = getelementptr i8, i8* %addr_8bit, i64 -256
%addr_sext32 = bitcast i8* %addr8_sext32 to i16*
%val16_sext32 = load volatile i16* %addr_sext32
%val32_signed = sext i16 %val16_sext32 to i32
; CHECK: ldursh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
; match a zero-extending load volatile 16-bit -> 32-bit. With offset that would be unaligned.
- %addr8_zext32 = getelementptr i8* %addr_8bit, i64 15
+ %addr8_zext32 = getelementptr i8, i8* %addr_8bit, i64 15
%addr_zext32 = bitcast i8* %addr8_zext32 to i16*
%val16_zext32 = load volatile i16* %addr_zext32
%val32_unsigned = zext i16 %val16_zext32 to i32
; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #15]
; match an any-extending load volatile 16-bit -> 32-bit
- %addr8_anyext = getelementptr i8* %addr_8bit, i64 -1
+ %addr8_anyext = getelementptr i8, i8* %addr_8bit, i64 -1
%addr_anyext = bitcast i8* %addr8_anyext to i16*
%val16_anyext = load volatile i16* %addr_anyext
%newval16 = add i16 %val16_anyext, 1
; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #-1]
; match a sign-extending load volatile 16-bit -> 64-bit
- %addr8_sext64 = getelementptr i8* %addr_8bit, i64 -5
+ %addr8_sext64 = getelementptr i8, i8* %addr_8bit, i64 -5
%addr_sext64 = bitcast i8* %addr8_sext64 to i16*
%val16_sext64 = load volatile i16* %addr_sext64
%val64_signed = sext i16 %val16_sext64 to i64
; match a zero-extending load volatile 16-bit -> 64-bit.
; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
; of x0 so it's identical to load volatileing to 32-bits.
- %addr8_zext64 = getelementptr i8* %addr_8bit, i64 9
+ %addr8_zext64 = getelementptr i8, i8* %addr_8bit, i64 9
%addr_zext64 = bitcast i8* %addr8_zext64 to i16*
%val16_zext64 = load volatile i16* %addr_zext64
%val64_unsigned = zext i16 %val16_zext64 to i64
; CHECK: ldurh {{w[0-9]+}}, [{{x[0-9]+}}, #9]
; truncating store volatile 32-bits to 16-bits
- %addr8_trunc32 = getelementptr i8* %addr_8bit, i64 -256
+ %addr8_trunc32 = getelementptr i8, i8* %addr_8bit, i64 -256
%addr_trunc32 = bitcast i8* %addr8_trunc32 to i16*
%val32 = load volatile i32* @var_32bit
%val16_trunc32 = trunc i32 %val32 to i16
; CHECK: sturh {{w[0-9]+}}, [{{x[0-9]+}}, #-256]
; truncating store volatile 64-bits to 16-bits
- %addr8_trunc64 = getelementptr i8* %addr_8bit, i64 -1
+ %addr8_trunc64 = getelementptr i8, i8* %addr_8bit, i64 -1
%addr_trunc64 = bitcast i8* %addr8_trunc64 to i16*
%val64 = load volatile i64* @var_64bit
%val16_trunc64 = trunc i64 %val64 to i16
%addr_8bit = load i8** @varptr
; Straight 32-bit load/store
- %addr32_8_noext = getelementptr i8* %addr_8bit, i64 1
+ %addr32_8_noext = getelementptr i8, i8* %addr_8bit, i64 1
%addr32_noext = bitcast i8* %addr32_8_noext to i32*
%val32_noext = load volatile i32* %addr32_noext
store volatile i32 %val32_noext, i32* %addr32_noext
; CHECK: stur {{w[0-9]+}}, [{{x[0-9]+}}, #1]
; Zero-extension to 64-bits
- %addr32_8_zext = getelementptr i8* %addr_8bit, i64 -256
+ %addr32_8_zext = getelementptr i8, i8* %addr_8bit, i64 -256
%addr32_zext = bitcast i8* %addr32_8_zext to i32*
%val32_zext = load volatile i32* %addr32_zext
%val64_unsigned = zext i32 %val32_zext to i64
; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
; Sign-extension to 64-bits
- %addr32_8_sext = getelementptr i8* %addr_8bit, i64 -12
+ %addr32_8_sext = getelementptr i8, i8* %addr_8bit, i64 -12
%addr32_sext = bitcast i8* %addr32_8_sext to i32*
%val32_sext = load volatile i32* %addr32_sext
%val64_signed = sext i32 %val32_sext to i64
; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
; Truncation from 64-bits
- %addr64_8_trunc = getelementptr i8* %addr_8bit, i64 255
+ %addr64_8_trunc = getelementptr i8, i8* %addr_8bit, i64 255
%addr64_trunc = bitcast i8* %addr64_8_trunc to i64*
- %addr32_8_trunc = getelementptr i8* %addr_8bit, i64 -20
+ %addr32_8_trunc = getelementptr i8, i8* %addr_8bit, i64 -20
%addr32_trunc = bitcast i8* %addr32_8_trunc to i32*
%val64_trunc = load volatile i64* %addr64_trunc
; CHECK-LABEL: ldst_float:
%addr_8bit = load i8** @varptr
- %addrfp_8 = getelementptr i8* %addr_8bit, i64 -5
+ %addrfp_8 = getelementptr i8, i8* %addr_8bit, i64 -5
%addrfp = bitcast i8* %addrfp_8 to float*
%valfp = load volatile float* %addrfp
; CHECK-LABEL: ldst_double:
%addr_8bit = load i8** @varptr
- %addrfp_8 = getelementptr i8* %addr_8bit, i64 4
+ %addrfp_8 = getelementptr i8, i8* %addr_8bit, i64 4
%addrfp = bitcast i8* %addrfp_8 to double*
%valfp = load volatile double* %addrfp
; CHECK: adrp {{x[0-9]+}}, arr8
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr8]
- %arr8_sub1_addr = getelementptr i8* %arr8_addr, i64 1
+ %arr8_sub1_addr = getelementptr i8, i8* %arr8_addr, i64 1
%arr8_sub1 = load volatile i8* %arr8_sub1_addr
store volatile i8 %arr8_sub1, i8* @var_8bit
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #1]
- %arr8_sub4095_addr = getelementptr i8* %arr8_addr, i64 4095
+ %arr8_sub4095_addr = getelementptr i8, i8* %arr8_addr, i64 4095
%arr8_sub4095 = load volatile i8* %arr8_sub4095_addr
store volatile i8 %arr8_sub4095, i8* @var_8bit
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #4095]
; CHECK: adrp {{x[0-9]+}}, arr16
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr16]
- %arr16_sub1_addr = getelementptr i16* %arr16_addr, i64 1
+ %arr16_sub1_addr = getelementptr i16, i16* %arr16_addr, i64 1
%arr16_sub1 = load volatile i16* %arr16_sub1_addr
store volatile i16 %arr16_sub1, i16* @var_16bit
; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #2]
- %arr16_sub4095_addr = getelementptr i16* %arr16_addr, i64 4095
+ %arr16_sub4095_addr = getelementptr i16, i16* %arr16_addr, i64 4095
%arr16_sub4095 = load volatile i16* %arr16_sub4095_addr
store volatile i16 %arr16_sub4095, i16* @var_16bit
; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #8190]
; CHECK: adrp {{x[0-9]+}}, arr32
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr32]
- %arr32_sub1_addr = getelementptr i32* %arr32_addr, i64 1
+ %arr32_sub1_addr = getelementptr i32, i32* %arr32_addr, i64 1
%arr32_sub1 = load volatile i32* %arr32_sub1_addr
store volatile i32 %arr32_sub1, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #4]
- %arr32_sub4095_addr = getelementptr i32* %arr32_addr, i64 4095
+ %arr32_sub4095_addr = getelementptr i32, i32* %arr32_addr, i64 4095
%arr32_sub4095 = load volatile i32* %arr32_sub4095_addr
store volatile i32 %arr32_sub4095, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #16380]
; CHECK: adrp {{x[0-9]+}}, arr64
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr64]
- %arr64_sub1_addr = getelementptr i64* %arr64_addr, i64 1
+ %arr64_sub1_addr = getelementptr i64, i64* %arr64_addr, i64 1
%arr64_sub1 = load volatile i64* %arr64_sub1_addr
store volatile i64 %arr64_sub1, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #8]
- %arr64_sub4095_addr = getelementptr i64* %arr64_addr, i64 4095
+ %arr64_sub4095_addr = getelementptr i64, i64* %arr64_addr, i64 4095
%arr64_sub4095 = load volatile i64* %arr64_sub4095_addr
store volatile i64 %arr64_sub4095, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #32760]
; CHECK: ldp
; CHECK: stp
define void @f(i64* %p, i64* %q) {
- %addr2 = getelementptr i64* %q, i32 1
- %addr = getelementptr i64* %p, i32 1
+ %addr2 = getelementptr i64, i64* %q, i32 1
+ %addr = getelementptr i64, i64* %p, i32 1
%x = load i64* %p
%y = load i64* %addr
store i64 %x, i64* %q
%cmp = icmp eq i16 %0, %1
br i1 %cmp, label %if.end, label %return, !prof !988
if.end:
- %priority = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 2
+ %priority = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 2
%2 = load i8* %priority, align 1
- %priority5 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 2
+ %priority5 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 2
%3 = load i8* %priority5, align 1
- %string = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 5
+ %string = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 5
%4 = load i8** %string, align 8
- %string7 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 5
+ %string7 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 5
%5 = load i8** %string7, align 8
br label %while.cond
while.cond:
%lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
- %scevgep55 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep55 = getelementptr i8, i8* %4, i64 %lsr.iv27
%6 = load i8* %scevgep55, align 1
%idxprom.i.i = sext i8 %6 to i64
%isascii.i.i224 = icmp sgt i8 %6, -1
br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
cond.true.i.i:
- %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
+ %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
%7 = load i32* %arrayidx.i.i, align 4
%and.i.i = and i32 %7, 32768
br label %isupper.exit
%isascii.i.i213225 = icmp sgt i8 %9, -1
br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
cond.true.i.i217:
- %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
+ %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
%10 = load i32* %arrayidx.i.i215, align 4
%and.i.i216 = and i32 %10, 32768
br label %isupper.exit223
land.lhs.true43:
%20 = ptrtoint i8* %16 to i64
%21 = sub i64 0, %20
- %scevgep52 = getelementptr i8* %4, i64 %21
- %scevgep53 = getelementptr i8* %scevgep52, i64 %lsr.iv27
- %scevgep54 = getelementptr i8* %scevgep53, i64 -1
+ %scevgep52 = getelementptr i8, i8* %4, i64 %21
+ %scevgep53 = getelementptr i8, i8* %scevgep52, i64 %lsr.iv27
+ %scevgep54 = getelementptr i8, i8* %scevgep53, i64 -1
%cmp45 = icmp eq i8* %scevgep54, null
br i1 %cmp45, label %return, label %lor.lhs.false47, !prof !996
lor.lhs.false47:
%22 = ptrtoint i8* %16 to i64
%23 = sub i64 0, %22
- %scevgep47 = getelementptr i8* %4, i64 %23
- %scevgep48 = getelementptr i8* %scevgep47, i64 %lsr.iv27
- %scevgep49 = getelementptr i8* %scevgep48, i64 -2
+ %scevgep47 = getelementptr i8, i8* %4, i64 %23
+ %scevgep48 = getelementptr i8, i8* %scevgep47, i64 %lsr.iv27
+ %scevgep49 = getelementptr i8, i8* %scevgep48, i64 -2
%cmp50 = icmp eq i8* %scevgep49, null
br i1 %cmp50, label %land.lhs.true52, label %while.cond59.preheader, !prof !997
land.lhs.true52:
%cmp61233.old = icmp eq i8 %18, 0
br i1 %cmp61233.old, label %return, label %land.rhs.preheader, !prof !999
land.rhs.preheader:
- %scevgep33 = getelementptr i8* %5, i64 %lsr.iv27
- %scevgep43 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep33 = getelementptr i8, i8* %5, i64 %lsr.iv27
+ %scevgep43 = getelementptr i8, i8* %4, i64 %lsr.iv27
br label %land.rhs
land.rhs:
%lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
%25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
- %scevgep34 = getelementptr i8* %scevgep33, i64 %lsr.iv
+ %scevgep34 = getelementptr i8, i8* %scevgep33, i64 %lsr.iv
%26 = load i8* %scevgep34, align 1
%cmp64 = icmp eq i8 %26, 0
br i1 %cmp64, label %return, label %while.body66, !prof !1000
%or.cond208 = or i1 %cmp77, %cmp81
br i1 %or.cond208, label %return, label %if.then83, !prof !1002
if.then83:
- %scevgep44 = getelementptr i8* %scevgep43, i64 %lsr.iv
- %scevgep45 = getelementptr i8* %scevgep44, i64 1
+ %scevgep44 = getelementptr i8, i8* %scevgep43, i64 %lsr.iv
+ %scevgep45 = getelementptr i8, i8* %scevgep44, i64 1
%27 = load i8* %scevgep45, align 1
%cmp61 = icmp eq i8 %27, 0
%lsr.iv.next = add i64 %lsr.iv, 1
%cmp97238 = icmp eq i8 %28, 0
br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
land.rhs99.preheader:
- %scevgep31 = getelementptr i8* %5, i64 %lsr.iv27
- %scevgep40 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep31 = getelementptr i8, i8* %5, i64 %lsr.iv27
+ %scevgep40 = getelementptr i8, i8* %4, i64 %lsr.iv27
br label %land.rhs99
land.rhs99:
%lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
%29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
- %scevgep32 = getelementptr i8* %scevgep31, i64 %lsr.iv17
+ %scevgep32 = getelementptr i8, i8* %scevgep31, i64 %lsr.iv17
%30 = load i8* %scevgep32, align 1
%cmp101 = icmp eq i8 %30, 0
br i1 %cmp101, label %return, label %while.body104, !prof !1005
%or.cond210 = or i1 %or.cond209, %cmp115
br i1 %or.cond210, label %if.then117, label %return, !prof !1006
if.then117:
- %scevgep41 = getelementptr i8* %scevgep40, i64 %lsr.iv17
- %scevgep42 = getelementptr i8* %scevgep41, i64 1
+ %scevgep41 = getelementptr i8, i8* %scevgep40, i64 %lsr.iv17
+ %scevgep42 = getelementptr i8, i8* %scevgep41, i64 1
%31 = load i8* %scevgep42, align 1
%cmp97 = icmp eq i8 %31, 0
%lsr.iv.next18 = add i64 %lsr.iv17, 1
%cmp132244 = icmp eq i8 %32, 0
br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
land.rhs134.preheader:
- %scevgep29 = getelementptr i8* %5, i64 %lsr.iv27
- %scevgep37 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep29 = getelementptr i8, i8* %5, i64 %lsr.iv27
+ %scevgep37 = getelementptr i8, i8* %4, i64 %lsr.iv27
br label %land.rhs134
land.rhs134:
%lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
%33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
- %scevgep30 = getelementptr i8* %scevgep29, i64 %lsr.iv22
+ %scevgep30 = getelementptr i8, i8* %scevgep29, i64 %lsr.iv22
%34 = load i8* %scevgep30, align 1
%cmp136 = icmp eq i8 %34, 0
br i1 %cmp136, label %return, label %while.body139, !prof !1009
%or.cond212 = or i1 %or.cond211, %cmp150
br i1 %or.cond212, label %if.then152, label %return, !prof !1010
if.then152:
- %scevgep38 = getelementptr i8* %scevgep37, i64 %lsr.iv22
- %scevgep39 = getelementptr i8* %scevgep38, i64 1
+ %scevgep38 = getelementptr i8, i8* %scevgep37, i64 %lsr.iv22
+ %scevgep39 = getelementptr i8, i8* %scevgep38, i64 1
%35 = load i8* %scevgep39, align 1
%cmp132 = icmp eq i8 %35, 0
%lsr.iv.next23 = add i64 %lsr.iv22, 1
%a1 = alloca [256 x i32], align 4
%0 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %0)
- %arraydecay = getelementptr inbounds [256 x i32]* %a1, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
call void asm sideeffect "foo2", "~{w0},~{w1},~{w2},~{w3},~{w4},~{w5},~{w6},~{w7},~{w8},~{w9},~{w10},~{w11},~{w12},~{w13},~{w14},~{w15},~{w16},~{w17},~{w18},~{w19},~{w20},~{w21},~{w22},~{w23},~{w24},~{w25},~{w26},~{w27},~{w28},~{w29},~{w30}"()
call void @llvm.lifetime.end(i64 1024, i8* %0)
; Important correctness point here is that LLVM doesn't try to use xzr
; as an addressing register: "str w0, [xzr]" is not a valid A64
; instruction (0b11111 in the Rn field would mean "sp").
- %addr = getelementptr i32* null, i64 0
+ %addr = getelementptr i32, i32* null, i64 0
store i32 %val, i32* %addr
; CHECK: str {{w[0-9]+}}, [{{x[0-9]+|sp}}]
cond_true11: ; preds = %entry
%tmp.i32 = load %struct.layer_data** @ld ; <%struct.layer_data*> [#uses=2]
- %tmp3.i35 = getelementptr %struct.layer_data* %tmp.i32, i32 0, i32 1, i32 2048; <i8*> [#uses=2]
- %tmp.i36 = getelementptr %struct.layer_data* %tmp.i32, i32 0, i32 2 ; <i8**> [#uses=1]
+ %tmp3.i35 = getelementptr %struct.layer_data, %struct.layer_data* %tmp.i32, i32 0, i32 1, i32 2048; <i8*> [#uses=2]
+ %tmp.i36 = getelementptr %struct.layer_data, %struct.layer_data* %tmp.i32, i32 0, i32 2 ; <i8**> [#uses=1]
store i8* %tmp3.i35, i8** %tmp.i36
store i8* %tmp3.i35, i8** null
ret void
%i.8.in = load i8* null ; <i8> [#uses=1]
%i.8 = zext i8 %i.8.in to i32 ; <i32> [#uses=4]
%j.7 = zext i8 %j.7.in to i32 ; <i32> [#uses=4]
- %tmp495 = getelementptr [4 x [4 x i32]]* %predicted_block, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=2]
+ %tmp495 = getelementptr [4 x [4 x i32]], [4 x [4 x i32]]* %predicted_block, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=2]
%tmp496 = load i32* %tmp495 ; <i32> [#uses=2]
%tmp502 = load i32* null ; <i32> [#uses=1]
- %tmp542 = getelementptr [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
+ %tmp542 = getelementptr [6 x [4 x [4 x i32]]], [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
%tmp543 = load i32* %tmp542 ; <i32> [#uses=1]
%tmp548 = ashr i32 0, 0 ; <i32> [#uses=3]
%tmp561 = sub i32 0, %tmp496 ; <i32> [#uses=3]
br i1 %tmp579, label %bb712, label %cond_next589
cond_next589: ; preds = %cond_next489
- %tmp605 = getelementptr [6 x [4 x [4 x i32]]]* @dequant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
+ %tmp605 = getelementptr [6 x [4 x [4 x i32]]], [6 x [4 x [4 x i32]]]* @dequant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
%tmp606 = load i32* %tmp605 ; <i32> [#uses=1]
%tmp612 = load i32* null ; <i32> [#uses=1]
%tmp629 = load i32* null ; <i32> [#uses=1]
cond_true740: ; preds = %bb737
%tmp761 = call fastcc i32 @sign( i32 %tmp576, i32 0 ) ; <i32> [#uses=1]
%tmp780 = load i32* null ; <i32> [#uses=1]
- %tmp785 = getelementptr [4 x [4 x i32]]* @A, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
+ %tmp785 = getelementptr [4 x [4 x i32]], [4 x [4 x i32]]* @A, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
%tmp786 = load i32* %tmp785 ; <i32> [#uses=1]
%tmp781 = mul i32 %tmp780, %tmp761 ; <i32> [#uses=1]
%tmp787 = mul i32 %tmp781, %tmp786 ; <i32> [#uses=1]
br label %bb
bb: ; preds = %bb, %0
- %p_addr.0 = getelementptr i8* %p, i32 0 ; <i8*> [#uses=1]
+ %p_addr.0 = getelementptr i8, i8* %p, i32 0 ; <i8*> [#uses=1]
%tmp2 = load i8* %p_addr.0 ; <i8> [#uses=2]
%tmp4.rec = add i32 0, 1 ; <i32> [#uses=1]
- %tmp4 = getelementptr i8* %p, i32 %tmp4.rec ; <i8*> [#uses=1]
+ %tmp4 = getelementptr i8, i8* %p, i32 %tmp4.rec ; <i8*> [#uses=1]
%tmp56 = zext i8 %tmp2 to i32 ; <i32> [#uses=1]
%tmp7 = and i32 %tmp56, 127 ; <i32> [#uses=1]
%tmp9 = shl i32 %tmp7, 0 ; <i32> [#uses=1]
br i1 %0, label %bb78.exitStub, label %bb26
bb26: ; preds = %bb74
- %tmp28 = getelementptr i32** %tmp1, i32 %fp.1.rec ; <i32**> [#uses=1]
+ %tmp28 = getelementptr i32*, i32** %tmp1, i32 %fp.1.rec ; <i32**> [#uses=1]
%tmp30 = load i32** %tmp28 ; <i32*> [#uses=4]
- %tmp33 = getelementptr i32* %tmp30, i32 %i.0196.0.ph ; <i32*> [#uses=1]
+ %tmp33 = getelementptr i32, i32* %tmp30, i32 %i.0196.0.ph ; <i32*> [#uses=1]
%tmp34 = load i32* %tmp33 ; <i32> [#uses=1]
- %tmp38 = getelementptr i32* %tmp30, i32 %tmp36224 ; <i32*> [#uses=1]
+ %tmp38 = getelementptr i32, i32* %tmp30, i32 %tmp36224 ; <i32*> [#uses=1]
%tmp39 = load i32* %tmp38 ; <i32> [#uses=1]
%tmp42 = mul i32 %tmp34, %fm.1 ; <i32> [#uses=1]
%tmp44 = add i32 %tmp42, %d0.1 ; <i32> [#uses=1]
- %tmp48 = getelementptr i32* %tmp30, i32 %tmp46223 ; <i32*> [#uses=1]
+ %tmp48 = getelementptr i32, i32* %tmp30, i32 %tmp46223 ; <i32*> [#uses=1]
%tmp49 = load i32* %tmp48 ; <i32> [#uses=1]
%tmp52 = mul i32 %tmp39, %fm.1 ; <i32> [#uses=1]
%tmp54 = add i32 %tmp52, %d1.1 ; <i32> [#uses=1]
- %tmp58 = getelementptr i32* %tmp30, i32 %tmp56222 ; <i32*> [#uses=1]
+ %tmp58 = getelementptr i32, i32* %tmp30, i32 %tmp56222 ; <i32*> [#uses=1]
%tmp59 = load i32* %tmp58 ; <i32> [#uses=1]
%tmp62 = mul i32 %tmp49, %fm.1 ; <i32> [#uses=1]
%tmp64 = add i32 %tmp62, %d2.1 ; <i32> [#uses=1]
%tmp67 = mul i32 %tmp59, %fm.1 ; <i32> [#uses=1]
%tmp69 = add i32 %tmp67, %d3.1 ; <i32> [#uses=1]
%tmp71.rec = add i32 %fp.1.rec, 1 ; <i32> [#uses=2]
- %tmp71 = getelementptr i32* %tmp1011, i32 %tmp71.rec ; <i32*> [#uses=1]
+ %tmp71 = getelementptr i32, i32* %tmp1011, i32 %tmp71.rec ; <i32*> [#uses=1]
br label %bb74
}
%spec.1961.adj.ins = or i64 %spec.1961.adj, 0 ; <i64> [#uses=2]
%tmp10959 = lshr i64 %spec.1961.adj.ins, 32 ; <i64> [#uses=2]
%tmp1920 = inttoptr i64 %tmp10959 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp21 = getelementptr %struct.tree_common* %tmp1920, i32 0, i32 3 ; <i8*> [#uses=1]
+ %tmp21 = getelementptr %struct.tree_common, %struct.tree_common* %tmp1920, i32 0, i32 3 ; <i8*> [#uses=1]
%tmp2122 = bitcast i8* %tmp21 to i32* ; <i32*> [#uses=1]
br i1 false, label %cond_next53, label %cond_true
cond_next881: ; preds = %bb866
%tmp884885 = inttoptr i64 %tmp10959 to %struct.tree_identifier* ; <%struct.tree_identifier*> [#uses=1]
- %tmp887 = getelementptr %struct.tree_identifier* %tmp884885, i32 0, i32 1, i32 0 ; <i8**> [#uses=1]
+ %tmp887 = getelementptr %struct.tree_identifier, %struct.tree_identifier* %tmp884885, i32 0, i32 1, i32 0 ; <i8**> [#uses=1]
%tmp888 = load i8** %tmp887 ; <i8*> [#uses=1]
tail call void (i32, ...)* @error( i32 undef, i8* %tmp888 )
ret void
br i1 false, label %cond_true1092, label %cond_next1102
cond_true1092: ; preds = %bb1084
- %tmp1094 = getelementptr %struct.rtx_def* %tmp1085, i32 0, i32 3 ; <%struct.u*> [#uses=1]
+ %tmp1094 = getelementptr %struct.rtx_def, %struct.rtx_def* %tmp1085, i32 0, i32 3 ; <%struct.u*> [#uses=1]
%tmp10981099 = bitcast %struct.u* %tmp1094 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=2]
%tmp1101 = load %struct.rtx_def** %tmp10981099 ; <%struct.rtx_def*> [#uses=1]
store %struct.rtx_def* %tmp1101, %struct.rtx_def** %ad_addr
br label %cond_next51.us
cond_next.us: ; preds = %bb.preheader
- %tmp37.us = getelementptr %struct.X_Y* %cinfo, i32 0, i32 17, i32 %tbl.014.us ; <%struct.H_TBL**> [#uses=3]
+ %tmp37.us = getelementptr %struct.X_Y, %struct.X_Y* %cinfo, i32 0, i32 17, i32 %tbl.014.us ; <%struct.H_TBL**> [#uses=3]
%tmp4524.us = load %struct.H_TBL** %tmp37.us ; <%struct.H_TBL*> [#uses=1]
icmp eq %struct.H_TBL* %tmp4524.us, null ; <i1>:0 [#uses=1]
br i1 %0, label %cond_true33.us.cond_true46.us_crit_edge, label %cond_next51.us
define internal void @_ZN1B1iEv(%struct.B* %this) {
entry:
- %tmp1 = getelementptr %struct.B* %this, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.B, %struct.B* %this, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
%tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @str, i32 0, i32 0), i32 %tmp2 ) ; <i32> [#uses=0]
ret void
define internal void @_ZN1B1jEv(%struct.B* %this) {
entry:
- %tmp1 = getelementptr %struct.B* %this, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.B, %struct.B* %this, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
%tmp4 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([7 x i8]* @str1, i32 0, i32 0), i32 %tmp2 ) ; <i32> [#uses=0]
ret void
%b.i29 = alloca %struct.B, align 4 ; <%struct.B*> [#uses=3]
%b.i1 = alloca %struct.B, align 4 ; <%struct.B*> [#uses=3]
%b.i = alloca %struct.B, align 4 ; <%struct.B*> [#uses=3]
- %tmp2.i = getelementptr %struct.B* %b.i, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp2.i = getelementptr %struct.B, %struct.B* %b.i, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 4, i32* %tmp2.i
br i1 icmp eq (i64 and (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 4294967296), i64 0), label %_Z3fooiM1BFvvE.exit, label %cond_true.i
cond_true.i: ; preds = %entry
%b2.i = bitcast %struct.B* %b.i to i8* ; <i8*> [#uses=1]
- %ctg23.i = getelementptr i8* %b2.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
+ %ctg23.i = getelementptr i8, i8* %b2.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
%tmp121314.i = bitcast i8* %ctg23.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
%tmp15.i = load i32 (...)*** %tmp121314.i ; <i32 (...)**> [#uses=1]
%tmp151.i = bitcast i32 (...)** %tmp15.i to i8* ; <i8*> [#uses=1]
- %ctg2.i = getelementptr i8* %tmp151.i, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) ; <i8*> [#uses=1]
+ %ctg2.i = getelementptr i8, i8* %tmp151.i, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) ; <i8*> [#uses=1]
%tmp2021.i = bitcast i8* %ctg2.i to i32 (...)** ; <i32 (...)**> [#uses=1]
%tmp22.i = load i32 (...)** %tmp2021.i ; <i32 (...)*> [#uses=1]
%tmp2223.i = bitcast i32 (...)* %tmp22.i to void (%struct.B*)* ; <void (%struct.B*)*> [#uses=1]
_Z3fooiM1BFvvE.exit: ; preds = %cond_true.i, %entry
%iftmp.2.0.i = phi void (%struct.B*)* [ %tmp2223.i, %cond_true.i ], [ inttoptr (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to void (%struct.B*)*), %entry ] ; <void (%struct.B*)*> [#uses=1]
%b4.i = bitcast %struct.B* %b.i to i8* ; <i8*> [#uses=1]
- %ctg25.i = getelementptr i8* %b4.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
+ %ctg25.i = getelementptr i8, i8* %b4.i, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
%tmp3031.i = bitcast i8* %ctg25.i to %struct.B* ; <%struct.B*> [#uses=1]
call void %iftmp.2.0.i( %struct.B* %tmp3031.i )
- %tmp2.i30 = getelementptr %struct.B* %b.i29, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp2.i30 = getelementptr %struct.B, %struct.B* %b.i29, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 6, i32* %tmp2.i30
br i1 icmp eq (i64 and (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 4294967296), i64 0), label %_Z3fooiM1BFvvE.exit56, label %cond_true.i46
cond_true.i46: ; preds = %_Z3fooiM1BFvvE.exit
%b2.i35 = bitcast %struct.B* %b.i29 to i8* ; <i8*> [#uses=1]
- %ctg23.i36 = getelementptr i8* %b2.i35, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
+ %ctg23.i36 = getelementptr i8, i8* %b2.i35, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
%tmp121314.i37 = bitcast i8* %ctg23.i36 to i32 (...)*** ; <i32 (...)***> [#uses=1]
%tmp15.i38 = load i32 (...)*** %tmp121314.i37 ; <i32 (...)**> [#uses=1]
%tmp151.i41 = bitcast i32 (...)** %tmp15.i38 to i8* ; <i8*> [#uses=1]
- %ctg2.i42 = getelementptr i8* %tmp151.i41, i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) ; <i8*> [#uses=1]
+ %ctg2.i42 = getelementptr i8, i8* %tmp151.i41, i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) ; <i8*> [#uses=1]
%tmp2021.i43 = bitcast i8* %ctg2.i42 to i32 (...)** ; <i32 (...)**> [#uses=1]
%tmp22.i44 = load i32 (...)** %tmp2021.i43 ; <i32 (...)*> [#uses=1]
%tmp2223.i45 = bitcast i32 (...)* %tmp22.i44 to void (%struct.B*)* ; <void (%struct.B*)*> [#uses=1]
_Z3fooiM1BFvvE.exit56: ; preds = %cond_true.i46, %_Z3fooiM1BFvvE.exit
%iftmp.2.0.i49 = phi void (%struct.B*)* [ %tmp2223.i45, %cond_true.i46 ], [ inttoptr (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to void (%struct.B*)*), %_Z3fooiM1BFvvE.exit ] ; <void (%struct.B*)*> [#uses=1]
%b4.i53 = bitcast %struct.B* %b.i29 to i8* ; <i8*> [#uses=1]
- %ctg25.i54 = getelementptr i8* %b4.i53, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
+ %ctg25.i54 = getelementptr i8, i8* %b4.i53, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1jEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
%tmp3031.i55 = bitcast i8* %ctg25.i54 to %struct.B* ; <%struct.B*> [#uses=1]
call void %iftmp.2.0.i49( %struct.B* %tmp3031.i55 )
- %tmp2.i2 = getelementptr %struct.B* %b.i1, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp2.i2 = getelementptr %struct.B, %struct.B* %b.i1, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 -1, i32* %tmp2.i2
br i1 icmp eq (i64 and (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 4294967296), i64 0), label %_Z3fooiM1BFvvE.exit28, label %cond_true.i18
cond_true.i18: ; preds = %_Z3fooiM1BFvvE.exit56
%b2.i7 = bitcast %struct.B* %b.i1 to i8* ; <i8*> [#uses=1]
- %ctg23.i8 = getelementptr i8* %b2.i7, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
+ %ctg23.i8 = getelementptr i8, i8* %b2.i7, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
%tmp121314.i9 = bitcast i8* %ctg23.i8 to i32 (...)*** ; <i32 (...)***> [#uses=1]
%tmp15.i10 = load i32 (...)*** %tmp121314.i9 ; <i32 (...)**> [#uses=1]
%tmp151.i13 = bitcast i32 (...)** %tmp15.i10 to i8* ; <i8*> [#uses=1]
- %ctg2.i14 = getelementptr i8* %tmp151.i13, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) ; <i8*> [#uses=1]
+ %ctg2.i14 = getelementptr i8, i8* %tmp151.i13, i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) ; <i8*> [#uses=1]
%tmp2021.i15 = bitcast i8* %ctg2.i14 to i32 (...)** ; <i32 (...)**> [#uses=1]
%tmp22.i16 = load i32 (...)** %tmp2021.i15 ; <i32 (...)*> [#uses=1]
%tmp2223.i17 = bitcast i32 (...)* %tmp22.i16 to void (%struct.B*)* ; <void (%struct.B*)*> [#uses=1]
_Z3fooiM1BFvvE.exit28: ; preds = %cond_true.i18, %_Z3fooiM1BFvvE.exit56
%iftmp.2.0.i21 = phi void (%struct.B*)* [ %tmp2223.i17, %cond_true.i18 ], [ inttoptr (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to void (%struct.B*)*), %_Z3fooiM1BFvvE.exit56 ] ; <void (%struct.B*)*> [#uses=1]
%b4.i25 = bitcast %struct.B* %b.i1 to i8* ; <i8*> [#uses=1]
- %ctg25.i26 = getelementptr i8* %b4.i25, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
+ %ctg25.i26 = getelementptr i8, i8* %b4.i25, i32 ashr (i32 trunc (i64 lshr (i64 zext (i32 ptrtoint (void (%struct.B*)* @_ZN1B1iEv to i32) to i64), i64 32) to i32), i32 1) ; <i8*> [#uses=1]
%tmp3031.i27 = bitcast i8* %ctg25.i26 to %struct.B* ; <%struct.B*> [#uses=1]
call void %iftmp.2.0.i21( %struct.B* %tmp3031.i27 )
ret i32 0
%tmp1273 = load %struct.TestObj** null ; <%struct.TestObj*> [#uses=2]
%tmp2930.i = ptrtoint %struct.TestObj* %tmp1273 to i32 ; <i32> [#uses=1]
%tmp42.i348 = sub i32 0, %tmp2930.i ; <i32> [#uses=1]
- %tmp45.i = getelementptr %struct.TestObj* %tmp1273, i32 0, i32 0 ; <i8**> [#uses=2]
+ %tmp45.i = getelementptr %struct.TestObj, %struct.TestObj* %tmp1273, i32 0, i32 0 ; <i8**> [#uses=2]
%tmp48.i = load i8** %tmp45.i ; <i8*> [#uses=1]
%tmp50.i350 = call i32 (i8*, i8*, ...)* @sprintf( i8* getelementptr ([256 x i8]* @Msg, i32 0, i32 0), i8* getelementptr ([48 x i8]* @.str53615, i32 0, i32 0), i8* null, i8** %tmp45.i, i8* %tmp48.i ) ; <i32> [#uses=0]
br i1 false, label %cond_true.i632.i, label %Ut_TraceMsg.exit648.i
ret void
Ut_TraceMsg.exit648.i: ; preds = %cond_true1272
- %tmp57.i = getelementptr i8* null, i32 %tmp42.i348 ; <i8*> [#uses=0]
+ %tmp57.i = getelementptr i8, i8* null, i32 %tmp42.i348 ; <i8*> [#uses=0]
ret void
cond_next1275: ; preds = %cond_next1267
bb140: ; preds = %bb140, %cond_false
%indvar = phi i32 [ 0, %cond_false ], [ %indvar.next, %bb140 ] ; <i32> [#uses=2]
%edge.230.0.rec = shl i32 %indvar, 1 ; <i32> [#uses=3]
- %edge.230.0 = getelementptr %struct.shape_edge_t* null, i32 %edge.230.0.rec ; <%struct.shape_edge_t*> [#uses=1]
+ %edge.230.0 = getelementptr %struct.shape_edge_t, %struct.shape_edge_t* null, i32 %edge.230.0.rec ; <%struct.shape_edge_t*> [#uses=1]
%edge.230.0.sum6970 = or i32 %edge.230.0.rec, 1 ; <i32> [#uses=2]
- %tmp154 = getelementptr %struct.shape_edge_t* null, i32 %edge.230.0.sum6970 ; <%struct.shape_edge_t*> [#uses=1]
- %tmp11.i5 = getelementptr %struct.shape_edge_t* null, i32 %edge.230.0.sum6970, i32 0 ; <%struct.shape_edge_t**> [#uses=1]
+ %tmp154 = getelementptr %struct.shape_edge_t, %struct.shape_edge_t* null, i32 %edge.230.0.sum6970 ; <%struct.shape_edge_t*> [#uses=1]
+ %tmp11.i5 = getelementptr %struct.shape_edge_t, %struct.shape_edge_t* null, i32 %edge.230.0.sum6970, i32 0 ; <%struct.shape_edge_t**> [#uses=1]
store %struct.shape_edge_t* %edge.230.0, %struct.shape_edge_t** %tmp11.i5
store %struct.shape_edge_t* %tmp154, %struct.shape_edge_t** null
%tmp16254.0.rec = add i32 %edge.230.0.rec, 2 ; <i32> [#uses=1]
br i1 false, label %cond_true110.i, label %cond_next123.i
cond_true110.i: ; preds = %bb102.i
- %tmp116.i = getelementptr i8** %argv_addr.2321.0.i, i32 2 ; <i8**> [#uses=1]
+ %tmp116.i = getelementptr i8*, i8** %argv_addr.2321.0.i, i32 2 ; <i8**> [#uses=1]
%tmp117.i = load i8** %tmp116.i ; <i8*> [#uses=1]
%tmp126425.i = call %struct.FILE* @fopen( i8* %tmp117.i, i8* getelementptr ([2 x i8]* @.str44, i32 0, i32 0) ) ; <%struct.FILE*> [#uses=0]
ret i32 0
cond_next123.i: ; preds = %bb102.i
- %tmp122.i = getelementptr i8* %tmp215.i, i32 2 ; <i8*> [#uses=0]
+ %tmp122.i = getelementptr i8, i8* %tmp215.i, i32 2 ; <i8*> [#uses=0]
ret i32 0
bb162.i: ; preds = %cond_next212.i
C_addcmd.exit120.i: ; preds = %cond_next212.i
%tmp3.i.i.i.i105.i = call i8* @calloc( i32 15, i32 1 ) ; <i8*> [#uses=1]
- %tmp1.i108.i = getelementptr [100 x i8*]* @_C_cmds, i32 0, i32 0 ; <i8**> [#uses=1]
+ %tmp1.i108.i = getelementptr [100 x i8*], [100 x i8*]* @_C_cmds, i32 0, i32 0 ; <i8**> [#uses=1]
store i8* %tmp3.i.i.i.i105.i, i8** %tmp1.i108.i, align 4
%tmp.i91.i = load i32* @_C_nextcmd, align 4 ; <i32> [#uses=1]
store i32 0, i32* @_C_nextcmd, align 4
%tmp3.i.i.i.i95.i = call i8* @calloc( i32 15, i32 1 ) ; <i8*> [#uses=1]
- %tmp1.i98.i = getelementptr [100 x i8*]* @_C_cmds, i32 0, i32 %tmp.i91.i ; <i8**> [#uses=1]
+ %tmp1.i98.i = getelementptr [100 x i8*], [100 x i8*]* @_C_cmds, i32 0, i32 %tmp.i91.i ; <i8**> [#uses=1]
store i8* %tmp3.i.i.i.i95.i, i8** %tmp1.i98.i, align 4
br label %cond_next212.i
%max_d.3 = phi i32 [ -1, %entry ], [ %max_d.3, %bb30.i ], [ %max_d.3, %bb21.i ], [ %max_d.3, %C_addcmd.exit120.i ], [ 0, %bb192.i ], [ %max_d.3, %cond_next212.i ], [ %max_d.3, %cond_next212.i ], [ %max_d.3, %cond_next212.i ], [ %max_d.3, %cond_next212.i ] ; <i32> [#uses=7]
%argv_addr.2321.0.i = phi i8** [ %argv, %entry ], [ %tmp214.i, %bb192.i ], [ %tmp214.i, %C_addcmd.exit120.i ], [ %tmp214.i, %bb30.i ], [ %tmp214.i, %bb21.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ], [ %tmp214.i, %cond_next212.i ] ; <i8**> [#uses=2]
%argc_addr.2358.0.i = phi i32 [ %argc, %entry ], [ %tmp205399.i, %bb30.i ], [ 0, %bb21.i ], [ 0, %C_addcmd.exit120.i ], [ 0, %bb192.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ], [ 0, %cond_next212.i ] ; <i32> [#uses=1]
- %tmp214.i = getelementptr i8** %argv_addr.2321.0.i, i32 1 ; <i8**> [#uses=9]
+ %tmp214.i = getelementptr i8*, i8** %argv_addr.2321.0.i, i32 1 ; <i8**> [#uses=9]
%tmp215.i = load i8** %tmp214.i ; <i8*> [#uses=1]
%tmp1314.i = sext i8 0 to i32 ; <i32> [#uses=1]
switch i32 %tmp1314.i, label %bb192.i [
define %"struct.kc::impl_ID"* @_ZN2kc18f_typeofunpsubtermEPNS_15impl_unpsubtermEPNS_7impl_IDE(%"struct.kc::impl_Ccode_option"* %a_unpsubterm, %"struct.kc::impl_ID"* %a_operator) {
entry:
- %tmp8 = getelementptr %"struct.kc::impl_Ccode_option"* %a_unpsubterm, i32 0, i32 0, i32 0 ; <i32 (...)***> [#uses=0]
+ %tmp8 = getelementptr %"struct.kc::impl_Ccode_option", %"struct.kc::impl_Ccode_option"* %a_unpsubterm, i32 0, i32 0, i32 0 ; <i32 (...)***> [#uses=0]
br i1 false, label %bb41, label %bb55
bb41: ; preds = %entry
define fastcc void @outer_loop(%struct.lame_global_flags* %gfp, double* %xr, i32 %targ_bits, double* %best_noise, %struct.III_psy_xmin* %l3_xmin, i32* %l3_enc, %struct.III_scalefac_t* %scalefac, %struct.gr_info* %cod_info, i32 %ch) {
entry:
- %cod_info.182 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 1 ; <i32*> [#uses=1]
+ %cod_info.182 = getelementptr %struct.gr_info, %struct.gr_info* %cod_info, i32 0, i32 1 ; <i32*> [#uses=1]
br label %bb
bb: ; preds = %bb226, %entry
%save_cod_info.1.1 = phi i32 [ undef, %entry ], [ %save_cod_info.1.1, %bb226 ] ; <i32> [#uses=2]
bb155.i: ; preds = %cond_next215.i, %bb151.i
%indvar90.i = phi i32 [ %indvar.next91.i, %cond_next215.i ], [ 0, %bb151.i ] ; <i32> [#uses=2]
%sfb.3.reg2mem.0.i = add i32 %indvar90.i, %tmp37.i55 ; <i32> [#uses=4]
- %tmp161.i = getelementptr [4 x [21 x double]]* null, i32 0, i32 %tmp15747.i, i32 %sfb.3.reg2mem.0.i ; <double*> [#uses=1]
+ %tmp161.i = getelementptr [4 x [21 x double]], [4 x [21 x double]]* null, i32 0, i32 %tmp15747.i, i32 %sfb.3.reg2mem.0.i ; <double*> [#uses=1]
%tmp162.i74 = load double* %tmp161.i, align 4 ; <double> [#uses=0]
br i1 false, label %cond_true167.i, label %cond_next215.i
cond_true167.i: ; preds = %bb155.i
- %tmp173.i = getelementptr %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.3.reg2mem.0.i, i32 %i.154.i ; <i32*> [#uses=1]
+ %tmp173.i = getelementptr %struct.III_scalefac_t, %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.3.reg2mem.0.i, i32 %i.154.i ; <i32*> [#uses=1]
store i32 0, i32* %tmp173.i, align 4
- %tmp182.1.i = getelementptr [14 x i32]* @scalefac_band.1, i32 0, i32 %sfb.3.reg2mem.0.i ; <i32*> [#uses=0]
+ %tmp182.1.i = getelementptr [14 x i32], [14 x i32]* @scalefac_band.1, i32 0, i32 %sfb.3.reg2mem.0.i ; <i32*> [#uses=0]
%tmp185.i78 = add i32 %sfb.3.reg2mem.0.i, 1 ; <i32> [#uses=1]
- %tmp187.1.i = getelementptr [14 x i32]* @scalefac_band.1, i32 0, i32 %tmp185.i78 ; <i32*> [#uses=1]
+ %tmp187.1.i = getelementptr [14 x i32], [14 x i32]* @scalefac_band.1, i32 0, i32 %tmp185.i78 ; <i32*> [#uses=1]
%tmp188.i = load i32* %tmp187.1.i, align 4 ; <i32> [#uses=1]
%tmp21153.i = icmp slt i32 0, %tmp188.i ; <i1> [#uses=1]
br i1 %tmp21153.i, label %bb190.preheader.i, label %cond_next215.i
define fastcc void @outer_loop2(%struct.lame_global_flags* %gfp, double* %xr, i32 %targ_bits, double* %best_noise, %struct.III_psy_xmin* %l3_xmin, i32* %l3_enc, %struct.III_scalefac_t* %scalefac, %struct.gr_info* %cod_info, i32 %ch) {
entry:
- %cod_info.20128.1 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 20, i32 1 ; <i32*> [#uses=1]
- %cod_info.20128.2 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 20, i32 2 ; <i32*> [#uses=1]
- %cod_info.20128.3 = getelementptr %struct.gr_info* %cod_info, i32 0, i32 20, i32 3 ; <i32*> [#uses=1]
+ %cod_info.20128.1 = getelementptr %struct.gr_info, %struct.gr_info* %cod_info, i32 0, i32 20, i32 1 ; <i32*> [#uses=1]
+ %cod_info.20128.2 = getelementptr %struct.gr_info, %struct.gr_info* %cod_info, i32 0, i32 20, i32 2 ; <i32*> [#uses=1]
+ %cod_info.20128.3 = getelementptr %struct.gr_info, %struct.gr_info* %cod_info, i32 0, i32 20, i32 3 ; <i32*> [#uses=1]
br label %bb
bb: ; preds = %bb226, %entry
%save_cod_info.19.1 = phi i32* [ undef, %entry ], [ %save_cod_info.19.0, %bb226 ] ; <i32*> [#uses=1]
bb.i8.us.i: ; preds = %get_mem2Dpel.exit.i.us.i, %cond_true.i29
%j.04.i.us.i = phi i32 [ %indvar.next39.i, %get_mem2Dpel.exit.i.us.i ], [ 0, %cond_true.i29 ] ; <i32> [#uses=2]
- %tmp13.i.us.i = getelementptr i16*** null, i32 %j.04.i.us.i ; <i16***> [#uses=0]
+ %tmp13.i.us.i = getelementptr i16**, i16*** null, i32 %j.04.i.us.i ; <i16***> [#uses=0]
%tmp15.i.i.us.i = tail call i8* @calloc( i32 0, i32 2 ) ; <i8*> [#uses=0]
store i16* null, i16** null, align 4
br label %bb.i.i.us.i
define fastcc %struct.node_t* @_ZL6createP6node_tii3v_tS1_d(%struct.node_t* %n, i32 %lvl, i32 %dist, i64 %c.0.0, i64 %c.0.1, i64 %c.0.2, i64 %d.0.0, i64 %d.0.1, i64 %d.0.2, double %r) nounwind {
entry:
- %0 = getelementptr %struct.node_t* %n, i32 0, i32 1 ; <%struct.hit_t*> [#uses=1]
+ %0 = getelementptr %struct.node_t, %struct.node_t* %n, i32 0, i32 1 ; <%struct.hit_t*> [#uses=1]
%1 = bitcast %struct.hit_t* %0 to i256* ; <i256*> [#uses=1]
store i256 0, i256* %1, align 4
unreachable
store { i32, { double, double }* } %d_arg, { i32, { double, double }* }* %d
store i32 %x_arg, i32* %x
%tmp = load i32* %x ; <i32> [#uses=1]
- %tmp1 = getelementptr { i32, { double, double }* }* %d, i32 0, i32 1 ; <{ double, double }**> [#uses=1]
+ %tmp1 = getelementptr { i32, { double, double }* }, { i32, { double, double }* }* %d, i32 0, i32 1 ; <{ double, double }**> [#uses=1]
%.ptr = load { double, double }** %tmp1 ; <{ double, double }*> [#uses=1]
- %tmp2 = getelementptr { double, double }* %.ptr, i32 %tmp ; <{ double, double }*> [#uses=1]
+ %tmp2 = getelementptr { double, double }, { double, double }* %.ptr, i32 %tmp ; <{ double, double }*> [#uses=1]
%tmp3 = load { double, double }* %tmp2 ; <{ double, double }> [#uses=1]
store { double, double } %tmp3, { double, double }* %b
ret void
define void @simplify_unary_real(i8* nocapture %p) nounwind {
entry:
%tmp121 = load i64* null, align 4 ; <i64> [#uses=1]
- %0 = getelementptr %struct.rtx_def* null, i32 0, i32 3, i32 3, i32 0 ; <i64*> [#uses=1]
+ %0 = getelementptr %struct.rtx_def, %struct.rtx_def* null, i32 0, i32 3, i32 3, i32 0 ; <i64*> [#uses=1]
%tmp122 = load i64* %0, align 4 ; <i64> [#uses=1]
%1 = zext i64 undef to i192 ; <i192> [#uses=2]
%2 = zext i64 %tmp121 to i192 ; <i192> [#uses=1]
store i32 0, i32* @al_len, align 4
store i32 0, i32* @no_mat, align 4
store i32 0, i32* @no_mis, align 4
- %3 = getelementptr i8* %B, i32 %0 ; <i8*> [#uses=1]
+ %3 = getelementptr i8, i8* %B, i32 %0 ; <i8*> [#uses=1]
tail call void @diff(i8* undef, i8* %3, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
%4 = sitofp i32 undef to double ; <double> [#uses=1]
%5 = fdiv double %4, 1.000000e+01 ; <double> [#uses=1]
%3 = sub i32 %2, %0 ; <i32> [#uses=1]
store i32 0, i32* @no_mat, align 4
store i32 0, i32* @no_mis, align 4
- %4 = getelementptr i8* %B, i32 %0 ; <i8*> [#uses=1]
+ %4 = getelementptr i8, i8* %B, i32 %0 ; <i8*> [#uses=1]
tail call void @diff(i8* undef, i8* %4, i32 undef, i32 %3, i32 undef, i32 undef) nounwind
%5 = tail call i32 (i8*, ...)* @printf(i8* getelementptr ([33 x i8]* @"\01LC11", i32 0, i32 0), i32 %tmp13) nounwind ; <i32> [#uses=0]
%6 = load i32* @no_mis, align 4 ; <i32> [#uses=1]
bb168.i: ; preds = %bb167.i, %bb163.i, %bb161.i, %bb160.i, %bb158.i
%fi.5.i = phi i32 [ undef, %bb167.i ], [ %ci.910.i, %bb158.i ], [ undef, %bb160.i ], [ %ci.910.i, %bb161.i ], [ undef, %bb163.i ] ; <i32> [#uses=1]
%fj.4.i = phi i32 [ undef, %bb167.i ], [ undef, %bb158.i ], [ %fj.515.i, %bb160.i ], [ undef, %bb161.i ], [ %fj.515.i, %bb163.i ] ; <i32> [#uses=2]
- %scevgep88.i = getelementptr i32* null, i32 %i.121.i ; <i32*> [#uses=3]
+ %scevgep88.i = getelementptr i32, i32* null, i32 %i.121.i ; <i32*> [#uses=3]
%4 = load i32* %scevgep88.i, align 4 ; <i32> [#uses=2]
- %scevgep89.i = getelementptr i32* %0, i32 %i.121.i ; <i32*> [#uses=3]
+ %scevgep89.i = getelementptr i32, i32* %0, i32 %i.121.i ; <i32*> [#uses=3]
%5 = load i32* %scevgep89.i, align 4 ; <i32> [#uses=1]
%ci.10.i = select i1 undef, i32 %pi.316.i, i32 %i.121.i ; <i32> [#uses=0]
%cj.9.i = select i1 undef, i32 %pj.317.i, i32 undef ; <i32> [#uses=0]
bb11: ; preds = %bb9
store i32 0, i32* @no_mis, align 4
- %1 = getelementptr i8* %A, i32 0 ; <i8*> [#uses=1]
- %2 = getelementptr i8* %B, i32 0 ; <i8*> [#uses=1]
+ %1 = getelementptr i8, i8* %A, i32 0 ; <i8*> [#uses=1]
+ %2 = getelementptr i8, i8* %B, i32 0 ; <i8*> [#uses=1]
tail call void @diff(i8* %1, i8* %2, i32 undef, i32 undef, i32 undef, i32 undef) nounwind
br i1 undef, label %bb15, label %bb12
%fi.5.i = phi i32 [ %fi.614.i, %bb167.i ], [ %ci.910.i, %bb158.i ], [ %fi.614.i, %bb160.i ], [ %ci.910.i, %bb161.i ], [ %fi.614.i, %bb163.i ] ; <i32> [#uses=2]
%fj.4.i = phi i32 [ %cj.811.i, %bb167.i ], [ %cj.811.i, %bb158.i ], [ %fj.515.i, %bb160.i ], [ %cj.811.i, %bb161.i ], [ %fj.515.i, %bb163.i ] ; <i32> [#uses=2]
%f.5.i = phi i32 [ %7, %bb167.i ], [ %8, %bb158.i ], [ %7, %bb160.i ], [ %7, %bb161.i ], [ %7, %bb163.i ] ; <i32> [#uses=2]
- %scevgep88.i = getelementptr i32* %3, i32 undef ; <i32*> [#uses=1]
+ %scevgep88.i = getelementptr i32, i32* %3, i32 undef ; <i32*> [#uses=1]
%ci.10.i = select i1 undef, i32 %pi.316.i, i32 undef ; <i32> [#uses=0]
%ci.12.i = select i1 undef, i32 %fi.5.i, i32 undef ; <i32> [#uses=1]
%cj.11.i100 = select i1 undef, i32 %fj.4.i, i32 undef ; <i32> [#uses=1]
br i1 undef, label %bb220.i, label %bb158.i
bb220.i: ; preds = %bb218.i, %bb153.i
- %11 = getelementptr i32* null, i32 %6 ; <i32*> [#uses=1]
+ %11 = getelementptr i32, i32* null, i32 %6 ; <i32*> [#uses=1]
store i32 undef, i32* %11, align 4
br i1 undef, label %bb221.i, label %bb228.i
%fi.5.i = phi i32 [ %fi.614.i, %bb167.i ], [ %ci.910.i, %bb158.i ], [ %fi.614.i, %bb160.i ], [ %ci.910.i, %bb161.i ], [ %fi.614.i, %bb163.i ] ; <i32> [#uses=2]
%fj.4.i = phi i32 [ %cj.811.i, %bb167.i ], [ %cj.811.i, %bb158.i ], [ %fj.515.i, %bb160.i ], [ %cj.811.i, %bb161.i ], [ %fj.515.i, %bb163.i ] ; <i32> [#uses=2]
%f.5.i = phi i32 [ %3, %bb167.i ], [ %4, %bb158.i ], [ %3, %bb160.i ], [ %3, %bb161.i ], [ %3, %bb163.i ] ; <i32> [#uses=2]
- %scevgep88.i = getelementptr i32* %0, i32 undef ; <i32*> [#uses=2]
- %scevgep89.i = getelementptr i32* %1, i32 undef ; <i32*> [#uses=2]
+ %scevgep88.i = getelementptr i32, i32* %0, i32 undef ; <i32*> [#uses=2]
+ %scevgep89.i = getelementptr i32, i32* %1, i32 undef ; <i32*> [#uses=2]
%ci.10.i = select i1 undef, i32 %pi.316.i, i32 undef ; <i32> [#uses=0]
%cj.9.i = select i1 undef, i32 %pj.317.i, i32 undef ; <i32> [#uses=0]
%ci.12.i = select i1 undef, i32 %fi.5.i, i32 undef ; <i32> [#uses=2]
br i1 %0, label %bb8, label %bb
bb: ; preds = %entry
- %1 = getelementptr %struct.VERTEX* %tree, i32 0, i32 2 ; <%struct.VERTEX**> [#uses=1]
+ %1 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 2 ; <%struct.VERTEX**> [#uses=1]
%2 = load %struct.VERTEX** %1, align 4 ; <%struct.VERTEX*> [#uses=2]
%3 = icmp eq %struct.VERTEX* %2, null ; <i1> [#uses=1]
br i1 %3, label %bb7, label %bb1.i
bb1.i: ; preds = %bb1.i, %bb
%tree_addr.0.i = phi %struct.VERTEX* [ %5, %bb1.i ], [ %tree, %bb ] ; <%struct.VERTEX*> [#uses=3]
- %4 = getelementptr %struct.VERTEX* %tree_addr.0.i, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
+ %4 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree_addr.0.i, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
%5 = load %struct.VERTEX** %4, align 4 ; <%struct.VERTEX*> [#uses=2]
%6 = icmp eq %struct.VERTEX* %5, null ; <i1> [#uses=1]
br i1 %6, label %get_low.exit, label %bb1.i
get_low.exit: ; preds = %bb1.i
call void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delright, %struct.VERTEX* %2, %struct.VERTEX* %extra) nounwind
- %7 = getelementptr %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
+ %7 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
%8 = load %struct.VERTEX** %7, align 4 ; <%struct.VERTEX*> [#uses=1]
call void @build_delaunay(%struct.EDGE_PAIR* noalias sret %delleft, %struct.VERTEX* %8, %struct.VERTEX* %tree) nounwind
- %9 = getelementptr %struct.EDGE_PAIR* %delleft, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
+ %9 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delleft, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
%10 = load %struct.edge_rec** %9, align 8 ; <%struct.edge_rec*> [#uses=2]
- %11 = getelementptr %struct.EDGE_PAIR* %delleft, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %11 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delleft, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%12 = load %struct.edge_rec** %11, align 4 ; <%struct.edge_rec*> [#uses=1]
- %13 = getelementptr %struct.EDGE_PAIR* %delright, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
+ %13 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delright, i32 0, i32 0 ; <%struct.edge_rec**> [#uses=1]
%14 = load %struct.edge_rec** %13, align 8 ; <%struct.edge_rec*> [#uses=1]
- %15 = getelementptr %struct.EDGE_PAIR* %delright, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %15 = getelementptr %struct.EDGE_PAIR, %struct.EDGE_PAIR* %delright, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%16 = load %struct.edge_rec** %15, align 4 ; <%struct.edge_rec*> [#uses=2]
br label %bb.i
bb.i: ; preds = %bb4.i, %get_low.exit
%rdi_addr.0.i = phi %struct.edge_rec* [ %14, %get_low.exit ], [ %72, %bb4.i ] ; <%struct.edge_rec*> [#uses=2]
%ldi_addr.1.i = phi %struct.edge_rec* [ %12, %get_low.exit ], [ %ldi_addr.0.i, %bb4.i ] ; <%struct.edge_rec*> [#uses=3]
- %17 = getelementptr %struct.edge_rec* %rdi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %17 = getelementptr %struct.edge_rec, %struct.edge_rec* %rdi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%18 = load %struct.VERTEX** %17, align 4 ; <%struct.VERTEX*> [#uses=3]
%19 = ptrtoint %struct.edge_rec* %ldi_addr.1.i to i32 ; <i32> [#uses=1]
- %20 = getelementptr %struct.VERTEX* %18, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %20 = getelementptr %struct.VERTEX, %struct.VERTEX* %18, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%21 = load double* %20, align 4 ; <double> [#uses=3]
- %22 = getelementptr %struct.VERTEX* %18, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %22 = getelementptr %struct.VERTEX, %struct.VERTEX* %18, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%23 = load double* %22, align 4 ; <double> [#uses=3]
br label %bb2.i
%27 = and i32 %24, -64 ; <i32> [#uses=1]
%28 = or i32 %26, %27 ; <i32> [#uses=1]
%29 = inttoptr i32 %28 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %30 = getelementptr %struct.edge_rec* %29, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %30 = getelementptr %struct.edge_rec, %struct.edge_rec* %29, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%31 = load %struct.edge_rec** %30, align 4 ; <%struct.edge_rec*> [#uses=1]
%32 = ptrtoint %struct.edge_rec* %31 to i32 ; <i32> [#uses=2]
%33 = add i32 %32, 16 ; <i32> [#uses=1]
%ldi_addr.0.i = phi %struct.edge_rec* [ %ldi_addr.1.i, %bb.i ], [ %37, %bb1.i1 ] ; <%struct.edge_rec*> [#uses=4]
%.pn6.in.i = xor i32 %.pn6.in.in.i, 32 ; <i32> [#uses=1]
%.pn6.i = inttoptr i32 %.pn6.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %t1.0.in.i = getelementptr %struct.edge_rec* %ldi_addr.1.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %t2.0.in.i = getelementptr %struct.edge_rec* %.pn6.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %t1.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %ldi_addr.1.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %t2.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn6.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%t1.0.i = load %struct.VERTEX** %t1.0.in.i ; <%struct.VERTEX*> [#uses=2]
%t2.0.i = load %struct.VERTEX** %t2.0.in.i ; <%struct.VERTEX*> [#uses=2]
- %38 = getelementptr %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %38 = getelementptr %struct.VERTEX, %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%39 = load double* %38, align 4 ; <double> [#uses=3]
- %40 = getelementptr %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %40 = getelementptr %struct.VERTEX, %struct.VERTEX* %t1.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%41 = load double* %40, align 4 ; <double> [#uses=3]
- %42 = getelementptr %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %42 = getelementptr %struct.VERTEX, %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%43 = load double* %42, align 4 ; <double> [#uses=1]
- %44 = getelementptr %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %44 = getelementptr %struct.VERTEX, %struct.VERTEX* %t2.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%45 = load double* %44, align 4 ; <double> [#uses=1]
%46 = fsub double %39, %21 ; <double> [#uses=1]
%47 = fsub double %45, %23 ; <double> [#uses=1]
%54 = ptrtoint %struct.edge_rec* %rdi_addr.0.i to i32 ; <i32> [#uses=1]
%55 = xor i32 %54, 32 ; <i32> [#uses=3]
%56 = inttoptr i32 %55 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %57 = getelementptr %struct.edge_rec* %56, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %57 = getelementptr %struct.edge_rec, %struct.edge_rec* %56, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%58 = load %struct.VERTEX** %57, align 4 ; <%struct.VERTEX*> [#uses=2]
- %59 = getelementptr %struct.VERTEX* %58, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %59 = getelementptr %struct.VERTEX, %struct.VERTEX* %58, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%60 = load double* %59, align 4 ; <double> [#uses=1]
- %61 = getelementptr %struct.VERTEX* %58, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %61 = getelementptr %struct.VERTEX, %struct.VERTEX* %58, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%62 = load double* %61, align 4 ; <double> [#uses=1]
%63 = fsub double %60, %39 ; <double> [#uses=1]
%64 = fsub double %23, %41 ; <double> [#uses=1]
br i1 %70, label %bb4.i, label %bb5.i
bb4.i: ; preds = %bb3.i
- %71 = getelementptr %struct.edge_rec* %56, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %71 = getelementptr %struct.edge_rec, %struct.edge_rec* %56, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%72 = load %struct.edge_rec** %71, align 4 ; <%struct.edge_rec*> [#uses=1]
br label %bb.i
%75 = and i32 %55, -64 ; <i32> [#uses=1]
%76 = or i32 %74, %75 ; <i32> [#uses=1]
%77 = inttoptr i32 %76 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %78 = getelementptr %struct.edge_rec* %77, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %78 = getelementptr %struct.edge_rec, %struct.edge_rec* %77, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%79 = load %struct.edge_rec** %78, align 4 ; <%struct.edge_rec*> [#uses=1]
%80 = ptrtoint %struct.edge_rec* %79 to i32 ; <i32> [#uses=2]
%81 = add i32 %80, 16 ; <i32> [#uses=1]
%83 = and i32 %80, -64 ; <i32> [#uses=1]
%84 = or i32 %82, %83 ; <i32> [#uses=1]
%85 = inttoptr i32 %84 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %86 = getelementptr %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %86 = getelementptr %struct.edge_rec, %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%87 = load %struct.VERTEX** %86, align 4 ; <%struct.VERTEX*> [#uses=1]
%88 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=6]
- %89 = getelementptr %struct.edge_rec* %88, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
+ %89 = getelementptr %struct.edge_rec, %struct.edge_rec* %88, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %88, %struct.edge_rec** %89, align 4
- %90 = getelementptr %struct.edge_rec* %88, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=2]
+ %90 = getelementptr %struct.edge_rec, %struct.edge_rec* %88, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=2]
store %struct.VERTEX* %18, %struct.VERTEX** %90, align 4
%91 = ptrtoint %struct.edge_rec* %88 to i32 ; <i32> [#uses=5]
%92 = add i32 %91, 16 ; <i32> [#uses=2]
%93 = inttoptr i32 %92 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%94 = add i32 %91, 48 ; <i32> [#uses=1]
%95 = inttoptr i32 %94 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %96 = getelementptr %struct.edge_rec* %93, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %96 = getelementptr %struct.edge_rec, %struct.edge_rec* %93, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %95, %struct.edge_rec** %96, align 4
%97 = add i32 %91, 32 ; <i32> [#uses=1]
%98 = inttoptr i32 %97 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %99 = getelementptr %struct.edge_rec* %98, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %99 = getelementptr %struct.edge_rec, %struct.edge_rec* %98, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %98, %struct.edge_rec** %99, align 4
- %100 = getelementptr %struct.edge_rec* %98, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %100 = getelementptr %struct.edge_rec, %struct.edge_rec* %98, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %87, %struct.VERTEX** %100, align 4
- %101 = getelementptr %struct.edge_rec* %95, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %101 = getelementptr %struct.edge_rec, %struct.edge_rec* %95, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %93, %struct.edge_rec** %101, align 4
%102 = load %struct.edge_rec** %89, align 4 ; <%struct.edge_rec*> [#uses=1]
%103 = ptrtoint %struct.edge_rec* %102 to i32 ; <i32> [#uses=2]
%106 = and i32 %103, -64 ; <i32> [#uses=1]
%107 = or i32 %105, %106 ; <i32> [#uses=1]
%108 = inttoptr i32 %107 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %109 = getelementptr %struct.edge_rec* %85, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %109 = getelementptr %struct.edge_rec, %struct.edge_rec* %85, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%110 = load %struct.edge_rec** %109, align 4 ; <%struct.edge_rec*> [#uses=1]
%111 = ptrtoint %struct.edge_rec* %110 to i32 ; <i32> [#uses=2]
%112 = add i32 %111, 16 ; <i32> [#uses=1]
%114 = and i32 %111, -64 ; <i32> [#uses=1]
%115 = or i32 %113, %114 ; <i32> [#uses=1]
%116 = inttoptr i32 %115 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %117 = getelementptr %struct.edge_rec* %116, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %117 = getelementptr %struct.edge_rec, %struct.edge_rec* %116, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%118 = load %struct.edge_rec** %117, align 4 ; <%struct.edge_rec*> [#uses=1]
- %119 = getelementptr %struct.edge_rec* %108, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %119 = getelementptr %struct.edge_rec, %struct.edge_rec* %108, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%120 = load %struct.edge_rec** %119, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %118, %struct.edge_rec** %119, align 4
store %struct.edge_rec* %120, %struct.edge_rec** %117, align 4
store %struct.edge_rec* %122, %struct.edge_rec** %89, align 4
%123 = xor i32 %91, 32 ; <i32> [#uses=1]
%124 = inttoptr i32 %123 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %125 = getelementptr %struct.edge_rec* %124, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %125 = getelementptr %struct.edge_rec, %struct.edge_rec* %124, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%126 = load %struct.edge_rec** %125, align 4 ; <%struct.edge_rec*> [#uses=1]
%127 = ptrtoint %struct.edge_rec* %126 to i32 ; <i32> [#uses=2]
%128 = add i32 %127, 16 ; <i32> [#uses=1]
%130 = and i32 %127, -64 ; <i32> [#uses=1]
%131 = or i32 %129, %130 ; <i32> [#uses=1]
%132 = inttoptr i32 %131 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %133 = getelementptr %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %133 = getelementptr %struct.edge_rec, %struct.edge_rec* %ldi_addr.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%134 = load %struct.edge_rec** %133, align 4 ; <%struct.edge_rec*> [#uses=1]
%135 = ptrtoint %struct.edge_rec* %134 to i32 ; <i32> [#uses=2]
%136 = add i32 %135, 16 ; <i32> [#uses=1]
%138 = and i32 %135, -64 ; <i32> [#uses=1]
%139 = or i32 %137, %138 ; <i32> [#uses=1]
%140 = inttoptr i32 %139 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %141 = getelementptr %struct.edge_rec* %140, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %141 = getelementptr %struct.edge_rec, %struct.edge_rec* %140, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%142 = load %struct.edge_rec** %141, align 4 ; <%struct.edge_rec*> [#uses=1]
- %143 = getelementptr %struct.edge_rec* %132, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %143 = getelementptr %struct.edge_rec, %struct.edge_rec* %132, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%144 = load %struct.edge_rec** %143, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %142, %struct.edge_rec** %143, align 4
store %struct.edge_rec* %144, %struct.edge_rec** %141, align 4
%148 = and i32 %91, -64 ; <i32> [#uses=1]
%149 = or i32 %147, %148 ; <i32> [#uses=1]
%150 = inttoptr i32 %149 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %151 = getelementptr %struct.edge_rec* %150, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %151 = getelementptr %struct.edge_rec, %struct.edge_rec* %150, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%152 = load %struct.edge_rec** %151, align 4 ; <%struct.edge_rec*> [#uses=1]
%153 = ptrtoint %struct.edge_rec* %152 to i32 ; <i32> [#uses=2]
%154 = add i32 %153, 16 ; <i32> [#uses=1]
%157 = or i32 %155, %156 ; <i32> [#uses=1]
%158 = inttoptr i32 %157 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%159 = load %struct.VERTEX** %90, align 4 ; <%struct.VERTEX*> [#uses=1]
- %160 = getelementptr %struct.edge_rec* %124, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %160 = getelementptr %struct.edge_rec, %struct.edge_rec* %124, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%161 = load %struct.VERTEX** %160, align 4 ; <%struct.VERTEX*> [#uses=1]
- %162 = getelementptr %struct.edge_rec* %16, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %162 = getelementptr %struct.edge_rec, %struct.edge_rec* %16, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%163 = load %struct.VERTEX** %162, align 4 ; <%struct.VERTEX*> [#uses=1]
%164 = icmp eq %struct.VERTEX* %163, %159 ; <i1> [#uses=1]
%rdo_addr.0.i = select i1 %164, %struct.edge_rec* %88, %struct.edge_rec* %16 ; <%struct.edge_rec*> [#uses=3]
- %165 = getelementptr %struct.edge_rec* %10, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %165 = getelementptr %struct.edge_rec, %struct.edge_rec* %10, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%166 = load %struct.VERTEX** %165, align 4 ; <%struct.VERTEX*> [#uses=1]
%167 = icmp eq %struct.VERTEX* %166, %161 ; <i1> [#uses=1]
%ldo_addr.0.ph.i = select i1 %167, %struct.edge_rec* %124, %struct.edge_rec* %10 ; <%struct.edge_rec*> [#uses=3]
%lcand.2.i = phi %struct.edge_rec* [ %146, %bb5.i ], [ %lcand.1.i, %bb24.i ], [ %739, %bb25.i ] ; <%struct.edge_rec*> [#uses=5]
%rcand.2.i = phi %struct.edge_rec* [ %158, %bb5.i ], [ %666, %bb24.i ], [ %rcand.1.i, %bb25.i ] ; <%struct.edge_rec*> [#uses=5]
%basel.0.i = phi %struct.edge_rec* [ %88, %bb5.i ], [ %595, %bb24.i ], [ %716, %bb25.i ] ; <%struct.edge_rec*> [#uses=2]
- %168 = getelementptr %struct.edge_rec* %lcand.2.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %168 = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.2.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%169 = load %struct.edge_rec** %168, align 4 ; <%struct.edge_rec*> [#uses=3]
- %170 = getelementptr %struct.edge_rec* %basel.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
+ %170 = getelementptr %struct.edge_rec, %struct.edge_rec* %basel.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
%171 = load %struct.VERTEX** %170, align 4 ; <%struct.VERTEX*> [#uses=4]
%172 = ptrtoint %struct.edge_rec* %basel.0.i to i32 ; <i32> [#uses=3]
%173 = xor i32 %172, 32 ; <i32> [#uses=1]
%174 = inttoptr i32 %173 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %175 = getelementptr %struct.edge_rec* %174, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
+ %175 = getelementptr %struct.edge_rec, %struct.edge_rec* %174, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
%176 = load %struct.VERTEX** %175, align 4 ; <%struct.VERTEX*> [#uses=3]
%177 = ptrtoint %struct.edge_rec* %169 to i32 ; <i32> [#uses=1]
%178 = xor i32 %177, 32 ; <i32> [#uses=1]
%179 = inttoptr i32 %178 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %180 = getelementptr %struct.edge_rec* %179, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %180 = getelementptr %struct.edge_rec, %struct.edge_rec* %179, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%181 = load %struct.VERTEX** %180, align 4 ; <%struct.VERTEX*> [#uses=2]
- %182 = getelementptr %struct.VERTEX* %171, i32 0, i32 0, i32 0 ; <double*> [#uses=2]
+ %182 = getelementptr %struct.VERTEX, %struct.VERTEX* %171, i32 0, i32 0, i32 0 ; <double*> [#uses=2]
%183 = load double* %182, align 4 ; <double> [#uses=2]
- %184 = getelementptr %struct.VERTEX* %171, i32 0, i32 0, i32 1 ; <double*> [#uses=2]
+ %184 = getelementptr %struct.VERTEX, %struct.VERTEX* %171, i32 0, i32 0, i32 1 ; <double*> [#uses=2]
%185 = load double* %184, align 4 ; <double> [#uses=2]
- %186 = getelementptr %struct.VERTEX* %181, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %186 = getelementptr %struct.VERTEX, %struct.VERTEX* %181, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%187 = load double* %186, align 4 ; <double> [#uses=1]
- %188 = getelementptr %struct.VERTEX* %181, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %188 = getelementptr %struct.VERTEX, %struct.VERTEX* %181, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%189 = load double* %188, align 4 ; <double> [#uses=1]
- %190 = getelementptr %struct.VERTEX* %176, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %190 = getelementptr %struct.VERTEX, %struct.VERTEX* %176, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%191 = load double* %190, align 4 ; <double> [#uses=2]
- %192 = getelementptr %struct.VERTEX* %176, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %192 = getelementptr %struct.VERTEX, %struct.VERTEX* %176, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%193 = load double* %192, align 4 ; <double> [#uses=2]
%194 = fsub double %183, %191 ; <double> [#uses=1]
%195 = fsub double %189, %193 ; <double> [#uses=1]
br i1 %201, label %bb10.i, label %bb13.i
bb10.i: ; preds = %bb9.i
- %202 = getelementptr %struct.VERTEX* %171, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %202 = getelementptr %struct.VERTEX, %struct.VERTEX* %171, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%avail_edge.promoted25 = load %struct.edge_rec** @avail_edge ; <%struct.edge_rec*> [#uses=1]
br label %bb12.i
%206 = and i32 %203, -64 ; <i32> [#uses=3]
%207 = or i32 %205, %206 ; <i32> [#uses=1]
%208 = inttoptr i32 %207 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %209 = getelementptr %struct.edge_rec* %208, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %209 = getelementptr %struct.edge_rec, %struct.edge_rec* %208, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%210 = load %struct.edge_rec** %209, align 4 ; <%struct.edge_rec*> [#uses=1]
%211 = ptrtoint %struct.edge_rec* %210 to i32 ; <i32> [#uses=2]
%212 = add i32 %211, 16 ; <i32> [#uses=1]
%214 = and i32 %211, -64 ; <i32> [#uses=1]
%215 = or i32 %213, %214 ; <i32> [#uses=1]
%216 = inttoptr i32 %215 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %217 = getelementptr %struct.edge_rec* %lcand.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %217 = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%218 = load %struct.edge_rec** %217, align 4 ; <%struct.edge_rec*> [#uses=1]
%219 = ptrtoint %struct.edge_rec* %218 to i32 ; <i32> [#uses=2]
%220 = add i32 %219, 16 ; <i32> [#uses=1]
%222 = and i32 %219, -64 ; <i32> [#uses=1]
%223 = or i32 %221, %222 ; <i32> [#uses=1]
%224 = inttoptr i32 %223 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %225 = getelementptr %struct.edge_rec* %216, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %225 = getelementptr %struct.edge_rec, %struct.edge_rec* %216, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%226 = load %struct.edge_rec** %225, align 4 ; <%struct.edge_rec*> [#uses=1]
%227 = ptrtoint %struct.edge_rec* %226 to i32 ; <i32> [#uses=2]
%228 = add i32 %227, 16 ; <i32> [#uses=1]
%230 = and i32 %227, -64 ; <i32> [#uses=1]
%231 = or i32 %229, %230 ; <i32> [#uses=1]
%232 = inttoptr i32 %231 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %233 = getelementptr %struct.edge_rec* %232, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %233 = getelementptr %struct.edge_rec, %struct.edge_rec* %232, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%234 = load %struct.edge_rec** %233, align 4 ; <%struct.edge_rec*> [#uses=1]
- %235 = getelementptr %struct.edge_rec* %224, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %235 = getelementptr %struct.edge_rec, %struct.edge_rec* %224, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%236 = load %struct.edge_rec** %235, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %234, %struct.edge_rec** %235, align 4
store %struct.edge_rec* %236, %struct.edge_rec** %233, align 4
%241 = and i32 %240, 63 ; <i32> [#uses=1]
%242 = or i32 %241, %206 ; <i32> [#uses=1]
%243 = inttoptr i32 %242 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %244 = getelementptr %struct.edge_rec* %243, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %244 = getelementptr %struct.edge_rec, %struct.edge_rec* %243, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%245 = load %struct.edge_rec** %244, align 4 ; <%struct.edge_rec*> [#uses=1]
%246 = ptrtoint %struct.edge_rec* %245 to i32 ; <i32> [#uses=2]
%247 = add i32 %246, 16 ; <i32> [#uses=1]
%250 = or i32 %248, %249 ; <i32> [#uses=1]
%251 = inttoptr i32 %250 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%252 = inttoptr i32 %239 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %253 = getelementptr %struct.edge_rec* %252, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %253 = getelementptr %struct.edge_rec, %struct.edge_rec* %252, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%254 = load %struct.edge_rec** %253, align 4 ; <%struct.edge_rec*> [#uses=1]
%255 = ptrtoint %struct.edge_rec* %254 to i32 ; <i32> [#uses=2]
%256 = add i32 %255, 16 ; <i32> [#uses=1]
%258 = and i32 %255, -64 ; <i32> [#uses=1]
%259 = or i32 %257, %258 ; <i32> [#uses=1]
%260 = inttoptr i32 %259 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %261 = getelementptr %struct.edge_rec* %251, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %261 = getelementptr %struct.edge_rec, %struct.edge_rec* %251, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%262 = load %struct.edge_rec** %261, align 4 ; <%struct.edge_rec*> [#uses=1]
%263 = ptrtoint %struct.edge_rec* %262 to i32 ; <i32> [#uses=2]
%264 = add i32 %263, 16 ; <i32> [#uses=1]
%266 = and i32 %263, -64 ; <i32> [#uses=1]
%267 = or i32 %265, %266 ; <i32> [#uses=1]
%268 = inttoptr i32 %267 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %269 = getelementptr %struct.edge_rec* %268, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %269 = getelementptr %struct.edge_rec, %struct.edge_rec* %268, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%270 = load %struct.edge_rec** %269, align 4 ; <%struct.edge_rec*> [#uses=1]
- %271 = getelementptr %struct.edge_rec* %260, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %271 = getelementptr %struct.edge_rec, %struct.edge_rec* %260, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%272 = load %struct.edge_rec** %271, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %270, %struct.edge_rec** %271, align 4
store %struct.edge_rec* %272, %struct.edge_rec** %269, align 4
store %struct.edge_rec* %273, %struct.edge_rec** %261, align 4
store %struct.edge_rec* %274, %struct.edge_rec** %253, align 4
%275 = inttoptr i32 %206 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %276 = getelementptr %struct.edge_rec* %275, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %276 = getelementptr %struct.edge_rec, %struct.edge_rec* %275, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %avail_edge.tmp.026, %struct.edge_rec** %276, align 4
- %277 = getelementptr %struct.edge_rec* %t.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %277 = getelementptr %struct.edge_rec, %struct.edge_rec* %t.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%278 = load %struct.edge_rec** %277, align 4 ; <%struct.edge_rec*> [#uses=2]
%.pre.i = load double* %182, align 4 ; <double> [#uses=1]
%.pre22.i = load double* %184, align 4 ; <double> [#uses=1]
%.pn4.in.i = xor i32 %.pn4.in.in.i, 32 ; <i32> [#uses=1]
%.pn5.i = inttoptr i32 %.pn5.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%.pn4.i = inttoptr i32 %.pn4.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %v1.0.in.i = getelementptr %struct.edge_rec* %.pn5.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v2.0.in.i = getelementptr %struct.edge_rec* %.pn4.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v3.0.in.i = getelementptr %struct.edge_rec* %lcand.2.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %v1.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn5.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %v2.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn4.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %v3.0.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.2.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%v1.0.i = load %struct.VERTEX** %v1.0.in.i ; <%struct.VERTEX*> [#uses=3]
%v2.0.i = load %struct.VERTEX** %v2.0.in.i ; <%struct.VERTEX*> [#uses=3]
%v3.0.i = load %struct.VERTEX** %v3.0.in.i ; <%struct.VERTEX*> [#uses=3]
%281 = load double* %202, align 4 ; <double> [#uses=3]
- %282 = getelementptr %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %282 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%283 = load double* %282, align 4 ; <double> [#uses=1]
%284 = fsub double %283, %280 ; <double> [#uses=2]
- %285 = getelementptr %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %285 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%286 = load double* %285, align 4 ; <double> [#uses=1]
%287 = fsub double %286, %279 ; <double> [#uses=2]
- %288 = getelementptr %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %288 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%289 = load double* %288, align 4 ; <double> [#uses=1]
- %290 = getelementptr %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %290 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%291 = load double* %290, align 4 ; <double> [#uses=1]
%292 = fsub double %291, %280 ; <double> [#uses=2]
- %293 = getelementptr %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %293 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%294 = load double* %293, align 4 ; <double> [#uses=1]
%295 = fsub double %294, %279 ; <double> [#uses=2]
- %296 = getelementptr %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %296 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%297 = load double* %296, align 4 ; <double> [#uses=1]
- %298 = getelementptr %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %298 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%299 = load double* %298, align 4 ; <double> [#uses=1]
%300 = fsub double %299, %280 ; <double> [#uses=2]
- %301 = getelementptr %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %301 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%302 = load double* %301, align 4 ; <double> [#uses=1]
%303 = fsub double %302, %279 ; <double> [#uses=2]
- %304 = getelementptr %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %304 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.0.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%305 = load double* %304, align 4 ; <double> [#uses=1]
%306 = fsub double %289, %281 ; <double> [#uses=1]
%307 = fmul double %292, %303 ; <double> [#uses=1]
%329 = and i32 %326, -64 ; <i32> [#uses=1]
%330 = or i32 %328, %329 ; <i32> [#uses=1]
%331 = inttoptr i32 %330 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %332 = getelementptr %struct.edge_rec* %331, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %332 = getelementptr %struct.edge_rec, %struct.edge_rec* %331, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%333 = load %struct.edge_rec** %332, align 4 ; <%struct.edge_rec*> [#uses=1]
%334 = ptrtoint %struct.edge_rec* %333 to i32 ; <i32> [#uses=2]
%335 = add i32 %334, 16 ; <i32> [#uses=1]
%338 = or i32 %336, %337 ; <i32> [#uses=3]
%339 = xor i32 %338, 32 ; <i32> [#uses=1]
%340 = inttoptr i32 %339 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %341 = getelementptr %struct.edge_rec* %340, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %341 = getelementptr %struct.edge_rec, %struct.edge_rec* %340, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%342 = load %struct.VERTEX** %341, align 4 ; <%struct.VERTEX*> [#uses=2]
- %343 = getelementptr %struct.VERTEX* %325, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %343 = getelementptr %struct.VERTEX, %struct.VERTEX* %325, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%344 = load double* %343, align 4 ; <double> [#uses=1]
- %345 = getelementptr %struct.VERTEX* %325, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %345 = getelementptr %struct.VERTEX, %struct.VERTEX* %325, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%346 = load double* %345, align 4 ; <double> [#uses=1]
- %347 = getelementptr %struct.VERTEX* %342, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %347 = getelementptr %struct.VERTEX, %struct.VERTEX* %342, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%348 = load double* %347, align 4 ; <double> [#uses=1]
- %349 = getelementptr %struct.VERTEX* %342, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %349 = getelementptr %struct.VERTEX, %struct.VERTEX* %342, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%350 = load double* %349, align 4 ; <double> [#uses=1]
- %351 = getelementptr %struct.VERTEX* %324, i32 0, i32 0, i32 0 ; <double*> [#uses=2]
+ %351 = getelementptr %struct.VERTEX, %struct.VERTEX* %324, i32 0, i32 0, i32 0 ; <double*> [#uses=2]
%352 = load double* %351, align 4 ; <double> [#uses=3]
- %353 = getelementptr %struct.VERTEX* %324, i32 0, i32 0, i32 1 ; <double*> [#uses=2]
+ %353 = getelementptr %struct.VERTEX, %struct.VERTEX* %324, i32 0, i32 0, i32 1 ; <double*> [#uses=2]
%354 = load double* %353, align 4 ; <double> [#uses=3]
%355 = fsub double %344, %352 ; <double> [#uses=1]
%356 = fsub double %350, %354 ; <double> [#uses=1]
br i1 %362, label %bb14.i, label %bb17.i
bb14.i: ; preds = %bb13.i
- %363 = getelementptr %struct.VERTEX* %324, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %363 = getelementptr %struct.VERTEX, %struct.VERTEX* %324, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%avail_edge.promoted = load %struct.edge_rec** @avail_edge ; <%struct.edge_rec*> [#uses=1]
br label %bb16.i
%367 = and i32 %364, -64 ; <i32> [#uses=3]
%368 = or i32 %366, %367 ; <i32> [#uses=1]
%369 = inttoptr i32 %368 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %370 = getelementptr %struct.edge_rec* %369, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %370 = getelementptr %struct.edge_rec, %struct.edge_rec* %369, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%371 = load %struct.edge_rec** %370, align 4 ; <%struct.edge_rec*> [#uses=1]
%372 = ptrtoint %struct.edge_rec* %371 to i32 ; <i32> [#uses=2]
%373 = add i32 %372, 16 ; <i32> [#uses=1]
%375 = and i32 %372, -64 ; <i32> [#uses=1]
%376 = or i32 %374, %375 ; <i32> [#uses=1]
%377 = inttoptr i32 %376 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %378 = getelementptr %struct.edge_rec* %rcand.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %378 = getelementptr %struct.edge_rec, %struct.edge_rec* %rcand.0.i, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%379 = load %struct.edge_rec** %378, align 4 ; <%struct.edge_rec*> [#uses=1]
%380 = ptrtoint %struct.edge_rec* %379 to i32 ; <i32> [#uses=2]
%381 = add i32 %380, 16 ; <i32> [#uses=1]
%383 = and i32 %380, -64 ; <i32> [#uses=1]
%384 = or i32 %382, %383 ; <i32> [#uses=1]
%385 = inttoptr i32 %384 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %386 = getelementptr %struct.edge_rec* %377, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %386 = getelementptr %struct.edge_rec, %struct.edge_rec* %377, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%387 = load %struct.edge_rec** %386, align 4 ; <%struct.edge_rec*> [#uses=1]
%388 = ptrtoint %struct.edge_rec* %387 to i32 ; <i32> [#uses=2]
%389 = add i32 %388, 16 ; <i32> [#uses=1]
%391 = and i32 %388, -64 ; <i32> [#uses=1]
%392 = or i32 %390, %391 ; <i32> [#uses=1]
%393 = inttoptr i32 %392 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %394 = getelementptr %struct.edge_rec* %393, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %394 = getelementptr %struct.edge_rec, %struct.edge_rec* %393, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%395 = load %struct.edge_rec** %394, align 4 ; <%struct.edge_rec*> [#uses=1]
- %396 = getelementptr %struct.edge_rec* %385, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %396 = getelementptr %struct.edge_rec, %struct.edge_rec* %385, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%397 = load %struct.edge_rec** %396, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %395, %struct.edge_rec** %396, align 4
store %struct.edge_rec* %397, %struct.edge_rec** %394, align 4
%402 = and i32 %401, 63 ; <i32> [#uses=1]
%403 = or i32 %402, %367 ; <i32> [#uses=1]
%404 = inttoptr i32 %403 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %405 = getelementptr %struct.edge_rec* %404, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %405 = getelementptr %struct.edge_rec, %struct.edge_rec* %404, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%406 = load %struct.edge_rec** %405, align 4 ; <%struct.edge_rec*> [#uses=1]
%407 = ptrtoint %struct.edge_rec* %406 to i32 ; <i32> [#uses=2]
%408 = add i32 %407, 16 ; <i32> [#uses=1]
%411 = or i32 %409, %410 ; <i32> [#uses=1]
%412 = inttoptr i32 %411 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%413 = inttoptr i32 %400 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %414 = getelementptr %struct.edge_rec* %413, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %414 = getelementptr %struct.edge_rec, %struct.edge_rec* %413, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%415 = load %struct.edge_rec** %414, align 4 ; <%struct.edge_rec*> [#uses=1]
%416 = ptrtoint %struct.edge_rec* %415 to i32 ; <i32> [#uses=2]
%417 = add i32 %416, 16 ; <i32> [#uses=1]
%419 = and i32 %416, -64 ; <i32> [#uses=1]
%420 = or i32 %418, %419 ; <i32> [#uses=1]
%421 = inttoptr i32 %420 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %422 = getelementptr %struct.edge_rec* %412, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %422 = getelementptr %struct.edge_rec, %struct.edge_rec* %412, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%423 = load %struct.edge_rec** %422, align 4 ; <%struct.edge_rec*> [#uses=1]
%424 = ptrtoint %struct.edge_rec* %423 to i32 ; <i32> [#uses=2]
%425 = add i32 %424, 16 ; <i32> [#uses=1]
%427 = and i32 %424, -64 ; <i32> [#uses=1]
%428 = or i32 %426, %427 ; <i32> [#uses=1]
%429 = inttoptr i32 %428 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %430 = getelementptr %struct.edge_rec* %429, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %430 = getelementptr %struct.edge_rec, %struct.edge_rec* %429, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%431 = load %struct.edge_rec** %430, align 4 ; <%struct.edge_rec*> [#uses=1]
- %432 = getelementptr %struct.edge_rec* %421, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %432 = getelementptr %struct.edge_rec, %struct.edge_rec* %421, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%433 = load %struct.edge_rec** %432, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %431, %struct.edge_rec** %432, align 4
store %struct.edge_rec* %433, %struct.edge_rec** %430, align 4
store %struct.edge_rec* %434, %struct.edge_rec** %422, align 4
store %struct.edge_rec* %435, %struct.edge_rec** %414, align 4
%436 = inttoptr i32 %367 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %437 = getelementptr %struct.edge_rec* %436, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %437 = getelementptr %struct.edge_rec, %struct.edge_rec* %436, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %avail_edge.tmp.0, %struct.edge_rec** %437, align 4
%438 = add i32 %t.1.in.i, 16 ; <i32> [#uses=1]
%439 = and i32 %438, 63 ; <i32> [#uses=1]
%440 = and i32 %t.1.in.i, -64 ; <i32> [#uses=1]
%441 = or i32 %439, %440 ; <i32> [#uses=1]
%442 = inttoptr i32 %441 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %443 = getelementptr %struct.edge_rec* %442, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %443 = getelementptr %struct.edge_rec, %struct.edge_rec* %442, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%444 = load %struct.edge_rec** %443, align 4 ; <%struct.edge_rec*> [#uses=1]
%445 = ptrtoint %struct.edge_rec* %444 to i32 ; <i32> [#uses=2]
%446 = add i32 %445, 16 ; <i32> [#uses=1]
%.pn.in.i = xor i32 %.pn.in.in.i, 32 ; <i32> [#uses=1]
%.pn3.i = inttoptr i32 %.pn3.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%.pn.i = inttoptr i32 %.pn.in.i to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %v1.1.in.i = getelementptr %struct.edge_rec* %.pn3.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v2.1.in.i = getelementptr %struct.edge_rec* %.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
- %v3.1.in.i = getelementptr %struct.edge_rec* %rcand.2.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %v1.1.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn3.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %v2.1.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %v3.1.in.i = getelementptr %struct.edge_rec, %struct.edge_rec* %rcand.2.pn.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%v1.1.i = load %struct.VERTEX** %v1.1.in.i ; <%struct.VERTEX*> [#uses=3]
%v2.1.i = load %struct.VERTEX** %v2.1.in.i ; <%struct.VERTEX*> [#uses=3]
%v3.1.i = load %struct.VERTEX** %v3.1.in.i ; <%struct.VERTEX*> [#uses=3]
%452 = load double* %363, align 4 ; <double> [#uses=3]
- %453 = getelementptr %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %453 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%454 = load double* %453, align 4 ; <double> [#uses=1]
%455 = fsub double %454, %451 ; <double> [#uses=2]
- %456 = getelementptr %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %456 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%457 = load double* %456, align 4 ; <double> [#uses=1]
%458 = fsub double %457, %450 ; <double> [#uses=2]
- %459 = getelementptr %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %459 = getelementptr %struct.VERTEX, %struct.VERTEX* %v1.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%460 = load double* %459, align 4 ; <double> [#uses=1]
- %461 = getelementptr %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %461 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%462 = load double* %461, align 4 ; <double> [#uses=1]
%463 = fsub double %462, %451 ; <double> [#uses=2]
- %464 = getelementptr %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %464 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%465 = load double* %464, align 4 ; <double> [#uses=1]
%466 = fsub double %465, %450 ; <double> [#uses=2]
- %467 = getelementptr %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %467 = getelementptr %struct.VERTEX, %struct.VERTEX* %v2.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%468 = load double* %467, align 4 ; <double> [#uses=1]
- %469 = getelementptr %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %469 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%470 = load double* %469, align 4 ; <double> [#uses=1]
%471 = fsub double %470, %451 ; <double> [#uses=2]
- %472 = getelementptr %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %472 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%473 = load double* %472, align 4 ; <double> [#uses=1]
%474 = fsub double %473, %450 ; <double> [#uses=2]
- %475 = getelementptr %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %475 = getelementptr %struct.VERTEX, %struct.VERTEX* %v3.1.i, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%476 = load double* %475, align 4 ; <double> [#uses=1]
%477 = fsub double %460, %452 ; <double> [#uses=1]
%478 = fmul double %463, %474 ; <double> [#uses=1]
%497 = ptrtoint %struct.edge_rec* %lcand.1.i to i32 ; <i32> [#uses=1]
%498 = xor i32 %497, 32 ; <i32> [#uses=1]
%499 = inttoptr i32 %498 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %500 = getelementptr %struct.edge_rec* %499, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %500 = getelementptr %struct.edge_rec, %struct.edge_rec* %499, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%501 = load %struct.VERTEX** %500, align 4 ; <%struct.VERTEX*> [#uses=4]
- %502 = getelementptr %struct.VERTEX* %496, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %502 = getelementptr %struct.VERTEX, %struct.VERTEX* %496, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%503 = load double* %502, align 4 ; <double> [#uses=1]
- %504 = getelementptr %struct.VERTEX* %496, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %504 = getelementptr %struct.VERTEX, %struct.VERTEX* %496, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%505 = load double* %504, align 4 ; <double> [#uses=1]
- %506 = getelementptr %struct.VERTEX* %501, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %506 = getelementptr %struct.VERTEX, %struct.VERTEX* %501, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%507 = load double* %506, align 4 ; <double> [#uses=2]
- %508 = getelementptr %struct.VERTEX* %501, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %508 = getelementptr %struct.VERTEX, %struct.VERTEX* %501, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%509 = load double* %508, align 4 ; <double> [#uses=2]
- %510 = getelementptr %struct.VERTEX* %495, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %510 = getelementptr %struct.VERTEX, %struct.VERTEX* %495, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%511 = load double* %510, align 4 ; <double> [#uses=3]
- %512 = getelementptr %struct.VERTEX* %495, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %512 = getelementptr %struct.VERTEX, %struct.VERTEX* %495, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%513 = load double* %512, align 4 ; <double> [#uses=3]
%514 = fsub double %503, %511 ; <double> [#uses=2]
%515 = fsub double %509, %513 ; <double> [#uses=1]
%522 = ptrtoint %struct.edge_rec* %rcand.1.i to i32 ; <i32> [#uses=3]
%523 = xor i32 %522, 32 ; <i32> [#uses=1]
%524 = inttoptr i32 %523 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %525 = getelementptr %struct.edge_rec* %524, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %525 = getelementptr %struct.edge_rec, %struct.edge_rec* %524, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%526 = load %struct.VERTEX** %525, align 4 ; <%struct.VERTEX*> [#uses=4]
- %527 = getelementptr %struct.VERTEX* %526, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %527 = getelementptr %struct.VERTEX, %struct.VERTEX* %526, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%528 = load double* %527, align 4 ; <double> [#uses=4]
- %529 = getelementptr %struct.VERTEX* %526, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %529 = getelementptr %struct.VERTEX, %struct.VERTEX* %526, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%530 = load double* %529, align 4 ; <double> [#uses=4]
%531 = fsub double %530, %513 ; <double> [#uses=1]
%532 = fmul double %514, %531 ; <double> [#uses=1]
br i1 %537, label %bb21.i, label %do_merge.exit
bb21.i: ; preds = %bb17.i
- %538 = getelementptr %struct.edge_rec* %lcand.1.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %538 = getelementptr %struct.edge_rec, %struct.edge_rec* %lcand.1.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%539 = load %struct.VERTEX** %538, align 4 ; <%struct.VERTEX*> [#uses=3]
- %540 = getelementptr %struct.edge_rec* %rcand.1.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %540 = getelementptr %struct.edge_rec, %struct.edge_rec* %rcand.1.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%541 = load %struct.VERTEX** %540, align 4 ; <%struct.VERTEX*> [#uses=3]
br i1 %521, label %bb22.i, label %bb24.i
br i1 %536, label %bb23.i, label %bb25.i
bb23.i: ; preds = %bb22.i
- %542 = getelementptr %struct.VERTEX* %526, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %542 = getelementptr %struct.VERTEX, %struct.VERTEX* %526, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%543 = load double* %542, align 4 ; <double> [#uses=3]
%544 = fsub double %507, %528 ; <double> [#uses=2]
%545 = fsub double %509, %530 ; <double> [#uses=2]
- %546 = getelementptr %struct.VERTEX* %501, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %546 = getelementptr %struct.VERTEX, %struct.VERTEX* %501, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%547 = load double* %546, align 4 ; <double> [#uses=1]
- %548 = getelementptr %struct.VERTEX* %539, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %548 = getelementptr %struct.VERTEX, %struct.VERTEX* %539, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%549 = load double* %548, align 4 ; <double> [#uses=1]
%550 = fsub double %549, %528 ; <double> [#uses=2]
- %551 = getelementptr %struct.VERTEX* %539, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %551 = getelementptr %struct.VERTEX, %struct.VERTEX* %539, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%552 = load double* %551, align 4 ; <double> [#uses=1]
%553 = fsub double %552, %530 ; <double> [#uses=2]
- %554 = getelementptr %struct.VERTEX* %539, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %554 = getelementptr %struct.VERTEX, %struct.VERTEX* %539, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%555 = load double* %554, align 4 ; <double> [#uses=1]
- %556 = getelementptr %struct.VERTEX* %541, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %556 = getelementptr %struct.VERTEX, %struct.VERTEX* %541, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%557 = load double* %556, align 4 ; <double> [#uses=1]
%558 = fsub double %557, %528 ; <double> [#uses=2]
- %559 = getelementptr %struct.VERTEX* %541, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %559 = getelementptr %struct.VERTEX, %struct.VERTEX* %541, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%560 = load double* %559, align 4 ; <double> [#uses=1]
%561 = fsub double %560, %530 ; <double> [#uses=2]
- %562 = getelementptr %struct.VERTEX* %541, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %562 = getelementptr %struct.VERTEX, %struct.VERTEX* %541, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%563 = load double* %562, align 4 ; <double> [#uses=1]
%564 = fsub double %547, %543 ; <double> [#uses=1]
%565 = fmul double %550, %561 ; <double> [#uses=1]
%584 = and i32 %522, -64 ; <i32> [#uses=1]
%585 = or i32 %583, %584 ; <i32> [#uses=1]
%586 = inttoptr i32 %585 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %587 = getelementptr %struct.edge_rec* %586, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %587 = getelementptr %struct.edge_rec, %struct.edge_rec* %586, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%588 = load %struct.edge_rec** %587, align 4 ; <%struct.edge_rec*> [#uses=1]
%589 = ptrtoint %struct.edge_rec* %588 to i32 ; <i32> [#uses=2]
%590 = add i32 %589, 16 ; <i32> [#uses=1]
%593 = or i32 %591, %592 ; <i32> [#uses=1]
%594 = inttoptr i32 %593 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%595 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
- %596 = getelementptr %struct.edge_rec* %595, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
+ %596 = getelementptr %struct.edge_rec, %struct.edge_rec* %595, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %595, %struct.edge_rec** %596, align 4
- %597 = getelementptr %struct.edge_rec* %595, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %597 = getelementptr %struct.edge_rec, %struct.edge_rec* %595, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %526, %struct.VERTEX** %597, align 4
%598 = ptrtoint %struct.edge_rec* %595 to i32 ; <i32> [#uses=5]
%599 = add i32 %598, 16 ; <i32> [#uses=1]
%600 = inttoptr i32 %599 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%601 = add i32 %598, 48 ; <i32> [#uses=1]
%602 = inttoptr i32 %601 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %603 = getelementptr %struct.edge_rec* %600, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %603 = getelementptr %struct.edge_rec, %struct.edge_rec* %600, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %602, %struct.edge_rec** %603, align 4
%604 = add i32 %598, 32 ; <i32> [#uses=1]
%605 = inttoptr i32 %604 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %606 = getelementptr %struct.edge_rec* %605, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %606 = getelementptr %struct.edge_rec, %struct.edge_rec* %605, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %605, %struct.edge_rec** %606, align 4
- %607 = getelementptr %struct.edge_rec* %605, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %607 = getelementptr %struct.edge_rec, %struct.edge_rec* %605, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %495, %struct.VERTEX** %607, align 4
- %608 = getelementptr %struct.edge_rec* %602, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %608 = getelementptr %struct.edge_rec, %struct.edge_rec* %602, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %600, %struct.edge_rec** %608, align 4
%609 = load %struct.edge_rec** %596, align 4 ; <%struct.edge_rec*> [#uses=1]
%610 = ptrtoint %struct.edge_rec* %609 to i32 ; <i32> [#uses=2]
%613 = and i32 %610, -64 ; <i32> [#uses=1]
%614 = or i32 %612, %613 ; <i32> [#uses=1]
%615 = inttoptr i32 %614 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %616 = getelementptr %struct.edge_rec* %594, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %616 = getelementptr %struct.edge_rec, %struct.edge_rec* %594, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%617 = load %struct.edge_rec** %616, align 4 ; <%struct.edge_rec*> [#uses=1]
%618 = ptrtoint %struct.edge_rec* %617 to i32 ; <i32> [#uses=2]
%619 = add i32 %618, 16 ; <i32> [#uses=1]
%621 = and i32 %618, -64 ; <i32> [#uses=1]
%622 = or i32 %620, %621 ; <i32> [#uses=1]
%623 = inttoptr i32 %622 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %624 = getelementptr %struct.edge_rec* %623, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %624 = getelementptr %struct.edge_rec, %struct.edge_rec* %623, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%625 = load %struct.edge_rec** %624, align 4 ; <%struct.edge_rec*> [#uses=1]
- %626 = getelementptr %struct.edge_rec* %615, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %626 = getelementptr %struct.edge_rec, %struct.edge_rec* %615, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%627 = load %struct.edge_rec** %626, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %625, %struct.edge_rec** %626, align 4
store %struct.edge_rec* %627, %struct.edge_rec** %624, align 4
store %struct.edge_rec* %629, %struct.edge_rec** %596, align 4
%630 = xor i32 %598, 32 ; <i32> [#uses=2]
%631 = inttoptr i32 %630 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %632 = getelementptr %struct.edge_rec* %631, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %632 = getelementptr %struct.edge_rec, %struct.edge_rec* %631, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%633 = load %struct.edge_rec** %632, align 4 ; <%struct.edge_rec*> [#uses=1]
%634 = ptrtoint %struct.edge_rec* %633 to i32 ; <i32> [#uses=2]
%635 = add i32 %634, 16 ; <i32> [#uses=1]
%637 = and i32 %634, -64 ; <i32> [#uses=1]
%638 = or i32 %636, %637 ; <i32> [#uses=1]
%639 = inttoptr i32 %638 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %640 = getelementptr %struct.edge_rec* %174, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %640 = getelementptr %struct.edge_rec, %struct.edge_rec* %174, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%641 = load %struct.edge_rec** %640, align 4 ; <%struct.edge_rec*> [#uses=1]
%642 = ptrtoint %struct.edge_rec* %641 to i32 ; <i32> [#uses=2]
%643 = add i32 %642, 16 ; <i32> [#uses=1]
%645 = and i32 %642, -64 ; <i32> [#uses=1]
%646 = or i32 %644, %645 ; <i32> [#uses=1]
%647 = inttoptr i32 %646 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %648 = getelementptr %struct.edge_rec* %647, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %648 = getelementptr %struct.edge_rec, %struct.edge_rec* %647, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%649 = load %struct.edge_rec** %648, align 4 ; <%struct.edge_rec*> [#uses=1]
- %650 = getelementptr %struct.edge_rec* %639, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %650 = getelementptr %struct.edge_rec, %struct.edge_rec* %639, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%651 = load %struct.edge_rec** %650, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %649, %struct.edge_rec** %650, align 4
store %struct.edge_rec* %651, %struct.edge_rec** %648, align 4
%656 = and i32 %598, -64 ; <i32> [#uses=1]
%657 = or i32 %655, %656 ; <i32> [#uses=1]
%658 = inttoptr i32 %657 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %659 = getelementptr %struct.edge_rec* %658, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %659 = getelementptr %struct.edge_rec, %struct.edge_rec* %658, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%660 = load %struct.edge_rec** %659, align 4 ; <%struct.edge_rec*> [#uses=1]
%661 = ptrtoint %struct.edge_rec* %660 to i32 ; <i32> [#uses=2]
%662 = add i32 %661, 16 ; <i32> [#uses=1]
%669 = and i32 %172, -64 ; <i32> [#uses=1]
%670 = or i32 %668, %669 ; <i32> [#uses=1]
%671 = inttoptr i32 %670 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %672 = getelementptr %struct.edge_rec* %671, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %672 = getelementptr %struct.edge_rec, %struct.edge_rec* %671, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%673 = load %struct.edge_rec** %672, align 4 ; <%struct.edge_rec*> [#uses=1]
%674 = ptrtoint %struct.edge_rec* %673 to i32 ; <i32> [#uses=2]
%675 = add i32 %674, 16 ; <i32> [#uses=1]
%678 = or i32 %676, %677 ; <i32> [#uses=1]
%679 = inttoptr i32 %678 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%680 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
- %681 = getelementptr %struct.edge_rec* %680, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=5]
+ %681 = getelementptr %struct.edge_rec, %struct.edge_rec* %680, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=5]
store %struct.edge_rec* %680, %struct.edge_rec** %681, align 4
- %682 = getelementptr %struct.edge_rec* %680, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %682 = getelementptr %struct.edge_rec, %struct.edge_rec* %680, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %501, %struct.VERTEX** %682, align 4
%683 = ptrtoint %struct.edge_rec* %680 to i32 ; <i32> [#uses=4]
%684 = add i32 %683, 16 ; <i32> [#uses=1]
%685 = inttoptr i32 %684 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%686 = add i32 %683, 48 ; <i32> [#uses=1]
%687 = inttoptr i32 %686 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %688 = getelementptr %struct.edge_rec* %685, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %688 = getelementptr %struct.edge_rec, %struct.edge_rec* %685, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %687, %struct.edge_rec** %688, align 4
%689 = add i32 %683, 32 ; <i32> [#uses=1]
%690 = inttoptr i32 %689 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %691 = getelementptr %struct.edge_rec* %690, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %691 = getelementptr %struct.edge_rec, %struct.edge_rec* %690, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %690, %struct.edge_rec** %691, align 4
- %692 = getelementptr %struct.edge_rec* %690, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %692 = getelementptr %struct.edge_rec, %struct.edge_rec* %690, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %496, %struct.VERTEX** %692, align 4
- %693 = getelementptr %struct.edge_rec* %687, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %693 = getelementptr %struct.edge_rec, %struct.edge_rec* %687, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %685, %struct.edge_rec** %693, align 4
%694 = load %struct.edge_rec** %681, align 4 ; <%struct.edge_rec*> [#uses=1]
%695 = ptrtoint %struct.edge_rec* %694 to i32 ; <i32> [#uses=2]
%698 = and i32 %695, -64 ; <i32> [#uses=1]
%699 = or i32 %697, %698 ; <i32> [#uses=1]
%700 = inttoptr i32 %699 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %701 = getelementptr %struct.edge_rec* %499, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %701 = getelementptr %struct.edge_rec, %struct.edge_rec* %499, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%702 = load %struct.edge_rec** %701, align 4 ; <%struct.edge_rec*> [#uses=1]
%703 = ptrtoint %struct.edge_rec* %702 to i32 ; <i32> [#uses=2]
%704 = add i32 %703, 16 ; <i32> [#uses=1]
%706 = and i32 %703, -64 ; <i32> [#uses=1]
%707 = or i32 %705, %706 ; <i32> [#uses=1]
%708 = inttoptr i32 %707 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %709 = getelementptr %struct.edge_rec* %708, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %709 = getelementptr %struct.edge_rec, %struct.edge_rec* %708, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%710 = load %struct.edge_rec** %709, align 4 ; <%struct.edge_rec*> [#uses=1]
- %711 = getelementptr %struct.edge_rec* %700, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %711 = getelementptr %struct.edge_rec, %struct.edge_rec* %700, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%712 = load %struct.edge_rec** %711, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %710, %struct.edge_rec** %711, align 4
store %struct.edge_rec* %712, %struct.edge_rec** %709, align 4
store %struct.edge_rec* %714, %struct.edge_rec** %681, align 4
%715 = xor i32 %683, 32 ; <i32> [#uses=1]
%716 = inttoptr i32 %715 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %717 = getelementptr %struct.edge_rec* %716, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %717 = getelementptr %struct.edge_rec, %struct.edge_rec* %716, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%718 = load %struct.edge_rec** %717, align 4 ; <%struct.edge_rec*> [#uses=1]
%719 = ptrtoint %struct.edge_rec* %718 to i32 ; <i32> [#uses=2]
%720 = add i32 %719, 16 ; <i32> [#uses=1]
%722 = and i32 %719, -64 ; <i32> [#uses=1]
%723 = or i32 %721, %722 ; <i32> [#uses=1]
%724 = inttoptr i32 %723 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %725 = getelementptr %struct.edge_rec* %679, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %725 = getelementptr %struct.edge_rec, %struct.edge_rec* %679, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%726 = load %struct.edge_rec** %725, align 4 ; <%struct.edge_rec*> [#uses=1]
%727 = ptrtoint %struct.edge_rec* %726 to i32 ; <i32> [#uses=2]
%728 = add i32 %727, 16 ; <i32> [#uses=1]
%730 = and i32 %727, -64 ; <i32> [#uses=1]
%731 = or i32 %729, %730 ; <i32> [#uses=1]
%732 = inttoptr i32 %731 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %733 = getelementptr %struct.edge_rec* %732, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %733 = getelementptr %struct.edge_rec, %struct.edge_rec* %732, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%734 = load %struct.edge_rec** %733, align 4 ; <%struct.edge_rec*> [#uses=1]
- %735 = getelementptr %struct.edge_rec* %724, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %735 = getelementptr %struct.edge_rec, %struct.edge_rec* %724, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%736 = load %struct.edge_rec** %735, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %734, %struct.edge_rec** %735, align 4
store %struct.edge_rec* %736, %struct.edge_rec** %733, align 4
br label %bb9.i
do_merge.exit: ; preds = %bb17.i
- %740 = getelementptr %struct.edge_rec* %ldo_addr.0.ph.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %740 = getelementptr %struct.edge_rec, %struct.edge_rec* %ldo_addr.0.ph.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%741 = load %struct.VERTEX** %740, align 4 ; <%struct.VERTEX*> [#uses=1]
%742 = icmp eq %struct.VERTEX* %741, %tree_addr.0.i ; <i1> [#uses=1]
br i1 %742, label %bb5.loopexit, label %bb2
%743 = ptrtoint %struct.edge_rec* %ldo.07 to i32 ; <i32> [#uses=1]
%744 = xor i32 %743, 32 ; <i32> [#uses=1]
%745 = inttoptr i32 %744 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %746 = getelementptr %struct.edge_rec* %745, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %746 = getelementptr %struct.edge_rec, %struct.edge_rec* %745, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%747 = load %struct.edge_rec** %746, align 4 ; <%struct.edge_rec*> [#uses=3]
- %748 = getelementptr %struct.edge_rec* %747, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %748 = getelementptr %struct.edge_rec, %struct.edge_rec* %747, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%749 = load %struct.VERTEX** %748, align 4 ; <%struct.VERTEX*> [#uses=1]
%750 = icmp eq %struct.VERTEX* %749, %tree_addr.0.i ; <i1> [#uses=1]
br i1 %750, label %bb5.loopexit, label %bb2
bb4: ; preds = %bb5.loopexit, %bb4
%rdo.05 = phi %struct.edge_rec* [ %755, %bb4 ], [ %rdo_addr.0.i, %bb5.loopexit ] ; <%struct.edge_rec*> [#uses=1]
- %751 = getelementptr %struct.edge_rec* %rdo.05, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %751 = getelementptr %struct.edge_rec, %struct.edge_rec* %rdo.05, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%752 = load %struct.edge_rec** %751, align 4 ; <%struct.edge_rec*> [#uses=1]
%753 = ptrtoint %struct.edge_rec* %752 to i32 ; <i32> [#uses=1]
%754 = xor i32 %753, 32 ; <i32> [#uses=1]
%755 = inttoptr i32 %754 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %756 = getelementptr %struct.edge_rec* %755, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %756 = getelementptr %struct.edge_rec, %struct.edge_rec* %755, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%757 = load %struct.VERTEX** %756, align 4 ; <%struct.VERTEX*> [#uses=1]
%758 = icmp eq %struct.VERTEX* %757, %extra ; <i1> [#uses=1]
br i1 %758, label %bb6, label %bb4
bb5.loopexit: ; preds = %bb2, %do_merge.exit
%ldo.0.lcssa = phi %struct.edge_rec* [ %ldo_addr.0.ph.i, %do_merge.exit ], [ %747, %bb2 ] ; <%struct.edge_rec*> [#uses=1]
- %759 = getelementptr %struct.edge_rec* %rdo_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %759 = getelementptr %struct.edge_rec, %struct.edge_rec* %rdo_addr.0.i, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%760 = load %struct.VERTEX** %759, align 4 ; <%struct.VERTEX*> [#uses=1]
%761 = icmp eq %struct.VERTEX* %760, %extra ; <i1> [#uses=1]
br i1 %761, label %bb6, label %bb4
br label %bb15
bb7: ; preds = %bb
- %762 = getelementptr %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
+ %762 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 1 ; <%struct.VERTEX**> [#uses=1]
%763 = load %struct.VERTEX** %762, align 4 ; <%struct.VERTEX*> [#uses=4]
%764 = icmp eq %struct.VERTEX* %763, null ; <i1> [#uses=1]
%765 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=5]
- %766 = getelementptr %struct.edge_rec* %765, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
+ %766 = getelementptr %struct.edge_rec, %struct.edge_rec* %765, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %765, %struct.edge_rec** %766, align 4
- %767 = getelementptr %struct.edge_rec* %765, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
+ %767 = getelementptr %struct.edge_rec, %struct.edge_rec* %765, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=3]
br i1 %764, label %bb10, label %bb11
bb8: ; preds = %entry
%771 = inttoptr i32 %770 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%772 = add i32 %769, 48 ; <i32> [#uses=1]
%773 = inttoptr i32 %772 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %774 = getelementptr %struct.edge_rec* %771, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %774 = getelementptr %struct.edge_rec, %struct.edge_rec* %771, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %773, %struct.edge_rec** %774, align 4
%775 = add i32 %769, 32 ; <i32> [#uses=1]
%776 = inttoptr i32 %775 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %777 = getelementptr %struct.edge_rec* %776, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %777 = getelementptr %struct.edge_rec, %struct.edge_rec* %776, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %776, %struct.edge_rec** %777, align 4
- %778 = getelementptr %struct.edge_rec* %776, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %778 = getelementptr %struct.edge_rec, %struct.edge_rec* %776, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %extra, %struct.VERTEX** %778, align 4
- %779 = getelementptr %struct.edge_rec* %773, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %779 = getelementptr %struct.edge_rec, %struct.edge_rec* %773, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %771, %struct.edge_rec** %779, align 4
%780 = xor i32 %769, 32 ; <i32> [#uses=1]
br label %bb15
%783 = inttoptr i32 %782 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%784 = add i32 %781, 48 ; <i32> [#uses=1]
%785 = inttoptr i32 %784 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %786 = getelementptr %struct.edge_rec* %783, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %786 = getelementptr %struct.edge_rec, %struct.edge_rec* %783, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %785, %struct.edge_rec** %786, align 4
%787 = add i32 %781, 32 ; <i32> [#uses=1]
%788 = inttoptr i32 %787 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %789 = getelementptr %struct.edge_rec* %788, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %789 = getelementptr %struct.edge_rec, %struct.edge_rec* %788, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %788, %struct.edge_rec** %789, align 4
- %790 = getelementptr %struct.edge_rec* %788, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %790 = getelementptr %struct.edge_rec, %struct.edge_rec* %788, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %tree, %struct.VERTEX** %790, align 4
- %791 = getelementptr %struct.edge_rec* %785, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %791 = getelementptr %struct.edge_rec, %struct.edge_rec* %785, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %783, %struct.edge_rec** %791, align 4
%792 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
- %793 = getelementptr %struct.edge_rec* %792, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
+ %793 = getelementptr %struct.edge_rec, %struct.edge_rec* %792, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=4]
store %struct.edge_rec* %792, %struct.edge_rec** %793, align 4
- %794 = getelementptr %struct.edge_rec* %792, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %794 = getelementptr %struct.edge_rec, %struct.edge_rec* %792, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %tree, %struct.VERTEX** %794, align 4
%795 = ptrtoint %struct.edge_rec* %792 to i32 ; <i32> [#uses=5]
%796 = add i32 %795, 16 ; <i32> [#uses=1]
%797 = inttoptr i32 %796 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%798 = add i32 %795, 48 ; <i32> [#uses=2]
%799 = inttoptr i32 %798 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %800 = getelementptr %struct.edge_rec* %797, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %800 = getelementptr %struct.edge_rec, %struct.edge_rec* %797, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %799, %struct.edge_rec** %800, align 4
%801 = add i32 %795, 32 ; <i32> [#uses=1]
%802 = inttoptr i32 %801 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %803 = getelementptr %struct.edge_rec* %802, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %803 = getelementptr %struct.edge_rec, %struct.edge_rec* %802, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %802, %struct.edge_rec** %803, align 4
- %804 = getelementptr %struct.edge_rec* %802, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %804 = getelementptr %struct.edge_rec, %struct.edge_rec* %802, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %extra, %struct.VERTEX** %804, align 4
- %805 = getelementptr %struct.edge_rec* %799, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %805 = getelementptr %struct.edge_rec, %struct.edge_rec* %799, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %797, %struct.edge_rec** %805, align 4
%806 = xor i32 %781, 32 ; <i32> [#uses=1]
%807 = inttoptr i32 %806 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %808 = getelementptr %struct.edge_rec* %807, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %808 = getelementptr %struct.edge_rec, %struct.edge_rec* %807, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%809 = load %struct.edge_rec** %808, align 4 ; <%struct.edge_rec*> [#uses=1]
%810 = ptrtoint %struct.edge_rec* %809 to i32 ; <i32> [#uses=2]
%811 = add i32 %810, 16 ; <i32> [#uses=1]
%820 = and i32 %817, -64 ; <i32> [#uses=1]
%821 = or i32 %819, %820 ; <i32> [#uses=1]
%822 = inttoptr i32 %821 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %823 = getelementptr %struct.edge_rec* %822, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %823 = getelementptr %struct.edge_rec, %struct.edge_rec* %822, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%824 = load %struct.edge_rec** %823, align 4 ; <%struct.edge_rec*> [#uses=1]
- %825 = getelementptr %struct.edge_rec* %815, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %825 = getelementptr %struct.edge_rec, %struct.edge_rec* %815, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%826 = load %struct.edge_rec** %825, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %824, %struct.edge_rec** %825, align 4
store %struct.edge_rec* %826, %struct.edge_rec** %823, align 4
store %struct.edge_rec* %828, %struct.edge_rec** %808, align 4
%829 = xor i32 %795, 32 ; <i32> [#uses=3]
%830 = inttoptr i32 %829 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %831 = getelementptr %struct.edge_rec* %830, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %831 = getelementptr %struct.edge_rec, %struct.edge_rec* %830, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
%832 = load %struct.VERTEX** %831, align 4 ; <%struct.VERTEX*> [#uses=1]
%833 = and i32 %798, 63 ; <i32> [#uses=1]
%834 = and i32 %795, -64 ; <i32> [#uses=1]
%835 = or i32 %833, %834 ; <i32> [#uses=1]
%836 = inttoptr i32 %835 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %837 = getelementptr %struct.edge_rec* %836, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %837 = getelementptr %struct.edge_rec, %struct.edge_rec* %836, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%838 = load %struct.edge_rec** %837, align 4 ; <%struct.edge_rec*> [#uses=1]
%839 = ptrtoint %struct.edge_rec* %838 to i32 ; <i32> [#uses=2]
%840 = add i32 %839, 16 ; <i32> [#uses=1]
%844 = inttoptr i32 %843 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
%845 = load %struct.VERTEX** %767, align 4 ; <%struct.VERTEX*> [#uses=1]
%846 = call %struct.edge_rec* @alloc_edge() nounwind ; <%struct.edge_rec*> [#uses=4]
- %847 = getelementptr %struct.edge_rec* %846, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=7]
+ %847 = getelementptr %struct.edge_rec, %struct.edge_rec* %846, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=7]
store %struct.edge_rec* %846, %struct.edge_rec** %847, align 4
- %848 = getelementptr %struct.edge_rec* %846, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %848 = getelementptr %struct.edge_rec, %struct.edge_rec* %846, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %832, %struct.VERTEX** %848, align 4
%849 = ptrtoint %struct.edge_rec* %846 to i32 ; <i32> [#uses=6]
%850 = add i32 %849, 16 ; <i32> [#uses=2]
%851 = inttoptr i32 %850 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%852 = add i32 %849, 48 ; <i32> [#uses=1]
%853 = inttoptr i32 %852 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
- %854 = getelementptr %struct.edge_rec* %851, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %854 = getelementptr %struct.edge_rec, %struct.edge_rec* %851, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %853, %struct.edge_rec** %854, align 4
%855 = add i32 %849, 32 ; <i32> [#uses=1]
%856 = inttoptr i32 %855 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=3]
- %857 = getelementptr %struct.edge_rec* %856, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %857 = getelementptr %struct.edge_rec, %struct.edge_rec* %856, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %856, %struct.edge_rec** %857, align 4
- %858 = getelementptr %struct.edge_rec* %856, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
+ %858 = getelementptr %struct.edge_rec, %struct.edge_rec* %856, i32 0, i32 0 ; <%struct.VERTEX**> [#uses=1]
store %struct.VERTEX* %845, %struct.VERTEX** %858, align 4
- %859 = getelementptr %struct.edge_rec* %853, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %859 = getelementptr %struct.edge_rec, %struct.edge_rec* %853, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %851, %struct.edge_rec** %859, align 4
%860 = load %struct.edge_rec** %847, align 4 ; <%struct.edge_rec*> [#uses=1]
%861 = ptrtoint %struct.edge_rec* %860 to i32 ; <i32> [#uses=2]
%864 = and i32 %861, -64 ; <i32> [#uses=1]
%865 = or i32 %863, %864 ; <i32> [#uses=1]
%866 = inttoptr i32 %865 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %867 = getelementptr %struct.edge_rec* %844, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %867 = getelementptr %struct.edge_rec, %struct.edge_rec* %844, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%868 = load %struct.edge_rec** %867, align 4 ; <%struct.edge_rec*> [#uses=1]
%869 = ptrtoint %struct.edge_rec* %868 to i32 ; <i32> [#uses=2]
%870 = add i32 %869, 16 ; <i32> [#uses=1]
%872 = and i32 %869, -64 ; <i32> [#uses=1]
%873 = or i32 %871, %872 ; <i32> [#uses=1]
%874 = inttoptr i32 %873 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %875 = getelementptr %struct.edge_rec* %874, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %875 = getelementptr %struct.edge_rec, %struct.edge_rec* %874, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%876 = load %struct.edge_rec** %875, align 4 ; <%struct.edge_rec*> [#uses=1]
- %877 = getelementptr %struct.edge_rec* %866, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %877 = getelementptr %struct.edge_rec, %struct.edge_rec* %866, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%878 = load %struct.edge_rec** %877, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %876, %struct.edge_rec** %877, align 4
store %struct.edge_rec* %878, %struct.edge_rec** %875, align 4
store %struct.edge_rec* %880, %struct.edge_rec** %847, align 4
%881 = xor i32 %849, 32 ; <i32> [#uses=3]
%882 = inttoptr i32 %881 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %883 = getelementptr %struct.edge_rec* %882, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=6]
+ %883 = getelementptr %struct.edge_rec, %struct.edge_rec* %882, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=6]
%884 = load %struct.edge_rec** %883, align 4 ; <%struct.edge_rec*> [#uses=1]
%885 = ptrtoint %struct.edge_rec* %884 to i32 ; <i32> [#uses=2]
%886 = add i32 %885, 16 ; <i32> [#uses=1]
%895 = and i32 %892, -64 ; <i32> [#uses=1]
%896 = or i32 %894, %895 ; <i32> [#uses=1]
%897 = inttoptr i32 %896 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %898 = getelementptr %struct.edge_rec* %897, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %898 = getelementptr %struct.edge_rec, %struct.edge_rec* %897, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%899 = load %struct.edge_rec** %898, align 4 ; <%struct.edge_rec*> [#uses=1]
- %900 = getelementptr %struct.edge_rec* %890, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %900 = getelementptr %struct.edge_rec, %struct.edge_rec* %890, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%901 = load %struct.edge_rec** %900, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %899, %struct.edge_rec** %900, align 4
store %struct.edge_rec* %901, %struct.edge_rec** %898, align 4
%903 = load %struct.edge_rec** %766, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %902, %struct.edge_rec** %766, align 4
store %struct.edge_rec* %903, %struct.edge_rec** %883, align 4
- %904 = getelementptr %struct.VERTEX* %763, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %904 = getelementptr %struct.VERTEX, %struct.VERTEX* %763, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%905 = load double* %904, align 4 ; <double> [#uses=2]
- %906 = getelementptr %struct.VERTEX* %763, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %906 = getelementptr %struct.VERTEX, %struct.VERTEX* %763, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%907 = load double* %906, align 4 ; <double> [#uses=2]
- %908 = getelementptr %struct.VERTEX* %extra, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %908 = getelementptr %struct.VERTEX, %struct.VERTEX* %extra, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%909 = load double* %908, align 4 ; <double> [#uses=3]
- %910 = getelementptr %struct.VERTEX* %extra, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %910 = getelementptr %struct.VERTEX, %struct.VERTEX* %extra, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%911 = load double* %910, align 4 ; <double> [#uses=3]
- %912 = getelementptr %struct.VERTEX* %tree, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %912 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%913 = load double* %912, align 4 ; <double> [#uses=3]
- %914 = getelementptr %struct.VERTEX* %tree, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %914 = getelementptr %struct.VERTEX, %struct.VERTEX* %tree, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
%915 = load double* %914, align 4 ; <double> [#uses=3]
%916 = fsub double %905, %913 ; <double> [#uses=1]
%917 = fsub double %911, %915 ; <double> [#uses=1]
%933 = and i32 %849, -64 ; <i32> [#uses=3]
%934 = or i32 %932, %933 ; <i32> [#uses=1]
%935 = inttoptr i32 %934 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %936 = getelementptr %struct.edge_rec* %935, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %936 = getelementptr %struct.edge_rec, %struct.edge_rec* %935, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%937 = load %struct.edge_rec** %936, align 4 ; <%struct.edge_rec*> [#uses=1]
%938 = ptrtoint %struct.edge_rec* %937 to i32 ; <i32> [#uses=2]
%939 = add i32 %938, 16 ; <i32> [#uses=1]
%948 = and i32 %945, -64 ; <i32> [#uses=1]
%949 = or i32 %947, %948 ; <i32> [#uses=1]
%950 = inttoptr i32 %949 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %951 = getelementptr %struct.edge_rec* %943, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %951 = getelementptr %struct.edge_rec, %struct.edge_rec* %943, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%952 = load %struct.edge_rec** %951, align 4 ; <%struct.edge_rec*> [#uses=1]
%953 = ptrtoint %struct.edge_rec* %952 to i32 ; <i32> [#uses=2]
%954 = add i32 %953, 16 ; <i32> [#uses=1]
%956 = and i32 %953, -64 ; <i32> [#uses=1]
%957 = or i32 %955, %956 ; <i32> [#uses=1]
%958 = inttoptr i32 %957 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %959 = getelementptr %struct.edge_rec* %958, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %959 = getelementptr %struct.edge_rec, %struct.edge_rec* %958, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%960 = load %struct.edge_rec** %959, align 4 ; <%struct.edge_rec*> [#uses=1]
- %961 = getelementptr %struct.edge_rec* %950, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %961 = getelementptr %struct.edge_rec, %struct.edge_rec* %950, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%962 = load %struct.edge_rec** %961, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %960, %struct.edge_rec** %961, align 4
store %struct.edge_rec* %962, %struct.edge_rec** %959, align 4
%966 = and i32 %965, 63 ; <i32> [#uses=1]
%967 = or i32 %966, %933 ; <i32> [#uses=1]
%968 = inttoptr i32 %967 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %969 = getelementptr %struct.edge_rec* %968, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %969 = getelementptr %struct.edge_rec, %struct.edge_rec* %968, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
%970 = load %struct.edge_rec** %969, align 4 ; <%struct.edge_rec*> [#uses=1]
%971 = ptrtoint %struct.edge_rec* %970 to i32 ; <i32> [#uses=2]
%972 = add i32 %971, 16 ; <i32> [#uses=1]
%981 = and i32 %978, -64 ; <i32> [#uses=1]
%982 = or i32 %980, %981 ; <i32> [#uses=1]
%983 = inttoptr i32 %982 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %984 = getelementptr %struct.edge_rec* %976, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
+ %984 = getelementptr %struct.edge_rec, %struct.edge_rec* %976, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=3]
%985 = load %struct.edge_rec** %984, align 4 ; <%struct.edge_rec*> [#uses=1]
%986 = ptrtoint %struct.edge_rec* %985 to i32 ; <i32> [#uses=2]
%987 = add i32 %986, 16 ; <i32> [#uses=1]
%989 = and i32 %986, -64 ; <i32> [#uses=1]
%990 = or i32 %988, %989 ; <i32> [#uses=1]
%991 = inttoptr i32 %990 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=1]
- %992 = getelementptr %struct.edge_rec* %991, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %992 = getelementptr %struct.edge_rec, %struct.edge_rec* %991, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%993 = load %struct.edge_rec** %992, align 4 ; <%struct.edge_rec*> [#uses=1]
- %994 = getelementptr %struct.edge_rec* %983, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
+ %994 = getelementptr %struct.edge_rec, %struct.edge_rec* %983, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=2]
%995 = load %struct.edge_rec** %994, align 4 ; <%struct.edge_rec*> [#uses=1]
store %struct.edge_rec* %993, %struct.edge_rec** %994, align 4
store %struct.edge_rec* %995, %struct.edge_rec** %992, align 4
store %struct.edge_rec* %997, %struct.edge_rec** %883, align 4
%998 = inttoptr i32 %933 to %struct.edge_rec* ; <%struct.edge_rec*> [#uses=2]
%999 = load %struct.edge_rec** @avail_edge, align 4 ; <%struct.edge_rec*> [#uses=1]
- %1000 = getelementptr %struct.edge_rec* %998, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
+ %1000 = getelementptr %struct.edge_rec, %struct.edge_rec* %998, i32 0, i32 1 ; <%struct.edge_rec**> [#uses=1]
store %struct.edge_rec* %999, %struct.edge_rec** %1000, align 4
store %struct.edge_rec* %998, %struct.edge_rec** @avail_edge, align 4
br label %bb15
br i1 undef, label %bb18, label %bb22
bb22: ; preds = %bb18, %bb17
- %0 = getelementptr i8* null, i32 10 ; <i8*> [#uses=1]
+ %0 = getelementptr i8, i8* null, i32 10 ; <i8*> [#uses=1]
%1 = bitcast i8* %0 to i16* ; <i16*> [#uses=1]
%2 = load i16* %1, align 2 ; <i16> [#uses=1]
%3 = add i16 %2, 1 ; <i16> [#uses=1]
bb8: ; preds = %bb7, %entry
%2 = phi i32 [ 0, %entry ], [ %1, %bb7 ] ; <i32> [#uses=3]
- %scevgep22 = getelementptr %struct.iovec* %iov, i32 %2, i32 0; <i8**> [#uses=0]
+ %scevgep22 = getelementptr %struct.iovec, %struct.iovec* %iov, i32 %2, i32 0; <i8**> [#uses=0]
%3 = load i32* %nr_segs, align 4 ; <i32> [#uses=1]
%4 = icmp ult i32 %2, %3 ; <i1> [#uses=1]
br i1 %4, label %bb, label %bb9
bb4.i: ; preds = %entry
%0 = load %struct.tree** @g, align 4 ; <%struct.tree*> [#uses=2]
- %.idx45.i = getelementptr %struct.tree* %0, i32 0, i32 1 ; <double*> [#uses=1]
+ %.idx45.i = getelementptr %struct.tree, %struct.tree* %0, i32 0, i32 1 ; <double*> [#uses=1]
%.idx45.val.i = load double* %.idx45.i ; <double> [#uses=1]
- %.idx46.i = getelementptr %struct.tree* %0, i32 0, i32 2 ; <double*> [#uses=1]
+ %.idx46.i = getelementptr %struct.tree, %struct.tree* %0, i32 0, i32 2 ; <double*> [#uses=1]
%.idx46.val.i = load double* %.idx46.i ; <double> [#uses=1]
%1 = fsub double 0.000000e+00, %.idx45.val.i ; <double> [#uses=2]
%2 = fmul double %1, %1 ; <double> [#uses=1]
br label %bb11
bb11: ; preds = %bb9, %bb7
- %1 = getelementptr %struct.icstruct* %agg.result, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %1 = getelementptr %struct.icstruct, %struct.icstruct* %agg.result, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %1
ret void
}
%exp2 = call double @ldexp(double 1.000000e+00, i32 %level) nounwind ; <double> [#uses=1]
%.c = fptosi double %exp2 to i32 ; <i32> [#uses=1]
store i32 %.c, i32* null
- %1 = getelementptr %struct.Village* %0, i32 0, i32 3, i32 6, i32 0 ; <%struct.List**> [#uses=1]
+ %1 = getelementptr %struct.Village, %struct.Village* %0, i32 0, i32 3, i32 6, i32 0 ; <%struct.List**> [#uses=1]
store %struct.List* null, %struct.List** %1
- %2 = getelementptr %struct.Village* %0, i32 0, i32 3, i32 6, i32 2 ; <%struct.List**> [#uses=1]
+ %2 = getelementptr %struct.Village, %struct.Village* %0, i32 0, i32 3, i32 6, i32 2 ; <%struct.List**> [#uses=1]
store %struct.List* null, %struct.List** %2
ret %struct.Village* %0
%0 = call i8* @_Znwm(i32 4)
%1 = bitcast i8* %0 to i32*
%2 = load %struct.A** %this_addr, align 4
- %3 = getelementptr inbounds %struct.A* %2, i32 0, i32 0
+ %3 = getelementptr inbounds %struct.A, %struct.A* %2, i32 0, i32 0
store i32* %1, i32** %3, align 4
br label %return
%"alloca point" = bitcast i32 0 to i32
store %struct.A* %this, %struct.A** %this_addr
%0 = load %struct.A** %this_addr, align 4
- %1 = getelementptr inbounds %struct.A* %0, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.A, %struct.A* %0, i32 0, i32 0
%2 = load i32** %1, align 4
%3 = bitcast i32* %2 to i8*
call void @_ZdlPv(i8* %3) nounwind
entry:
%val.i.i = load <4 x float>* undef ; <<4 x float>> [#uses=1]
%val2.i.i = load <4 x float>* null ; <<4 x float>> [#uses=1]
- %elt3.i.i = getelementptr inbounds %struct.obb* %box, i32 0, i32 0, i32 2, i32 0 ; <<4 x float>*> [#uses=1]
+ %elt3.i.i = getelementptr inbounds %struct.obb, %struct.obb* %box, i32 0, i32 0, i32 2, i32 0 ; <<4 x float>*> [#uses=1]
%val4.i.i = load <4 x float>* %elt3.i.i ; <<4 x float>> [#uses=1]
%0 = shufflevector <2 x float> undef, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
%1 = fadd <4 x float> undef, zeroinitializer ; <<4 x float>> [#uses=1]
%x76 = fmul double %y.0, 0.000000e+00 ; <double> [#uses=1]
%x77 = fadd double %y.0, 0.000000e+00 ; <double> [#uses=1]
%tmpr = fadd double %x.0, %x76 ; <double> [#uses=1]
- %agg.result.0 = getelementptr %0* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
+ %agg.result.0 = getelementptr %0, %0* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmpr, double* %agg.result.0, align 8
- %agg.result.1 = getelementptr %0* %agg.result, i32 0, i32 1 ; <double*> [#uses=1]
+ %agg.result.1 = getelementptr %0, %0* %agg.result, i32 0, i32 1 ; <double*> [#uses=1]
store double %x77, double* %agg.result.1, align 8
ret void
}
define arm_aapcs_vfpcc void @bar(%foo* noalias sret %agg.result, <4 x float> %quat.0) nounwind {
entry:
%quat_addr = alloca %foo, align 16 ; <%foo*> [#uses=2]
- %0 = getelementptr inbounds %foo* %quat_addr, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
+ %0 = getelementptr inbounds %foo, %foo* %quat_addr, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
store <4 x float> %quat.0, <4 x float>* %0
%1 = call arm_aapcs_vfpcc <4 x float> @quux(%foo* %quat_addr) nounwind ; <<4 x float>> [#uses=3]
%2 = fmul <4 x float> %1, %1 ; <<4 x float>> [#uses=2]
br i1 undef, label %bb85, label %bb
bb: ; preds = %entry
- %0 = getelementptr inbounds %bar* null, i32 0, i32 0, i32 0, i32 2 ; <float*> [#uses=2]
+ %0 = getelementptr inbounds %bar, %bar* null, i32 0, i32 0, i32 0, i32 2 ; <float*> [#uses=2]
%1 = load float* undef, align 4 ; <float> [#uses=1]
%2 = fsub float 0.000000e+00, undef ; <float> [#uses=2]
%3 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
br label %bb3.i
bb3.i: ; preds = %bb2.i, %bb
- %0 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
+ %0 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=0]
%1 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
- %2 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
+ %2 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
%3 = load float* %2, align 4 ; <float> [#uses=1]
- %4 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
+ %4 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
%5 = fsub float %3, undef ; <float> [#uses=2]
- %6 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=2]
+ %6 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=2]
%7 = load float* %6, align 4 ; <float> [#uses=1]
%8 = fsub float %7, undef ; <float> [#uses=1]
- %9 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=2]
+ %9 = getelementptr inbounds %quuz, %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=2]
%10 = load float* %9, align 4 ; <float> [#uses=1]
%11 = fsub float %10, undef ; <float> [#uses=2]
- %12 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
+ %12 = getelementptr inbounds %quuz, %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=2]
%13 = load float* %12, align 4 ; <float> [#uses=1]
%14 = fsub float %13, undef ; <float> [#uses=1]
%15 = load float* undef, align 4 ; <float> [#uses=1]
%22 = fmul float %5, %11 ; <float> [#uses=1]
%23 = fsub float %21, %22 ; <float> [#uses=2]
store float %18, float* undef
- %24 = getelementptr inbounds %bar* null, i32 0, i32 0, i32 0, i32 1 ; <float*> [#uses=2]
+ %24 = getelementptr inbounds %bar, %bar* null, i32 0, i32 0, i32 0, i32 1 ; <float*> [#uses=2]
store float %20, float* %24
store float %23, float* undef
- %25 = getelementptr inbounds %bar* null, i32 0, i32 0, i32 0, i32 3 ; <float*> [#uses=0]
+ %25 = getelementptr inbounds %bar, %bar* null, i32 0, i32 0, i32 0, i32 3 ; <float*> [#uses=0]
%26 = fmul float %18, %18 ; <float> [#uses=1]
%27 = fadd float %26, undef ; <float> [#uses=1]
%28 = fadd float %27, undef ; <float> [#uses=1]
br label %bb3.i
bb3.i: ; preds = %bb2.i, %bb
- %1 = getelementptr inbounds %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=1]
+ %1 = getelementptr inbounds %quuz, %quuz* %a, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=1]
%2 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
- %3 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
- %4 = getelementptr inbounds %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=1]
+ %3 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
+ %4 = getelementptr inbounds %quuz, %quuz* %b, i32 0, i32 1, i32 0, i32 2 ; <float*> [#uses=1]
%5 = fsub float 0.000000e+00, undef ; <float> [#uses=1]
- %6 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=1]
- %7 = getelementptr inbounds %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
+ %6 = getelementptr inbounds %quuz, %quuz* %c, i32 0, i32 1, i32 0, i32 0 ; <float*> [#uses=1]
+ %7 = getelementptr inbounds %quuz, %quuz* %c, i32 0, i32 1, i32 0, i32 1 ; <float*> [#uses=1]
%8 = fsub float undef, undef ; <float> [#uses=1]
%9 = fmul float 0.000000e+00, %8 ; <float> [#uses=1]
%10 = fmul float %5, 0.000000e+00 ; <float> [#uses=1]
%13 = fmul float 0.000000e+00, undef ; <float> [#uses=1]
%14 = fsub float %12, %13 ; <float> [#uses=2]
store float %14, float* undef
- %15 = getelementptr inbounds %bar* %0, i32 0, i32 0, i32 0, i32 3 ; <float*> [#uses=1]
+ %15 = getelementptr inbounds %bar, %bar* %0, i32 0, i32 0, i32 0, i32 3 ; <float*> [#uses=1]
store float 0.000000e+00, float* %15
%16 = fmul float %11, %11 ; <float> [#uses=1]
%17 = fadd float %16, 0.000000e+00 ; <float> [#uses=1]
;CHECK: vtrn.16
%0 = shufflevector <8 x i16> %tmp.0, <8 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
%1 = shufflevector <8 x i16> %tmp.0, <8 x i16> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>
- %agg.result1218.0 = getelementptr %struct.int16x8x2_t* %agg.result, i32 0, i32 0, i32 0, i32 0 ; <<8 x i16>*>
+ %agg.result1218.0 = getelementptr %struct.int16x8x2_t, %struct.int16x8x2_t* %agg.result, i32 0, i32 0, i32 0, i32 0 ; <<8 x i16>*>
store <8 x i16> %0, <8 x i16>* %agg.result1218.0, align 16
- %agg.result12.1.0 = getelementptr %struct.int16x8x2_t* %agg.result, i32 0, i32 0, i32 1, i32 0 ; <<8 x i16>*>
+ %agg.result12.1.0 = getelementptr %struct.int16x8x2_t, %struct.int16x8x2_t* %agg.result, i32 0, i32 0, i32 1, i32 0 ; <<8 x i16>*>
store <8 x i16> %1, <8 x i16>* %agg.result12.1.0, align 16
ret void
}
entry:
%0 = shufflevector <4 x i16> %a.0, <4 x i16> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
%1 = shufflevector <4 x i16> %a.0, <4 x i16> undef, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
- %ptr26.0 = getelementptr inbounds %struct.int16x8x2_t* %ptr, i32 0, i32 0, i32 0, i32 0
+ %ptr26.0 = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* %ptr, i32 0, i32 0, i32 0, i32 0
store <8 x i16> %0, <8 x i16>* %ptr26.0, align 16
- %ptr20.1.0 = getelementptr inbounds %struct.int16x8x2_t* %ptr, i32 0, i32 0, i32 1, i32 0
+ %ptr20.1.0 = getelementptr inbounds %struct.int16x8x2_t, %struct.int16x8x2_t* %ptr, i32 0, i32 0, i32 1, i32 0
store <8 x i16> %1, <8 x i16>* %ptr20.1.0, align 16
ret void
}
%frame = inttoptr i32 %2 to [17 x i32]* ; <[17 x i32]*> [#uses=4]
%3 = load i32* undef ; <i32> [#uses=1]
%4 = load i32* null ; <i32> [#uses=1]
- %5 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
+ %5 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
%6 = bitcast i32* %5 to [8 x i8]** ; <[8 x i8]**> [#uses=1]
%7 = load [8 x i8]** %6 ; <[8 x i8]*> [#uses=1]
- %8 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 12 ; <i32*> [#uses=1]
+ %8 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 12 ; <i32*> [#uses=1]
%9 = load i32* %8 ; <i32> [#uses=1]
br i1 undef, label %bci_13, label %bci_4
%13 = add i32 %base_pc7, 0 ; <i32> [#uses=1]
%14 = inttoptr i32 %13 to void ([84 x i8]*, i32, [788 x i8]*)** ; <void ([84 x i8]*, i32, [788 x i8]*)**> [#uses=1]
%entry_point = load void ([84 x i8]*, i32, [788 x i8]*)** %14 ; <void ([84 x i8]*, i32, [788 x i8]*)*> [#uses=1]
- %15 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 1 ; <i32*> [#uses=1]
+ %15 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 1 ; <i32*> [#uses=1]
%16 = ptrtoint i32* %15 to i32 ; <i32> [#uses=1]
%stack_pointer_addr9 = bitcast i8* undef to i32* ; <i32*> [#uses=1]
store i32 %16, i32* %stack_pointer_addr9
- %17 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 2 ; <i32*> [#uses=1]
+ %17 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 %9, i32* %17
store i32 %10, i32* undef
store [84 x i8]* %method, [84 x i8]** undef
no_overflow: ; preds = %0
%frame = inttoptr i32 %1 to [17 x i32]* ; <[17 x i32]*> [#uses=4]
%2 = load i32* null ; <i32> [#uses=2]
- %3 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
+ %3 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
%4 = load i32* %3 ; <i32> [#uses=2]
%5 = load [8 x i8]** undef ; <[8 x i8]*> [#uses=2]
br i1 undef, label %bci_13, label %bci_4
ret void
bci_35: ; preds = %bci_30
- %7 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 15 ; <i32*> [#uses=1]
+ %7 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 15 ; <i32*> [#uses=1]
store i32 %2, i32* %7
- %8 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
+ %8 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 14 ; <i32*> [#uses=1]
store i32 %4, i32* %8
- %9 = getelementptr inbounds [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
+ %9 = getelementptr inbounds [17 x i32], [17 x i32]* %frame, i32 0, i32 13 ; <i32*> [#uses=1]
%10 = bitcast i32* %9 to [8 x i8]** ; <[8 x i8]**> [#uses=1]
store [8 x i8]* %5, [8 x i8]** %10
call void inttoptr (i32 13839116 to void ([788 x i8]*, i32)*)([788 x i8]* %thread, i32 7)
%tmp1595.upgrd.7 = trunc i64 %tmp1595 to i32 ; <i32> [#uses=1]
%tmp1596 = and i32 %tmp1595.upgrd.7, 255 ; <i32> [#uses=1]
%gep.upgrd.8 = zext i32 %tmp1596 to i64 ; <i64> [#uses=1]
- %tmp1598 = getelementptr [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8 ; <i32*> [#uses=1]
+ %tmp1598 = getelementptr [64 x [256 x i32]], [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8 ; <i32*> [#uses=1]
%tmp1599 = load i32* %tmp1598 ; <i32> [#uses=1]
%tmp1602 = sub i32 0, %tmp1599 ; <i32> [#uses=1]
br i1 undef, label %cond_next1637, label %cond_true1607
; THUMB-DAG: movs [[VAL:r[0-9]+]], #0
; THUMB-NOT: str {{[a-z0-9]+}}, [{{[a-z0-9]+}}], {{[a-z0-9]+}}
; THUMB: str [[VAL]], [r[[ADDR]]]
- %0 = getelementptr inbounds %struct.foo* %this, i32 0, i32 1 ; <i64*> [#uses=1]
+ %0 = getelementptr inbounds %struct.foo, %struct.foo* %this, i32 0, i32 1 ; <i64*> [#uses=1]
store i32 0, i32* inttoptr (i32 8 to i32*), align 8
br i1 %tst, label %bb.nph96, label %bb3
%3 = shl i32 %packedValue, 16
%4 = ashr i32 %3, 30
%.sum = add i32 %4, 4
- %5 = getelementptr inbounds float* %table, i32 %.sum
+ %5 = getelementptr inbounds float, float* %table, i32 %.sum
;CHECK: vldr s
%6 = load float* %5, align 4
%tmp11 = insertelement <4 x float> undef, float %6, i32 0
%7 = shl i32 %packedValue, 18
%8 = ashr i32 %7, 30
%.sum12 = add i32 %8, 4
- %9 = getelementptr inbounds float* %table, i32 %.sum12
+ %9 = getelementptr inbounds float, float* %table, i32 %.sum12
;CHECK: vldr s
%10 = load float* %9, align 4
%tmp9 = insertelement <4 x float> %tmp11, float %10, i32 1
%11 = shl i32 %packedValue, 20
%12 = ashr i32 %11, 30
%.sum13 = add i32 %12, 4
- %13 = getelementptr inbounds float* %table, i32 %.sum13
+ %13 = getelementptr inbounds float, float* %table, i32 %.sum13
;CHECK: vldr s
%14 = load float* %13, align 4
%tmp7 = insertelement <4 x float> %tmp9, float %14, i32 2
%15 = shl i32 %packedValue, 22
%16 = ashr i32 %15, 30
%.sum14 = add i32 %16, 4
- %17 = getelementptr inbounds float* %table, i32 %.sum14
+ %17 = getelementptr inbounds float, float* %table, i32 %.sum14
;CHECK: vldr s
%18 = load float* %17, align 4
%tmp5 = insertelement <4 x float> %tmp7, float %18, i32 3
bb: ; preds = %bb445, %entry
%2 = load %struct.cellbox** undef, align 4 ; <%struct.cellbox*> [#uses=2]
- %3 = getelementptr inbounds %struct.cellbox* %2, i32 0, i32 3 ; <i32*> [#uses=1]
+ %3 = getelementptr inbounds %struct.cellbox, %struct.cellbox* %2, i32 0, i32 3 ; <i32*> [#uses=1]
store i32 undef, i32* %3, align 4
%4 = load i32* undef, align 4 ; <i32> [#uses=3]
%5 = icmp eq i32 undef, 1 ; <i1> [#uses=1]
bb11: ; preds = %bb10
%6 = load %struct.tilebox** undef, align 4 ; <%struct.tilebox*> [#uses=3]
%7 = load %struct.termbox** null, align 4 ; <%struct.termbox*> [#uses=1]
- %8 = getelementptr inbounds %struct.tilebox* %6, i32 0, i32 13 ; <i32*> [#uses=1]
+ %8 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %6, i32 0, i32 13 ; <i32*> [#uses=1]
%9 = load i32* %8, align 4 ; <i32> [#uses=3]
- %10 = getelementptr inbounds %struct.tilebox* %6, i32 0, i32 15 ; <i32*> [#uses=1]
+ %10 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %6, i32 0, i32 15 ; <i32*> [#uses=1]
%11 = load i32* %10, align 4 ; <i32> [#uses=1]
br i1 false, label %bb12, label %bb13
%25 = zext i1 %not.461 to i32 ; <i32> [#uses=1]
%iftmp.43.0 = add i32 %23, %iftmp.41.0.neg ; <i32> [#uses=1]
%26 = add i32 %iftmp.43.0, %25 ; <i32> [#uses=1]
- %27 = getelementptr inbounds %struct.tilebox* %6, i32 0, i32 10 ; <i32*> [#uses=1]
+ %27 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %6, i32 0, i32 10 ; <i32*> [#uses=1]
store i32 %26, i32* %27, align 4
%28 = fptosi double undef to i32 ; <i32> [#uses=1]
%iftmp.45.0 = add i32 %28, %iftmp.40.0.neg ; <i32> [#uses=1]
%iftmp.47.0 = add i32 %39, %iftmp.40.0.neg ; <i32> [#uses=1]
%40 = add i32 %iftmp.47.0, 0 ; <i32> [#uses=1]
store i32 %40, i32* undef, align 4
- %41 = getelementptr inbounds %struct.termbox* %termptr.0478, i32 0, i32 0 ; <%struct.termbox**> [#uses=1]
+ %41 = getelementptr inbounds %struct.termbox, %struct.termbox* %termptr.0478, i32 0, i32 0 ; <%struct.termbox**> [#uses=1]
%42 = load %struct.termbox** %41, align 4 ; <%struct.termbox*> [#uses=2]
%43 = icmp eq %struct.termbox* %42, null ; <i1> [#uses=1]
br i1 %43, label %bb52.loopexit, label %bb36
br i1 %45, label %bb322, label %bb249
bb249: ; preds = %bb248
- %46 = getelementptr inbounds %struct.cellbox* %2, i32 0, i32 21, i32 undef ; <%struct.tilebox**> [#uses=1]
+ %46 = getelementptr inbounds %struct.cellbox, %struct.cellbox* %2, i32 0, i32 21, i32 undef ; <%struct.tilebox**> [#uses=1]
%47 = load %struct.tilebox** %46, align 4 ; <%struct.tilebox*> [#uses=1]
- %48 = getelementptr inbounds %struct.tilebox* %47, i32 0, i32 11 ; <i32*> [#uses=1]
+ %48 = getelementptr inbounds %struct.tilebox, %struct.tilebox* %47, i32 0, i32 11 ; <i32*> [#uses=1]
store i32 undef, i32* %48, align 4
unreachable
;CHECK: bx r12 @ TAILCALL
entry:
%.loc = alloca i32 ; <i32*> [#uses=2]
- %tmp.i = getelementptr inbounds %"class.llvm::StringInit"* %this, i32 0, i32 0, i32 4 ; <i8*> [#uses=1]
+ %tmp.i = getelementptr inbounds %"class.llvm::StringInit", %"class.llvm::StringInit"* %this, i32 0, i32 0, i32 4 ; <i8*> [#uses=1]
%0 = bitcast i8* %tmp.i to %"struct.llvm::Init"** ; <%"struct.llvm::Init"**> [#uses=1]
%tmp2.i = load %"struct.llvm::Init"** %0 ; <%"struct.llvm::Init"*> [#uses=2]
%1 = icmp eq %"struct.llvm::Init"* %tmp2.i, null ; <i1> [#uses=1]
br label %return
if.then: ; preds = %tmpbb
- %tmp2.i.i.i.i = getelementptr inbounds %"class.llvm::StringInit"* %this, i32 0, i32 1, i32 0, i32 0 ; <i8**> [#uses=1]
+ %tmp2.i.i.i.i = getelementptr inbounds %"class.llvm::StringInit", %"class.llvm::StringInit"* %this, i32 0, i32 1, i32 0, i32 0 ; <i8**> [#uses=1]
%tmp3.i.i.i.i = load i8** %tmp2.i.i.i.i ; <i8*> [#uses=2]
- %arrayidx.i.i.i.i = getelementptr inbounds i8* %tmp3.i.i.i.i, i32 -12 ; <i8*> [#uses=1]
+ %arrayidx.i.i.i.i = getelementptr inbounds i8, i8* %tmp3.i.i.i.i, i32 -12 ; <i8*> [#uses=1]
%tmp.i.i.i = bitcast i8* %arrayidx.i.i.i.i to i32* ; <i32*> [#uses=1]
%tmp2.i.i.i = load i32* %tmp.i.i.i ; <i32> [#uses=1]
- %tmp.i5 = getelementptr inbounds %"class.llvm::Record"* %R, i32 0, i32 4 ; <%"class.std::vector"*> [#uses=1]
- %tmp2.i.i = getelementptr inbounds %"class.llvm::Record"* %R, i32 0, i32 4, i32 0, i32 4 ; <i8*> [#uses=1]
+ %tmp.i5 = getelementptr inbounds %"class.llvm::Record", %"class.llvm::Record"* %R, i32 0, i32 4 ; <%"class.std::vector"*> [#uses=1]
+ %tmp2.i.i = getelementptr inbounds %"class.llvm::Record", %"class.llvm::Record"* %R, i32 0, i32 4, i32 0, i32 4 ; <i8*> [#uses=1]
%4 = bitcast i8* %tmp2.i.i to %"class.llvm::RecordVal"** ; <%"class.llvm::RecordVal"**> [#uses=1]
%tmp3.i.i6 = load %"class.llvm::RecordVal"** %4 ; <%"class.llvm::RecordVal"*> [#uses=1]
%tmp5.i.i = bitcast %"class.std::vector"* %tmp.i5 to %"class.llvm::RecordVal"** ; <%"class.llvm::RecordVal"**> [#uses=1]
br label %return
_ZN4llvm6Record8getValueENS_9StringRefE.exit: ; preds = %codeRepl
- %add.ptr.i.i = getelementptr inbounds %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload ; <%"class.llvm::RecordVal"*> [#uses=2]
+ %add.ptr.i.i = getelementptr inbounds %"class.llvm::RecordVal", %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload ; <%"class.llvm::RecordVal"*> [#uses=2]
%tobool5 = icmp eq %"class.llvm::RecordVal"* %add.ptr.i.i, null ; <i1> [#uses=1]
br i1 %tobool5, label %_ZN4llvm6Record8getValueENS_9StringRefE.exit.return_crit_edge, label %if.then6
br label %return
lor.lhs.false: ; preds = %land.lhs.true
- %tmp.i3 = getelementptr inbounds %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload, i32 3 ; <%"struct.llvm::Init"**> [#uses=1]
+ %tmp.i3 = getelementptr inbounds %"class.llvm::RecordVal", %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload, i32 3 ; <%"struct.llvm::Init"**> [#uses=1]
%tmp2.i4 = load %"struct.llvm::Init"** %tmp.i3 ; <%"struct.llvm::Init"*> [#uses=2]
%5 = icmp eq %"struct.llvm::Init"* %tmp2.i4, null ; <i1> [#uses=1]
br i1 %5, label %lor.lhs.false.if.end_crit_edge, label %tmpbb1
br label %if.end
if.end: ; preds = %.if.end_crit_edge, %lor.lhs.false.if.end_crit_edge, %if.then6.if.end_crit_edge
- %tmp.i1 = getelementptr inbounds %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload, i32 3 ; <%"struct.llvm::Init"**> [#uses=1]
+ %tmp.i1 = getelementptr inbounds %"class.llvm::RecordVal", %"class.llvm::RecordVal"* %tmp6.i.i, i32 %.reload, i32 3 ; <%"struct.llvm::Init"**> [#uses=1]
%tmp2.i2 = load %"struct.llvm::Init"** %tmp.i1 ; <%"struct.llvm::Init"*> [#uses=3]
%8 = bitcast %"class.llvm::StringInit"* %this to %"struct.llvm::Init"* ; <%"struct.llvm::Init"*> [#uses=1]
%cmp19 = icmp eq %"struct.llvm::Init"* %tmp2.i2, %8 ; <i1> [#uses=1]
cond.end: ; preds = %if.end
%9 = bitcast %"struct.llvm::Init"* %tmp2.i2 to %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*** ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)***> [#uses=1]
%10 = load %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*** %9 ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**> [#uses=1]
- %vfn = getelementptr inbounds %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)** %10, i32 8 ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**> [#uses=1]
+ %vfn = getelementptr inbounds %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*, %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)** %10, i32 8 ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)**> [#uses=1]
%11 = load %"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)** %vfn ; <%"struct.llvm::Init"* (%"struct.llvm::Init"*, %"class.llvm::Record"*, %"class.llvm::RecordVal"*, %"class.std::basic_string"*)*> [#uses=1]
%call25 = tail call %"struct.llvm::Init"* %11(%"struct.llvm::Init"* %tmp2.i2, %"class.llvm::Record"* %R, %"class.llvm::RecordVal"* %RV, %"class.std::basic_string"* %FieldName) ; <%"struct.llvm::Init"*> [#uses=1]
ret %"struct.llvm::Init"* %call25
while.cond: ; preds = %while.body, %entry
%0 = phi i32 [ 0, %entry ], [ %inc, %while.body ] ; <i32> [#uses=3]
- %buf.addr.0 = getelementptr i8* %buf, i32 %0 ; <i8*> [#uses=1]
+ %buf.addr.0 = getelementptr i8, i8* %buf, i32 %0 ; <i8*> [#uses=1]
%cmp7 = icmp ult i32 %0, %nbytes.addr.0, !dbg !20 ; <i1> [#uses=1]
br i1 %cmp7, label %land.rhs, label %while.end, !dbg !20
define internal void @_ZN1AD1Ev(%struct.A* nocapture %this) nounwind ssp align 2 {
entry:
- %tmp.i = getelementptr inbounds %struct.A* %this, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.i = getelementptr inbounds %struct.A, %struct.A* %this, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp2.i = load i32* %tmp.i ; <i32> [#uses=1]
%call.i = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str4, i32 0, i32 0), i32 %tmp2.i) nounwind ; <i32> [#uses=0]
%tmp3.i = load i32* @d ; <i32> [#uses=1]
br i1 %0, label %bb, label %bb1, !dbg !27
bb: ; preds = %entry
- %1 = getelementptr inbounds %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
%2 = load i32* %1, align 8, !dbg !29 ; <i32> [#uses=1]
%3 = add i32 %2, %i, !dbg !29 ; <i32> [#uses=1]
br label %bb2, !dbg !29
bb1: ; preds = %entry
- %4 = getelementptr inbounds %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
+ %4 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
%5 = load i32* %4, align 8, !dbg !30 ; <i32> [#uses=1]
%6 = sub i32 %5, 1, !dbg !30 ; <i32> [#uses=1]
br label %bb2, !dbg !30
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.value(metadata %struct.SVal* %this, i64 0, metadata !31, metadata !{!"0x102"}), !dbg !34
- %0 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 0, !dbg !34 ; <i8**> [#uses=1]
+ %0 = getelementptr inbounds %struct.SVal, %struct.SVal* %this, i32 0, i32 0, !dbg !34 ; <i8**> [#uses=1]
store i8* null, i8** %0, align 8, !dbg !34
- %1 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 1, !dbg !34 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %this, i32 0, i32 1, !dbg !34 ; <i32*> [#uses=1]
store i32 0, i32* %1, align 8, !dbg !34
br label %return, !dbg !34
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.declare(metadata %struct.SVal* %v, metadata !38, metadata !{!"0x102"}), !dbg !41
call void @_ZN4SValC1Ev(%struct.SVal* %v) nounwind, !dbg !41
- %1 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 1, !dbg !42 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !42 ; <i32*> [#uses=1]
store i32 1, i32* %1, align 8, !dbg !42
- %2 = getelementptr inbounds %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
- %3 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
+ %3 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
%4 = load i8** %3, align 8, !dbg !43 ; <i8*> [#uses=1]
store i8* %4, i8** %2, align 8, !dbg !43
- %5 = getelementptr inbounds %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
- %6 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
+ %5 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
+ %6 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
%7 = load i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
store i32 %7, i32* %5, align 8, !dbg !43
%8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
presymmetry.exit: ; preds = %bb28.i
%tmp175387 = or i32 undef, 12
- %scevgep101.i = getelementptr i32* %in, i32 undef
+ %scevgep101.i = getelementptr i32, i32* %in, i32 undef
%tmp189401 = or i32 undef, 7
- %scevgep97.i = getelementptr i32* %in, i32 undef
+ %scevgep97.i = getelementptr i32, i32* %in, i32 undef
%tmp198410 = or i32 undef, 1
- %scevgep.i48 = getelementptr i32* %in, i32 undef
+ %scevgep.i48 = getelementptr i32, i32* %in, i32 undef
%0 = load i32* %scevgep.i48, align 4
%1 = add nsw i32 %0, 0
store i32 %1, i32* undef, align 4
define i32 @main(i32 %argc) nounwind {
%1 = load i32* @sum, align 4
- %2 = getelementptr [80 x i8]* @array00, i32 0, i32 %argc
+ %2 = getelementptr [80 x i8], [80 x i8]* @array00, i32 0, i32 %argc
%3 = load i8* %2
%4 = zext i8 %3 to i32
%5 = add i32 %1, %4
br label %bb
bb:
- %p.2 = getelementptr [8096 x i8]* %buf, i32 0, i32 0
+ %p.2 = getelementptr [8096 x i8], [8096 x i8]* %buf, i32 0, i32 0
store i8 undef, i8* %p.2, align 1
ret void
}
br i1 %tst, label %bb46, label %bb8
bb8: ; preds = %bb3
- %1 = getelementptr inbounds i8* %0, i32 0
+ %1 = getelementptr inbounds i8, i8* %0, i32 0
store i8 0, i8* %1, align 1
%2 = call i32 @ptou() nounwind
; CHECK: umull [[REGISTER:lr|r[0-9]+]],
%.phi24 = load i8* null
%.phi26 = load i8** null
store i8 %.phi24, i8* %.phi26, align 1
- %0 = getelementptr inbounds i8* %.phi26, i32 1
+ %0 = getelementptr inbounds i8, i8* %.phi26, i32 1
store i8* %0, i8** %.load120, align 4
; CHECK: mul [[REGISTER:lr|r[0-9]+]],
; CHECK-NOT: [[REGISTER]],
bb1: ; preds = %entry
%0 = call %struct.ui* @vn_pp_to_ui(i32* undef) nounwind
call void @llvm.memset.p0i8.i32(i8* undef, i8 0, i32 40, i32 4, i1 false)
- %1 = getelementptr inbounds %struct.ui* %0, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.ui, %struct.ui* %0, i32 0, i32 0
store %struct.mo* undef, %struct.mo** %1, align 4
- %2 = getelementptr inbounds %struct.ui* %0, i32 0, i32 5
+ %2 = getelementptr inbounds %struct.ui, %struct.ui* %0, i32 0, i32 5
%3 = load i64* %2, align 4
%4 = call i32 @mo_create_nnm(%struct.mo* undef, i64 %3, i32** undef) nounwind
br i1 undef, label %bb3, label %bb2
for.body: ; preds = %_Z14printIsNotZeroi.exit17.for.body_crit_edge, %for.body.lr.ph
%tmp3 = phi i1 [ false, %for.body.lr.ph ], [ %phitmp27, %_Z14printIsNotZeroi.exit17.for.body_crit_edge ]
%i.022 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %_Z14printIsNotZeroi.exit17.for.body_crit_edge ]
- %x = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 0
- %y = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 1
+ %x = getelementptr %struct.Outer, %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 0
+ %y = getelementptr %struct.Outer, %struct.Outer* @oStruct, i32 0, i32 1, i32 %i.022, i32 1
%inc = add i32 %i.022, 1
%tmp8 = load i32* %x, align 4
%tmp11 = load i32* %y, align 4
br label %_Z14printIsNotZeroi.exit17.for.body_crit_edge
_Z14printIsNotZeroi.exit17.for.body_crit_edge: ; preds = %_Z14printIsNotZeroi.exit17
- %b.phi.trans.insert = getelementptr %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
+ %b.phi.trans.insert = getelementptr %struct.Outer, %struct.Outer* @oStruct, i32 0, i32 1, i32 %inc, i32 3
%tmp3.pre = load i8* %b.phi.trans.insert, align 1
%phitmp27 = icmp eq i8 %val8, 0
br label %for.body
bb: ; preds = %entry, %bb
%j.05 = phi i32 [ %2, %bb ], [ 0, %entry ]
%tmp = mul i32 %j.05, %src_copy_start_index
- %uglygep = getelementptr i8* %src_copy_start6, i32 %tmp
+ %uglygep = getelementptr i8, i8* %src_copy_start6, i32 %tmp
%src_copy_start_addr.04 = bitcast i8* %uglygep to float*
- %dst_copy_start_addr.03 = getelementptr float* %dst_copy_start, i32 %j.05
+ %dst_copy_start_addr.03 = getelementptr float, float* %dst_copy_start, i32 %j.05
%1 = load float* %src_copy_start_addr.04, align 4
store float %1, float* %dst_copy_start_addr.03, align 4
%2 = add i32 %j.05, 1
; CHECK: mov{{.*}} r{{[0-9]+}}, #{{[01]}}
; CHECK: mov{{.*}} r{{[0-9]+}}, #{{[01]}}
; CHECK-NOT: mov r{{[0-9]+}}, #{{[01]}}
- %arrayidx = getelementptr i32* %A, i32 %0
+ %arrayidx = getelementptr i32, i32* %A, i32 %0
%tmp4 = load i32* %arrayidx, align 4
%cmp6 = icmp eq i32 %tmp4, %value
br i1 %cmp6, label %return, label %for.inc
bb.i: ; preds = %bb5.i
%1 = shl nsw i32 %k_addr.0.i, 1
%.sum8.i = add i32 %1, -1
- %2 = getelementptr inbounds [256 x i32]* %heap, i32 0, i32 %.sum8.i
+ %2 = getelementptr inbounds [256 x i32], [256 x i32]* %heap, i32 0, i32 %.sum8.i
%3 = load i32* %2, align 4
br i1 false, label %bb5.i, label %bb4.i
bb4.i: ; preds = %bb.i
%.sum10.i = add i32 %k_addr.0.i, -1
- %4 = getelementptr inbounds [256 x i32]* %heap, i32 0, i32 %.sum10.i
+ %4 = getelementptr inbounds [256 x i32], [256 x i32]* %heap, i32 0, i32 %.sum10.i
store i32 %3, i32* %4, align 4
br label %bb5.i
%0 = call i32 (...)* @get_index(i8* %.T0348, i32 0)
%1 = bitcast i16* %destValues to i8*
%2 = mul i32 %0, 6
- %3 = getelementptr i8* %1, i32 %2
+ %3 = getelementptr i8, i8* %1, i32 %2
%4 = bitcast i8* %3 to <3 x i16>*
%5 = load <3 x i16>* %4, align 1
%6 = bitcast i16* %sourceA to i8*
- %7 = getelementptr i8* %6, i32 %2
+ %7 = getelementptr i8, i8* %6, i32 %2
%8 = bitcast i8* %7 to <3 x i16>*
%9 = load <3 x i16>* %8, align 1
%10 = or <3 x i16> %9, %5
define void @Compute_Axis_Rotation_Transform(%struct.Transform_Struct.0.11.12.17.43.46.56.58.60* nocapture %transform, double* nocapture %V1, double %angle) nounwind {
entry:
store double 1.000000e+00, double* null, align 4
- %arrayidx5.1.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 0, i32 1
+ %arrayidx5.1.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60, %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 0, i32 1
store double 0.000000e+00, double* %arrayidx5.1.i, align 4
- %arrayidx5.2.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 0, i32 2
+ %arrayidx5.2.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60, %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 0, i32 2
store double 0.000000e+00, double* %arrayidx5.2.i, align 4
- %arrayidx5.114.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 1, i32 0
+ %arrayidx5.114.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60, %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 1, i32 0
store double 0.000000e+00, double* %arrayidx5.114.i, align 4
- %arrayidx5.1.1.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 1, i32 1
+ %arrayidx5.1.1.i = getelementptr inbounds %struct.Transform_Struct.0.11.12.17.43.46.56.58.60, %struct.Transform_Struct.0.11.12.17.43.46.56.58.60* %transform, i32 0, i32 0, i32 1, i32 1
store double 1.000000e+00, double* %arrayidx5.1.1.i, align 4
store double 0.000000e+00, double* null, align 4
store double 1.000000e+00, double* null, align 4
%call = tail call double @cos(double %angle) nounwind readnone
%call1 = tail call double @sin(double %angle) nounwind readnone
%0 = load double* %V1, align 4
- %arrayidx2 = getelementptr inbounds double* %V1, i32 1
+ %arrayidx2 = getelementptr inbounds double, double* %V1, i32 1
%1 = load double* %arrayidx2, align 4
%mul = fmul double %0, %1
%sub = fsub double 1.000000e+00, %call
%6 = phi i8* [ %19, %5 ], [ %0, %1 ]
%7 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* null, i32 1)
%8 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %7, 0
- %9 = getelementptr inbounds i8* null, i32 3
+ %9 = getelementptr inbounds i8, i8* null, i32 3
%10 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %9, i32 1)
%11 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %10, 2
%12 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %6, i32 1)
%13 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %12, 0
%14 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %12, 1
- %15 = getelementptr inbounds i8* %6, i32 3
+ %15 = getelementptr inbounds i8, i8* %6, i32 3
%16 = tail call { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8* %15, i32 1)
%17 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %16, 1
%18 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8> } %16, 2
- %19 = getelementptr inbounds i8* %6, i32 48
+ %19 = getelementptr inbounds i8, i8* %6, i32 48
%20 = bitcast <16 x i8> %13 to <2 x i64>
%21 = bitcast <16 x i8> %8 to <2 x i64>
%22 = bitcast <16 x i8> %14 to <2 x i64>
; CHECK-NOT: str r1
define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval %val) nounwind {
entry:
- %a = getelementptr inbounds %struct_t* %val, i32 0, i32 0
+ %a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0
%0 = load double* %a
call void (double)* @f(double %0)
ret void
; CHECK: vldr d16, [sp, #8]
define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval %val) nounwind {
entry:
- %a = getelementptr inbounds %struct_t* %val, i32 0, i32 0
+ %a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0
%0 = load double* %a
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), double %0)
ret void
define i32 @foo(i32* %a) nounwind optsize {
entry:
%0 = load i32* %a, align 4
- %arrayidx1 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 1
%1 = load i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 2
%2 = load i32* %arrayidx2, align 4
- %add.ptr = getelementptr inbounds i32* %a, i32 3
+ %add.ptr = getelementptr inbounds i32, i32* %a, i32 3
;Make sure we do not have a duplicated register in the front of the reg list
;EXPECTED: ldm [[BASE:r[0-9]+]]!, {[[REG:r[0-9]+]], {{r[0-9]+}},
;CHECK-NOT: ldm [[BASE:r[0-9]+]]!, {[[REG:r[0-9]+]], [[REG]],
; Load %source
%s0 = load <8 x i64> * %source, align 64
- %arrayidx64 = getelementptr inbounds <8 x i64> * %source, i32 6
+ %arrayidx64 = getelementptr inbounds <8 x i64>, <8 x i64> * %source, i32 6
%s120 = load <8 x i64> * %arrayidx64, align 64
%s122 = bitcast <8 x i64> %s120 to i512
%data.i.i677.48.extract.shift = lshr i512 %s122, 384
; Load %secondSource
%s1 = load <8 x i64> * %secondSource, align 64
- %arrayidx67 = getelementptr inbounds <8 x i64> * %secondSource, i32 6
+ %arrayidx67 = getelementptr inbounds <8 x i64>, <8 x i64> * %secondSource, i32 6
%s121 = load <8 x i64> * %arrayidx67, align 64
%s131 = bitcast <8 x i64> %s121 to i512
%data.i1.i676.48.extract.shift = lshr i512 %s131, 384
%vecinit35.i.i700 = shufflevector <8 x i64> %vecinit28.i.i699, <8 x i64> %s139, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 13, i32 undef, i32 undef>
%vecinit42.i.i701 = shufflevector <8 x i64> %vecinit35.i.i700, <8 x i64> %s139, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 undef>
%vecinit49.i.i702 = shufflevector <8 x i64> %vecinit42.i.i701, <8 x i64> %s130, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 15>
- %arrayidx72 = getelementptr inbounds <8 x i64> * %dest, i32 6
+ %arrayidx72 = getelementptr inbounds <8 x i64>, <8 x i64> * %dest, i32 6
store <8 x i64> %vecinit49.i.i702, <8 x i64> * %arrayidx72, align 64
- %arrayidx78 = getelementptr inbounds <8 x i64> * %secondSource, i32 7
+ %arrayidx78 = getelementptr inbounds <8 x i64>, <8 x i64> * %secondSource, i32 7
%s141 = load <8 x i64> * %arrayidx78, align 64
%s151 = bitcast <8 x i64> %s141 to i512
%data.i1.i649.32.extract.shift = lshr i512 %s151, 256
%data.i1.i649.8.extract.shift = lshr i512 %s151, 64
%data.i1.i649.8.extract.trunc = trunc i512 %data.i1.i649.8.extract.shift to i64
%s155 = insertelement <8 x i64> %s154, i64 %data.i1.i649.8.extract.trunc, i32 3
- %arrayidx83 = getelementptr inbounds <8 x i64> * %dest, i32 7
+ %arrayidx83 = getelementptr inbounds <8 x i64>, <8 x i64> * %dest, i32 7
store <8 x i64> %s155, <8 x i64> * %arrayidx83, align 64
ret void
}
%add = add nsw i32 %mul17, %w
%sub19 = sub i32 %add, %Width
%sub20 = add i32 %sub19, -1
- %arrayidx21 = getelementptr inbounds i8* %call1, i32 %sub20
+ %arrayidx21 = getelementptr inbounds i8, i8* %call1, i32 %sub20
%0 = load i8* %arrayidx21, align 1
%conv22 = zext i8 %0 to i32
- %arrayidx25 = getelementptr inbounds i8* %call1, i32 %sub19
+ %arrayidx25 = getelementptr inbounds i8, i8* %call1, i32 %sub19
%1 = load i8* %arrayidx25, align 1
%conv26 = zext i8 %1 to i32
%mul23189 = add i32 %conv26, %conv22
%add30 = add i32 %sub19, 1
- %arrayidx31 = getelementptr inbounds i8* %call1, i32 %add30
+ %arrayidx31 = getelementptr inbounds i8, i8* %call1, i32 %add30
%2 = load i8* %arrayidx31, align 1
%conv32 = zext i8 %2 to i32
; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
; CHECK-NEXT: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #1]
%add28190 = add i32 %mul23189, %conv32
%sub35 = add i32 %add, -1
- %arrayidx36 = getelementptr inbounds i8* %call1, i32 %sub35
+ %arrayidx36 = getelementptr inbounds i8, i8* %call1, i32 %sub35
%3 = load i8* %arrayidx36, align 1
%conv37 = zext i8 %3 to i32
%add34191 = add i32 %add28190, %conv37
- %arrayidx40 = getelementptr inbounds i8* %call1, i32 %add
+ %arrayidx40 = getelementptr inbounds i8, i8* %call1, i32 %add
%4 = load i8* %arrayidx40, align 1
%conv41 = zext i8 %4 to i32
%mul42 = mul nsw i32 %conv41, 255
%add44 = add i32 %add, 1
- %arrayidx45 = getelementptr inbounds i8* %call1, i32 %add44
+ %arrayidx45 = getelementptr inbounds i8, i8* %call1, i32 %add44
%5 = load i8* %arrayidx45, align 1
%conv46 = zext i8 %5 to i32
; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
; CHECK-NEXT: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #1]
%add49 = add i32 %add, %Width
%sub50 = add i32 %add49, -1
- %arrayidx51 = getelementptr inbounds i8* %call1, i32 %sub50
+ %arrayidx51 = getelementptr inbounds i8, i8* %call1, i32 %sub50
%6 = load i8* %arrayidx51, align 1
%conv52 = zext i8 %6 to i32
- %arrayidx56 = getelementptr inbounds i8* %call1, i32 %add49
+ %arrayidx56 = getelementptr inbounds i8, i8* %call1, i32 %add49
%7 = load i8* %arrayidx56, align 1
%conv57 = zext i8 %7 to i32
%add61 = add i32 %add49, 1
- %arrayidx62 = getelementptr inbounds i8* %call1, i32 %add61
+ %arrayidx62 = getelementptr inbounds i8, i8* %call1, i32 %add61
%8 = load i8* %arrayidx62, align 1
%conv63 = zext i8 %8 to i32
; CHECK: ldrb r{{[0-9]*}}, [r{{[0-9]*}}, #-1]
%add65 = add i32 %tmp196, %mul42
%9 = lshr i32 %add65, 8
%conv68 = trunc i32 %9 to i8
- %arrayidx69 = getelementptr inbounds i8* %call3, i32 %add
+ %arrayidx69 = getelementptr inbounds i8, i8* %call3, i32 %add
store i8 %conv68, i8* %arrayidx69, align 1
ret i8 %conv68
}
; CHECK-NOT: str r[[REG:[0-9]+]], [r[[REG]]], #4
%val = ptrtoint i32* %addr to i32
store i32 %val, i32* %addr
- %new = getelementptr i32* %addr, i32 1
+ %new = getelementptr i32, i32* %addr, i32 1
ret i32* %new
}
%val = ptrtoint i16* %addr to i32
%tr = trunc i32 %val to i16
store i16 %tr, i16* %addr
- %new = getelementptr i16* %addr, i32 1
+ %new = getelementptr i16, i16* %addr, i32 1
ret i16* %new
}
%val = ptrtoint i8* %addr to i32
%tr = trunc i32 %val to i8
store i8 %tr, i8* %addr
- %new = getelementptr i8* %addr, i32 1
+ %new = getelementptr i8, i8* %addr, i32 1
ret i8* %new
}
; Offsets less than 8 can be generated in a single add
; CHECK: adds [[NEWBASE:r[0-9]]], r0, #4
- %1 = getelementptr inbounds i32* %p, i32 1
- %2 = getelementptr inbounds i32* %p, i32 2
- %3 = getelementptr inbounds i32* %p, i32 3
- %4 = getelementptr inbounds i32* %p, i32 4
+ %1 = getelementptr inbounds i32, i32* %p, i32 1
+ %2 = getelementptr inbounds i32, i32* %p, i32 2
+ %3 = getelementptr inbounds i32, i32* %p, i32 3
+ %4 = getelementptr inbounds i32, i32* %p, i32 4
; CHECK-NEXT: ldm [[NEWBASE]],
%5 = load i32* %1, align 4
; CHECK-V4T: movs [[NEWBASE:r[0-9]]], r0
; CHECK-V6M: mov [[NEWBASE:r[0-9]]], r0
; CHECK-NEXT: adds [[NEWBASE]], #8
- %1 = getelementptr inbounds i32* %p, i32 2
- %2 = getelementptr inbounds i32* %p, i32 3
- %3 = getelementptr inbounds i32* %p, i32 4
- %4 = getelementptr inbounds i32* %p, i32 5
+ %1 = getelementptr inbounds i32, i32* %p, i32 2
+ %2 = getelementptr inbounds i32, i32* %p, i32 3
+ %3 = getelementptr inbounds i32, i32* %p, i32 4
+ %4 = getelementptr inbounds i32, i32* %p, i32 5
; CHECK-NEXT: ldm [[NEWBASE]],
%5 = load i32* %1, align 4
%.09 = phi i32 [ %n, %0 ], [ %11, %1 ]
%.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
%.0 = phi i32* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i32* %.0, i32 1
+ %2 = getelementptr inbounds i32, i32* %.0, i32 1
%3 = load i32* %.0, align 1
- %4 = getelementptr inbounds i8* %c, i32 %3
+ %4 = getelementptr inbounds i8, i8* %c, i32 %3
%5 = load i8* %4, align 1
%6 = add i32 %3, 1
- %7 = getelementptr inbounds i8* %c, i32 %6
+ %7 = getelementptr inbounds i8, i8* %c, i32 %6
%8 = load i8* %7, align 1
store i8 %5, i8* %.08, align 1
- %9 = getelementptr inbounds i8* %.08, i32 1
+ %9 = getelementptr inbounds i8, i8* %.08, i32 1
store i8 %8, i8* %9, align 1
- %10 = getelementptr inbounds i8* %.08, i32 2
+ %10 = getelementptr inbounds i8, i8* %.08, i32 2
%11 = add nsw i32 %.09, -1
%12 = icmp eq i32 %11, 0
br i1 %12, label %13, label %1
%.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i8* %.0, i32 1
+ %2 = getelementptr inbounds i8, i8* %.0, i32 1
%3 = load i8* %.0, align 1
%4 = sext i8 %3 to i32
- %5 = getelementptr inbounds i8* %c, i32 %4
+ %5 = getelementptr inbounds i8, i8* %c, i32 %4
%6 = load i8* %5, align 1
%7 = add i32 %4, 1
- %8 = getelementptr inbounds i8* %c, i32 %7
+ %8 = getelementptr inbounds i8, i8* %c, i32 %7
%9 = load i8* %8, align 1
store i8 %6, i8* %.08, align 1
- %10 = getelementptr inbounds i8* %.08, i32 1
+ %10 = getelementptr inbounds i8, i8* %.08, i32 1
store i8 %9, i8* %10, align 1
- %11 = getelementptr inbounds i8* %.08, i32 2
+ %11 = getelementptr inbounds i8, i8* %.08, i32 2
%12 = add nsw i32 %.09, -1
%13 = icmp eq i32 %12, 0
br i1 %13, label %14, label %1
%.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i8* %.0, i32 1
+ %2 = getelementptr inbounds i8, i8* %.0, i32 1
%3 = load i8* %.0, align 1
%4 = sext i8 %3 to i32
- %5 = getelementptr inbounds i8* %c, i32 %4
+ %5 = getelementptr inbounds i8, i8* %c, i32 %4
%6 = load i8* %5, align 1
%7 = add i8 %3, 1
%wrap.4 = sext i8 %7 to i32
- %8 = getelementptr inbounds i8* %c, i32 %wrap.4
+ %8 = getelementptr inbounds i8, i8* %c, i32 %wrap.4
%9 = load i8* %8, align 1
store i8 %6, i8* %.08, align 1
- %10 = getelementptr inbounds i8* %.08, i32 1
+ %10 = getelementptr inbounds i8, i8* %.08, i32 1
store i8 %9, i8* %10, align 1
- %11 = getelementptr inbounds i8* %.08, i32 2
+ %11 = getelementptr inbounds i8, i8* %.08, i32 2
%12 = add nsw i32 %.09, -1
%13 = icmp eq i32 %12, 0
br i1 %13, label %14, label %1
store i32 %i, i32* %i.addr, align 4
%0 = load i32* %i.addr, align 4
%rem = urem i32 %0, 4096
- %arrayidx = getelementptr inbounds [4096 x i8]* %buffer, i32 0, i32 %rem
+ %arrayidx = getelementptr inbounds [4096 x i8], [4096 x i8]* %buffer, i32 0, i32 %rem
%1 = load volatile i8* %arrayidx, align 1
ret i8 %1
}
define dllexport arm_aapcs_vfpcc signext i8 @function(i32 %offset) #0 {
entry:
%buffer = alloca [4096 x i8], align 1
- %0 = getelementptr inbounds [4096 x i8]* %buffer, i32 0, i32 0
+ %0 = getelementptr inbounds [4096 x i8], [4096 x i8]* %buffer, i32 0, i32 0
call arm_aapcs_vfpcc void @initialise(i8* %0)
- %arrayidx = getelementptr inbounds [4096 x i8]* %buffer, i32 0, i32 %offset
+ %arrayidx = getelementptr inbounds [4096 x i8], [4096 x i8]* %buffer, i32 0, i32 %offset
%1 = load i8* %arrayidx, align 1
ret i8 %1
}
define arm_aapcs_vfpcc i8 @function(i32 %sz, i32 %idx) {
entry:
%vla = alloca i8, i32 %sz, align 1
- %arrayidx = getelementptr inbounds i8* %vla, i32 %idx
+ %arrayidx = getelementptr inbounds i8, i8* %vla, i32 %idx
%0 = load volatile i8* %arrayidx, align 1
ret i8 %0
}
; CHECK: vmov.{{.*}} d{{[0-9]+}},
%oldcount = phi i32 [0, %entry], [%newcount, %loop]
%newcount = add i32 %oldcount, 1
- %p1 = getelementptr <4 x i8> *%in, i32 %newcount
- %p2 = getelementptr <4 x i8> *%out, i32 %newcount
+ %p1 = getelementptr <4 x i8>, <4 x i8> *%in, i32 %newcount
+ %p2 = getelementptr <4 x i8>, <4 x i8> *%out, i32 %newcount
%tmp1 = load <4 x i8> *%p1, align 4
store <4 x i8> %tmp1, <4 x i8> *%p2
%cmp = icmp eq i32 %newcount, %n
; T2: foo
define %struct.Foo* @foo(%struct.Foo* %this, i32 %acc) nounwind readonly align 2 {
entry:
- %scevgep = getelementptr %struct.Foo* %this, i32 1
+ %scevgep = getelementptr %struct.Foo, %struct.Foo* %this, i32 1
br label %tailrecurse
tailrecurse: ; preds = %sw.bb, %entry
%lsr.iv = phi i32 [ %lsr.iv.next, %sw.bb ], [ 1, %entry ]
%acc.tr = phi i32 [ %or, %sw.bb ], [ %acc, %entry ]
%lsr.iv24 = bitcast %struct.Foo* %lsr.iv2 to i8**
- %scevgep5 = getelementptr i8** %lsr.iv24, i32 -1
+ %scevgep5 = getelementptr i8*, i8** %lsr.iv24, i32 -1
%tmp2 = load i8** %scevgep5
%0 = ptrtoint i8* %tmp2 to i32
%shl = shl i32 %acc.tr, 1
%or = or i32 %and, %shl
%lsr.iv.next = add i32 %lsr.iv, 1
- %scevgep3 = getelementptr %struct.Foo* %lsr.iv2, i32 1
+ %scevgep3 = getelementptr %struct.Foo, %struct.Foo* %lsr.iv2, i32 1
br label %tailrecurse
sw.bb6: ; preds = %tailrecurse.switch
sw.bb8: ; preds = %tailrecurse.switch
%tmp1 = add i32 %acc.tr, %lsr.iv
- %add.ptr11 = getelementptr inbounds %struct.Foo* %this, i32 %tmp1
+ %add.ptr11 = getelementptr inbounds %struct.Foo, %struct.Foo* %this, i32 %tmp1
ret %struct.Foo* %add.ptr11
sw.epilog: ; preds = %tailrecurse.switch
; V8-LABEL: bar:
define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
entry:
- %0 = getelementptr inbounds %struct.S* %x, i32 0, i32 1, i32 0
+ %0 = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1, i32 0
%1 = load i8* %0, align 1
%2 = zext i8 %1 to i32
; ARM: ands
bb: ; preds = %entry
; V8-NEXT: %bb
- %5 = getelementptr inbounds %struct.S* %y, i32 0, i32 1, i32 0
+ %5 = getelementptr inbounds %struct.S, %struct.S* %y, i32 0, i32 1, i32 0
%6 = load i8* %5, align 1
%7 = zext i8 %6 to i32
; ARM: andsne
bb: ; preds = %bb, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%i_addr.09.0 = sub i32 %i, %indvar ; <i32> [#uses=1]
- %tmp2 = getelementptr i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr i32, i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
store i32 %A, i32* %tmp2
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
icmp eq i32 %indvar.next, %i ; <i1>:1 [#uses=1]
bb: ; preds = %bb, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%i_addr.09.0 = sub i32 %i, %indvar ; <i32> [#uses=1]
- %tmp2 = getelementptr i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr i32, i32* %P, i32 %i_addr.09.0 ; <i32*> [#uses=1]
store i32 %A, i32* %tmp2
store i32 %indvar, i32* null
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ]
%ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ]
%0 = load i32* %ptr1.addr.09, align 4
- %arrayidx1 = getelementptr inbounds i32* %ptr1.addr.09, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 1
%1 = load i32* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds i32* %ptr1.addr.09, i32 2
+ %arrayidx3 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 2
%2 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %ptr1.addr.09, i32 3
+ %arrayidx4 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 3
%3 = load i32* %arrayidx4, align 4
- %add.ptr = getelementptr inbounds i32* %ptr1.addr.09, i32 4
+ %add.ptr = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 4
%mul = mul i32 %1, %0
%mul5 = mul i32 %mul, %2
%mul6 = mul i32 %mul5, %3
store i32 %mul6, i32* %ptr2.addr.08, align 4
- %incdec.ptr = getelementptr inbounds i32* %ptr2.addr.08, i32 -1
+ %incdec.ptr = getelementptr inbounds i32, i32* %ptr2.addr.08, i32 -1
%tobool = icmp eq i32* %incdec.ptr, null
br i1 %tobool, label %while.end, label %while.body
%ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ]
%ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ]
%0 = load i32* %ptr1.addr.09, align 4
- %arrayidx1 = getelementptr inbounds i32* %ptr1.addr.09, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 1
%1 = load i32* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds i32* %ptr1.addr.09, i32 2
+ %arrayidx3 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 2
%2 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %ptr1.addr.09, i32 3
+ %arrayidx4 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 3
%3 = load i32* %arrayidx4, align 4
- %add.ptr = getelementptr inbounds i32* %ptr1.addr.09, i32 4
+ %add.ptr = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 4
%mul = mul i32 %1, %0
%mul5 = mul i32 %mul, %2
%mul6 = mul i32 %mul5, %3
store i32 %mul6, i32* %ptr2.addr.08, align 4
- %incdec.ptr = getelementptr inbounds i32* %ptr2.addr.08, i32 -1
+ %incdec.ptr = getelementptr inbounds i32, i32* %ptr2.addr.08, i32 -1
%tobool = icmp eq i32* %incdec.ptr, null
br i1 %tobool, label %while.end, label %while.body
; CHECK-NOT: movs
%0 = load double* %q, align 4
%cmp = fcmp olt double %0, 1.000000e+01
- %incdec.ptr1 = getelementptr inbounds i32* %p, i32 1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %p, i32 1
br i1 %cmp, label %if.then, label %if.else
if.then:
store i32 7, i32* %p, align 4
- %incdec.ptr2 = getelementptr inbounds i32* %p, i32 2
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %p, i32 2
store i32 8, i32* %incdec.ptr1, align 4
store i32 9, i32* %incdec.ptr2, align 4
br label %if.end
if.else:
store i32 3, i32* %p, align 4
- %incdec.ptr5 = getelementptr inbounds i32* %p, i32 2
+ %incdec.ptr5 = getelementptr inbounds i32, i32* %p, i32 2
store i32 5, i32* %incdec.ptr1, align 4
store i32 6, i32* %incdec.ptr5, align 4
br label %if.end
%and1 = lshr i32 %x, 16
%shr2 = and i32 %and1, 255
%shr4 = lshr i32 %x, 24
- %arrayidx = getelementptr inbounds i32* %ctx, i32 %shr4
+ %arrayidx = getelementptr inbounds i32, i32* %ctx, i32 %shr4
%0 = load i32* %arrayidx, align 4
- %arrayidx5 = getelementptr inbounds i32* %ctx, i32 %shr2
+ %arrayidx5 = getelementptr inbounds i32, i32* %ctx, i32 %shr2
%1 = load i32* %arrayidx5, align 4
%add = add i32 %1, %0
- %arrayidx6 = getelementptr inbounds i32* %ctx, i32 %shr
+ %arrayidx6 = getelementptr inbounds i32, i32* %ctx, i32 %shr
%2 = load i32* %arrayidx6, align 4
%add7 = add i32 %add, %2
ret i32 %add7
bb: ; preds = %bb1
%gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr i8* %L, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
+ %tmp7 = getelementptr i8, i8* %L, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
store i8 0, i8* %tmp7
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
br label %bb1
%5 = inttoptr i32 %0 to i32* ; <i32*> [#uses=1]
%t35 = load volatile i32* %5 ; <i32> [#uses=1]
%6 = inttoptr i32 %t35 to i32** ; <i32**> [#uses=1]
- %7 = getelementptr i32** %6, i32 86 ; <i32**> [#uses=1]
+ %7 = getelementptr i32*, i32** %6, i32 86 ; <i32**> [#uses=1]
%8 = load i32** %7 ; <i32*> [#uses=1]
%9 = bitcast i32* %8 to i32* (i32, i32*, i32, i32*, i32*, i32*)* ; <i32* (i32, i32*, i32, i32*, i32*, i32*)*> [#uses=1]
%10 = call i32* %9(i32 %0, i32* null, i32 %1, i32* %2, i32* %3, i32* %4) ; <i32*> [#uses=1]
bb115.i.i: ; preds = %bb115.i.i.bb115.i.i_crit_edge, %newFuncRoot
%i_addr.3210.0.i.i = phi i32 [ %tmp166.i.i, %bb115.i.i.bb115.i.i_crit_edge ], [ 0, %newFuncRoot ] ; <i32> [#uses=7]
- %tmp124.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 0 ; <i32*> [#uses=1]
+ %tmp124.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 0 ; <i32*> [#uses=1]
%tmp125.i.i = load i32* %tmp124.i.i ; <i32> [#uses=1]
- %tmp126.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp125.i.i ; <i32*> [#uses=1]
+ %tmp126.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp125.i.i ; <i32*> [#uses=1]
%tmp127.i.i = load i32* %tmp126.i.i ; <i32> [#uses=1]
- %tmp131.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 1 ; <i32*> [#uses=1]
+ %tmp131.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 1 ; <i32*> [#uses=1]
%tmp132.i.i = load i32* %tmp131.i.i ; <i32> [#uses=1]
- %tmp133.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp132.i.i ; <i32*> [#uses=1]
+ %tmp133.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp132.i.i ; <i32*> [#uses=1]
%tmp134.i.i = load i32* %tmp133.i.i ; <i32> [#uses=1]
- %tmp138.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 2 ; <i32*> [#uses=1]
+ %tmp138.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 1, i32 2 ; <i32*> [#uses=1]
%tmp139.i.i = load i32* %tmp138.i.i ; <i32> [#uses=1]
- %tmp140.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp139.i.i ; <i32*> [#uses=1]
+ %tmp140.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp139.i.i ; <i32*> [#uses=1]
%tmp141.i.i = load i32* %tmp140.i.i ; <i32> [#uses=1]
%tmp143.i.i = add i32 %i_addr.3210.0.i.i, 12 ; <i32> [#uses=1]
- %tmp146.i.i = getelementptr [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 0 ; <i32*> [#uses=1]
+ %tmp146.i.i = getelementptr [2 x { i32, [3 x i32] }], [2 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %i_addr.3210.0.i.i, i32 0 ; <i32*> [#uses=1]
%tmp147.i.i = load i32* %tmp146.i.i ; <i32> [#uses=1]
- %tmp149.i.i = getelementptr [13 x %struct.anon]* @isa, i32 0, i32 %tmp147.i.i, i32 0 ; <i32 (i32, i32, i32)**> [#uses=1]
+ %tmp149.i.i = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 %tmp147.i.i, i32 0 ; <i32 (i32, i32, i32)**> [#uses=1]
%tmp150.i.i = load i32 (i32, i32, i32)** %tmp149.i.i ; <i32 (i32, i32, i32)*> [#uses=1]
%tmp154.i.i = tail call i32 %tmp150.i.i( i32 %tmp127.i.i, i32 %tmp134.i.i, i32 %tmp141.i.i ) ; <i32> [#uses=1]
- %tmp155.i.i = getelementptr [14 x i32]* @r, i32 0, i32 %tmp143.i.i ; <i32*> [#uses=1]
+ %tmp155.i.i = getelementptr [14 x i32], [14 x i32]* @r, i32 0, i32 %tmp143.i.i ; <i32*> [#uses=1]
store i32 %tmp154.i.i, i32* %tmp155.i.i
- %tmp159.i.i = getelementptr [2 x i32]* @counter, i32 0, i32 %i_addr.3210.0.i.i ; <i32*> [#uses=2]
+ %tmp159.i.i = getelementptr [2 x i32], [2 x i32]* @counter, i32 0, i32 %i_addr.3210.0.i.i ; <i32*> [#uses=2]
%tmp160.i.i = load i32* %tmp159.i.i ; <i32> [#uses=1]
%tmp161.i.i = add i32 %tmp160.i.i, 1 ; <i32> [#uses=1]
store i32 %tmp161.i.i, i32* %tmp159.i.i
%0 = bitcast float* %p to i8*
%vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
%vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
- %add.ptr = getelementptr inbounds float* %p, i32 8
+ %add.ptr = getelementptr inbounds float, float* %p, i32 8
%1 = bitcast float* %add.ptr to i8*
tail call void @llvm.arm.neon.vst2.v4f32(i8* %1, <4 x float> %vld221, <4 x float> undef, i32 4)
ret void
%0 = bitcast float* %p to i8*
%vld2 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %0, i32 4)
%vld221 = extractvalue { <4 x float>, <4 x float> } %vld2, 1
- %add.ptr = getelementptr inbounds float* %p, i32 8
+ %add.ptr = getelementptr inbounds float, float* %p, i32 8
%1 = bitcast float* %add.ptr to i8*
%vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
%vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
%qq0.0.1.0 = phi <4 x float> [ %vld224, %entry ], [ %vld2216, %do.body ]
%c.addr.0 = phi i32 [ %c, %entry ], [ %dec, %do.body ]
%p.addr.0 = phi float* [ %p, %entry ], [ %add.ptr, %do.body ]
- %add.ptr = getelementptr inbounds float* %p.addr.0, i32 8
+ %add.ptr = getelementptr inbounds float, float* %p.addr.0, i32 8
%1 = bitcast float* %add.ptr to i8*
%vld22 = tail call { <4 x float>, <4 x float> } @llvm.arm.neon.vld2.v4f32(i8* %1, i32 4)
%vld2215 = extractvalue { <4 x float>, <4 x float> } %vld22, 0
; CHECK-NOT: vorr
define void @f3(float* %p, float* %q) nounwind ssp {
entry:
- %arrayidx = getelementptr inbounds float* %p, i32 3
+ %arrayidx = getelementptr inbounds float, float* %p, i32 3
%0 = load float* %arrayidx, align 4
%vecins = insertelement <2 x float> undef, float %0, i32 1
%tobool = icmp eq float* %q, null
if.then: ; preds = %entry
%1 = load float* %q, align 4
- %arrayidx2 = getelementptr inbounds float* %q, i32 1
+ %arrayidx2 = getelementptr inbounds float, float* %q, i32 1
%2 = load float* %arrayidx2, align 4
%add = fadd float %1, %2
%vecins3 = insertelement <2 x float> %vecins, float %add, i32 0
br label %if.end
if.else: ; preds = %entry
- %arrayidx4 = getelementptr inbounds float* %p, i32 2
+ %arrayidx4 = getelementptr inbounds float, float* %p, i32 2
%3 = load float* %arrayidx4, align 4
%vecins5 = insertelement <2 x float> %vecins, float %3, i32 0
br label %if.end
if.end: ; preds = %if.else, %if.then
%x.0 = phi <2 x float> [ %vecins3, %if.then ], [ %vecins5, %if.else ]
- %add.ptr = getelementptr inbounds float* %p, i32 4
+ %add.ptr = getelementptr inbounds float, float* %p, i32 4
%4 = bitcast float* %add.ptr to i8*
tail call void @llvm.arm.neon.vst1.v2f32(i8* %4, <2 x float> %x.0, i32 4)
ret void
if.then: ; preds = %entry
%1 = load float* %q, align 4
- %arrayidx1 = getelementptr inbounds float* %q, i32 1
+ %arrayidx1 = getelementptr inbounds float, float* %q, i32 1
%2 = load float* %arrayidx1, align 4
%add = fadd float %1, %2
%vecins = insertelement <2 x float> %vld1, float %add, i32 1
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
- %arrayidx = getelementptr inbounds float* %q, i32 1
+ %arrayidx = getelementptr inbounds float, float* %q, i32 1
%1 = load float* %arrayidx, align 4
%add4 = fadd float %vecext, %1
%2 = load float* %q, align 4
%add6 = fadd float %vecext1, %2
- %arrayidx7 = getelementptr inbounds float* %q, i32 2
+ %arrayidx7 = getelementptr inbounds float, float* %q, i32 2
%3 = load float* %arrayidx7, align 4
%add8 = fadd float %vecext2, %3
br label %if.end
; CHECK: bx lr
%list_addr.05 = phi %struct.list_head* [ %2, %bb ], [ %list, %entry ]
%next.04 = phi %struct.list_head* [ %list_addr.05, %bb ], [ null, %entry ]
- %1 = getelementptr inbounds %struct.list_head* %list_addr.05, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.list_head, %struct.list_head* %list_addr.05, i32 0, i32 0
%2 = load %struct.list_head** %1, align 4
store %struct.list_head* %next.04, %struct.list_head** %1, align 4
%3 = icmp eq %struct.list_head* %2, null
%indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %bb2.preheader ] ; <i32> [#uses=2]
%sum.08 = phi i32 [ %2, %bb1 ], [ %sum.110, %bb2.preheader ] ; <i32> [#uses=1]
%tmp17 = sub i32 %i.07, %indvar ; <i32> [#uses=1]
- %scevgep = getelementptr i32* %src, i32 %tmp17 ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %src, i32 %tmp17 ; <i32*> [#uses=1]
%1 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
%2 = add nsw i32 %1, %sum.08 ; <i32> [#uses=2]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%i.012 = phi i32 [ 0, %entry ], [ %inc, %if.end8 ]
%BestCost.011 = phi i32 [ -1, %entry ], [ %BestCost.1, %if.end8 ]
%BestIdx.010 = phi i32 [ 0, %entry ], [ %BestIdx.1, %if.end8 ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %i.012
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.012
%0 = load i32* %arrayidx, align 4
%mul = mul i32 %0, %0
%sub = add nsw i32 %i.012, -5
define void @test3(float* %glob, i32 %X) {
entry:
%tmp = load float* %glob ; <float> [#uses=1]
- %tmp2 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
+ %tmp2 = getelementptr float, float* %glob, i32 2 ; <float*> [#uses=1]
%tmp3 = load float* %tmp2 ; <float> [#uses=1]
%tmp.upgrd.1 = fcmp ogt float %tmp, %tmp3 ; <i1> [#uses=1]
br i1 %tmp.upgrd.1, label %cond_true, label %UnifiedReturnBlock
; SOURCE-SCHED: cmp
; SOURCE-SCHED: bne
%i.031 = phi i32 [ 0, %for.body.lr.ph ], [ %0, %for.body ]
- %arrayidx11 = getelementptr float* %t, i32 %i.031
- %arrayidx15 = getelementptr float* %u, i32 %i.031
- %arrayidx19 = getelementptr i8* %red, i32 %i.031
- %arrayidx22 = getelementptr i8* %green, i32 %i.031
- %arrayidx25 = getelementptr i8* %blue, i32 %i.031
- %arrayidx28 = getelementptr i8* %alpha, i32 %i.031
+ %arrayidx11 = getelementptr float, float* %t, i32 %i.031
+ %arrayidx15 = getelementptr float, float* %u, i32 %i.031
+ %arrayidx19 = getelementptr i8, i8* %red, i32 %i.031
+ %arrayidx22 = getelementptr i8, i8* %green, i32 %i.031
+ %arrayidx25 = getelementptr i8, i8* %blue, i32 %i.031
+ %arrayidx28 = getelementptr i8, i8* %alpha, i32 %i.031
%tmp12 = load float* %arrayidx11, align 4
tail call fastcc void @sample_3d_nearest(i8* %tObj, i8* undef, float undef, float %tmp12, float undef, i8* %arrayidx19, i8* %arrayidx22, i8* %arrayidx25, i8* %arrayidx28)
%0 = add i32 %i.031, 1
bb2:
%tmp120 = add i32 %tmp119, 0
- %scevgep810.i = getelementptr %struct.foo* null, i32 %tmp120, i32 1
+ %scevgep810.i = getelementptr %struct.foo, %struct.foo* null, i32 %tmp120, i32 1
store i32 undef, i32* %scevgep810.i, align 4
br i1 undef, label %bb2, label %bb3
for.body: ; preds = %entry, %for.body
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%ap.cur = load i8** %vl, align 4
- %ap.next = getelementptr i8* %ap.cur, i32 4
+ %ap.next = getelementptr i8, i8* %ap.cur, i32 4
store i8* %ap.next, i8** %vl, align 4
%0 = bitcast i8* %ap.cur to i32*
%1 = load i32* %0, align 4
store %0* %loadedMydata, %0** %1, align 4
call void @llvm.dbg.declare(metadata %0** %1, metadata !130, metadata !{!"0x102"}), !dbg !131
%2 = bitcast %struct.CR* %bounds to %1*
- %3 = getelementptr %1* %2, i32 0, i32 0
+ %3 = getelementptr %1, %1* %2, i32 0, i32 0
store [4 x i32] %bounds.coerce0, [4 x i32]* %3
call void @llvm.dbg.declare(metadata %struct.CR* %bounds, metadata !132, metadata !{!"0x102"}), !dbg !133
%4 = bitcast %struct.CR* %data to %1*
- %5 = getelementptr %1* %4, i32 0, i32 0
+ %5 = getelementptr %1, %1* %4, i32 0, i32 0
store [4 x i32] %data.coerce0, [4 x i32]* %5
call void @llvm.dbg.declare(metadata %struct.CR* %data, metadata !134, metadata !{!"0x102"}), !dbg !135
%6 = bitcast i8* %.block_descriptor to %2*
- %7 = getelementptr inbounds %2* %6, i32 0, i32 6
+ %7 = getelementptr inbounds %2, %2* %6, i32 0, i32 6
call void @llvm.dbg.declare(metadata %2* %6, metadata !136, metadata !163), !dbg !137
call void @llvm.dbg.declare(metadata %2* %6, metadata !138, metadata !164), !dbg !137
call void @llvm.dbg.declare(metadata %2* %6, metadata !139, metadata !165), !dbg !140
%10 = bitcast %0* %8 to i8*, !dbg !141
%11 = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %10, i8* %9), !dbg !141
%12 = bitcast i8* %11 to %0*, !dbg !141
- %13 = getelementptr inbounds %2* %6, i32 0, i32 5, !dbg !141
+ %13 = getelementptr inbounds %2, %2* %6, i32 0, i32 5, !dbg !141
%14 = load i8** %13, !dbg !141
%15 = bitcast i8* %14 to %struct.__block_byref_mydata*, !dbg !141
- %16 = getelementptr inbounds %struct.__block_byref_mydata* %15, i32 0, i32 1, !dbg !141
+ %16 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %15, i32 0, i32 1, !dbg !141
%17 = load %struct.__block_byref_mydata** %16, !dbg !141
- %18 = getelementptr inbounds %struct.__block_byref_mydata* %17, i32 0, i32 6, !dbg !141
+ %18 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %17, i32 0, i32 6, !dbg !141
store %0* %12, %0** %18, align 4, !dbg !141
- %19 = getelementptr inbounds %2* %6, i32 0, i32 6, !dbg !143
+ %19 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !143
%20 = load %3** %19, align 4, !dbg !143
%21 = load i32* @"OBJC_IVAR_$_MyWork._data", !dbg !143
%22 = bitcast %3* %20 to i8*, !dbg !143
- %23 = getelementptr inbounds i8* %22, i32 %21, !dbg !143
+ %23 = getelementptr inbounds i8, i8* %22, i32 %21, !dbg !143
%24 = bitcast i8* %23 to %struct.CR*, !dbg !143
%25 = bitcast %struct.CR* %24 to i8*, !dbg !143
%26 = bitcast %struct.CR* %data to i8*, !dbg !143
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %25, i8* %26, i32 16, i32 4, i1 false), !dbg !143
- %27 = getelementptr inbounds %2* %6, i32 0, i32 6, !dbg !144
+ %27 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !144
%28 = load %3** %27, align 4, !dbg !144
%29 = load i32* @"OBJC_IVAR_$_MyWork._bounds", !dbg !144
%30 = bitcast %3* %28 to i8*, !dbg !144
- %31 = getelementptr inbounds i8* %30, i32 %29, !dbg !144
+ %31 = getelementptr inbounds i8, i8* %30, i32 %29, !dbg !144
%32 = bitcast i8* %31 to %struct.CR*, !dbg !144
%33 = bitcast %struct.CR* %32 to i8*, !dbg !144
%34 = bitcast %struct.CR* %bounds to i8*, !dbg !144
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %33, i8* %34, i32 16, i32 4, i1 false), !dbg !144
- %35 = getelementptr inbounds %2* %6, i32 0, i32 6, !dbg !145
+ %35 = getelementptr inbounds %2, %2* %6, i32 0, i32 6, !dbg !145
%36 = load %3** %35, align 4, !dbg !145
- %37 = getelementptr inbounds %2* %6, i32 0, i32 5, !dbg !145
+ %37 = getelementptr inbounds %2, %2* %6, i32 0, i32 5, !dbg !145
%38 = load i8** %37, !dbg !145
%39 = bitcast i8* %38 to %struct.__block_byref_mydata*, !dbg !145
- %40 = getelementptr inbounds %struct.__block_byref_mydata* %39, i32 0, i32 1, !dbg !145
+ %40 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %39, i32 0, i32 1, !dbg !145
%41 = load %struct.__block_byref_mydata** %40, !dbg !145
- %42 = getelementptr inbounds %struct.__block_byref_mydata* %41, i32 0, i32 6, !dbg !145
+ %42 = getelementptr inbounds %struct.__block_byref_mydata, %struct.__block_byref_mydata* %41, i32 0, i32 6, !dbg !145
%43 = load %0** %42, align 4, !dbg !145
%44 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_222", !dbg !145
%45 = bitcast %3* %36 to i8*, !dbg !145
%1 = fadd double %0, 5.555552e+05, !dbg !35
tail call void @llvm.dbg.value(metadata double %1, i64 0, metadata !24, metadata !{!"0x102"}), !dbg !35
%2 = tail call i32 @puts(i8* getelementptr inbounds ([6 x i8]* @.str1, i32 0, i32 0)) nounwind, !dbg !36
- %3 = getelementptr inbounds i8* bitcast (i32 (i32, i8**)* @main to i8*), i32 %argc, !dbg !37
+ %3 = getelementptr inbounds i8, i8* bitcast (i32 (i32, i8**)* @main to i8*), i32 %argc, !dbg !37
%4 = trunc i32 %argc to i8, !dbg !37
%5 = add i8 %4, 97, !dbg !37
tail call void @llvm.dbg.value(metadata i8* %3, i64 0, metadata !19, metadata !{!"0x102"}) nounwind, !dbg !38
%conv1 = fptrunc double %add to float, !dbg !38
tail call void @llvm.dbg.value(metadata float %conv1, i64 0, metadata !22, metadata !{!"0x102"}), !dbg !38
%call = tail call i32 @puts(i8* getelementptr inbounds ([6 x i8]* @.str1, i32 0, i32 0)) nounwind optsize, !dbg !39
- %add.ptr = getelementptr i8* bitcast (i32 (i32, i8**)* @main to i8*), i32 %argc, !dbg !40
+ %add.ptr = getelementptr i8, i8* bitcast (i32 (i32, i8**)* @main to i8*), i32 %argc, !dbg !40
%add5 = add nsw i32 %argc, 97, !dbg !40
%conv6 = trunc i32 %add5 to i8, !dbg !40
tail call void @llvm.dbg.value(metadata i8* %add.ptr, i64 0, metadata !8, metadata !{!"0x102"}) nounwind, !dbg !41
%div = sdiv i32 %x, %y
store i32 %div, i32* %P, align 4
%rem = srem i32 %x, %y
- %arrayidx6 = getelementptr inbounds i32* %P, i32 1
+ %arrayidx6 = getelementptr inbounds i32, i32* %P, i32 1
store i32 %rem, i32* %arrayidx6, align 4
ret void
}
%div = udiv i32 %x, %y
store i32 %div, i32* %P, align 4
%rem = urem i32 %x, %y
- %arrayidx6 = getelementptr inbounds i32* %P, i32 1
+ %arrayidx6 = getelementptr inbounds i32, i32* %P, i32 1
store i32 %rem, i32* %arrayidx6, align 4
ret void
}
bb1:
%line_indent_len.0 = phi i32 [ %4, %bb ], [ 0, %entry ]
- %8 = getelementptr inbounds i8* null, i32 %line_indent_len.0
+ %8 = getelementptr inbounds i8, i8* null, i32 %line_indent_len.0
store i8 0, i8* %8, align 1
ret void
}
bb43: ; preds = %bb123.preheader
call fastcc void @f1(float* %tmp8, float* null, i32 0)
%tmp70 = load i32* null
- %tmp85 = getelementptr float* %tmp8, i32 0
+ %tmp85 = getelementptr float, float* %tmp8, i32 0
call fastcc void @f2(float* null, float* null, float* %tmp85, i32 %tmp70)
ret void
%tmp6 = alloca i8, i32 %tmp5
%tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag)
%tmp6.len = call i32 @strlen(i8* %tmp6)
- %tmp6.indexed = getelementptr i8* %tmp6, i32 %tmp6.len
+ %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8]* @str215, i32 0, i32 0), i32 2, i32 1, i1 false)
%tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents)
call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6)
%add = fadd float %x, %y
%0 = load %struct.anon** @a, align 4
- %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
+ %x1 = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 0
store float %add, float* %x1, align 1
ret void
}
%0 = alloca %class.TAlignTest*, align 4
store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
%1 = load %class.TAlignTest** %0
- %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
+ %2 = getelementptr inbounds %class.TAlignTest, %class.TAlignTest* %1, i32 0, i32 1
%3 = load float* %2, align 1
%4 = fcmp une float %3, 0.000000e+00
; ARM: ldr r[[R:[0-9]+]], [r0, #2]
define i32 @t1(i32* nocapture %ptr) nounwind readonly {
entry:
; ARM: t1
- %add.ptr = getelementptr inbounds i32* %ptr, i32 1
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 1
%0 = load i32* %add.ptr, align 4
; ARM: ldr r{{[0-9]}}, [r0, #4]
ret i32 %0
define i32 @t2(i32* nocapture %ptr) nounwind readonly {
entry:
; ARM: t2
- %add.ptr = getelementptr inbounds i32* %ptr, i32 63
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 63
%0 = load i32* %add.ptr, align 4
; ARM: ldr.w r{{[0-9]}}, [r0, #252]
ret i32 %0
define zeroext i16 @t3(i16* nocapture %ptr) nounwind readonly {
entry:
; ARM: t3
- %add.ptr = getelementptr inbounds i16* %ptr, i16 1
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i16 1
%0 = load i16* %add.ptr, align 4
; ARM: ldrh r{{[0-9]}}, [r0, #2]
ret i16 %0
define zeroext i16 @t4(i16* nocapture %ptr) nounwind readonly {
entry:
; ARM: t4
- %add.ptr = getelementptr inbounds i16* %ptr, i16 63
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i16 63
%0 = load i16* %add.ptr, align 4
; ARM: ldrh.w r{{[0-9]}}, [r0, #126]
ret i16 %0
define zeroext i8 @t5(i8* nocapture %ptr) nounwind readonly {
entry:
; ARM: t5
- %add.ptr = getelementptr inbounds i8* %ptr, i8 1
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i8 1
%0 = load i8* %add.ptr, align 4
; ARM: ldrb r{{[0-9]}}, [r0, #1]
ret i8 %0
define zeroext i8 @t6(i8* nocapture %ptr) nounwind readonly {
entry:
; ARM: t6
- %add.ptr = getelementptr inbounds i8* %ptr, i8 63
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i8 63
%0 = load i8* %add.ptr, align 4
; ARM: ldrb.w r{{[0-9]}}, [r0, #63]
ret i8 %0
define i32 @t1(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t1
- %add.ptr = getelementptr inbounds i32* %ptr, i32 -1
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -1
%0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0, #-4]
ret i32 %0
define i32 @t2(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t2
- %add.ptr = getelementptr inbounds i32* %ptr, i32 -63
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -63
%0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0, #-252]
ret i32 %0
define i32 @t3(i32* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t3
- %add.ptr = getelementptr inbounds i32* %ptr, i32 -64
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -64
%0 = load i32* %add.ptr, align 4
; THUMB: ldr r{{[0-9]}}, [r0]
ret i32 %0
define zeroext i16 @t4(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t4
- %add.ptr = getelementptr inbounds i16* %ptr, i32 -1
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -1
%0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0, #-2]
ret i16 %0
define zeroext i16 @t5(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t5
- %add.ptr = getelementptr inbounds i16* %ptr, i32 -127
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -127
%0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0, #-254]
ret i16 %0
define zeroext i16 @t6(i16* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t6
- %add.ptr = getelementptr inbounds i16* %ptr, i32 -128
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -128
%0 = load i16* %add.ptr, align 2
; THUMB: ldrh r{{[0-9]}}, [r0]
ret i16 %0
define zeroext i8 @t7(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t7
- %add.ptr = getelementptr inbounds i8* %ptr, i32 -1
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -1
%0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0, #-1]
ret i8 %0
define zeroext i8 @t8(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t8
- %add.ptr = getelementptr inbounds i8* %ptr, i32 -255
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -255
%0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0, #-255]
ret i8 %0
define zeroext i8 @t9(i8* nocapture %ptr) nounwind readonly {
entry:
; THUMB: t9
- %add.ptr = getelementptr inbounds i8* %ptr, i32 -256
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -256
%0 = load i8* %add.ptr, align 1
; THUMB: ldrb r{{[0-9]}}, [r0]
ret i8 %0
define void @t10(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t10
- %add.ptr = getelementptr inbounds i32* %ptr, i32 -1
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -1
store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0, #-4]
ret void
define void @t11(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t11
- %add.ptr = getelementptr inbounds i32* %ptr, i32 -63
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -63
store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0, #-252]
ret void
define void @t12(i32* nocapture %ptr) nounwind {
entry:
; THUMB: t12
- %add.ptr = getelementptr inbounds i32* %ptr, i32 -64
+ %add.ptr = getelementptr inbounds i32, i32* %ptr, i32 -64
store i32 0, i32* %add.ptr, align 4
; THUMB: str r{{[0-9]}}, [r0]
ret void
define void @t13(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t13
- %add.ptr = getelementptr inbounds i16* %ptr, i32 -1
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -1
store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0, #-2]
ret void
define void @t14(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t14
- %add.ptr = getelementptr inbounds i16* %ptr, i32 -127
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -127
store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0, #-254]
ret void
define void @t15(i16* nocapture %ptr) nounwind {
entry:
; THUMB: t15
- %add.ptr = getelementptr inbounds i16* %ptr, i32 -128
+ %add.ptr = getelementptr inbounds i16, i16* %ptr, i32 -128
store i16 0, i16* %add.ptr, align 2
; THUMB: strh r{{[0-9]}}, [r0]
ret void
define void @t16(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t16
- %add.ptr = getelementptr inbounds i8* %ptr, i32 -1
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -1
store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0, #-1]
ret void
define void @t17(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t17
- %add.ptr = getelementptr inbounds i8* %ptr, i32 -255
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -255
store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0, #-255]
ret void
define void @t18(i8* nocapture %ptr) nounwind {
entry:
; THUMB: t18
- %add.ptr = getelementptr inbounds i8* %ptr, i32 -256
+ %add.ptr = getelementptr inbounds i8, i8* %ptr, i32 -256
store i8 0, i8* %add.ptr, align 1
; THUMB: strb r{{[0-9]}}, [r0]
ret void
define zeroext i16 @t1(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t1
- %add.ptr = getelementptr inbounds i16* %a, i64 -8
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 -8
%0 = load i16* %add.ptr, align 2
; ARM: ldrh r0, [r0, #-16]
ret i16 %0
define zeroext i16 @t2(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t2
- %add.ptr = getelementptr inbounds i16* %a, i64 -16
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 -16
%0 = load i16* %add.ptr, align 2
; ARM: ldrh r0, [r0, #-32]
ret i16 %0
define zeroext i16 @t3(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t3
- %add.ptr = getelementptr inbounds i16* %a, i64 -127
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 -127
%0 = load i16* %add.ptr, align 2
; ARM: ldrh r0, [r0, #-254]
ret i16 %0
define zeroext i16 @t4(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t4
- %add.ptr = getelementptr inbounds i16* %a, i64 -128
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 -128
%0 = load i16* %add.ptr, align 2
; ARM: mvn r{{[1-9]}}, #255
; ARM: add r0, r0, r{{[1-9]}}
define zeroext i16 @t5(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t5
- %add.ptr = getelementptr inbounds i16* %a, i64 8
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 8
%0 = load i16* %add.ptr, align 2
; ARM: ldrh r0, [r0, #16]
ret i16 %0
define zeroext i16 @t6(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t6
- %add.ptr = getelementptr inbounds i16* %a, i64 16
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 16
%0 = load i16* %add.ptr, align 2
; ARM: ldrh r0, [r0, #32]
ret i16 %0
define zeroext i16 @t7(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t7
- %add.ptr = getelementptr inbounds i16* %a, i64 127
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 127
%0 = load i16* %add.ptr, align 2
; ARM: ldrh r0, [r0, #254]
ret i16 %0
define zeroext i16 @t8(i16* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t8
- %add.ptr = getelementptr inbounds i16* %a, i64 128
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 128
%0 = load i16* %add.ptr, align 2
; ARM: add r0, r0, #256
; ARM: ldrh r0, [r0]
define void @t9(i16* nocapture %a) nounwind uwtable ssp {
entry:
; ARM: t9
- %add.ptr = getelementptr inbounds i16* %a, i64 -8
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 -8
store i16 0, i16* %add.ptr, align 2
; ARM: strh r1, [r0, #-16]
ret void
define void @t10(i16* nocapture %a) nounwind uwtable ssp {
entry:
; ARM: t10
- %add.ptr = getelementptr inbounds i16* %a, i64 -128
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 -128
store i16 0, i16* %add.ptr, align 2
; ARM: mvn r{{[1-9]}}, #255
; ARM: add r0, r0, r{{[1-9]}}
define void @t11(i16* nocapture %a) nounwind uwtable ssp {
entry:
; ARM: t11
- %add.ptr = getelementptr inbounds i16* %a, i64 8
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 8
store i16 0, i16* %add.ptr, align 2
; ARM: strh r{{[1-9]}}, [r0, #16]
ret void
define void @t12(i16* nocapture %a) nounwind uwtable ssp {
entry:
; ARM: t12
- %add.ptr = getelementptr inbounds i16* %a, i64 128
+ %add.ptr = getelementptr inbounds i16, i16* %a, i64 128
store i16 0, i16* %add.ptr, align 2
; ARM: add r0, r0, #256
; ARM: strh r{{[1-9]}}, [r0]
define signext i8 @t13(i8* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t13
- %add.ptr = getelementptr inbounds i8* %a, i64 -8
+ %add.ptr = getelementptr inbounds i8, i8* %a, i64 -8
%0 = load i8* %add.ptr, align 2
; ARM: ldrsb r0, [r0, #-8]
ret i8 %0
define signext i8 @t14(i8* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t14
- %add.ptr = getelementptr inbounds i8* %a, i64 -255
+ %add.ptr = getelementptr inbounds i8, i8* %a, i64 -255
%0 = load i8* %add.ptr, align 2
; ARM: ldrsb r0, [r0, #-255]
ret i8 %0
define signext i8 @t15(i8* nocapture %a) nounwind uwtable readonly ssp {
entry:
; ARM: t15
- %add.ptr = getelementptr inbounds i8* %a, i64 -256
+ %add.ptr = getelementptr inbounds i8, i8* %a, i64 -256
%0 = load i8* %add.ptr, align 2
; ARM: mvn r{{[1-9]}}, #255
; ARM: add r0, r0, r{{[1-9]}}
%tmp = load i64* %data, align 4
%tmp1 = load i8** %p.addr, align 4
%tmp2 = load i32* %offset.addr, align 4
- %add.ptr = getelementptr i8* %tmp1, i32 %tmp2
+ %add.ptr = getelementptr i8, i8* %tmp1, i32 %tmp2
%0 = bitcast i8* %add.ptr to i64*
- %arrayidx = getelementptr inbounds i64* %0, i32 0
+ %arrayidx = getelementptr inbounds i64, i64* %0, i32 0
store i64 %tmp, i64* %arrayidx
ret void
}
target triple = "thumbv7-apple-macosx10.6.7"
define i32 @f(i32* %x) nounwind ssp {
- %y = getelementptr inbounds i32* %x, i32 5000
+ %y = getelementptr inbounds i32, i32* %x, i32 5000
%tmp103 = load i32* %y, align 4
ret i32 %tmp103
}
; CHECK-LABEL: _gep_promotion:
; CHECK: ldrb {{r[0-9]+}}, {{\[r[0-9]+\]}}
- %arrayidx = getelementptr inbounds i8* %0, i8 %add
+ %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
%1 = load i8* %arrayidx, align 1
ret i8 %1
define fastcc void @func(%struct.gs_matrix* nocapture %pm1) nounwind {
entry:
- %0 = getelementptr inbounds %struct.gs_matrix* %pm1, i32 0, i32 6
+ %0 = getelementptr inbounds %struct.gs_matrix, %struct.gs_matrix* %pm1, i32 0, i32 6
%1 = load float* %0, align 4
- %2 = getelementptr inbounds %struct.gs_matrix* %pm1, i32 0, i32 8
+ %2 = getelementptr inbounds %struct.gs_matrix, %struct.gs_matrix* %pm1, i32 0, i32 8
%3 = load float* %2, align 4
- %4 = getelementptr inbounds %struct.gs_matrix* %pm1, i32 0, i32 2
+ %4 = getelementptr inbounds %struct.gs_matrix, %struct.gs_matrix* %pm1, i32 0, i32 2
%5 = bitcast float* %4 to i32*
%6 = load i32* %5, align 4
%7 = or i32 0, %6
define float @f2offset(float* %v, float %u) {
; CHECK-LABEL: f2offset:
; CHECK: vldr{{.*}}, #4]
- %addr = getelementptr float* %v, i32 1
+ %addr = getelementptr float, float* %v, i32 1
%tmp = load float* %addr
%tmp1 = fadd float %tmp, %u
ret float %tmp1
define float @f2noffset(float* %v, float %u) {
; CHECK-LABEL: f2noffset:
; CHECK: vldr{{.*}}, #-4]
- %addr = getelementptr float* %v, i32 -1
+ %addr = getelementptr float, float* %v, i32 -1
%tmp = load float* %addr
%tmp1 = fadd float %tmp, %u
ret float %tmp1
; %R0 should be killed here, however after if-conversion the %R0 kill
; has to be removed because if.then will follow after this and still
; read it.
- %addr = getelementptr inbounds i32* %ptr, i32 4
+ %addr = getelementptr inbounds i32, i32* %ptr, i32 4
%vale = load i32* %addr, align 4
br label %return
br label %_ZN1M6spliceEv.exit
_ZN1M6spliceEv.exit:
- %LIS = getelementptr inbounds %classK* %this, i32 0, i32 1
+ %LIS = getelementptr inbounds %classK, %classK* %this, i32 0, i32 1
call void @_ZN1F10handleMoveEb(%classF* %LIS, i1 zeroext false)
unreachable
}
%struct.S = type { i8* (i8*)*, [1 x i8] }
define internal zeroext i8 @bar(%struct.S* %x, %struct.S* nocapture %y) nounwind readonly {
entry:
- %0 = getelementptr inbounds %struct.S* %x, i32 0, i32 1, i32 0
+ %0 = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1, i32 0
%1 = load i8* %0, align 1
%2 = zext i8 %1 to i32
%3 = and i32 %2, 112
br i1 %4, label %return, label %bb
bb:
- %5 = getelementptr inbounds %struct.S* %y, i32 0, i32 1, i32 0
+ %5 = getelementptr inbounds %struct.S, %struct.S* %y, i32 0, i32 1, i32 0
%6 = load i8* %5, align 1
%7 = zext i8 %6 to i32
%8 = and i32 %7, 112
; CHECK: vmrs APSR_nzcv, fpscr
%r.19 = phi i32 [ 0, %bb.nph ], [ %r.0, %bb4 ]
%n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ]
- %scevgep10 = getelementptr inbounds %struct.xyz_t* %p, i32 %n.08, i32 0
- %scevgep11 = getelementptr %struct.xyz_t* %p, i32 %n.08, i32 1
+ %scevgep10 = getelementptr inbounds %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 0
+ %scevgep11 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 1
%3 = load double* %scevgep10, align 4
%4 = load double* %scevgep11, align 4
%5 = fcmp uge double %3, %4
; CHECK-NOT: vmrsmi
; CHECK: vcmpe.f64
; CHECK: vmrs APSR_nzcv, fpscr
- %scevgep12 = getelementptr %struct.xyz_t* %p, i32 %n.08, i32 2
+ %scevgep12 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 2
%6 = load double* %scevgep12, align 4
%7 = fcmp uge double %3, %6
br i1 %7, label %bb3, label %bb2
define void @switch_to_stack(%struct.my_stack* %stack) nounwind {
entry:
- %regs = getelementptr inbounds %struct.my_stack* %stack, i32 0, i32 0
+ %regs = getelementptr inbounds %struct.my_stack, %struct.my_stack* %stack, i32 0, i32 0
tail call void asm "\0A", "=*r,*0"(%struct.myjmp_buf* %regs, %struct.myjmp_buf* %regs)
ret void
}
%5 = zext i1 %3 to i32
%6 = mul i32 %5, 287
%7 = add i32 %6, 2
- %8 = getelementptr [2 x i32]* @DWJumpTable2808, i32 0, i32 %5
+ %8 = getelementptr [2 x i32], [2 x i32]* @DWJumpTable2808, i32 0, i32 %5
%9 = load i32* %8
%10 = add i32 %9, ptrtoint (i8* blockaddress(@func, %4) to i32)
%11 = inttoptr i32 %10 to i8*
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
%gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
br label %bb2
%c2 = alloca %struct.float4, align 4
%c3 = alloca %struct.float4, align 4
call void asm sideeffect "vmul.f32 ${2:q}, ${0:q}, ${1:q}", "=*r,=*r,*w"(%struct.float4* %c1, %struct.float4* %c2, %struct.float4* %c3) #1, !srcloc !1
- %x = getelementptr inbounds %struct.float4* %c3, i32 0, i32 0
+ %x = getelementptr inbounds %struct.float4, %struct.float4* %c3, i32 0, i32 0
%1 = load float* %x, align 4
ret float %1
}
; check: strexd {{r[0-9]?[02468]}}, {{r[0-9]?[13579]}}, [r{{[0-9]+}}]
tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
- %incdec.ptr = getelementptr inbounds i64* %p, i32 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %p, i32 1
tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
ret void
call void @llvm.arm.dmb(i32 15) ; CHECK: dmb sy
- %d1 = getelementptr i32* %d, i32 1
+ %d1 = getelementptr i32, i32* %d, i32 1
store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
ret void
call void @llvm.arm.dsb(i32 15) ; CHECK: dsb sy
- %d1 = getelementptr i32* %d, i32 1
+ %d1 = getelementptr i32, i32* %d, i32 1
store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
ret void
call void @llvm.arm.isb(i32 15) ; CHECK: isb sy
- %d1 = getelementptr i32* %d, i32 1
+ %d1 = getelementptr i32, i32* %d, i32 1
store i32 %b, i32* %d1 ; CHECK: str {{r[0-9]+}}, [{{r[0-9]+}}, #4]
ret void
; CHECK-LABEL: f2:
; CHECK: ldr r0
entry:
- %tmp2 = getelementptr i32* %v, i32 1023
+ %tmp2 = getelementptr i32, i32* %v, i32 1023
%tmp = load i32* %tmp2
ret i32 %tmp
}
; CHECK: mov
; CHECK: ldr r0
entry:
- %tmp2 = getelementptr i32* %v, i32 1024
+ %tmp2 = getelementptr i32, i32* %v, i32 1024
%tmp = load i32* %tmp2
ret i32 %tmp
}
define i32 @f1() {
%buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 0
+ %tmp = getelementptr [32 x i32], [32 x i32]* %buf, i32 0, i32 0
%tmp1 = load i32* %tmp
ret i32 %tmp1
}
define i32 @f2() {
%buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 0
+ %tmp = getelementptr [32 x i8], [32 x i8]* %buf, i32 0, i32 0
%tmp1 = load i8* %tmp
%tmp2 = zext i8 %tmp1 to i32
ret i32 %tmp2
define i32 @f3() {
%buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 32
+ %tmp = getelementptr [32 x i32], [32 x i32]* %buf, i32 0, i32 32
%tmp1 = load i32* %tmp
ret i32 %tmp1
}
define i32 @f4() {
%buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 2
+ %tmp = getelementptr [32 x i8], [32 x i8]* %buf, i32 0, i32 2
%tmp1 = load i8* %tmp
%tmp2 = zext i8 %tmp1 to i32
ret i32 %tmp2
; CHECK: ldr {{.*!}}
; CHECK-NOT: ldr
define i32* @test1(i32* %X, i32* %dest) {
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
+ %Y = getelementptr i32, i32* %X, i32 4 ; <i32*> [#uses=2]
%A = load i32* %Y ; <i32> [#uses=1]
store i32 %A, i32* %dest
ret i32* %Y
bb: ; preds = %bb, %entry
%i.03 = phi i32 [ %tmp, %bb ], [ 0, %entry ] ; <i32> [#uses=3]
- %scevgep = getelementptr i32* %a, i32 %i.03 ; <i32*> [#uses=1]
- %scevgep4 = getelementptr i32* %b, i32 %i.03 ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %a, i32 %i.03 ; <i32*> [#uses=1]
+ %scevgep4 = getelementptr i32, i32* %b, i32 %i.03 ; <i32*> [#uses=1]
%tmp = add i32 %i.03, 1 ; <i32> [#uses=3]
- %scevgep5 = getelementptr i32* %a, i32 %tmp ; <i32*> [#uses=1]
+ %scevgep5 = getelementptr i32, i32* %a, i32 %tmp ; <i32*> [#uses=1]
%2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
%3 = load i32* %scevgep5, align 4 ; <i32> [#uses=1]
%4 = add nsw i32 %3, %2 ; <i32> [#uses=1]
; CHECK: str [[REGISTER]], [{{r[0-9]+}}], #4
%j.05 = phi i32 [ %2, %bb ], [ 0, %entry ]
%tmp = mul i32 %j.05, %index
- %uglygep = getelementptr i8* %src6, i32 %tmp
+ %uglygep = getelementptr i8, i8* %src6, i32 %tmp
%src_addr.04 = bitcast i8* %uglygep to float*
- %dst_addr.03 = getelementptr float* %dst, i32 %j.05
+ %dst_addr.03 = getelementptr float, float* %dst, i32 %j.05
%1 = load float* %src_addr.04, align 4
store float %1, float* %dst_addr.03, align 4
%2 = add i32 %j.05, 1
define void @excl_addrmode() {
; CHECK-T2ADDRMODE-LABEL: excl_addrmode:
%base1020 = load i32** @base
- %offset1020 = getelementptr i32* %base1020, i32 255
+ %offset1020 = getelementptr i32, i32* %base1020, i32 255
call i32 @llvm.arm.ldrex.p0i32(i32* %offset1020)
call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1020)
; CHECK-T2ADDRMODE: ldrex {{r[0-9]+}}, [{{r[0-9]+}}, #1020]
; CHECK-T2ADDRMODE: strex {{r[0-9]+}}, {{r[0-9]+}}, [{{r[0-9]+}}, #1020]
%base1024 = load i32** @base
- %offset1024 = getelementptr i32* %base1024, i32 256
+ %offset1024 = getelementptr i32, i32* %base1024, i32 256
call i32 @llvm.arm.ldrex.p0i32(i32* %offset1024)
call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1024)
; CHECK-T2ADDRMODE: add.w r[[ADDR:[0-9]+]], {{r[0-9]+}}, #1024
%base1 = load i32** @base
%addr8 = bitcast i32* %base1 to i8*
- %offset1_8 = getelementptr i8* %addr8, i32 1
+ %offset1_8 = getelementptr i8, i8* %addr8, i32 1
%offset1 = bitcast i8* %offset1_8 to i32*
call i32 @llvm.arm.ldrex.p0i32(i32* %offset1)
call i32 @llvm.arm.strex.p0i32(i32 0, i32* %offset1)
bb: ; preds = %cond_next59, %entry
%indvar = phi i32 [ 0, %entry ], [ %k.069.0, %cond_next59 ] ; <i32> [#uses=6]
%k.069.0 = add i32 %indvar, 1 ; <i32> [#uses=3]
- %tmp3 = getelementptr i32* %mpp, i32 %indvar ; <i32*> [#uses=1]
+ %tmp3 = getelementptr i32, i32* %mpp, i32 %indvar ; <i32*> [#uses=1]
%tmp4 = load i32* %tmp3 ; <i32> [#uses=1]
- %tmp8 = getelementptr i32* %tpmm, i32 %indvar ; <i32*> [#uses=1]
+ %tmp8 = getelementptr i32, i32* %tpmm, i32 %indvar ; <i32*> [#uses=1]
%tmp9 = load i32* %tmp8 ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, %tmp4 ; <i32> [#uses=2]
- %tmp13 = getelementptr i32* %mc, i32 %k.069.0 ; <i32*> [#uses=5]
+ %tmp13 = getelementptr i32, i32* %mc, i32 %k.069.0 ; <i32*> [#uses=5]
store i32 %tmp10, i32* %tmp13
- %tmp17 = getelementptr i32* %ip, i32 %indvar ; <i32*> [#uses=1]
+ %tmp17 = getelementptr i32, i32* %ip, i32 %indvar ; <i32*> [#uses=1]
%tmp18 = load i32* %tmp17 ; <i32> [#uses=1]
- %tmp22 = getelementptr i32* %tpim, i32 %indvar ; <i32*> [#uses=1]
+ %tmp22 = getelementptr i32, i32* %tpim, i32 %indvar ; <i32*> [#uses=1]
%tmp23 = load i32* %tmp22 ; <i32> [#uses=1]
%tmp24 = add i32 %tmp23, %tmp18 ; <i32> [#uses=2]
%tmp30 = icmp sgt i32 %tmp24, %tmp10 ; <i1> [#uses=1]
cond_next: ; preds = %cond_true, %bb
%tmp39 = load i32* %tmp13 ; <i32> [#uses=1]
- %tmp42 = getelementptr i32* %ms, i32 %k.069.0 ; <i32*> [#uses=1]
+ %tmp42 = getelementptr i32, i32* %ms, i32 %k.069.0 ; <i32*> [#uses=1]
%tmp43 = load i32* %tmp42 ; <i32> [#uses=1]
%tmp44 = add i32 %tmp43, %tmp39 ; <i32> [#uses=2]
store i32 %tmp44, i32* %tmp13
%bi.06 = phi i32 [ %i.addr.0.bi.0, %for.body ], [ 0, %entry ]
%i.addr.05 = phi i32 [ %sub, %for.body ], [ %i, %entry ]
%b.04 = phi i32 [ %.b.0, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %i.addr.05
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.addr.05
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, %b.04
%.b.0 = select i1 %cmp1, i32 %0, i32 %b.04
cond_next: ; preds = %cond_next, %entry
%indvar = phi i32 [ 0, %entry ], [ %tmp25, %cond_next ] ; <i32> [#uses=1]
%tmp25 = add i32 %indvar, 1 ; <i32> [#uses=3]
- %tmp36 = getelementptr i32* %a, i32 %tmp25 ; <i32*> [#uses=1]
+ %tmp36 = getelementptr i32, i32* %a, i32 %tmp25 ; <i32*> [#uses=1]
store i32 0, i32* %tmp36
icmp eq i32 %tmp25, -1 ; <i1>:0 [#uses=1]
br i1 %0, label %return, label %cond_next
outer.loop: ; preds = %for.inc69, %entry
%overlap.081 = phi i32 [ %overlap.4, %for.inc69 ], [ 0, %entry ]
%0 = phi i32 [ %inc71, %for.inc69 ], [ 0, %entry ]
- %offset = getelementptr %struct.partition_entry* %part, i32 %0, i32 2
- %len = getelementptr %struct.partition_entry* %part, i32 %0, i32 3
+ %offset = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %0, i32 2
+ %len = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %0, i32 3
%tmp5 = load i64* %offset, align 4
%tmp15 = load i64* %len, align 4
%add = add nsw i64 %tmp15, %tmp5
br i1 %cmp23, label %for.inc, label %if.end
if.end: ; preds = %inner.loop
- %len39 = getelementptr %struct.partition_entry* %part, i32 %1, i32 3
- %offset28 = getelementptr %struct.partition_entry* %part, i32 %1, i32 2
+ %len39 = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %1, i32 3
+ %offset28 = getelementptr %struct.partition_entry, %struct.partition_entry* %part, i32 %1, i32 2
%tmp29 = load i64* %offset28, align 4
%tmp40 = load i64* %len39, align 4
%add41 = add nsw i64 %tmp40, %tmp29
; CHECK-NOT: sub
; CHECK: str
store i32 %s3, i32* %offset, align 4
-%add.ptr = getelementptr inbounds i8* %base, i32 %sub
+%add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
br label %return
return:
bb: ; preds = %bb, %bb.nph
%1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ] ; <i32> [#uses=1]
%i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ] ; <i32> [#uses=2]
- %scevgep = getelementptr i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
%2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
%3 = add nsw i32 %1, %2 ; <i32> [#uses=2]
store i32 %3, i32* @GV, align 4
; CHECK: vst1.16 {d{{[0-9]+}}, d{{[0-9]+}}}, [r1]
; CHECK: vst1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r0]
%buf = alloca [26 x i8], align 1
- %0 = getelementptr inbounds [26 x i8]* %buf, i32 0, i32 0
+ %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i32 1, i1 false)
call void @something(i8* %0) nounwind
ret void
%indvars.iv = phi i32 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%s.05 = phi i32 [ %mul, %for.body ], [ 0, %entry ]
%indvars.iv.next = add i32 %indvars.iv, %s
- %arrayidx = getelementptr inbounds i32* %d, i32 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %d, i32 %indvars.iv
%0 = load i32* %arrayidx, align 4
%mul = mul nsw i32 %0, %s.05
%exitcond = icmp eq i32 %indvars.iv.next, %a
if.end28: ; preds = %if.then24, %while.cond, %while.cond
%dst.1 = phi %struct.rtx_def* [ undef, %if.then24 ], [ %dst.0, %while.cond ], [ %dst.0, %while.cond ]
- %arrayidx30 = getelementptr inbounds %struct.rtx_def* %dst.1, i32 0, i32 1, i32 0
+ %arrayidx30 = getelementptr inbounds %struct.rtx_def, %struct.rtx_def* %dst.1, i32 0, i32 1, i32 0
%rtx31 = bitcast %union.rtunion_def* %arrayidx30 to %struct.rtx_def**
%0 = load %struct.rtx_def** %rtx31, align 4
br label %while.cond
;CHECK-NOT: sub
;CHECK: ldr r{{.*}}, [r0, #-16]
;CHECK: ldr r{{.*}}, [r0, #-8]
- %arrayidx = getelementptr inbounds i32* %p, i32 -4
+ %arrayidx = getelementptr inbounds i32, i32* %p, i32 -4
%0 = load i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %p, i32 -2
+ %arrayidx1 = getelementptr inbounds i32, i32* %p, i32 -2
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %1, %0
ret i32 %add
%0 = alloca %foo, align 4
%1 = alloca %foo, align 4
%2 = alloca %foo, align 4
- %.native = getelementptr inbounds %foo* %0, i32 0, i32 0
- %.native.value = getelementptr inbounds %Sf* %.native, i32 0, i32 0
+ %.native = getelementptr inbounds %foo, %foo* %0, i32 0, i32 0
+ %.native.value = getelementptr inbounds %Sf, %Sf* %.native, i32 0, i32 0
store float 0.000000e+00, float* %.native.value, align 4
- %.native1 = getelementptr inbounds %foo* %1, i32 0, i32 0
- %.native1.value = getelementptr inbounds %Sf* %.native1, i32 0, i32 0
+ %.native1 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
+ %.native1.value = getelementptr inbounds %Sf, %Sf* %.native1, i32 0, i32 0
store float 1.000000e+00, float* %.native1.value, align 4
- %.native2 = getelementptr inbounds %foo* %2, i32 0, i32 0
- %.native2.value = getelementptr inbounds %Sf* %.native2, i32 0, i32 0
+ %.native2 = getelementptr inbounds %foo, %foo* %2, i32 0, i32 0
+ %.native2.value = getelementptr inbounds %Sf, %Sf* %.native2, i32 0, i32 0
store float 5.000000e+00, float* %.native2.value, align 4
br i1 true, label %3, label %4
; <label>:3 ; preds = %entry
- %.native4 = getelementptr inbounds %foo* %1, i32 0, i32 0
- %.native4.value = getelementptr inbounds %Sf* %.native4, i32 0, i32 0
+ %.native4 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
+ %.native4.value = getelementptr inbounds %Sf, %Sf* %.native4, i32 0, i32 0
store float 2.000000e+00, float* %.native4.value, align 4
br label %4
; <label>:4 ; preds = %3, %entry
%5 = call float @llvm.ceil.f32(float 5.000000e+00)
- %.native3 = getelementptr inbounds %foo* %1, i32 0, i32 0
- %.native3.value = getelementptr inbounds %Sf* %.native3, i32 0, i32 0
+ %.native3 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
+ %.native3.value = getelementptr inbounds %Sf, %Sf* %.native3, i32 0, i32 0
%6 = load float* %.native3.value, align 4
%7 = call float @llvm.ceil.f32(float %6)
%8 = insertvalue { float, float, float } { float 0.000000e+00, float undef, float undef }, float %5, 1
%0 = alloca %foo, align 4
%1 = alloca %foo, align 4
%2 = alloca %foo, align 4
- %.native = getelementptr inbounds %foo* %0, i32 0, i32 0
- %.native.value = getelementptr inbounds %Sf* %.native, i32 0, i32 0
+ %.native = getelementptr inbounds %foo, %foo* %0, i32 0, i32 0
+ %.native.value = getelementptr inbounds %Sf, %Sf* %.native, i32 0, i32 0
store float 0.000000e+00, float* %.native.value, align 4
- %.native1 = getelementptr inbounds %foo* %1, i32 0, i32 0
- %.native1.value = getelementptr inbounds %Sf* %.native1, i32 0, i32 0
+ %.native1 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
+ %.native1.value = getelementptr inbounds %Sf, %Sf* %.native1, i32 0, i32 0
store float 1.000000e+00, float* %.native1.value, align 4
- %.native2 = getelementptr inbounds %foo* %2, i32 0, i32 0
- %.native2.value = getelementptr inbounds %Sf* %.native2, i32 0, i32 0
+ %.native2 = getelementptr inbounds %foo, %foo* %2, i32 0, i32 0
+ %.native2.value = getelementptr inbounds %Sf, %Sf* %.native2, i32 0, i32 0
store float 5.000000e+00, float* %.native2.value, align 4
br i1 true, label %3, label %4
; <label>:3 ; preds = %entry
- %.native4 = getelementptr inbounds %foo* %1, i32 0, i32 0
- %.native4.value = getelementptr inbounds %Sf* %.native4, i32 0, i32 0
+ %.native4 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
+ %.native4.value = getelementptr inbounds %Sf, %Sf* %.native4, i32 0, i32 0
store float 2.000000e+00, float* %.native4.value, align 4
br label %4
; <label>:4 ; preds = %3, %entry
%5 = call float @llvm.ceil.f32(float 5.000000e+00)
- %.native3 = getelementptr inbounds %foo* %1, i32 0, i32 0
- %.native3.value = getelementptr inbounds %Sf* %.native3, i32 0, i32 0
+ %.native3 = getelementptr inbounds %foo, %foo* %1, i32 0, i32 0
+ %.native3.value = getelementptr inbounds %Sf, %Sf* %.native3, i32 0, i32 0
%6 = load float* %.native3.value, align 4
%7 = call float @llvm.ceil.f32(float %6)
%8 = insertvalue { float, float } { float 0.000000e+00, float undef }, float %7, 1
br i1 %a, label %lblock, label %rblock
lblock:
- %lbranch = getelementptr i32* %b, i32 1
+ %lbranch = getelementptr i32, i32* %b, i32 1
br label %end
rblock:
- %rbranch = getelementptr i32* %b, i32 1
+ %rbranch = getelementptr i32, i32* %b, i32 1
br label %end
end:
bb3: ; preds = %bb3, %bb
%tmp = phi i8* [ %tmp5, %bb3 ], [ %arg, %bb ]
%tmp4 = load i8* %tmp, align 1
- %tmp5 = getelementptr inbounds i8* %tmp, i32 1
+ %tmp5 = getelementptr inbounds i8, i8* %tmp, i32 1
br i1 undef, label %bb3, label %bb7
bb7: ; preds = %bb13, %bb3
%tmp8 = phi i8 [ %tmp14, %bb13 ], [ %tmp4, %bb3 ]
%tmp9 = phi i8* [ %tmp12, %bb13 ], [ %tmp, %bb3 ]
%tmp10 = icmp ne i8 %tmp8, %arg1
- %tmp12 = getelementptr inbounds i8* %tmp9, i32 1
+ %tmp12 = getelementptr inbounds i8, i8* %tmp9, i32 1
br i1 %tmp10, label %bb13, label %bb15
bb13: ; preds = %bb7
; THUMB2-LABEL: t2:
; THUMB2: pld [r0, #1023]
- %tmp = getelementptr i8* %ptr, i32 1023
+ %tmp = getelementptr i8, i8* %ptr, i32 1023
tail call void @llvm.prefetch( i8* %tmp, i32 0, i32 3, i32 1 )
ret void
}
;THUMB2: pld [sp, #50]
%red = alloca [100 x i8], align 1
-%0 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 0
-%1 = getelementptr inbounds [100 x i8]* %red, i32 0, i32 50
+%0 = getelementptr inbounds [100 x i8], [100 x i8]* %red, i32 0, i32 0
+%1 = getelementptr inbounds [100 x i8], [100 x i8]* %red, i32 0, i32 50
call void @llvm.prefetch(i8* %0, i32 0, i32 3, i32 1)
call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
ret void
; CHECK: vshrn.i32
; CHECK-NOT: vmov d
; CHECK-NEXT: vst1.16
- %0 = getelementptr inbounds %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
+ %0 = getelementptr inbounds %struct.int32x4_t, %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
%1 = load <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
- %2 = getelementptr inbounds %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
+ %2 = getelementptr inbounds %struct.int32x4_t, %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
%3 = load <4 x i32>* %2, align 16 ; <<4 x i32>> [#uses=1]
%4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
%5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
; CHECK-NOT: vmov
; CHECK: vst1.16
; CHECK: vst1.16
- %0 = getelementptr inbounds %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
+ %0 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
%1 = load <8 x i16>* %0, align 16 ; <<8 x i16>> [#uses=1]
- %2 = getelementptr inbounds %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
+ %2 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
%3 = load <8 x i16>* %2, align 16 ; <<8 x i16>> [#uses=1]
%4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
%5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
- %6 = getelementptr inbounds i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
+ %6 = getelementptr inbounds i16, i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
%7 = bitcast i16* %6 to i8* ; <i8*> [#uses=1]
%8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7, i32 1) ; <<8 x i16>> [#uses=1]
%9 = mul <8 x i16> %1, %5 ; <<8 x i16>> [#uses=1]
%10 = mul <8 x i16> %3, %8 ; <<8 x i16>> [#uses=1]
%11 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9, i32 1)
- %12 = getelementptr inbounds i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
+ %12 = getelementptr inbounds i16, i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
%13 = bitcast i16* %12 to i8* ; <i8*> [#uses=1]
tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10, i32 1)
ret void
; CHECK: bne
%tmp1 = bitcast i32* %in to i8* ; <i8*> [#uses=1]
%tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
- %tmp3 = getelementptr inbounds i32* %in, i32 8 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr inbounds i32, i32* %in, i32 8 ; <i32*> [#uses=1]
%tmp4 = bitcast i32* %tmp3 to i8* ; <i8*> [#uses=1]
%tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
%tmp8 = bitcast i32* %out to i8* ; <i8*> [#uses=1]
%1 = load float* %data2, align 4
%add = fadd float %mul, %1
%add2 = fadd float %add, 0.000000e+00
- %arrayidx.1 = getelementptr inbounds float* %data1, i32 1
+ %arrayidx.1 = getelementptr inbounds float, float* %data1, i32 1
%2 = load float* %arrayidx.1, align 4
%mul.1 = fmul float %2, %a
- %arrayidx1.1 = getelementptr inbounds float* %data2, i32 1
+ %arrayidx1.1 = getelementptr inbounds float, float* %data2, i32 1
%3 = load float* %arrayidx1.1, align 4
%add.1 = fadd float %mul.1, %3
%add2.1 = fadd float %add2, %add.1
- %arrayidx.2 = getelementptr inbounds float* %data1, i32 2
+ %arrayidx.2 = getelementptr inbounds float, float* %data1, i32 2
%4 = load float* %arrayidx.2, align 4
%mul.2 = fmul float %4, %a
- %arrayidx1.2 = getelementptr inbounds float* %data2, i32 2
+ %arrayidx1.2 = getelementptr inbounds float, float* %data2, i32 2
%5 = load float* %arrayidx1.2, align 4
%add.2 = fadd float %mul.2, %5
%add2.2 = fadd float %add2.1, %add.2
- %arrayidx.3 = getelementptr inbounds float* %data1, i32 3
+ %arrayidx.3 = getelementptr inbounds float, float* %data1, i32 3
%6 = load float* %arrayidx.3, align 4
%mul.3 = fmul float %6, %a
- %arrayidx1.3 = getelementptr inbounds float* %data2, i32 3
+ %arrayidx1.3 = getelementptr inbounds float, float* %data2, i32 3
%7 = load float* %arrayidx1.3, align 4
%add.3 = fadd float %mul.3, %7
%add2.3 = fadd float %add2.2, %add.3
- %arrayidx.4 = getelementptr inbounds float* %data1, i32 4
+ %arrayidx.4 = getelementptr inbounds float, float* %data1, i32 4
%8 = load float* %arrayidx.4, align 4
%mul.4 = fmul float %8, %a
- %arrayidx1.4 = getelementptr inbounds float* %data2, i32 4
+ %arrayidx1.4 = getelementptr inbounds float, float* %data2, i32 4
%9 = load float* %arrayidx1.4, align 4
%add.4 = fadd float %mul.4, %9
%add2.4 = fadd float %add2.3, %add.4
- %arrayidx.5 = getelementptr inbounds float* %data1, i32 5
+ %arrayidx.5 = getelementptr inbounds float, float* %data1, i32 5
%10 = load float* %arrayidx.5, align 4
%mul.5 = fmul float %10, %a
- %arrayidx1.5 = getelementptr inbounds float* %data2, i32 5
+ %arrayidx1.5 = getelementptr inbounds float, float* %data2, i32 5
%11 = load float* %arrayidx1.5, align 4
%add.5 = fadd float %mul.5, %11
%add2.5 = fadd float %add2.4, %add.5
- %arrayidx.6 = getelementptr inbounds float* %data1, i32 6
+ %arrayidx.6 = getelementptr inbounds float, float* %data1, i32 6
%12 = load float* %arrayidx.6, align 4
%mul.6 = fmul float %12, %a
- %arrayidx1.6 = getelementptr inbounds float* %data2, i32 6
+ %arrayidx1.6 = getelementptr inbounds float, float* %data2, i32 6
%13 = load float* %arrayidx1.6, align 4
%add.6 = fadd float %mul.6, %13
%add2.6 = fadd float %add2.5, %add.6
- %arrayidx.7 = getelementptr inbounds float* %data1, i32 7
+ %arrayidx.7 = getelementptr inbounds float, float* %data1, i32 7
%14 = load float* %arrayidx.7, align 4
%mul.7 = fmul float %14, %a
- %arrayidx1.7 = getelementptr inbounds float* %data2, i32 7
+ %arrayidx1.7 = getelementptr inbounds float, float* %data2, i32 7
%15 = load float* %arrayidx1.7, align 4
%add.7 = fadd float %mul.7, %15
%add2.7 = fadd float %add2.6, %add.7
- %arrayidx.8 = getelementptr inbounds float* %data1, i32 8
+ %arrayidx.8 = getelementptr inbounds float, float* %data1, i32 8
%16 = load float* %arrayidx.8, align 4
%mul.8 = fmul float %16, %a
- %arrayidx1.8 = getelementptr inbounds float* %data2, i32 8
+ %arrayidx1.8 = getelementptr inbounds float, float* %data2, i32 8
%17 = load float* %arrayidx1.8, align 4
%add.8 = fadd float %mul.8, %17
%add2.8 = fadd float %add2.7, %add.8
- %arrayidx.9 = getelementptr inbounds float* %data1, i32 9
+ %arrayidx.9 = getelementptr inbounds float, float* %data1, i32 9
%18 = load float* %arrayidx.9, align 4
%mul.9 = fmul float %18, %a
- %arrayidx1.9 = getelementptr inbounds float* %data2, i32 9
+ %arrayidx1.9 = getelementptr inbounds float, float* %data2, i32 9
%19 = load float* %arrayidx1.9, align 4
%add.9 = fadd float %mul.9, %19
%add2.9 = fadd float %add2.8, %add.9
%0 = tail call i8* (...)* @malloc(i32 undef) nounwind
%1 = bitcast i8* %0 to i32*
%2 = sext i16 %addr to i32
- %3 = getelementptr inbounds i32* %1, i32 %2
+ %3 = getelementptr inbounds i32, i32* %1, i32 %2
%4 = load i32* %3, align 4
%5 = add nsw i32 %4, 1
store i32 %5, i32* %3, align 4
_ZNSt3__116allocator_traitsINS_9allocatorIcEEE9constructIccEEvRS2_PT_RKT0_.exit.i.i.i: ; preds = %new.notnull.i.i.i.i, %do.body.i.i.i
%1 = phi i8* [ null, %do.body.i.i.i ], [ %0, %new.notnull.i.i.i.i ]
- %incdec.ptr.i.i.i = getelementptr inbounds i8* %1, i32 1
+ %incdec.ptr.i.i.i = getelementptr inbounds i8, i8* %1, i32 1
%lsr.iv.next = add i32 %lsr.iv, 1
%cmp.i16.i.i = icmp eq i32 %lsr.iv.next, 0
br i1 %cmp.i16.i.i, label %invoke.cont, label %do.body.i.i.i
store i32 %call3, i32* %ptr, align 4
call void @end_addrof()
%call4 = call signext i16 @get_small_nonchar()
- %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i64 0
store i16 %call4, i16* %arrayidx, align 2
call void @end_small_nonchar()
%call5 = call i32 @get_large_nonchar()
- %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i64 0
store i32 %call5, i32* %arrayidx6, align 4
call void @end_large_nonchar()
%call7 = call signext i8 @get_small_char()
- %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ %arrayidx8 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i64 0
store i8 %call7, i8* %arrayidx8, align 1
call void @end_small_char()
%call9 = call signext i8 @get_large_char()
- %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx10 = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call9, i8* %arrayidx10, align 1
call void @end_large_char()
%call11 = call signext i8 @get_struct_large_char()
- %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
- %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8], [8 x i8]* %foo, i32 0, i64 0
store i8 %call11, i8* %arrayidx12, align 1
call void @end_struct_large_char()
%call13 = call signext i8 @get_struct_small_char()
- %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
- %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ %foo14 = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8], [2 x i8]* %foo14, i32 0, i64 0
store i8 %call13, i8* %arrayidx15, align 1
call void @end_struct_small_char()
%call16 = call i32 @get_struct_large_nonchar()
- %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
- %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar, %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32], [8 x i32]* %foo17, i32 0, i64 0
store i32 %call16, i32* %arrayidx18, align 4
call void @end_struct_large_nonchar()
%call19 = call signext i16 @get_struct_small_nonchar()
- %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
- %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16], [2 x i16]* %foo20, i32 0, i64 0
store i16 %call19, i16* %arrayidx21, align 2
call void @end_struct_small_nonchar()
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
- %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
- %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
- %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
%0 = load i32* %x, align 4
%1 = load i32* %y, align 4
%2 = load i32* %z, align 4
- %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
%4 = load i64* %3, align 1
- %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
%6 = load i16* %5, align 1
- %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
store i32 %call3, i32* %ptr, align 4
call void @end_addrof()
%call4 = call signext i16 @get_small_nonchar()
- %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i64 0
store i16 %call4, i16* %arrayidx, align 2
call void @end_small_nonchar()
%call5 = call i32 @get_large_nonchar()
- %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i64 0
store i32 %call5, i32* %arrayidx6, align 4
call void @end_large_nonchar()
%call7 = call signext i8 @get_small_char()
- %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ %arrayidx8 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i64 0
store i8 %call7, i8* %arrayidx8, align 1
call void @end_small_char()
%call9 = call signext i8 @get_large_char()
- %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx10 = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call9, i8* %arrayidx10, align 1
call void @end_large_char()
%call11 = call signext i8 @get_struct_large_char()
- %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
- %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8], [8 x i8]* %foo, i32 0, i64 0
store i8 %call11, i8* %arrayidx12, align 1
call void @end_struct_large_char()
%call13 = call signext i8 @get_struct_small_char()
- %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
- %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ %foo14 = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8], [2 x i8]* %foo14, i32 0, i64 0
store i8 %call13, i8* %arrayidx15, align 1
call void @end_struct_small_char()
%call16 = call i32 @get_struct_large_nonchar()
- %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
- %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar, %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32], [8 x i32]* %foo17, i32 0, i64 0
store i32 %call16, i32* %arrayidx18, align 4
call void @end_struct_large_nonchar()
%call19 = call signext i16 @get_struct_small_nonchar()
- %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
- %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16], [2 x i16]* %foo20, i32 0, i64 0
store i16 %call19, i16* %arrayidx21, align 2
call void @end_struct_small_nonchar()
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
- %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
- %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
- %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
%0 = load i32* %x, align 4
%1 = load i32* %y, align 4
%2 = load i32* %z, align 4
- %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
%4 = load i64* %3, align 1
- %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
%6 = load i16* %5, align 1
- %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
store i32 %call3, i32* %ptr, align 4
call void @end_addrof()
%call4 = call signext i16 @get_small_nonchar()
- %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i64 0
store i16 %call4, i16* %arrayidx, align 2
call void @end_small_nonchar()
%call5 = call i32 @get_large_nonchar()
- %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i64 0
store i32 %call5, i32* %arrayidx6, align 4
call void @end_large_nonchar()
%call7 = call signext i8 @get_small_char()
- %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ %arrayidx8 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i64 0
store i8 %call7, i8* %arrayidx8, align 1
call void @end_small_char()
%call9 = call signext i8 @get_large_char()
- %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx10 = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call9, i8* %arrayidx10, align 1
call void @end_large_char()
%call11 = call signext i8 @get_struct_large_char()
- %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
- %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8], [8 x i8]* %foo, i32 0, i64 0
store i8 %call11, i8* %arrayidx12, align 1
call void @end_struct_large_char()
%call13 = call signext i8 @get_struct_small_char()
- %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
- %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ %foo14 = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8], [2 x i8]* %foo14, i32 0, i64 0
store i8 %call13, i8* %arrayidx15, align 1
call void @end_struct_small_char()
%call16 = call i32 @get_struct_large_nonchar()
- %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
- %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar, %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32], [8 x i32]* %foo17, i32 0, i64 0
store i32 %call16, i32* %arrayidx18, align 4
call void @end_struct_large_nonchar()
%call19 = call signext i16 @get_struct_small_nonchar()
- %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
- %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16], [2 x i16]* %foo20, i32 0, i64 0
store i16 %call19, i16* %arrayidx21, align 2
call void @end_struct_small_nonchar()
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
- %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
- %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
- %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
%0 = load i32* %x, align 4
%1 = load i32* %y, align 4
%2 = load i32* %z, align 4
- %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
%4 = load i64* %3, align 1
- %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
%6 = load i16* %5, align 1
- %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
%d1 = alloca %struct.struct_large_nonchar, align 8
%d2 = alloca %struct.struct_small_nonchar, align 2
%call = call signext i8 @get_struct_small_char()
- %foo = getelementptr inbounds %struct.struct_small_char* %a, i32 0, i32 0
- %arrayidx = getelementptr inbounds [2 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %foo, i32 0, i64 0
store i8 %call, i8* %arrayidx, align 1
call void @end_struct_small_char()
%call1 = call signext i8 @get_struct_large_char2()
- %foo2 = getelementptr inbounds %struct.struct_large_char2* %b, i32 0, i32 1
- %arrayidx3 = getelementptr inbounds [8 x i8]* %foo2, i32 0, i64 0
+ %foo2 = getelementptr inbounds %struct.struct_large_char2, %struct.struct_large_char2* %b, i32 0, i32 1
+ %arrayidx3 = getelementptr inbounds [8 x i8], [8 x i8]* %foo2, i32 0, i64 0
store i8 %call1, i8* %arrayidx3, align 1
call void @end_struct_large_char2()
%0 = bitcast %struct.struct_large_char2* %b to %struct.struct_large_char*
- %coerce.dive = getelementptr %struct.struct_large_char* %0, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %0, i32 0, i32 0
%1 = bitcast [8 x i8]* %coerce.dive to i64*
%2 = load i64* %1, align 1
- %coerce.dive4 = getelementptr %struct.struct_small_char* %a, i32 0, i32 0
+ %coerce.dive4 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %a, i32 0, i32 0
%3 = bitcast [2 x i8]* %coerce.dive4 to i16*
%4 = load i16* %3, align 1
- %coerce.dive5 = getelementptr %struct.struct_small_nonchar* %d2, i32 0, i32 0
+ %coerce.dive5 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d2, i32 0, i32 0
%5 = bitcast [2 x i16]* %coerce.dive5 to i32*
%6 = load i32* %5, align 1
call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 8 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0)
; CHECK-THUMB2-NEXT: mov sp, r4
%a = alloca i8, align 4096
%0 = load double* %d, align 4
- %arrayidx1 = getelementptr inbounds double* %d, i32 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i32 1
%1 = load double* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds double* %d, i32 2
+ %arrayidx2 = getelementptr inbounds double, double* %d, i32 2
%2 = load double* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds double* %d, i32 3
+ %arrayidx3 = getelementptr inbounds double, double* %d, i32 3
%3 = load double* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds double* %d, i32 4
+ %arrayidx4 = getelementptr inbounds double, double* %d, i32 4
%4 = load double* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds double* %d, i32 5
+ %arrayidx5 = getelementptr inbounds double, double* %d, i32 5
%5 = load double* %arrayidx5, align 4
- %arrayidx6 = getelementptr inbounds double* %d, i32 6
+ %arrayidx6 = getelementptr inbounds double, double* %d, i32 6
%6 = load double* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds double* %d, i32 7
+ %arrayidx7 = getelementptr inbounds double, double* %d, i32 7
%7 = load double* %arrayidx7, align 4
- %arrayidx8 = getelementptr inbounds double* %d, i32 8
+ %arrayidx8 = getelementptr inbounds double, double* %d, i32 8
%8 = load double* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds double* %d, i32 9
+ %arrayidx9 = getelementptr inbounds double, double* %d, i32 9
%9 = load double* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds double* %d, i32 10
+ %arrayidx10 = getelementptr inbounds double, double* %d, i32 10
%10 = load double* %arrayidx10, align 4
- %arrayidx11 = getelementptr inbounds double* %d, i32 11
+ %arrayidx11 = getelementptr inbounds double, double* %d, i32 11
%11 = load double* %arrayidx11, align 4
- %arrayidx12 = getelementptr inbounds double* %d, i32 12
+ %arrayidx12 = getelementptr inbounds double, double* %d, i32 12
%12 = load double* %arrayidx12, align 4
- %arrayidx13 = getelementptr inbounds double* %d, i32 13
+ %arrayidx13 = getelementptr inbounds double, double* %d, i32 13
%13 = load double* %arrayidx13, align 4
- %arrayidx14 = getelementptr inbounds double* %d, i32 14
+ %arrayidx14 = getelementptr inbounds double, double* %d, i32 14
%14 = load double* %arrayidx14, align 4
- %arrayidx15 = getelementptr inbounds double* %d, i32 15
+ %arrayidx15 = getelementptr inbounds double, double* %d, i32 15
%15 = load double* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds double* %d, i32 16
+ %arrayidx16 = getelementptr inbounds double, double* %d, i32 16
%16 = load double* %arrayidx16, align 4
- %arrayidx17 = getelementptr inbounds double* %d, i32 17
+ %arrayidx17 = getelementptr inbounds double, double* %d, i32 17
%17 = load double* %arrayidx17, align 4
- %arrayidx18 = getelementptr inbounds double* %d, i32 18
+ %arrayidx18 = getelementptr inbounds double, double* %d, i32 18
%18 = load double* %arrayidx18, align 4
- %arrayidx19 = getelementptr inbounds double* %d, i32 19
+ %arrayidx19 = getelementptr inbounds double, double* %d, i32 19
%19 = load double* %arrayidx19, align 4
- %arrayidx20 = getelementptr inbounds double* %d, i32 20
+ %arrayidx20 = getelementptr inbounds double, double* %d, i32 20
%20 = load double* %arrayidx20, align 4
- %arrayidx21 = getelementptr inbounds double* %d, i32 21
+ %arrayidx21 = getelementptr inbounds double, double* %d, i32 21
%21 = load double* %arrayidx21, align 4
- %arrayidx22 = getelementptr inbounds double* %d, i32 22
+ %arrayidx22 = getelementptr inbounds double, double* %d, i32 22
%22 = load double* %arrayidx22, align 4
- %arrayidx23 = getelementptr inbounds double* %d, i32 23
+ %arrayidx23 = getelementptr inbounds double, double* %d, i32 23
%23 = load double* %arrayidx23, align 4
- %arrayidx24 = getelementptr inbounds double* %d, i32 24
+ %arrayidx24 = getelementptr inbounds double, double* %d, i32 24
%24 = load double* %arrayidx24, align 4
- %arrayidx25 = getelementptr inbounds double* %d, i32 25
+ %arrayidx25 = getelementptr inbounds double, double* %d, i32 25
%25 = load double* %arrayidx25, align 4
- %arrayidx26 = getelementptr inbounds double* %d, i32 26
+ %arrayidx26 = getelementptr inbounds double, double* %d, i32 26
%26 = load double* %arrayidx26, align 4
- %arrayidx27 = getelementptr inbounds double* %d, i32 27
+ %arrayidx27 = getelementptr inbounds double, double* %d, i32 27
%27 = load double* %arrayidx27, align 4
- %arrayidx28 = getelementptr inbounds double* %d, i32 28
+ %arrayidx28 = getelementptr inbounds double, double* %d, i32 28
%28 = load double* %arrayidx28, align 4
- %arrayidx29 = getelementptr inbounds double* %d, i32 29
+ %arrayidx29 = getelementptr inbounds double, double* %d, i32 29
%29 = load double* %arrayidx29, align 4
%div = fdiv double %29, %28
%div30 = fdiv double %div, %27
%div86 = fdiv double %div85, %29
%mul = fmul double %div57, %div86
%conv = fptosi double %mul to i32
- %add.ptr = getelementptr inbounds i8* %a, i32 %conv
+ %add.ptr = getelementptr inbounds i8, i8* %a, i32 %conv
ret i8* %add.ptr
}
define i32 @main() #0 {
entry:
%title = alloca [15 x i8], align 1
- %0 = getelementptr inbounds [15 x i8]* %title, i32 0, i32 0
+ %0 = getelementptr inbounds [15 x i8], [15 x i8]* %title, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* getelementptr inbounds ([15 x i8]* @main.title, i32 0, i32 0), i32 15, i32 1, i1 false)
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8* %0) #3
ret i32 0
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %1)
- %2 = getelementptr inbounds [256 x i32]* %a1, i32 0, i32 0
+ %2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
call void @llvm.lifetime.end(i64 1024, i8* %1)
define void @test1(i32* %X, i32* %A, i32** %dest) {
%B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
+ %Y = getelementptr i32, i32* %X, i32 4 ; <i32*> [#uses=2]
store i32 %B, i32* %Y
store i32* %Y, i32** %dest
ret void
define i16* @test2(i16* %X, i32* %A) {
%B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i16* %X, i32 4 ; <i16*> [#uses=2]
+ %Y = getelementptr i16, i16* %X, i32 4 ; <i16*> [#uses=2]
%tmp = trunc i32 %B to i16 ; <i16> [#uses=1]
store i16 %tmp, i16* %Y
ret i16* %Y
entry:
%0 = load %structB** @img, align 4
%1 = load i32* undef, align 4
- %mb_data = getelementptr inbounds %structB* %0, i32 0, i32 61
+ %mb_data = getelementptr inbounds %structB, %structB* %0, i32 0, i32 61
%2 = load %structK** %mb_data, align 4
br label %for.body
if.end164: ; preds = %for.body119, %for.cond47.preheader, %if.end43
store i32*** null, i32**** @cofDC, align 4
- %mb_type = getelementptr inbounds %structK* %2, i32 %1, i32 8
+ %mb_type = getelementptr inbounds %structK, %structK* %2, i32 %1, i32 8
br i1 undef, label %if.end230, label %if.then169
if.then169: ; preds = %if.end164
if.end230: ; preds = %if.end164
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* bitcast ([4 x i32]* @b8mode to i8*), i32 16, i32 4, i1 false)
- %b8pdir = getelementptr inbounds %structK* %2, i32 %1, i32 15
+ %b8pdir = getelementptr inbounds %structK, %structK* %2, i32 %1, i32 15
%3 = bitcast [4 x i32]* %b8pdir to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* bitcast ([4 x i32]* @b8pdir to i8*), i32 16, i32 4, i1 false)
br i1 undef, label %if.end236, label %if.then233
if.end249: ; preds = %if.then248, %land.lhs.true246, %if.end236
%5 = load i32* @luma_transform_size_8x8_flag, align 4
%6 = load %structA** @rdopt, align 4
- %luma_transform_size_8x8_flag264 = getelementptr inbounds %structA* %6, i32 0, i32 21
+ %luma_transform_size_8x8_flag264 = getelementptr inbounds %structA, %structA* %6, i32 0, i32 21
store i32 %5, i32* %luma_transform_size_8x8_flag264, align 4
%7 = load i32* undef, align 4
%add281 = add nsw i32 %7, 0
for.body285: ; preds = %for.inc503, %if.end249
%8 = phi %structB* [ undef, %if.end249 ], [ %.pre1155, %for.inc503 ]
%i.21103 = phi i32 [ 0, %if.end249 ], [ %inc504, %for.inc503 ]
- %block_x286 = getelementptr inbounds %structB* %8, i32 0, i32 37
+ %block_x286 = getelementptr inbounds %structB, %structB* %8, i32 0, i32 37
%9 = load i32* %block_x286, align 4
%add287 = add nsw i32 %9, %i.21103
%shr289 = ashr i32 %i.21103, 1
%add290 = add nsw i32 %shr289, 0
- %arrayidx292 = getelementptr inbounds %structK* %2, i32 %1, i32 15, i32 %add290
+ %arrayidx292 = getelementptr inbounds %structK, %structK* %2, i32 %1, i32 15, i32 %add290
%10 = load %structM** @enc_picture, align 4
- %ref_idx = getelementptr inbounds %structM* %10, i32 0, i32 35
+ %ref_idx = getelementptr inbounds %structM, %structM* %10, i32 0, i32 35
%11 = load i8**** %ref_idx, align 4
%12 = load i8*** %11, align 4
- %arrayidx313 = getelementptr inbounds i8** %12, i32 %add281
+ %arrayidx313 = getelementptr inbounds i8*, i8** %12, i32 %add281
%13 = load i8** %arrayidx313, align 4
- %arrayidx314 = getelementptr inbounds i8* %13, i32 %add287
+ %arrayidx314 = getelementptr inbounds i8, i8* %13, i32 %add287
store i8 -1, i8* %arrayidx314, align 1
%14 = load %structB** @img, align 4
- %MbaffFrameFlag327 = getelementptr inbounds %structB* %14, i32 0, i32 100
+ %MbaffFrameFlag327 = getelementptr inbounds %structB, %structB* %14, i32 0, i32 100
%15 = load i32* %MbaffFrameFlag327, align 4
%tobool328 = icmp eq i32 %15, 0
br i1 %tobool328, label %if.end454, label %if.then329
; THUMB-LABEL: f4
; THUMB: blx _consumestruct
entry:
- %addr = getelementptr inbounds %struct.SmallStruct* %s, i32 0, i32 0
+ %addr = getelementptr inbounds %struct.SmallStruct, %struct.SmallStruct* %s, i32 0, i32 0
%0 = bitcast i32* %addr to i8*
tail call void @consumestruct(i8* %0, i32 80) optsize
ret void
; THUMB-LABEL: f6
; THUMB: b.w _consumestruct
entry:
- %addr = getelementptr inbounds %struct.SmallStruct* %s, i32 0, i32 0
+ %addr = getelementptr inbounds %struct.SmallStruct, %struct.SmallStruct* %s, i32 0, i32 0
%0 = bitcast i32* %addr to i8*
tail call void @consumestruct(i8* %0, i32 80) optsize
ret void
define void @test_vldm(double* %x, double * %y) {
entry:
- %addr1 = getelementptr double * %x, i32 1
- %addr2 = getelementptr double * %x, i32 2
- %addr3 = getelementptr double * %x, i32 3
+ %addr1 = getelementptr double, double * %x, i32 1
+ %addr2 = getelementptr double, double * %x, i32 2
+ %addr3 = getelementptr double, double * %x, i32 3
%d0 = load double * %y
%d1 = load double * %x
%d2 = load double * %addr1
define i32 @fn(i32* nocapture %opcodes) nounwind readonly ssp {
entry:
%0 = load i32* %opcodes, align 4
- %arrayidx = getelementptr inbounds [3 x i8*]* @fn.codetable, i32 0, i32 %0
+ %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @fn.codetable, i32 0, i32 %0
br label %indirectgoto
INCREMENT: ; preds = %indirectgoto
%inc = add nsw i32 %result.0, 1
%1 = load i32* %opcodes.addr.0, align 4
- %arrayidx2 = getelementptr inbounds [3 x i8*]* @fn.codetable, i32 0, i32 %1
+ %arrayidx2 = getelementptr inbounds [3 x i8*], [3 x i8*]* @fn.codetable, i32 0, i32 %1
br label %indirectgoto
DECREMENT: ; preds = %indirectgoto
%dec = add nsw i32 %result.0, -1
%2 = load i32* %opcodes.addr.0, align 4
- %arrayidx4 = getelementptr inbounds [3 x i8*]* @fn.codetable, i32 0, i32 %2
+ %arrayidx4 = getelementptr inbounds [3 x i8*], [3 x i8*]* @fn.codetable, i32 0, i32 %2
br label %indirectgoto
indirectgoto: ; preds = %DECREMENT, %INCREMENT, %entry
%result.0 = phi i32 [ 0, %entry ], [ %dec, %DECREMENT ], [ %inc, %INCREMENT ]
%opcodes.pn = phi i32* [ %opcodes, %entry ], [ %opcodes.addr.0, %DECREMENT ], [ %opcodes.addr.0, %INCREMENT ]
%indirect.goto.dest.in = phi i8** [ %arrayidx, %entry ], [ %arrayidx4, %DECREMENT ], [ %arrayidx2, %INCREMENT ]
- %opcodes.addr.0 = getelementptr inbounds i32* %opcodes.pn, i32 1
+ %opcodes.addr.0 = getelementptr inbounds i32, i32* %opcodes.pn, i32 1
%indirect.goto.dest = load i8** %indirect.goto.dest.in, align 4
indirectbr i8* %indirect.goto.dest, [label %RETURN, label %INCREMENT, label %DECREMENT]
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
%i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i8* %a, i32 %i.09
+ %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.09
%0 = load i8* %arrayidx, align 1
%conv6 = zext i8 %0 to i32
- %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.09
+ %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.09
%1 = load i8* %arrayidx1, align 1
%conv27 = zext i8 %1 to i32
%add = add nsw i32 %conv27, %conv6
%conv3 = trunc i32 %add to i8
- %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.09
+ %arrayidx4 = getelementptr inbounds i8, i8* %c, i32 %i.09
store i8 %conv3, i8* %arrayidx4, align 1
%add5 = add i32 %i.09, %s
%cmp = icmp ult i32 %add5, %len
; CHECK: %for.body.1
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
- %arrayidx.1 = getelementptr inbounds i8* %a, i32 %add5
+ %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %add5
%2 = load i8* %arrayidx.1, align 1
%conv6.1 = zext i8 %2 to i32
- %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %add5
+ %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %add5
%3 = load i8* %arrayidx1.1, align 1
%conv27.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv27.1, %conv6.1
%conv3.1 = trunc i32 %add.1 to i8
- %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %add5
+ %arrayidx4.1 = getelementptr inbounds i8, i8* %c, i32 %add5
store i8 %conv3.1, i8* %arrayidx4.1, align 1
%add5.1 = add i32 %add5, %s
%cmp.1 = icmp ult i32 %add5.1, %len
; CHECK: %for.body.2
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
- %arrayidx.2 = getelementptr inbounds i8* %a, i32 %add5.1
+ %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %add5.1
%4 = load i8* %arrayidx.2, align 1
%conv6.2 = zext i8 %4 to i32
- %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %add5.1
+ %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %add5.1
%5 = load i8* %arrayidx1.2, align 1
%conv27.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv27.2, %conv6.2
%conv3.2 = trunc i32 %add.2 to i8
- %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %add5.1
+ %arrayidx4.2 = getelementptr inbounds i8, i8* %c, i32 %add5.1
store i8 %conv3.2, i8* %arrayidx4.2, align 1
%add5.2 = add i32 %add5.1, %s
%cmp.2 = icmp ult i32 %add5.2, %len
; CHECK: %for.body.3
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
; CHECK: ldrb {{r[0-9]|lr}}, [{{r[0-9]|lr}}, {{r[0-9]|lr}}]!
- %arrayidx.3 = getelementptr inbounds i8* %a, i32 %add5.2
+ %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %add5.2
%6 = load i8* %arrayidx.3, align 1
%conv6.3 = zext i8 %6 to i32
- %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %add5.2
+ %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %add5.2
%7 = load i8* %arrayidx1.3, align 1
%conv27.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv27.3, %conv6.3
%conv3.3 = trunc i32 %add.3 to i8
- %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %add5.2
+ %arrayidx4.3 = getelementptr inbounds i8, i8* %c, i32 %add5.2
store i8 %conv3.3, i8* %arrayidx4.3, align 1
%add5.3 = add i32 %add5.2, %s
%cmp.3 = icmp ult i32 %add5.3, %len
; CHECKT2D: b.w _B_ctor_base
%0 = bitcast %struct.C* %this to %struct.A*
%call = tail call %struct.A* @A_ctor_base(%struct.A* %0)
- %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
%call2 = tail call %struct.B* @B_ctor_base(%struct.B* %1, i32 %x)
ret %struct.C* %this
}
; CHECKT2D-NOT: b.w _B_ctor_base_nothisret
%0 = bitcast %struct.C* %this to %struct.A*
%call = tail call %struct.A* @A_ctor_base_nothisret(%struct.A* %0)
- %1 = getelementptr inbounds %struct.C* %this, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.C, %struct.C* %this, i32 0, i32 0
%call2 = tail call %struct.B* @B_ctor_base_nothisret(%struct.B* %1, i32 %x)
ret %struct.C* %this
}
; CHECKT2D: blx _B_ctor_complete
; CHECKT2D-NOT: mov r0, {{r[0-9]+}}
; CHECKT2D: b.w _B_ctor_complete
- %b = getelementptr inbounds %struct.D* %this, i32 0, i32 0
+ %b = getelementptr inbounds %struct.D, %struct.D* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
ret %struct.D* %this
; CHECKELF-NOT: b B_ctor_complete
; CHECKT2D-LABEL: E_ctor_base:
; CHECKT2D-NOT: b.w _B_ctor_complete
- %b = getelementptr inbounds %struct.E* %this, i32 0, i32 0
+ %b = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 0
%call = tail call %struct.B* @B_ctor_complete(%struct.B* %b, i32 %x)
- %b2 = getelementptr inbounds %struct.E* %this, i32 0, i32 1
+ %b2 = getelementptr inbounds %struct.E, %struct.E* %this, i32 0, i32 1
%call2 = tail call %struct.B* @B_ctor_complete(%struct.B* %b2, i32 %x)
ret %struct.E* %this
}
%2 = alloca [1026 x i8], align 1
; CHECK: mov [[R0:r[0-9]+]], sp
; CHECK: adds {{r[0-9]+}}, [[R0]], {{r[0-9]+}}
- %3 = getelementptr inbounds [1026 x i8]* %2, i32 0, i32 0
+ %3 = getelementptr inbounds [1026 x i8], [1026 x i8]* %2, i32 0, i32 0
%4 = call i32 @_called_func(i8* %3, i32* %size) nounwind
%5 = icmp eq i32 %4, 0
br i1 %5, label %bb2, label %bb3
%struct.B = type { float, float, i32, i32, i32, [0 x i8] }
define i8 @f1(%struct.A* %d) {
- %tmp2 = getelementptr %struct.A* %d, i32 0, i32 4
+ %tmp2 = getelementptr %struct.A, %struct.A* %d, i32 0, i32 4
%tmp23 = bitcast i16* %tmp2 to i32*
%tmp4 = load i32* %tmp23
%tmp512 = lshr i32 %tmp4, 24
}
define i32 @f2(%struct.A* %d) {
- %tmp2 = getelementptr %struct.A* %d, i32 0, i32 4
+ %tmp2 = getelementptr %struct.A, %struct.A* %d, i32 0, i32 4
%tmp23 = bitcast i16* %tmp2 to i32*
%tmp4 = load i32* %tmp23
%tmp512 = lshr i32 %tmp4, 24
define void @v64_v8i8_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v8i8_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <8 x i8>*
%vo = bitcast i8* %po to <8 x i8>*
;CHECK: vld1.8
define void @v64_v4i16_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v4i16_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x i16>*
%vo = bitcast i8* %po to <4 x i16>*
;CHECK: vld1.8
define void @v64_v2i32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v2i32_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x i32>*
%vo = bitcast i8* %po to <2 x i32>*
;CHECK: vld1.8
define void @v64_v2f32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v2f32_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x float>*
%vo = bitcast i8* %po to <2 x float>*
;CHECK: vld1.8
define void @v128_v16i8_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v16i8_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <16 x i8>*
%vo = bitcast i8* %po to <16 x i8>*
;CHECK: vld1.8
define void @v128_v8i16_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v8i16_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <8 x i16>*
%vo = bitcast i8* %po to <8 x i16>*
;CHECK: vld1.8
define void @v128_v4i32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v4i32_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x i32>*
%vo = bitcast i8* %po to <4 x i32>*
;CHECK: vld1.8
define void @v128_v2i64_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v2i64_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x i64>*
%vo = bitcast i8* %po to <2 x i64>*
;CHECK: vld1.8
define void @v128_v4f32_1(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v4f32_1:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x float>*
%vo = bitcast i8* %po to <4 x float>*
;CHECK: vld1.8
define void @v64_v8i8_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v8i8_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <8 x i8>*
%vo = bitcast i8* %po to <8 x i8>*
;CHECK: vld1.16
define void @v64_v4i16_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v4i16_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x i16>*
%vo = bitcast i8* %po to <4 x i16>*
;CHECK: vld1.16
define void @v64_v2i32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v2i32_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x i32>*
%vo = bitcast i8* %po to <2 x i32>*
;CHECK: vld1.16
define void @v64_v2f32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v2f32_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x float>*
%vo = bitcast i8* %po to <2 x float>*
;CHECK: vld1.16
define void @v128_v16i8_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v16i8_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <16 x i8>*
%vo = bitcast i8* %po to <16 x i8>*
;CHECK: vld1.16
define void @v128_v8i16_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v8i16_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <8 x i16>*
%vo = bitcast i8* %po to <8 x i16>*
;CHECK: vld1.16
define void @v128_v4i32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v4i32_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x i32>*
%vo = bitcast i8* %po to <4 x i32>*
;CHECK: vld1.16
define void @v128_v2i64_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v2i64_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x i64>*
%vo = bitcast i8* %po to <2 x i64>*
;CHECK: vld1.16
define void @v128_v4f32_2(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v4f32_2:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x float>*
%vo = bitcast i8* %po to <4 x float>*
;CHECK: vld1.16
define void @v64_v8i8_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v8i8_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <8 x i8>*
%vo = bitcast i8* %po to <8 x i8>*
;CHECK: vldr
define void @v64_v4i16_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v4i16_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x i16>*
%vo = bitcast i8* %po to <4 x i16>*
;CHECK: vldr
define void @v64_v2i32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v2i32_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x i32>*
%vo = bitcast i8* %po to <2 x i32>*
;CHECK: vldr
define void @v64_v2f32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v64_v2f32_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x float>*
%vo = bitcast i8* %po to <2 x float>*
;CHECK: vldr
define void @v128_v16i8_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v16i8_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <16 x i8>*
%vo = bitcast i8* %po to <16 x i8>*
;CHECK: vld1.32
define void @v128_v8i16_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v8i16_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <8 x i16>*
%vo = bitcast i8* %po to <8 x i16>*
;CHECK: vld1.32
define void @v128_v4i32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v4i32_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x i32>*
%vo = bitcast i8* %po to <4 x i32>*
;CHECK: vld1.32
define void @v128_v2i64_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v2i64_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <2 x i64>*
%vo = bitcast i8* %po to <2 x i64>*
;CHECK: vld1.32
define void @v128_v4f32_4(i8* noalias nocapture %out, i8* noalias nocapture %in) nounwind {
;CHECK-LABEL: v128_v4f32_4:
entry:
- %po = getelementptr i8* %out, i32 0
- %pi = getelementptr i8* %in, i32 0
+ %po = getelementptr i8, i8* %out, i32 0
+ %pi = getelementptr i8, i8* %in, i32 0
%vi = bitcast i8* %pi to <4 x float>*
%vo = bitcast i8* %po to <4 x float>*
;CHECK: vld1.32
; CHECK: ldr r0, [r0]
; CHECK: bx lr
%0 = sext i16 undef to i32
- %1 = getelementptr inbounds i32* %a, i32 %0
+ %1 = getelementptr inbounds i32, i32* %a, i32 %0
%2 = load i32* %1, align 4
ret i32 %2
}
;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <8 x i8>** %ptr
%lA = load <8 x i8>* %A, align 1
- %inc = getelementptr <8 x i8>* %A, i38 1
+ %inc = getelementptr <8 x i8>, <8 x i8>* %A, i38 1
store <8 x i8>* %inc, <8 x i8>** %ptr
ret <8 x i8> %lA
}
;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i16>** %ptr
%lA = load <4 x i16>* %A, align 1
- %inc = getelementptr <4 x i16>* %A, i34 1
+ %inc = getelementptr <4 x i16>, <4 x i16>* %A, i34 1
store <4 x i16>* %inc, <4 x i16>** %ptr
ret <4 x i16> %lA
}
;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i32>** %ptr
%lA = load <2 x i32>* %A, align 1
- %inc = getelementptr <2 x i32>* %A, i32 1
+ %inc = getelementptr <2 x i32>, <2 x i32>* %A, i32 1
store <2 x i32>* %inc, <2 x i32>** %ptr
ret <2 x i32> %lA
}
;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x float>** %ptr
%lA = load <2 x float>* %A, align 1
- %inc = getelementptr <2 x float>* %A, i32 1
+ %inc = getelementptr <2 x float>, <2 x float>* %A, i32 1
store <2 x float>* %inc, <2 x float>** %ptr
ret <2 x float> %lA
}
;CHECK: vld1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <1 x i64>** %ptr
%lA = load <1 x i64>* %A, align 1
- %inc = getelementptr <1 x i64>* %A, i31 1
+ %inc = getelementptr <1 x i64>, <1 x i64>* %A, i31 1
store <1 x i64>* %inc, <1 x i64>** %ptr
ret <1 x i64> %lA
}
;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <16 x i8>** %ptr
%lA = load <16 x i8>* %A, align 1
- %inc = getelementptr <16 x i8>* %A, i316 1
+ %inc = getelementptr <16 x i8>, <16 x i8>* %A, i316 1
store <16 x i8>* %inc, <16 x i8>** %ptr
ret <16 x i8> %lA
}
;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <8 x i16>** %ptr
%lA = load <8 x i16>* %A, align 1
- %inc = getelementptr <8 x i16>* %A, i38 1
+ %inc = getelementptr <8 x i16>, <8 x i16>* %A, i38 1
store <8 x i16>* %inc, <8 x i16>** %ptr
ret <8 x i16> %lA
}
;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i32>** %ptr
%lA = load <4 x i32>* %A, align 1
- %inc = getelementptr <4 x i32>* %A, i34 1
+ %inc = getelementptr <4 x i32>, <4 x i32>* %A, i34 1
store <4 x i32>* %inc, <4 x i32>** %ptr
ret <4 x i32> %lA
}
;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x float>** %ptr
%lA = load <4 x float>* %A, align 1
- %inc = getelementptr <4 x float>* %A, i34 1
+ %inc = getelementptr <4 x float>, <4 x float>* %A, i34 1
store <4 x float>* %inc, <4 x float>** %ptr
ret <4 x float> %lA
}
;CHECK: vld1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
%lA = load <2 x i64>* %A, align 1
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret <2 x i64> %lA
}
;CHECK: vld1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
%lA = load <2 x i64>* %A, align 2
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret <2 x i64> %lA
}
;CHECK: vld1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
%lA = load <2 x i64>* %A, align 4
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret <2 x i64> %lA
}
;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
%lA = load <2 x i64>* %A, align 8
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret <2 x i64> %lA
}
;CHECK: vld1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
%A = load <2 x i64>** %ptr
%lA = load <2 x i64>* %A, align 16
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret <2 x i64> %lA
}
;CHECK: vmovl.u16 {{q[0-9]+}}, {{d[0-9]+}}
%A = load <4 x i8>** %ptr
%lA = load <4 x i8>* %A, align 4
- %inc = getelementptr <4 x i8>* %A, i38 4
+ %inc = getelementptr <4 x i8>, <4 x i8>* %A, i38 4
store <4 x i8>* %inc, <4 x i8>** %ptr
%zlA = zext <4 x i8> %lA to <4 x i32>
ret <4 x i32> %zlA
; CHECK: vldmia
define void @test(<8 x i64>* %src) #0 {
entry:
- %0 = getelementptr inbounds <8 x i64>* %src, i32 0
+ %0 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 0
%1 = load <8 x i64>* %0, align 8
- %2 = getelementptr inbounds <8 x i64>* %src, i32 1
+ %2 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 1
%3 = load <8 x i64>* %2, align 8
- %4 = getelementptr inbounds <8 x i64>* %src, i32 2
+ %4 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 2
%5 = load <8 x i64>* %4, align 8
- %6 = getelementptr inbounds <8 x i64>* %src, i32 3
+ %6 = getelementptr inbounds <8 x i64>, <8 x i64>* %src, i32 3
%7 = load <8 x i64>* %6, align 8
%8 = shufflevector <8 x i64> %1, <8 x i64> %3, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <8 x i8>** %ptr
store <8 x i8> %val, <8 x i8>* %A, align 1
- %inc = getelementptr <8 x i8>* %A, i38 1
+ %inc = getelementptr <8 x i8>, <8 x i8>* %A, i38 1
store <8 x i8>* %inc, <8 x i8>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i16>** %ptr
store <4 x i16> %val, <4 x i16>* %A, align 1
- %inc = getelementptr <4 x i16>* %A, i34 1
+ %inc = getelementptr <4 x i16>, <4 x i16>* %A, i34 1
store <4 x i16>* %inc, <4 x i16>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i32>** %ptr
store <2 x i32> %val, <2 x i32>* %A, align 1
- %inc = getelementptr <2 x i32>* %A, i32 1
+ %inc = getelementptr <2 x i32>, <2 x i32>* %A, i32 1
store <2 x i32>* %inc, <2 x i32>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x float>** %ptr
store <2 x float> %val, <2 x float>* %A, align 1
- %inc = getelementptr <2 x float>* %A, i32 1
+ %inc = getelementptr <2 x float>, <2 x float>* %A, i32 1
store <2 x float>* %inc, <2 x float>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <1 x i64>** %ptr
store <1 x i64> %val, <1 x i64>* %A, align 1
- %inc = getelementptr <1 x i64>* %A, i31 1
+ %inc = getelementptr <1 x i64>, <1 x i64>* %A, i31 1
store <1 x i64>* %inc, <1 x i64>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <16 x i8>** %ptr
store <16 x i8> %val, <16 x i8>* %A, align 1
- %inc = getelementptr <16 x i8>* %A, i316 1
+ %inc = getelementptr <16 x i8>, <16 x i8>* %A, i316 1
store <16 x i8>* %inc, <16 x i8>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <8 x i16>** %ptr
store <8 x i16> %val, <8 x i16>* %A, align 1
- %inc = getelementptr <8 x i16>* %A, i38 1
+ %inc = getelementptr <8 x i16>, <8 x i16>* %A, i38 1
store <8 x i16>* %inc, <8 x i16>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x i32>** %ptr
store <4 x i32> %val, <4 x i32>* %A, align 1
- %inc = getelementptr <4 x i32>* %A, i34 1
+ %inc = getelementptr <4 x i32>, <4 x i32>* %A, i34 1
store <4 x i32>* %inc, <4 x i32>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <4 x float>** %ptr
store <4 x float> %val, <4 x float>* %A, align 1
- %inc = getelementptr <4 x float>* %A, i34 1
+ %inc = getelementptr <4 x float>, <4 x float>* %A, i34 1
store <4 x float>* %inc, <4 x float>** %ptr
ret void
}
;CHECK: vst1.8 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
store <2 x i64> %val, <2 x i64>* %A, align 1
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret void
}
;CHECK: vst1.16 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
store <2 x i64> %val, <2 x i64>* %A, align 2
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret void
}
;CHECK: vst1.32 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
store <2 x i64> %val, <2 x i64>* %A, align 4
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret void
}
;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}]!
%A = load <2 x i64>** %ptr
store <2 x i64> %val, <2 x i64>* %A, align 8
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret void
}
;CHECK: vst1.64 {{{d[0-9]+, d[0-9]+}}}, [{{r[0-9]+}}:128]!
%A = load <2 x i64>** %ptr
store <2 x i64> %val, <2 x i64>* %A, align 16
- %inc = getelementptr <2 x i64>* %A, i32 1
+ %inc = getelementptr <2 x i64>, <2 x i64>* %A, i32 1
store <2 x i64>* %inc, <2 x i64>** %ptr
ret void
}
%A = load <4 x i8>** %ptr
%trunc = trunc <4 x i32> %val to <4 x i8>
store <4 x i8> %trunc, <4 x i8>* %A, align 4
- %inc = getelementptr <4 x i8>* %A, i38 4
+ %inc = getelementptr <4 x i8>, <4 x i8>* %A, i38 4
store <4 x i8>* %inc, <4 x i8>** %ptr
ret void
}
;CHECK-LABEL: test_cmp:
entry:
%tmp = load float* %glob ; <float> [#uses=2]
- %tmp3 = getelementptr float* %glob, i32 2 ; <float*> [#uses=1]
+ %tmp3 = getelementptr float, float* %glob, i32 2 ; <float*> [#uses=1]
%tmp4 = load float* %tmp3 ; <float> [#uses=2]
%tmp.upgrd.1 = fcmp oeq float %tmp, %tmp4 ; <i1> [#uses=1]
%tmp5 = fcmp uno float %tmp, %tmp4 ; <i1> [#uses=1]
%A = load i16** %ptr
%tmp0 = bitcast i16* %A to i8*
%tmp1 = call <4 x i16> @llvm.arm.neon.vld1.v4i16(i8* %tmp0, i32 1)
- %tmp2 = getelementptr i16* %A, i32 4
+ %tmp2 = getelementptr i16, i16* %A, i32 4
store i16* %tmp2, i16** %ptr
ret <4 x i16> %tmp1
}
%A = load i32** %ptr
%tmp0 = bitcast i32* %A to i8*
%tmp1 = call <2 x i32> @llvm.arm.neon.vld1.v2i32(i8* %tmp0, i32 1)
- %tmp2 = getelementptr i32* %A, i32 %inc
+ %tmp2 = getelementptr i32, i32* %A, i32 %inc
store i32* %tmp2, i32** %ptr
ret <2 x i32> %tmp1
}
;CHECK: vld1.8 {d16, d17}, [{{r[0-9]+}}:64]!
%A = load i8** %ptr
%tmp1 = call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %A, i32 8)
- %tmp2 = getelementptr i8* %A, i32 16
+ %tmp2 = getelementptr i8, i8* %A, i32 16
store i8* %tmp2, i8** %ptr
ret <16 x i8> %tmp1
}
%tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 1
%tmp4 = fadd <2 x float> %tmp2, %tmp3
- %tmp5 = getelementptr float* %A, i32 4
+ %tmp5 = getelementptr float, float* %A, i32 4
store float* %tmp5, float** %ptr
ret <2 x float> %tmp4
}
%tmp2 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x16x2_t %tmp1, 1
%tmp4 = add <16 x i8> %tmp2, %tmp3
- %tmp5 = getelementptr i8* %A, i32 %inc
+ %tmp5 = getelementptr i8, i8* %A, i32 %inc
store i8* %tmp5, i8** %ptr
ret <16 x i8> %tmp4
}
%tmp2 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 2
%tmp4 = add <4 x i16> %tmp2, %tmp3
- %tmp5 = getelementptr i16* %A, i32 %inc
+ %tmp5 = getelementptr i16, i16* %A, i32 %inc
store i16* %tmp5, i16** %ptr
ret <4 x i16> %tmp4
}
;CHECK: vld1.64 {d16, d17, d18}, [r1:64]!
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x3_t @llvm.arm.neon.vld3.v1i64(i8* %tmp0, i32 16)
- %tmp5 = getelementptr i64* %A, i32 3
+ %tmp5 = getelementptr i64, i64* %A, i32 3
store i64* %tmp5, i64** %ptr
%tmp2 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int64x1x3_t %tmp1, 2
%tmp2 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int32x4x3_t %tmp1, 2
%tmp4 = add <4 x i32> %tmp2, %tmp3
- %tmp5 = getelementptr i32* %A, i32 12
+ %tmp5 = getelementptr i32, i32* %A, i32 12
store i32* %tmp5, i32** %ptr
ret <4 x i32> %tmp4
}
%tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
%tmp4 = add <8 x i8> %tmp2, %tmp3
- %tmp5 = getelementptr i8* %A, i32 %inc
+ %tmp5 = getelementptr i8, i8* %A, i32 %inc
store i8* %tmp5, i8** %ptr
ret <8 x i8> %tmp4
}
;CHECK: vld1.64 {d16, d17, d18, d19}, [r1:256]!
%tmp0 = bitcast i64* %A to i8*
%tmp1 = call %struct.__neon_int64x1x4_t @llvm.arm.neon.vld4.v1i64(i8* %tmp0, i32 64)
- %tmp5 = getelementptr i64* %A, i32 4
+ %tmp5 = getelementptr i64, i64* %A, i32 4
store i64* %tmp5, i64** %ptr
%tmp2 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int64x1x4_t %tmp1, 2
%tmp2 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 0
%tmp3 = extractvalue %struct.__neon_int16x8x4_t %tmp1, 2
%tmp4 = add <8 x i16> %tmp2, %tmp3
- %tmp5 = getelementptr i16* %A, i32 32
+ %tmp5 = getelementptr i16, i16* %A, i32 32
store i16* %tmp5, i16** %ptr
ret <8 x i16> %tmp4
}
%tmp3 = extractvalue %struct.__neon_int4x16x2_t %tmp0, 1
%tmp4 = shufflevector <4 x i16> %tmp3, <4 x i16> undef, <4 x i32> zeroinitializer
%tmp5 = add <4 x i16> %tmp2, %tmp4
- %tmp6 = getelementptr i16* %A, i32 2
+ %tmp6 = getelementptr i16, i16* %A, i32 2
store i16* %tmp6, i16** %ptr
ret <4 x i16> %tmp5
}
%tmp6 = shufflevector <8 x i8> %tmp5, <8 x i8> undef, <8 x i32> zeroinitializer
%tmp7 = add <8 x i8> %tmp2, %tmp4
%tmp8 = add <8 x i8> %tmp7, %tmp6
- %tmp9 = getelementptr i8* %A, i32 %inc
+ %tmp9 = getelementptr i8, i8* %A, i32 %inc
store i8* %tmp9, i8** %ptr
ret <8 x i8> %tmp8
}
%tmp9 = add <4 x i16> %tmp2, %tmp4
%tmp10 = add <4 x i16> %tmp6, %tmp8
%tmp11 = add <4 x i16> %tmp9, %tmp10
- %tmp12 = getelementptr i16* %A, i32 4
+ %tmp12 = getelementptr i16, i16* %A, i32 4
store i16* %tmp12, i16** %ptr
ret <4 x i16> %tmp11
}
%tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
%tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
%tmp5 = add <2 x i32> %tmp3, %tmp4
- %tmp6 = getelementptr i32* %A, i32 2
+ %tmp6 = getelementptr i32, i32* %A, i32 2
store i32* %tmp6, i32** %ptr
ret <2 x i32> %tmp5
}
%tmp5 = extractvalue %struct.__neon_int16x8x3_t %tmp2, 2
%tmp6 = add <8 x i16> %tmp3, %tmp4
%tmp7 = add <8 x i16> %tmp5, %tmp6
- %tmp8 = getelementptr i16* %A, i32 %inc
+ %tmp8 = getelementptr i16, i16* %A, i32 %inc
store i16* %tmp8, i16** %ptr
ret <8 x i16> %tmp7
}
%tmp7 = add <8 x i8> %tmp3, %tmp4
%tmp8 = add <8 x i8> %tmp5, %tmp6
%tmp9 = add <8 x i8> %tmp7, %tmp8
- %tmp10 = getelementptr i8* %A, i32 4
+ %tmp10 = getelementptr i8, i8* %A, i32 4
store i8* %tmp10, i8** %ptr
ret <8 x i8> %tmp9
}
; CHECK: vldr s3, [r0, #8]
; CHECK: vldmia r0, {s0, s1}
; CHECK: vldr s2, [r0, #16]
- %off0 = getelementptr float* %ptr, i32 0
+ %off0 = getelementptr float, float* %ptr, i32 0
%val0 = load float* %off0
- %off1 = getelementptr float* %ptr, i32 1
+ %off1 = getelementptr float, float* %ptr, i32 1
%val1 = load float* %off1
- %off4 = getelementptr float* %ptr, i32 4
+ %off4 = getelementptr float, float* %ptr, i32 4
%val4 = load float* %off4
- %off2 = getelementptr float* %ptr, i32 2
+ %off2 = getelementptr float, float* %ptr, i32 2
%val2 = load float* %off2
%vec1 = insertelement <4 x float> undef, float %val0, i32 0
; CHECK: vldmia
define void @test(i64* %src) #0 {
entry:
- %arrayidx39 = getelementptr inbounds i64* %src, i32 13
+ %arrayidx39 = getelementptr inbounds i64, i64* %src, i32 13
%vecinit285 = shufflevector <16 x i64> undef, <16 x i64> <i64 15, i64 16, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef, i64 undef>, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 16, i32 17>
store <16 x i64> %vecinit285, <16 x i64>* undef, align 128
%0 = load i64* undef, align 8
entry:
;CHECK-LABEL: vdupn128:
;CHECK: vmov.i8 d{{.*}}, #0x80
- %0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
+ %0 = getelementptr inbounds %struct.int8x8_t, %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
store <8 x i8> <i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128, i8 -128>, <8 x i8>* %0, align 8
ret void
}
entry:
;CHECK-LABEL: vdupnneg75:
;CHECK: vmov.i8 d{{.*}}, #0xb5
- %0 = getelementptr inbounds %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
+ %0 = getelementptr inbounds %struct.int8x8_t, %struct.int8x8_t* %agg.result, i32 0, i32 0 ; <<8 x i8>*> [#uses=1]
store <8 x i8> <i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75, i8 -75>, <8 x i8>* %0, align 8
ret void
}
%8 = bitcast double %7 to <8 x i8>
%9 = add <8 x i8> %6, %8
%10 = mul <8 x i8> %9, %2
- %11 = getelementptr inbounds %struct.uint8x8_t* %dst, i32 0, i32 0
+ %11 = getelementptr inbounds %struct.uint8x8_t, %struct.uint8x8_t* %dst, i32 0, i32 0
store <8 x i8> %10, <8 x i8>* %11, align 8
ret void
}
%8 = bitcast double %7 to <8 x i8>
%9 = add <8 x i8> %6, %8
%10 = mul <8 x i8> %2, %9
- %11 = getelementptr inbounds %struct.uint8x8_t* %dst, i32 0, i32 0
+ %11 = getelementptr inbounds %struct.uint8x8_t, %struct.uint8x8_t* %dst, i32 0, i32 0
store <8 x i8> %10, <8 x i8>* %11, align 8
ret void
}
%0 = bitcast float* %source to <4 x float>*
%tmp2 = load <4 x float>* %0, align 4
%tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
- %arrayidx8 = getelementptr inbounds <4 x float>* %dest, i32 11
+ %arrayidx8 = getelementptr inbounds <4 x float>, <4 x float>* %dest, i32 11
store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4
ret void
}
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <2 x float>* %B
call void @llvm.arm.neon.vst1.v2f32(i8* %tmp0, <2 x float> %tmp1, i32 1)
- %tmp2 = getelementptr float* %A, i32 2
+ %tmp2 = getelementptr float, float* %A, i32 2
store float* %tmp2, float** %ptr
ret void
}
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
call void @llvm.arm.neon.vst1.v8i16(i8* %tmp0, <8 x i16> %tmp1, i32 8)
- %tmp2 = getelementptr i16* %A, i32 %inc
+ %tmp2 = getelementptr i16, i16* %A, i32 %inc
store i16* %tmp2, i16** %ptr
ret void
}
%A = load i8** %ptr
%tmp1 = load <8 x i8>* %B
call void @llvm.arm.neon.vst2.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 4)
- %tmp2 = getelementptr i8* %A, i32 %inc
+ %tmp2 = getelementptr i8, i8* %A, i32 %inc
store i8* %tmp2, i8** %ptr
ret void
}
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>* %B
call void @llvm.arm.neon.vst2.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 8)
- %tmp2 = getelementptr i64* %A, i32 2
+ %tmp2 = getelementptr i64, i64* %A, i32 2
store i64* %tmp2, i64** %ptr
ret void
}
;CHECK: vst2.16 {d16, d17}, [r0]!
%tmp1 = load <4 x i16>* %B
tail call void @llvm.arm.neon.vst2.v4i16(i8* %out, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 2)
- %t5 = getelementptr inbounds i8* %out, i32 16
+ %t5 = getelementptr inbounds i8, i8* %out, i32 16
ret i8* %t5
}
;CHECK: vst2.32 {d16, d17, d18, d19}, [r0]!
%tmp1 = load <4 x float>* %this
call void @llvm.arm.neon.vst2.v4f32(i8* %out, <4 x float> %tmp1, <4 x float> %tmp1, i32 4) nounwind
- %tmp2 = getelementptr inbounds i8* %out, i32 32
+ %tmp2 = getelementptr inbounds i8, i8* %out, i32 32
ret i8* %tmp2
}
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <2 x i32>* %B
call void @llvm.arm.neon.vst3.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp2 = getelementptr i32* %A, i32 6
+ %tmp2 = getelementptr i32, i32* %A, i32 6
store i32* %tmp2, i32** %ptr
ret void
}
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>* %B
call void @llvm.arm.neon.vst3.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
- %tmp2 = getelementptr i64* %A, i32 3
+ %tmp2 = getelementptr i64, i64* %A, i32 3
store i64* %tmp2, i64** %ptr
ret void
}
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <8 x i16>* %B
call void @llvm.arm.neon.vst3.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1)
- %tmp2 = getelementptr i16* %A, i32 24
+ %tmp2 = getelementptr i16, i16* %A, i32 24
store i16* %tmp2, i16** %ptr
ret void
}
%A = load i8** %ptr
%tmp1 = load <8 x i8>* %B
call void @llvm.arm.neon.vst4.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 16)
- %tmp2 = getelementptr i8* %A, i32 %inc
+ %tmp2 = getelementptr i8, i8* %A, i32 %inc
store i8* %tmp2, i8** %ptr
ret void
}
%tmp0 = bitcast i64* %A to i8*
%tmp1 = load <1 x i64>* %B
call void @llvm.arm.neon.vst4.v1i64(i8* %tmp0, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, <1 x i64> %tmp1, i32 1)
- %tmp2 = getelementptr i64* %A, i32 4
+ %tmp2 = getelementptr i64, i64* %A, i32 4
store i64* %tmp2, i64** %ptr
ret void
}
%tmp0 = bitcast float* %A to i8*
%tmp1 = load <4 x float>* %B
call void @llvm.arm.neon.vst4.v4f32(i8* %tmp0, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, <4 x float> %tmp1, i32 1)
- %tmp2 = getelementptr float* %A, i32 16
+ %tmp2 = getelementptr float, float* %A, i32 16
store float* %tmp2, float** %ptr
ret void
}
%tmp1 = load <8 x i8>* %B
%tmp2 = extractelement <8 x i8> %tmp1, i32 3
store i8 %tmp2, i8* %A, align 8
- %tmp3 = getelementptr i8* %A, i32 1
+ %tmp3 = getelementptr i8, i8* %A, i32 1
store i8* %tmp3, i8** %ptr
ret void
}
%tmp1 = load <4 x i32>* %B
%tmp2 = extractelement <4 x i32> %tmp1, i32 3
store i32 %tmp2, i32* %A, align 8
- %tmp3 = getelementptr i32* %A, i32 1
+ %tmp3 = getelementptr i32, i32* %A, i32 1
store i32* %tmp3, i32** %ptr
ret void
}
%tmp0 = bitcast i16* %A to i8*
%tmp1 = load <4 x i16>* %B
call void @llvm.arm.neon.vst2lane.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1, i32 2)
- %tmp2 = getelementptr i16* %A, i32 %inc
+ %tmp2 = getelementptr i16, i16* %A, i32 %inc
store i16* %tmp2, i16** %ptr
ret void
}
%tmp0 = bitcast i32* %A to i8*
%tmp1 = load <4 x i32>* %B
call void @llvm.arm.neon.vst3lane.v4i32(i8* %tmp0, <4 x i32> %tmp1, <4 x i32> %tmp1, <4 x i32> %tmp1, i32 0, i32 1)
- %tmp2 = getelementptr i32* %A, i32 3
+ %tmp2 = getelementptr i32, i32* %A, i32 3
store i32* %tmp2, i32** %ptr
ret void
}
%A = load i8** %ptr
%tmp1 = load <8 x i8>* %B
call void @llvm.arm.neon.vst4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 8)
- %tmp2 = getelementptr i8* %A, i32 4
+ %tmp2 = getelementptr i8, i8* %A, i32 4
store i8* %tmp2, i8** %ptr
ret void
}
define void @nowarn() nounwind ssp {
entry:
%buffer = alloca [12 x i8], align 1
- %arraydecay = getelementptr inbounds [12 x i8]* %buffer, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [12 x i8], [12 x i8]* %buffer, i64 0, i64 0
call void @doit(i8* %arraydecay) nounwind
ret void
}
define void @warn() nounwind ssp {
entry:
%buffer = alloca [80 x i8], align 1
- %arraydecay = getelementptr inbounds [80 x i8]* %buffer, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [80 x i8], [80 x i8]* %buffer, i64 0, i64 0
call void @doit(i8* %arraydecay) nounwind
ret void
}
define i32* @wrong-t2stmia-size-reduction(i32* %addr, i32 %val0) minsize {
store i32 %val0, i32* %addr
- %addr1 = getelementptr i32* %addr, i32 1
+ %addr1 = getelementptr i32, i32* %addr, i32 1
%lr = call i8* @llvm.returnaddress(i32 0)
%lr32 = ptrtoint i8* %lr to i32
store i32 %lr32, i32* %addr1
- %addr2 = getelementptr i32* %addr1, i32 1
+ %addr2 = getelementptr i32, i32* %addr1, i32 1
ret i32* %addr2
}
; CHECK: pop
define void @quux(%struct.eggs* %arg) {
bb:
- %tmp1 = getelementptr inbounds %struct.eggs* %arg, i32 0, i32 1
+ %tmp1 = getelementptr inbounds %struct.eggs, %struct.eggs* %arg, i32 0, i32 1
%0 = load i16* %tmp1, align 2
%tobool = icmp eq i16 %0, 0
br i1 %tobool, label %bb16, label %bb3
%tmp5 = ptrtoint i16* %tmp1 to i32
%tmp6 = shl i32 %tmp5, 20
%tmp7 = ashr exact i32 %tmp6, 20
- %tmp14 = getelementptr inbounds %struct.barney* undef, i32 %tmp7
+ %tmp14 = getelementptr inbounds %struct.barney, %struct.barney* undef, i32 %tmp7
%tmp15 = tail call i32 @widget(%struct.barney* %tmp14, i8* %tmp4, i32 %tmp7)
br label %bb16
define i32 @store_imm(i32* %a, i32* %b) {
entry:
store i32 0, i32* %a, align 4
- %0 = getelementptr inbounds i32* %b, i32 1
+ %0 = getelementptr inbounds i32, i32* %b, i32 1
store i32 0, i32* %0, align 4
ret i32 0
}
define void @bar(i32 %a) #0 {
entry:
%.compoundliteral = alloca %struct.S, align 8
- %arrayinit.begin = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 0
+ %arrayinit.begin = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 0
store i32 1, i32* %arrayinit.begin, align 8
- %arrayinit.element = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 1
+ %arrayinit.element = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 1
store i32 2, i32* %arrayinit.element, align 4
- %arrayinit.element2 = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 2
+ %arrayinit.element2 = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 2
store i32 3, i32* %arrayinit.element2, align 8
- %arrayinit.start = getelementptr inbounds %struct.S* %.compoundliteral, i64 0, i32 0, i64 3
+ %arrayinit.start = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 3
%scevgep4 = bitcast i32* %arrayinit.start to i8*
call void @llvm.memset.p0i8.i64(i8* %scevgep4, i8 0, i64 28, i32 4, i1 false)
call void @foo(i32 %a, %struct.S* byval align 8 %.compoundliteral) #3
define i32 @bpf_prog1(%struct.bpf_context* nocapture %ctx) #0 section "events/net/netif_receive_skb" {
%devname = alloca [3 x i8], align 1
%fmt = alloca [15 x i8], align 1
- %1 = getelementptr inbounds [3 x i8]* %devname, i64 0, i64 0
+ %1 = getelementptr inbounds [3 x i8], [3 x i8]* %devname, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([3 x i8]* @bpf_prog1.devname, i64 0, i64 0), i64 3, i32 1, i1 false)
- %2 = getelementptr inbounds %struct.bpf_context* %ctx, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.bpf_context, %struct.bpf_context* %ctx, i64 0, i32 0
%3 = load i64* %2, align 8
%4 = inttoptr i64 %3 to %struct.sk_buff*
- %5 = getelementptr inbounds %struct.sk_buff* %4, i64 0, i32 2
+ %5 = getelementptr inbounds %struct.sk_buff, %struct.sk_buff* %4, i64 0, i32 2
%6 = bitcast i64* %5 to i8*
%7 = call i8* inttoptr (i64 4 to i8* (i8*)*)(i8* %6) #1
%8 = call i32 inttoptr (i64 9 to i32 (i8*, i8*, i32)*)(i8* %7, i8* %1, i32 2) #1
br i1 %9, label %10, label %13
; <label>:10 ; preds = %0
- %11 = getelementptr inbounds [15 x i8]* %fmt, i64 0, i64 0
+ %11 = getelementptr inbounds [15 x i8], [15 x i8]* %fmt, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %11, i8* getelementptr inbounds ([15 x i8]* @bpf_prog1.fmt, i64 0, i64 0), i64 15, i32 1, i1 false)
%12 = call i32 (i8*, i32, ...)* inttoptr (i64 11 to i32 (i8*, i32, ...)*)(i8* %11, i32 15, %struct.sk_buff* %4, i8* %7) #1
; CHECK-LABEL: bpf_prog1:
; CHECK: ldh r0, 0(r1)
define i16 @am5(i16* %a) nounwind {
- %1 = getelementptr i16* %a, i16 2
+ %1 = getelementptr i16, i16* %a, i16 2
%2 = load i16* %1
ret i16 %2
}
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: add:
; CHECK: add r{{[0-9]+}}, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: sub:
; CHECK: sub r{{[0-9]+}}, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: or:
; CHECK: or r{{[0-9]+}}, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: xor:
; CHECK: xor r{{[0-9]+}}, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: and:
; CHECK: and r{{[0-9]+}}, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
; Function Attrs: nounwind uwtable
define void @foo_printf() #1 {
%fmt = alloca [9 x i8], align 1
- %1 = getelementptr inbounds [9 x i8]* %fmt, i64 0, i64 0
+ %1 = getelementptr inbounds [9 x i8], [9 x i8]* %fmt, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* getelementptr inbounds ([9 x i8]* @foo_printf.fmt, i64 0, i64 0), i64 9, i32 1, i1 false)
; CHECK-LABEL: foo_printf:
; CHECK: ld_64 r1, 729618802566522216
%det_routing_arch = alloca %struct..s_det_routing_arch ; <%struct..s_det_routing_arch*> [#uses=11]
%segment_inf = alloca %struct..s_segment_inf* ; <%struct..s_segment_inf**> [#uses=1]
%timing_inf = alloca { i32, float, float, float, float, float, float, float, float, float, float } ; <{ i32, float, float, float, float, float, float, float, float, float, float }*> [#uses=11]
- %tmp.101 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 4 ; <i8**> [#uses=1]
- %tmp.105 = getelementptr [300 x i8]* %net_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.106 = getelementptr [300 x i8]* %arch_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.107 = getelementptr [300 x i8]* %place_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.108 = getelementptr [300 x i8]* %route_file, i64 0, i64 0 ; <i8*> [#uses=1]
- %tmp.109 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 0 ; <i32*> [#uses=1]
- %tmp.112 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 0 ; <i32*> [#uses=1]
- %tmp.114 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 6 ; <i32*> [#uses=1]
- %tmp.118 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 7 ; <i32*> [#uses=1]
+ %tmp.101 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 4 ; <i8**> [#uses=1]
+ %tmp.105 = getelementptr [300 x i8], [300 x i8]* %net_file, i64 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.106 = getelementptr [300 x i8], [300 x i8]* %arch_file, i64 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.107 = getelementptr [300 x i8], [300 x i8]* %place_file, i64 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.108 = getelementptr [300 x i8], [300 x i8]* %route_file, i64 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.109 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.112 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.114 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 6 ; <i32*> [#uses=1]
+ %tmp.118 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 7 ; <i32*> [#uses=1]
%tmp.135 = load i32* %operation ; <i32> [#uses=1]
%tmp.137 = load i32* %tmp.112 ; <i32> [#uses=1]
- %tmp.138 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 1 ; <float*> [#uses=1]
+ %tmp.138 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 1 ; <float*> [#uses=1]
%tmp.139 = load float* %tmp.138 ; <float> [#uses=1]
- %tmp.140 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 2 ; <i32*> [#uses=1]
+ %tmp.140 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 2 ; <i32*> [#uses=1]
%tmp.141 = load i32* %tmp.140 ; <i32> [#uses=1]
- %tmp.142 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 3 ; <i32*> [#uses=1]
+ %tmp.142 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 3 ; <i32*> [#uses=1]
%tmp.143 = load i32* %tmp.142 ; <i32> [#uses=1]
%tmp.145 = load i8** %tmp.101 ; <i8*> [#uses=1]
- %tmp.146 = getelementptr %struct..s_placer_opts* %placer_opts, i64 0, i32 5 ; <i32*> [#uses=1]
+ %tmp.146 = getelementptr %struct..s_placer_opts, %struct..s_placer_opts* %placer_opts, i64 0, i32 5 ; <i32*> [#uses=1]
%tmp.147 = load i32* %tmp.146 ; <i32> [#uses=1]
%tmp.149 = load i32* %tmp.114 ; <i32> [#uses=1]
%tmp.154 = load i32* %full_stats ; <i32> [#uses=1]
%tmp.155 = load i32* %verify_binary_search ; <i32> [#uses=1]
- %tmp.156 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.156 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 0 ; <i32*> [#uses=1]
%tmp.157 = load i32* %tmp.156 ; <i32> [#uses=1]
- %tmp.158 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 1 ; <float*> [#uses=1]
+ %tmp.158 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 1 ; <float*> [#uses=1]
%tmp.159 = load float* %tmp.158 ; <float> [#uses=1]
- %tmp.160 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 2 ; <float*> [#uses=1]
+ %tmp.160 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 2 ; <float*> [#uses=1]
%tmp.161 = load float* %tmp.160 ; <float> [#uses=1]
- %tmp.162 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 3 ; <float*> [#uses=1]
+ %tmp.162 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 3 ; <float*> [#uses=1]
%tmp.163 = load float* %tmp.162 ; <float> [#uses=1]
- %tmp.164 = getelementptr %struct..s_annealing_sched* %annealing_sched, i64 0, i32 4 ; <float*> [#uses=1]
+ %tmp.164 = getelementptr %struct..s_annealing_sched, %struct..s_annealing_sched* %annealing_sched, i64 0, i32 4 ; <float*> [#uses=1]
%tmp.165 = load float* %tmp.164 ; <float> [#uses=1]
- %tmp.166 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 0 ; <float*> [#uses=1]
+ %tmp.166 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 0 ; <float*> [#uses=1]
%tmp.167 = load float* %tmp.166 ; <float> [#uses=1]
- %tmp.168 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 1 ; <float*> [#uses=1]
+ %tmp.168 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 1 ; <float*> [#uses=1]
%tmp.169 = load float* %tmp.168 ; <float> [#uses=1]
- %tmp.170 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 2 ; <float*> [#uses=1]
+ %tmp.170 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 2 ; <float*> [#uses=1]
%tmp.171 = load float* %tmp.170 ; <float> [#uses=1]
- %tmp.172 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 3 ; <float*> [#uses=1]
+ %tmp.172 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 3 ; <float*> [#uses=1]
%tmp.173 = load float* %tmp.172 ; <float> [#uses=1]
- %tmp.174 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 4 ; <float*> [#uses=1]
+ %tmp.174 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 4 ; <float*> [#uses=1]
%tmp.175 = load float* %tmp.174 ; <float> [#uses=1]
- %tmp.176 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 5 ; <i32*> [#uses=1]
+ %tmp.176 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 5 ; <i32*> [#uses=1]
%tmp.177 = load i32* %tmp.176 ; <i32> [#uses=1]
- %tmp.178 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 6 ; <i32*> [#uses=1]
+ %tmp.178 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 6 ; <i32*> [#uses=1]
%tmp.179 = load i32* %tmp.178 ; <i32> [#uses=1]
%tmp.181 = load i32* %tmp.118 ; <i32> [#uses=1]
- %tmp.182 = getelementptr %struct..s_router_opts* %router_opts, i64 0, i32 8 ; <i32*> [#uses=1]
+ %tmp.182 = getelementptr %struct..s_router_opts, %struct..s_router_opts* %router_opts, i64 0, i32 8 ; <i32*> [#uses=1]
%tmp.183 = load i32* %tmp.182 ; <i32> [#uses=1]
- %tmp.184 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.184 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 0 ; <i32*> [#uses=1]
%tmp.185 = load i32* %tmp.184 ; <i32> [#uses=1]
- %tmp.186 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 1 ; <float*> [#uses=1]
+ %tmp.186 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 1 ; <float*> [#uses=1]
%tmp.187 = load float* %tmp.186 ; <float> [#uses=1]
- %tmp.188 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 2 ; <float*> [#uses=1]
+ %tmp.188 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 2 ; <float*> [#uses=1]
%tmp.189 = load float* %tmp.188 ; <float> [#uses=1]
- %tmp.190 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 3 ; <float*> [#uses=1]
+ %tmp.190 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 3 ; <float*> [#uses=1]
%tmp.191 = load float* %tmp.190 ; <float> [#uses=1]
- %tmp.192 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 4 ; <i32*> [#uses=1]
+ %tmp.192 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 4 ; <i32*> [#uses=1]
%tmp.193 = load i32* %tmp.192 ; <i32> [#uses=1]
- %tmp.194 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 5 ; <i32*> [#uses=1]
+ %tmp.194 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 5 ; <i32*> [#uses=1]
%tmp.195 = load i32* %tmp.194 ; <i32> [#uses=1]
- %tmp.196 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 6 ; <i16*> [#uses=1]
+ %tmp.196 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 6 ; <i16*> [#uses=1]
%tmp.197 = load i16* %tmp.196 ; <i16> [#uses=1]
- %tmp.198 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 7 ; <i16*> [#uses=1]
+ %tmp.198 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 7 ; <i16*> [#uses=1]
%tmp.199 = load i16* %tmp.198 ; <i16> [#uses=1]
- %tmp.200 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 8 ; <i16*> [#uses=1]
+ %tmp.200 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 8 ; <i16*> [#uses=1]
%tmp.201 = load i16* %tmp.200 ; <i16> [#uses=1]
- %tmp.202 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 9 ; <float*> [#uses=1]
+ %tmp.202 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 9 ; <float*> [#uses=1]
%tmp.203 = load float* %tmp.202 ; <float> [#uses=1]
- %tmp.204 = getelementptr %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 10 ; <float*> [#uses=1]
+ %tmp.204 = getelementptr %struct..s_det_routing_arch, %struct..s_det_routing_arch* %det_routing_arch, i64 0, i32 10 ; <float*> [#uses=1]
%tmp.205 = load float* %tmp.204 ; <float> [#uses=1]
%tmp.206 = load %struct..s_segment_inf** %segment_inf ; <%struct..s_segment_inf*> [#uses=1]
%tmp.208 = load i32* %tmp.109 ; <i32> [#uses=1]
- %tmp.209 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 1 ; <float*> [#uses=1]
+ %tmp.209 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 1 ; <float*> [#uses=1]
%tmp.210 = load float* %tmp.209 ; <float> [#uses=1]
- %tmp.211 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 2 ; <float*> [#uses=1]
+ %tmp.211 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 2 ; <float*> [#uses=1]
%tmp.212 = load float* %tmp.211 ; <float> [#uses=1]
- %tmp.213 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 3 ; <float*> [#uses=1]
+ %tmp.213 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 3 ; <float*> [#uses=1]
%tmp.214 = load float* %tmp.213 ; <float> [#uses=1]
- %tmp.215 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 4 ; <float*> [#uses=1]
+ %tmp.215 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 4 ; <float*> [#uses=1]
%tmp.216 = load float* %tmp.215 ; <float> [#uses=1]
- %tmp.217 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 5 ; <float*> [#uses=1]
+ %tmp.217 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 5 ; <float*> [#uses=1]
%tmp.218 = load float* %tmp.217 ; <float> [#uses=1]
- %tmp.219 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 6 ; <float*> [#uses=1]
+ %tmp.219 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 6 ; <float*> [#uses=1]
%tmp.220 = load float* %tmp.219 ; <float> [#uses=1]
- %tmp.221 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 7 ; <float*> [#uses=1]
+ %tmp.221 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 7 ; <float*> [#uses=1]
%tmp.222 = load float* %tmp.221 ; <float> [#uses=1]
- %tmp.223 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 8 ; <float*> [#uses=1]
+ %tmp.223 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 8 ; <float*> [#uses=1]
%tmp.224 = load float* %tmp.223 ; <float> [#uses=1]
- %tmp.225 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 9 ; <float*> [#uses=1]
+ %tmp.225 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 9 ; <float*> [#uses=1]
%tmp.226 = load float* %tmp.225 ; <float> [#uses=1]
- %tmp.227 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 10 ; <float*> [#uses=1]
+ %tmp.227 = getelementptr { i32, float, float, float, float, float, float, float, float, float, float }, { i32, float, float, float, float, float, float, float, float, float, float }* %timing_inf, i64 0, i32 10 ; <float*> [#uses=1]
%tmp.228 = load float* %tmp.227 ; <float> [#uses=1]
call void @place_and_route( i32 %tmp.135, i32 %tmp.137, float %tmp.139, i32 %tmp.141, i32 %tmp.143, i8* %tmp.145, i32 %tmp.147, i32 %tmp.149, i8* %tmp.107, i8* %tmp.105, i8* %tmp.106, i8* %tmp.108, i32 %tmp.154, i32 %tmp.155, i32 %tmp.157, float %tmp.159, float %tmp.161, float %tmp.163, float %tmp.165, float %tmp.167, float %tmp.169, float %tmp.171, float %tmp.173, float %tmp.175, i32 %tmp.177, i32 %tmp.179, i32 %tmp.181, i32 %tmp.183, i32 %tmp.185, float %tmp.187, float %tmp.189, float %tmp.191, i32 %tmp.193, i32 %tmp.195, i16 %tmp.197, i16 %tmp.199, i16 %tmp.201, float %tmp.203, float %tmp.205, %struct..s_segment_inf* %tmp.206, i32 %tmp.208, float %tmp.210, float %tmp.212, float %tmp.214, float %tmp.216, float %tmp.218, float %tmp.220, float %tmp.222, float %tmp.224, float %tmp.226, float %tmp.228 )
%tmp.231 = load i32* %show_graphics ; <i32> [#uses=1]
define internal i32 @OpenOutput(i8* %filename.1) {
entry:
%tmp.0 = load %FileType** @Output ; <%FileType*> [#uses=1]
- %tmp.4 = getelementptr %FileType* %tmp.0, i64 1 ; <%FileType*> [#uses=1]
- %addrOfGlobal = getelementptr [16 x %FileType]* @OutputFiles, i64 0 ; <[16 x %FileType]*> [#uses=1]
- %constantGEP = getelementptr [16 x %FileType]* %addrOfGlobal, i64 1 ; <[16 x %FileType]*> [#uses=1]
- %constantGEP.upgrd.1 = getelementptr [16 x %FileType]* %constantGEP, i64 0, i64 0 ; <%FileType*> [#uses=1]
+ %tmp.4 = getelementptr %FileType, %FileType* %tmp.0, i64 1 ; <%FileType*> [#uses=1]
+ %addrOfGlobal = getelementptr [16 x %FileType], [16 x %FileType]* @OutputFiles, i64 0 ; <[16 x %FileType]*> [#uses=1]
+ %constantGEP = getelementptr [16 x %FileType], [16 x %FileType]* %addrOfGlobal, i64 1 ; <[16 x %FileType]*> [#uses=1]
+ %constantGEP.upgrd.1 = getelementptr [16 x %FileType], [16 x %FileType]* %constantGEP, i64 0, i64 0 ; <%FileType*> [#uses=1]
%tmp.10 = icmp eq %FileType* %tmp.4, %constantGEP.upgrd.1 ; <i1> [#uses=1]
br i1 %tmp.10, label %return, label %endif.0
loopentry: ; preds = %loopentry, %entry
%i = phi i64 [ 0, %entry ], [ %inc.i, %loopentry ] ; <i64> [#uses=3]
- %cptr = getelementptr [6 x i8]* @yy_ec, i64 0, i64 %i ; <i8*> [#uses=1]
+ %cptr = getelementptr [6 x i8], [6 x i8]* @yy_ec, i64 0, i64 %i ; <i8*> [#uses=1]
%c = load i8* %cptr ; <i8> [#uses=1]
%ignore = call i32 (i8*, ...)* @printf( i8* getelementptr ([8 x i8]* @.str_3, i64 0, i64 0), i64 %i ) ; <i32> [#uses=0]
%ignore2 = call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str_4, i64 0, i64 0), i8 %c ) ; <i32> [#uses=0]
cond_next18: ; preds = %cond_next12, %cond_true
%tmp20 = bitcast %struct.tree_node* %tmp2 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
- %tmp21 = getelementptr %struct.tree_type* %tmp20, i32 0, i32 17 ; <%struct.tree_node**> [#uses=1]
+ %tmp21 = getelementptr %struct.tree_type, %struct.tree_type* %tmp20, i32 0, i32 17 ; <%struct.tree_node**> [#uses=1]
%tmp22 = load %struct.tree_node** %tmp21 ; <%struct.tree_node*> [#uses=6]
%tmp24 = icmp eq %struct.tree_node* %tmp22, %tmp23 ; <i1> [#uses=1]
br i1 %tmp24, label %return, label %cond_next28
cond_next28: ; preds = %cond_next18
%tmp30 = bitcast %struct.tree_node* %tmp2 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp = getelementptr %struct.tree_common* %tmp30, i32 0, i32 2 ; <i8*> [#uses=1]
+ %tmp = getelementptr %struct.tree_common, %struct.tree_common* %tmp30, i32 0, i32 2 ; <i8*> [#uses=1]
%tmp.upgrd.1 = bitcast i8* %tmp to i32* ; <i32*> [#uses=1]
%tmp.upgrd.2 = load i32* %tmp.upgrd.1 ; <i32> [#uses=1]
%tmp32 = trunc i32 %tmp.upgrd.2 to i8 ; <i8> [#uses=1]
cond_true92: ; preds = %cond_true92.preheader, %cond_next84, %cond_true34
%t.0.0 = phi %struct.tree_node* [ %parms, %cond_true92.preheader ], [ %tmp6, %cond_true34 ], [ %tmp6, %cond_next84 ] ; <%struct.tree_node*> [#uses=2]
%tmp.upgrd.4 = bitcast %struct.tree_node* %t.0.0 to %struct.tree_list* ; <%struct.tree_list*> [#uses=1]
- %tmp.upgrd.5 = getelementptr %struct.tree_list* %tmp.upgrd.4, i32 0, i32 2 ; <%struct.tree_node**> [#uses=1]
+ %tmp.upgrd.5 = getelementptr %struct.tree_list, %struct.tree_list* %tmp.upgrd.4, i32 0, i32 2 ; <%struct.tree_node**> [#uses=1]
%tmp2 = load %struct.tree_node** %tmp.upgrd.5 ; <%struct.tree_node*> [#uses=5]
%tmp4 = bitcast %struct.tree_node* %t.0.0 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp5 = getelementptr %struct.tree_common* %tmp4, i32 0, i32 0 ; <%struct.tree_node**> [#uses=1]
+ %tmp5 = getelementptr %struct.tree_common, %struct.tree_common* %tmp4, i32 0, i32 0 ; <%struct.tree_node**> [#uses=1]
%tmp6 = load %struct.tree_node** %tmp5 ; <%struct.tree_node*> [#uses=3]
%tmp.upgrd.6 = icmp eq %struct.tree_node* %tmp6, null ; <i1> [#uses=3]
br i1 %tmp.upgrd.6, label %cond_true, label %cond_next12
define void @OUTPUT_TABLE(%struct.SYMBOL_TABLE_ENTRY* %SYM_TAB) {
entry:
- %tmp11 = getelementptr %struct.SYMBOL_TABLE_ENTRY* %SYM_TAB, i32 0, i32 1, i32 0 ; <i8*> [#uses=2]
+ %tmp11 = getelementptr %struct.SYMBOL_TABLE_ENTRY, %struct.SYMBOL_TABLE_ENTRY* %SYM_TAB, i32 0, i32 1, i32 0 ; <i8*> [#uses=2]
%tmp.i = bitcast i8* %tmp11 to i8* ; <i8*> [#uses=1]
br label %bb.i
br i1 %tmp22, label %cond_true23, label %cond_next159
cond_true23: ; preds = %entry
- %tmp138 = getelementptr %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8 ; <i8*> [#uses=1]
+ %tmp138 = getelementptr %struct.cl_perfunc_opts, %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8 ; <i8*> [#uses=1]
%tmp138.upgrd.1 = bitcast i8* %tmp138 to i32* ; <i32*> [#uses=2]
%tmp139 = load i32* %tmp138.upgrd.1 ; <i32> [#uses=1]
%tmp140 = shl i32 1, 27 ; <i32> [#uses=1]
%tmp142 = and i32 %tmp139, -134217729 ; <i32> [#uses=1]
%tmp143 = or i32 %tmp142, %tmp141 ; <i32> [#uses=1]
store i32 %tmp143, i32* %tmp138.upgrd.1
- %tmp144 = getelementptr %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8 ; <i8*> [#uses=1]
+ %tmp144 = getelementptr %struct.cl_perfunc_opts, %struct.cl_perfunc_opts* @cl_pf_opts, i32 0, i32 8 ; <i8*> [#uses=1]
%tmp144.upgrd.2 = bitcast i8* %tmp144 to i32* ; <i32*> [#uses=1]
%tmp145 = load i32* %tmp144.upgrd.2 ; <i32> [#uses=1]
%tmp146 = shl i32 %tmp145, 22 ; <i32> [#uses=1]
%tmp1580 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 3) ; <i64> [#uses=1]
%tmp1591 = load i64* getelementptr (%struct.CHESS_POSITION* @search, i32 0, i32 4) ; <i64> [#uses=1]
%tmp1572 = tail call fastcc i32 @FirstOne( ) ; <i32> [#uses=5]
- %tmp1582 = getelementptr [64 x i32]* @bishop_shift_rl45, i32 0, i32 %tmp1572 ; <i32*> [#uses=1]
+ %tmp1582 = getelementptr [64 x i32], [64 x i32]* @bishop_shift_rl45, i32 0, i32 %tmp1572 ; <i32*> [#uses=1]
%tmp1583 = load i32* %tmp1582 ; <i32> [#uses=1]
%tmp1583.upgrd.1 = trunc i32 %tmp1583 to i8 ; <i8> [#uses=1]
%shift.upgrd.2 = zext i8 %tmp1583.upgrd.1 to i64 ; <i64> [#uses=1]
%tmp1584.upgrd.3 = trunc i64 %tmp1584 to i32 ; <i32> [#uses=1]
%tmp1585 = and i32 %tmp1584.upgrd.3, 255 ; <i32> [#uses=1]
%gep.upgrd.4 = zext i32 %tmp1585 to i64 ; <i64> [#uses=1]
- %tmp1587 = getelementptr [64 x [256 x i32]]* @bishop_mobility_rl45, i32 0, i32 %tmp1572, i64 %gep.upgrd.4 ; <i32*> [#uses=1]
+ %tmp1587 = getelementptr [64 x [256 x i32]], [64 x [256 x i32]]* @bishop_mobility_rl45, i32 0, i32 %tmp1572, i64 %gep.upgrd.4 ; <i32*> [#uses=1]
%tmp1588 = load i32* %tmp1587 ; <i32> [#uses=1]
- %tmp1593 = getelementptr [64 x i32]* @bishop_shift_rr45, i32 0, i32 %tmp1572 ; <i32*> [#uses=1]
+ %tmp1593 = getelementptr [64 x i32], [64 x i32]* @bishop_shift_rr45, i32 0, i32 %tmp1572 ; <i32*> [#uses=1]
%tmp1594 = load i32* %tmp1593 ; <i32> [#uses=1]
%tmp1594.upgrd.5 = trunc i32 %tmp1594 to i8 ; <i8> [#uses=1]
%shift.upgrd.6 = zext i8 %tmp1594.upgrd.5 to i64 ; <i64> [#uses=1]
%tmp1595.upgrd.7 = trunc i64 %tmp1595 to i32 ; <i32> [#uses=1]
%tmp1596 = and i32 %tmp1595.upgrd.7, 255 ; <i32> [#uses=1]
%gep.upgrd.8 = zext i32 %tmp1596 to i64 ; <i64> [#uses=1]
- %tmp1598 = getelementptr [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8 ; <i32*> [#uses=1]
+ %tmp1598 = getelementptr [64 x [256 x i32]], [64 x [256 x i32]]* @bishop_mobility_rr45, i32 0, i32 %tmp1572, i64 %gep.upgrd.8 ; <i32*> [#uses=1]
%tmp1599 = load i32* %tmp1598 ; <i32> [#uses=1]
%tmp1600.neg = sub i32 0, %tmp1588 ; <i32> [#uses=1]
%tmp1602 = sub i32 %tmp1600.neg, %tmp1599 ; <i32> [#uses=1]
- %tmp1604 = getelementptr [64 x i8]* @black_outpost, i32 0, i32 %tmp1572 ; <i8*> [#uses=1]
+ %tmp1604 = getelementptr [64 x i8], [64 x i8]* @black_outpost, i32 0, i32 %tmp1572 ; <i8*> [#uses=1]
%tmp1605 = load i8* %tmp1604 ; <i8> [#uses=1]
%tmp1606 = icmp eq i8 %tmp1605, 0 ; <i1> [#uses=1]
br i1 %tmp1606, label %cond_next1637, label %cond_true1607
bb41: ; preds = %bb20
%tmp8182 = trunc i64 %tmp42.rle to i32 ; <i32> [#uses=1]
- %tmp83 = getelementptr [63 x i8]* @letters.3100, i32 0, i32 %tmp8182 ; <i8*> [#uses=1]
+ %tmp83 = getelementptr [63 x i8], [63 x i8]* @letters.3100, i32 0, i32 %tmp8182 ; <i8*> [#uses=1]
%tmp84 = load i8* %tmp83, align 1 ; <i8> [#uses=1]
store i8 %tmp84, i8* null, align 1
%tmp90 = urem i64 %tmp42.rle, 62 ; <i64> [#uses=1]
%tmp9091 = trunc i64 %tmp90 to i32 ; <i32> [#uses=1]
- %tmp92 = getelementptr [63 x i8]* @letters.3100, i32 0, i32 %tmp9091 ; <i8*> [#uses=1]
+ %tmp92 = getelementptr [63 x i8], [63 x i8]* @letters.3100, i32 0, i32 %tmp9091 ; <i8*> [#uses=1]
store i8* %tmp92, i8** null, align 1
ret i32 -1
}
; XFAIL: hexagon
define void @test(i32* %X) nounwind {
entry:
- %tmp1 = getelementptr i32* %X, i32 10 ; <i32*> [#uses=2]
+ %tmp1 = getelementptr i32, i32* %X, i32 10 ; <i32*> [#uses=2]
tail call void asm sideeffect " $0 $1 ", "=*im,*im,~{memory}"( i32* %tmp1, i32* %tmp1 ) nounwind
ret void
}
%2 = sext i32 %1 to i64
%3 = lshr i64 %2, 12
%4 = and i64 %3, 68719476735
- %5 = getelementptr inbounds i32* null, i64 %4
+ %5 = getelementptr inbounds i32, i32* null, i64 %4
store i32* %5, i32** @b, align 8
ret void
}
bb0:
call void @opaque( [497 x %Domain]* @domain_array )
%cann-indvar-idxcast = sext i32 %argc to i64 ; <i64> [#uses=1]
- %reg841 = getelementptr [497 x %Domain]* @domain_array, i64 0, i64 %cann-indvar-idxcast, i32 3 ; <i32*> [#uses=1]
- %reg846 = getelementptr i32* %reg841, i64 1 ; <i32*> [#uses=1]
+ %reg841 = getelementptr [497 x %Domain], [497 x %Domain]* @domain_array, i64 0, i64 %cann-indvar-idxcast, i32 3 ; <i32*> [#uses=1]
+ %reg846 = getelementptr i32, i32* %reg841, i64 1 ; <i32*> [#uses=1]
%reg820 = load i32* %reg846 ; <i32> [#uses=1]
ret i32 %reg820
}
define i32 @main() {
%a = load double* @A ; <double> [#uses=4]
- %a_fs = getelementptr [8 x i8]* @a_fstr, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_fs = getelementptr [8 x i8], [8 x i8]* @a_fstr, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_fs, double %a ) ; <i32>:1 [#uses=0]
%a_d2l = fptosi double %a to i64 ; <i64> [#uses=1]
- %a_ls = getelementptr [10 x i8]* @a_lstr, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_ls = getelementptr [10 x i8], [10 x i8]* @a_lstr, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_ls, i64 %a_d2l ) ; <i32>:2 [#uses=0]
%a_d2i = fptosi double %a to i32 ; <i32> [#uses=2]
- %a_ds = getelementptr [8 x i8]* @a_dstr, i64 0, i64 0 ; <i8*> [#uses=3]
+ %a_ds = getelementptr [8 x i8], [8 x i8]* @a_dstr, i64 0, i64 0 ; <i8*> [#uses=3]
call i32 (i8*, ...)* @printf( i8* %a_ds, i32 %a_d2i ) ; <i32>:3 [#uses=0]
%a_d2sb = fptosi double %a to i8 ; <i8> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_ds, i8 %a_d2sb ) ; <i32>:4 [#uses=0]
%a_d2i2sb = trunc i32 %a_d2i to i8 ; <i8> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_ds, i8 %a_d2i2sb ) ; <i32>:5 [#uses=0]
%b = load i32* @B ; <i32> [#uses=2]
- %b_ds = getelementptr [8 x i8]* @b_dstr, i64 0, i64 0 ; <i8*> [#uses=1]
+ %b_ds = getelementptr [8 x i8], [8 x i8]* @b_dstr, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %b_ds, i32 %b ) ; <i32>:6 [#uses=0]
%b_i2d = sitofp i32 %b to double ; <double> [#uses=1]
- %b_fs = getelementptr [8 x i8]* @b_fstr, i64 0, i64 0 ; <i8*> [#uses=1]
+ %b_fs = getelementptr [8 x i8], [8 x i8]* @b_fstr, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %b_fs, double %b_i2d ) ; <i32>:7 [#uses=0]
ret i32 0
}
%ScalarB = alloca %MixedB ; <%MixedB*> [#uses=1]
%ArrayA = alloca %MixedA, i32 4 ; <%MixedA*> [#uses=3]
%ArrayB = alloca %MixedB, i32 3 ; <%MixedB*> [#uses=2]
- %I1 = getelementptr %MixedA* %ScalarA, i64 0, i32 0 ; <float*> [#uses=2]
+ %I1 = getelementptr %MixedA, %MixedA* %ScalarA, i64 0, i32 0 ; <float*> [#uses=2]
store float 0x3FF6A09020000000, float* %I1
- %I2 = getelementptr %MixedB* %ScalarB, i64 0, i32 1, i32 0 ; <float*> [#uses=2]
+ %I2 = getelementptr %MixedB, %MixedB* %ScalarB, i64 0, i32 1, i32 0 ; <float*> [#uses=2]
store float 0x4005BF1420000000, float* %I2
- %fptrA = getelementptr %MixedA* %ArrayA, i64 1, i32 0 ; <float*> [#uses=1]
- %fptrB = getelementptr %MixedB* %ArrayB, i64 2, i32 1, i32 0 ; <float*> [#uses=1]
+ %fptrA = getelementptr %MixedA, %MixedA* %ArrayA, i64 1, i32 0 ; <float*> [#uses=1]
+ %fptrB = getelementptr %MixedB, %MixedB* %ArrayB, i64 2, i32 1, i32 0 ; <float*> [#uses=1]
store float 0x400921CAC0000000, float* %fptrA
store float 5.000000e+00, float* %fptrB
;; Test that a sequence of GEPs with constant indices are folded right
- %fptrA1 = getelementptr %MixedA* %ArrayA, i64 3 ; <%MixedA*> [#uses=1]
- %fptrA2 = getelementptr %MixedA* %fptrA1, i64 0, i32 1 ; <[15 x i32]*> [#uses=1]
- %fptrA3 = getelementptr [15 x i32]* %fptrA2, i64 0, i64 8 ; <i32*> [#uses=1]
+ %fptrA1 = getelementptr %MixedA, %MixedA* %ArrayA, i64 3 ; <%MixedA*> [#uses=1]
+ %fptrA2 = getelementptr %MixedA, %MixedA* %fptrA1, i64 0, i32 1 ; <[15 x i32]*> [#uses=1]
+ %fptrA3 = getelementptr [15 x i32], [15 x i32]* %fptrA2, i64 0, i64 8 ; <i32*> [#uses=1]
store i32 5, i32* %fptrA3
%sqrtTwo = load float* %I1 ; <float> [#uses=1]
%exp = load float* %I2 ; <float> [#uses=1]
- %I3 = getelementptr %MixedA* %ArrayA, i64 1, i32 0 ; <float*> [#uses=1]
+ %I3 = getelementptr %MixedA, %MixedA* %ArrayA, i64 1, i32 0 ; <float*> [#uses=1]
%pi = load float* %I3 ; <float> [#uses=1]
- %I4 = getelementptr %MixedB* %ArrayB, i64 2, i32 1, i32 0 ; <float*> [#uses=1]
+ %I4 = getelementptr %MixedB, %MixedB* %ArrayB, i64 2, i32 1, i32 0 ; <float*> [#uses=1]
%five = load float* %I4 ; <float> [#uses=1]
%dsqrtTwo = fpext float %sqrtTwo to double ; <double> [#uses=1]
%dexp = fpext float %exp to double ; <double> [#uses=1]
%dpi = fpext float %pi to double ; <double> [#uses=1]
%dfive = fpext float %five to double ; <double> [#uses=1]
- %castFmt = getelementptr [44 x i8]* @fmtArg, i64 0, i64 0 ; <i8*> [#uses=1]
+ %castFmt = getelementptr [44 x i8], [44 x i8]* @fmtArg, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %castFmt, double %dsqrtTwo, double %dexp, double %dpi, double %dfive ) ; <i32>:1 [#uses=0]
ret i32 0
}
define void @Parse_Camera(%struct.CAMERA** nocapture %Camera_Ptr) nounwind {
entry:
%.pre = load %struct.CAMERA** %Camera_Ptr, align 4
-%0 = getelementptr inbounds %struct.CAMERA* %.pre, i32 0, i32 1, i32 0
-%1 = getelementptr inbounds %struct.CAMERA* %.pre, i32 0, i32 1, i32 2
+%0 = getelementptr inbounds %struct.CAMERA, %struct.CAMERA* %.pre, i32 0, i32 1, i32 0
+%1 = getelementptr inbounds %struct.CAMERA, %struct.CAMERA* %.pre, i32 0, i32 1, i32 2
br label %bb32
bb32: ; preds = %bb6
br i1 undef, label %func_74.exit.for.cond29.thread_crit_edge, label %for.body.i
func_74.exit.for.cond29.thread_crit_edge: ; preds = %for.body.i
- %f13576.pre = getelementptr inbounds %struct.S0* undef, i64 0, i32 1
+ %f13576.pre = getelementptr inbounds %struct.S0, %struct.S0* undef, i64 0, i32 1
store i8 0, i8* %f13576.pre, align 4
br label %lbl_468
lbl_468: ; preds = %lbl_468, %func_74.exit.for.cond29.thread_crit_edge
%f13577.ph = phi i8* [ %f13576.pre, %func_74.exit.for.cond29.thread_crit_edge ], [ %f135.pre, %lbl_468 ]
store i8 1, i8* %f13577.ph, align 1
- %f135.pre = getelementptr inbounds %struct.S0* undef, i64 0, i32 1
+ %f135.pre = getelementptr inbounds %struct.S0, %struct.S0* undef, i64 0, i32 1
br i1 undef, label %lbl_468, label %for.end74
for.end74: ; preds = %lbl_468
declare i32 @printf(i8*, ...)
define i32 @main() {
- %s = getelementptr [7 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=1]
+ %s = getelementptr [7 x i8], [7 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %s ) ; <i32>:1 [#uses=0]
ret i32 0
}
%iscale = mul i32 %i, -1 ; <i32> [#uses=1]
%ioff = add i32 %iscale, 3 ; <i32> [#uses=2]
%ioff.upgrd.1 = zext i32 %ioff to i64 ; <i64> [#uses=1]
- %fptr = getelementptr %Results* %fval, i64 %ioff.upgrd.1 ; <%Results*> [#uses=1]
- %castFmt = getelementptr [39 x i8]* @fmtArg, i64 0, i64 0 ; <i8*> [#uses=1]
+ %fptr = getelementptr %Results, %Results* %fval, i64 %ioff.upgrd.1 ; <%Results*> [#uses=1]
+ %castFmt = getelementptr [39 x i8], [39 x i8]* @fmtArg, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %castFmt, i32 %ioff, %Results* %fval, %Results* %fptr ) ; <i32>:1 [#uses=0]
ret i32 0
}
declare i32 @printf(i8*, ...)
define i32 @main() {
- %f = getelementptr [4 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=3]
+ %f = getelementptr [4 x i8], [4 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=3]
%d = add i32 1, 0 ; <i32> [#uses=3]
call i32 (i8*, ...)* @printf( i8* %f, i32 %d ) ; <i32>:1 [#uses=0]
%e = add i32 38, 2 ; <i32> [#uses=2]
define i32 @main() {
%a = load double* @A ; <double> [#uses=12]
%b = load double* @B ; <double> [#uses=12]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_s, double %a ) ; <i32>:1 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %b_s, double %b ) ; <i32>:2 [#uses=0]
%add_r = fadd double %a, %b ; <double> [#uses=1]
%mul_r = fmul double %a, %b ; <double> [#uses=1]
%div_r = fdiv double %b, %a ; <double> [#uses=1]
%rem_r = frem double %b, %a ; <double> [#uses=1]
- %add_s = getelementptr [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %sub_s = getelementptr [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %mul_s = getelementptr [12 x i8]* @mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %div_s = getelementptr [12 x i8]* @div_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %rem_s = getelementptr [13 x i8]* @rem_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %add_s = getelementptr [12 x i8], [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %sub_s = getelementptr [12 x i8], [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %mul_s = getelementptr [12 x i8], [12 x i8]* @mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %div_s = getelementptr [12 x i8], [12 x i8]* @div_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %rem_s = getelementptr [13 x i8], [13 x i8]* @rem_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %add_s, double %add_r ) ; <i32>:3 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %sub_s, double %sub_r ) ; <i32>:4 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %mul_s, double %mul_r ) ; <i32>:5 [#uses=0]
%ge_r = fcmp oge double %a, %b ; <i1> [#uses=1]
%eq_r = fcmp oeq double %a, %b ; <i1> [#uses=1]
%ne_r = fcmp une double %a, %b ; <i1> [#uses=1]
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %lt_s, i1 %lt_r ) ; <i32>:8 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %le_s, i1 %le_r ) ; <i32>:9 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %gt_s, i1 %gt_r ) ; <i32>:10 [#uses=0]
define i32 @main() {
%a = load i32* @A ; <i32> [#uses=16]
%b = load i32* @B ; <i32> [#uses=17]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:1 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %b_s, i32 %b ) ; <i32>:2 [#uses=0]
%add_r = add i32 %a, %b ; <i32> [#uses=1]
%mul_r = mul i32 %a, %b ; <i32> [#uses=1]
%div_r = sdiv i32 %b, %a ; <i32> [#uses=1]
%rem_r = srem i32 %b, %a ; <i32> [#uses=1]
- %add_s = getelementptr [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %sub_s = getelementptr [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %mul_s = getelementptr [12 x i8]* @mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %div_s = getelementptr [12 x i8]* @div_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %rem_s = getelementptr [13 x i8]* @rem_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %add_s = getelementptr [12 x i8], [12 x i8]* @add_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %sub_s = getelementptr [12 x i8], [12 x i8]* @sub_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %mul_s = getelementptr [12 x i8], [12 x i8]* @mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %div_s = getelementptr [12 x i8], [12 x i8]* @div_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %rem_s = getelementptr [13 x i8], [13 x i8]* @rem_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %add_s, i32 %add_r ) ; <i32>:3 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %sub_s, i32 %sub_r ) ; <i32>:4 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %mul_s, i32 %mul_r ) ; <i32>:5 [#uses=0]
%ge_r = icmp sge i32 %a, %b ; <i1> [#uses=1]
%eq_r = icmp eq i32 %a, %b ; <i1> [#uses=1]
%ne_r = icmp ne i32 %a, %b ; <i1> [#uses=1]
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %lt_s, i1 %lt_r ) ; <i32>:8 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %le_s, i1 %le_r ) ; <i32>:9 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %gt_s, i1 %gt_r ) ; <i32>:10 [#uses=0]
%shl_r = shl i32 %b, %shift.upgrd.1 ; <i32> [#uses=1]
%shift.upgrd.2 = zext i8 %u to i32 ; <i32> [#uses=1]
%shr_r = ashr i32 %b, %shift.upgrd.2 ; <i32> [#uses=1]
- %and_s = getelementptr [12 x i8]* @and_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %or_s = getelementptr [12 x i8]* @or_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %xor_s = getelementptr [12 x i8]* @xor_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %shl_s = getelementptr [13 x i8]* @shl_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %shr_s = getelementptr [13 x i8]* @shr_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %and_s = getelementptr [12 x i8], [12 x i8]* @and_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %or_s = getelementptr [12 x i8], [12 x i8]* @or_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %xor_s = getelementptr [12 x i8], [12 x i8]* @xor_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %shl_s = getelementptr [13 x i8], [13 x i8]* @shl_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %shr_s = getelementptr [13 x i8], [13 x i8]* @shr_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %and_s, i32 %and_r ) ; <i32>:14 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %or_s, i32 %or_r ) ; <i32>:15 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %xor_s, i32 %xor_r ) ; <i32>:16 [#uses=0]
declare i32 @printf(i8*, ...)
define i32 @main() {
- %f = getelementptr [4 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=1]
+ %f = getelementptr [4 x i8], [4 x i8]* @.str_1, i64 0, i64 0 ; <i8*> [#uses=1]
%d = add i32 0, 0 ; <i32> [#uses=1]
%tmp.0 = call i32 (i8*, ...)* @printf( i8* %f, i32 %d ) ; <i32> [#uses=0]
ret i32 0
define i32 @main() {
%a = load i32* @A ; <i32> [#uses=21]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %a_mul_s = getelementptr [13 x i8]* @a_mul_str, i64 0, i64 0 ; <i8*> [#uses=20]
+ %a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_mul_s = getelementptr [13 x i8], [13 x i8]* @a_mul_str, i64 0, i64 0 ; <i8*> [#uses=20]
call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:1 [#uses=0]
%r_0 = mul i32 %a, 0 ; <i32> [#uses=1]
%r_1 = mul i32 %a, 1 ; <i32> [#uses=1]
entry:
%a = load i32* @A ; <i32> [#uses=2]
%b = load i32* @B ; <i32> [#uses=1]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %a_mul_s = getelementptr [13 x i8]* @a_mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_mul_s = getelementptr [13 x i8], [13 x i8]* @a_mul_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:0 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %b_s, i32 %b ) ; <i32>:1 [#uses=0]
br label %shl_test
entry:
%a = load i32* @A ; <i32> [#uses=2]
%b = load i32* @B ; <i32> [#uses=1]
- %a_s = getelementptr [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %b_s = getelementptr [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
- %a_shl_s = getelementptr [14 x i8]* @a_shl_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_s = getelementptr [8 x i8], [8 x i8]* @a_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %b_s = getelementptr [8 x i8], [8 x i8]* @b_str, i64 0, i64 0 ; <i8*> [#uses=1]
+ %a_shl_s = getelementptr [14 x i8], [14 x i8]* @a_shl_str, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %a_s, i32 %a ) ; <i32>:0 [#uses=0]
call i32 (i8*, ...)* @printf( i8* %b_s, i32 %b ) ; <i32>:1 [#uses=0]
br label %shl_test
; Test case for folding getelementptr into a load/store
;
define i32 @checkFoldGEP(%Domain* %D, i64 %idx) {
- %reg841 = getelementptr %Domain* %D, i64 0, i32 1 ; <i32*> [#uses=1]
+ %reg841 = getelementptr %Domain, %Domain* %D, i64 0, i32 1 ; <i32*> [#uses=1]
%reg820 = load i32* %reg841 ; <i32> [#uses=1]
ret i32 %reg820
}
for.body:
%stack.addr.02 = phi %struct.xx_stack* [ %0, %for.body ], [ %stack, %entry ]
- %next = getelementptr inbounds %struct.xx_stack* %stack.addr.02, i64 0, i32 1
+ %next = getelementptr inbounds %struct.xx_stack, %struct.xx_stack* %stack.addr.02, i64 0, i32 1
%0 = load %struct.xx_stack** %next, align 8
%tobool = icmp eq %struct.xx_stack* %0, null
br i1 %tobool, label %for.end, label %for.body
for.end:
%top.0.lcssa = phi %struct.xx_stack* [ undef, %entry ], [ %stack.addr.02, %for.body ]
- %first = getelementptr inbounds %struct.xx_stack* %top.0.lcssa, i64 0, i32 0
+ %first = getelementptr inbounds %struct.xx_stack, %struct.xx_stack* %top.0.lcssa, i64 0, i32 0
%1 = load i32* %first, align 4
ret i32 %1
}
}
define <2 x i32*> @vector_gep(<2 x [3 x {i32, i32}]*> %a) {
- %w = getelementptr <2 x [3 x {i32, i32}]*> %a, <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 2, i32 3>, <2 x i32> <i32 1, i32 1>
+ %w = getelementptr [3 x {i32, i32}], <2 x [3 x {i32, i32}]*> %a, <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 2, i32 3>, <2 x i32> <i32 1, i32 1>
ret <2 x i32*> %w
}
unreachable
CuSuiteAdd.exit.us: ; preds = %for.body.us
- %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112* null, i32 0, i32 1, i32 %1
+ %arrayidx.i.us = getelementptr inbounds %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112, %struct.CuSuite.2.29.32.38.41.44.53.56.68.86.112* null, i32 0, i32 1, i32 %1
store %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111* %0, %struct.CuTest.1.28.31.37.40.43.52.55.67.85.111** %arrayidx.i.us, align 4
call void @llvm.trap()
unreachable
br i1 %tobool, label %if.then, label %if.end
if.then:
- %arrayidx1 = getelementptr inbounds i32* %a, i32 2000
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i32 2000
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %1, 300000
br label %return
if.end:
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1023
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1023
%2 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %2, 300
br label %return
br i1 %tobool, label %if.then, label %if.end
if.then:
- %arrayidx = getelementptr inbounds i8* %a, i32 1023
+ %arrayidx = getelementptr inbounds i8, i8* %a, i32 1023
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 300000
br label %return
if.end:
- %arrayidx1 = getelementptr inbounds i8* %a, i32 1024
+ %arrayidx1 = getelementptr inbounds i8, i8* %a, i32 1024
%1 = load i8* %arrayidx1, align 1
%conv2 = zext i8 %1 to i32
%add3 = add nsw i32 %conv2, 6000
%add = add nsw i32 %c, 200002
%0 = load i32* %a, align 4
%add1 = add nsw i32 %0, 200000
- %arrayidx2 = getelementptr inbounds i32* %a, i32 3000
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 3000
store i32 %add1, i32* %arrayidx2, align 4
%1 = load i32* %b, align 4
%add4 = add nsw i32 %1, 200001
- %arrayidx5 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx5 = getelementptr inbounds i32, i32* %a, i32 1
store i32 %add4, i32* %arrayidx5, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 1
%2 = load i32* %arrayidx7, align 4
%cmp = icmp sgt i32 %add4, %2
br i1 %cmp, label %if.then, label %if.else
if.then: ; preds = %entry
- %arrayidx8 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx8 = getelementptr inbounds i32, i32* %a, i32 2
%3 = load i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %b, i32 2000
+ %arrayidx9 = getelementptr inbounds i32, i32* %b, i32 2000
%4 = load i32* %arrayidx9, align 4
%sub = sub nsw i32 %3, %4
- %arrayidx10 = getelementptr inbounds i32* %a, i32 4000
+ %arrayidx10 = getelementptr inbounds i32, i32* %a, i32 4000
store i32 %sub, i32* %arrayidx10, align 4
br label %if.end
if.else: ; preds = %entry
- %arrayidx11 = getelementptr inbounds i32* %b, i32 3200
+ %arrayidx11 = getelementptr inbounds i32, i32* %b, i32 3200
store i32 %add, i32* %arrayidx11, align 4
br label %if.end
entry:
%0 = load i16* %a, align 2
%1 = zext i16 %0 to i64
- %add.ptr = getelementptr inbounds i16* %a, i32 1
+ %add.ptr = getelementptr inbounds i16, i16* %a, i32 1
%2 = load i16* %add.ptr, align 2
%3 = zext i16 %2 to i64
%4 = shl nuw nsw i64 %3, 16
entry:
%0 = load i8* %a, align 1
%1 = zext i8 %0 to i64
- %add.ptr = getelementptr inbounds i8* %a, i32 1
+ %add.ptr = getelementptr inbounds i8, i8* %a, i32 1
%2 = load i8* %add.ptr, align 1
%3 = zext i8 %2 to i64
%4 = shl nuw nsw i64 %3, 8
%add = add nsw i32 %0, %sum.03
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, %n
- %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
br i1 %exitcond, label %for.end.loopexit, label %for.body
for.end.loopexit:
%add = add nsw i32 %0, %sum.02
%inc = add nsw i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 40
- %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
br i1 %exitcond, label %for.end, label %for.body
for.end:
store i32 %i.01, i32* %arrayidx.phi, align 4
%inc = add nsw i32 %i.01, 1
%exitcond = icmp eq i32 %inc, 40
- %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
br i1 %exitcond, label %for.end, label %for.body
for.end:
; CHECK: endloop
for.body: ; preds = %for.body, %entry
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds [25000 x i32]* @b, i32 0, i32 %i.02
+ %arrayidx = getelementptr inbounds [25000 x i32], [25000 x i32]* @b, i32 0, i32 %i.02
store i32 %i.02, i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds [25000 x i32]* @a, i32 0, i32 %i.02
+ %arrayidx1 = getelementptr inbounds [25000 x i32], [25000 x i32]* @a, i32 0, i32 %i.02
store i32 %i.02, i32* %arrayidx1, align 4
%inc = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %inc, 25000
%arrayidx.phi = phi i32* [ %a, %entry ], [ %arrayidx.inc, %for.body ]
%i.02 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%b.addr.01 = phi i32* [ %b, %entry ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i32* %b.addr.01, i32 1, !dbg !21
+ %incdec.ptr = getelementptr inbounds i32, i32* %b.addr.01, i32 1, !dbg !21
tail call void @llvm.dbg.value(metadata i32* %incdec.ptr, i64 0, metadata !14, metadata !{!"0x102"}), !dbg !21
%0 = load i32* %b.addr.01, align 4, !dbg !21
store i32 %0, i32* %arrayidx.phi, align 4, !dbg !21
%inc = add nsw i32 %i.02, 1, !dbg !26
tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !26
%exitcond = icmp eq i32 %inc, 10, !dbg !19
- %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
br i1 %exitcond, label %for.end, label %for.body, !dbg !19
for.end: ; preds = %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%p_vector_iv14 = or i32 %polly.loopiv16, 1
%p_vector_iv3 = add i32 %p_vector_iv14, 1
%p_vector_iv415 = or i32 %polly.loopiv16, 3
- %p_arrayidx = getelementptr [400 x i8]* @A, i32 0, i32 %polly.loopiv16
- %p_arrayidx5 = getelementptr [400 x i8]* @A, i32 0, i32 %p_vector_iv14
- %p_arrayidx6 = getelementptr [400 x i8]* @A, i32 0, i32 %p_vector_iv3
- %p_arrayidx7 = getelementptr [400 x i8]* @A, i32 0, i32 %p_vector_iv415
+ %p_arrayidx = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %polly.loopiv16
+ %p_arrayidx5 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv14
+ %p_arrayidx6 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv3
+ %p_arrayidx7 = getelementptr [400 x i8], [400 x i8]* @A, i32 0, i32 %p_vector_iv415
store i8 123, i8* %p_arrayidx, align 1
store i8 123, i8* %p_arrayidx5, align 1
store i8 123, i8* %p_arrayidx6, align 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
%eq_r = fcmp oeq double %a, %b
%ne_r = fcmp une double %a, %b
%val1 = zext i1 %lt_r to i16
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0
+ %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0
+ %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0
+ %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0
+ %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0
+ %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0
+ %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0
call i32 (i8*, ...)* @printf( i8* %lt_s, i16 %val1 )
ret i32 0
}
%ge_r = fcmp oge double %a, %b
%eq_r = fcmp oeq double %a, %b
%ne_r = fcmp une double %a, %b
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0
+ %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0
+ %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0
+ %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0
+ %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0
+ %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0
+ %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0
call i32 (i8*, ...)* @printf( i8* %lt_s, i1 %lt_r )
call i32 (i8*, ...)* @printf( i8* %le_s, i1 %le_r )
call i32 (i8*, ...)* @printf( i8* %gt_s, i1 %gt_r )
%eq_r = fcmp oeq double %a, %b
%ne_r = fcmp une double %a, %b
%val1 = zext i1 %lt_r to i8
- %lt_s = getelementptr [12 x i8]* @lt_str, i64 0, i64 0
- %le_s = getelementptr [13 x i8]* @le_str, i64 0, i64 0
- %gt_s = getelementptr [12 x i8]* @gt_str, i64 0, i64 0
- %ge_s = getelementptr [13 x i8]* @ge_str, i64 0, i64 0
- %eq_s = getelementptr [13 x i8]* @eq_str, i64 0, i64 0
- %ne_s = getelementptr [13 x i8]* @ne_str, i64 0, i64 0
+ %lt_s = getelementptr [12 x i8], [12 x i8]* @lt_str, i64 0, i64 0
+ %le_s = getelementptr [13 x i8], [13 x i8]* @le_str, i64 0, i64 0
+ %gt_s = getelementptr [12 x i8], [12 x i8]* @gt_str, i64 0, i64 0
+ %ge_s = getelementptr [13 x i8], [13 x i8]* @ge_str, i64 0, i64 0
+ %eq_s = getelementptr [13 x i8], [13 x i8]* @eq_str, i64 0, i64 0
+ %ne_s = getelementptr [13 x i8], [13 x i8]* @ne_str, i64 0, i64 0
call i32 (i8*, ...)* @printf( i8* %lt_s, i8 %val1 )
ret i32 0
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memw(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#2)
entry:
%tmp = add i32 %n, %m
- %scevgep9 = getelementptr i32* %a, i32 %tmp
+ %scevgep9 = getelementptr i32, i32* %a, i32 %tmp
%val = load i32* %scevgep9, align 4
ret i32 %val
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memuh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1)
entry:
%tmp = add i32 %n, %m
- %scevgep9 = getelementptr i16* %a, i32 %tmp
+ %scevgep9 = getelementptr i16, i16* %a, i32 %tmp
%val = load i16* %scevgep9, align 2
ret i16 %val
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memh(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#1)
entry:
%tmp = add i32 %n, %m
- %scevgep9 = getelementptr i16* %a, i32 %tmp
+ %scevgep9 = getelementptr i16, i16* %a, i32 %tmp
%val = load i16* %scevgep9, align 2
%conv = sext i16 %val to i32
ret i32 %conv
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memub(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<#0)
entry:
%tmp = add i32 %n, %m
- %scevgep9 = getelementptr i8* %a, i32 %tmp
+ %scevgep9 = getelementptr i8, i8* %a, i32 %tmp
%val = load i8* %scevgep9, align 1
ret i8 %val
}
; CHECK: r{{[0-9]+}}{{ *}}={{ *}}memb(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#0)
entry:
%tmp = add i32 %n, %m
- %scevgep9 = getelementptr i8* %a, i32 %tmp
+ %scevgep9 = getelementptr i8, i8* %a, i32 %tmp
%val = load i8* %scevgep9, align 1
%conv = sext i8 %val to i32
ret i32 %conv
; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}memd(r{{[0-9]+}}{{ *}}+{{ *}}r{{[0-9]+}}{{ *}}<<{{ *}}#3)
entry:
%tmp = add i32 %n, %m
- %scevgep9 = getelementptr i64* %a, i32 %tmp
+ %scevgep9 = getelementptr i64, i64* %a, i32 %tmp
%val = load i64* %scevgep9, align 8
ret i64 %val
}
define void @memop_unsigned_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 5
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%add = add nsw i32 %conv1, %conv
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%sub = sub nsw i32 %conv1, %conv
define void @memop_unsigned_char_or_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %add.ptr, align 1
define void @memop_unsigned_char_and_index(i8* nocapture %p, i32 %i, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
define void @memop_unsigned_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%and = and i32 %conv, 223
define void @memop_unsigned_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 128
define void @memop_unsigned_char_add5_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 5
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%add = add nsw i32 %conv1, %conv
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv1 = zext i8 %0 to i32
%sub = sub nsw i32 %conv1, %conv
define void @memop_unsigned_char_or_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %add.ptr, align 1
define void @memop_unsigned_char_and_index5(i8* nocapture %p, i8 zeroext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
define void @memop_unsigned_char_clrbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%and = and i32 %conv, 223
define void @memop_unsigned_char_setbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%or = or i32 %conv, 128
define void @memop_signed_char_add5_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
define void @memop_signed_char_or_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %add.ptr, align 1
define void @memop_signed_char_and_index(i8* nocapture %p, i32 %i, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
define void @memop_signed_char_clrbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
define void @memop_signed_char_setbit_index(i8* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 %i
%0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
define void @memop_signed_char_add5_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%add = add nsw i32 %conv2, 5
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%add = add nsw i32 %conv13, %conv4
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i8 %x to i32
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv13 = zext i8 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
define void @memop_signed_char_or_index5(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%or3 = or i8 %0, %x
store i8 %or3, i8* %add.ptr, align 1
define void @memop_signed_char_and_index5(i8* nocapture %p, i8 signext %x) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%and3 = and i8 %0, %x
store i8 %and3, i8* %add.ptr, align 1
define void @memop_signed_char_clrbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%and = and i32 %conv2, 223
define void @memop_signed_char_setbit_index5(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#5){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i8* %p, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 5
%0 = load i8* %add.ptr, align 1
%conv2 = zext i8 %0 to i32
%or = or i32 %conv2, 128
define void @memop_unsigned_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
define void @memop_unsigned_short_or_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
define void @memop_unsigned_short_and_index(i16* nocapture %p, i32 %i, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
define void @memop_unsigned_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
define void @memop_unsigned_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
define void @memop_unsigned_short_add5_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%add = add nsw i32 %conv, 5
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%add = add nsw i32 %conv1, %conv
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
%conv = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv1 = zext i16 %0 to i32
%sub = sub nsw i32 %conv1, %conv
define void @memop_unsigned_short_or_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
define void @memop_unsigned_short_and_index5(i16* nocapture %p, i16 zeroext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
define void @memop_unsigned_short_clrbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%and = and i32 %conv, 65503
define void @memop_unsigned_short_setbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv = zext i16 %0 to i32
%or = or i32 %conv, 128
define void @memop_signed_short_add5_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
define void @memop_signed_short_or_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
define void @memop_signed_short_and_index(i16* nocapture %p, i32 %i, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
define void @memop_signed_short_clrbit_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
define void @memop_signed_short_setbit_index(i16* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 %i
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
define void @memop_signed_short_add5_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%add = add nsw i32 %conv2, 5
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}+={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%add = add nsw i32 %conv13, %conv4
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}r{{[0-9]+}}
%conv4 = zext i16 %x to i32
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv13 = zext i16 %0 to i32
%sub = sub nsw i32 %conv13, %conv4
define void @memop_signed_short_or_index5(i16* nocapture %p, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%or3 = or i16 %0, %x
store i16 %or3, i16* %add.ptr, align 2
define void @memop_signed_short_and_index5(i16* nocapture %p, i16 signext %x) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%and3 = and i16 %0, %x
store i16 %and3, i16* %add.ptr, align 2
define void @memop_signed_short_clrbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%and = and i32 %conv2, 65503
define void @memop_signed_short_setbit_index5(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i16* %p, i32 5
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 5
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%or = or i32 %conv2, 128
define void @memop_signed_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%add = add i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
define void @memop_signed_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%add = add i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
define void @memop_signed_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%sub = sub i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
define void @memop_signed_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
define void @memop_signed_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
define void @memop_signed_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
define void @memop_signed_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
define void @memop_signed_int_add5_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%add = add i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
define void @memop_signed_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%add = add i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
define void @memop_signed_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%sub = sub i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
define void @memop_signed_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
define void @memop_signed_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
define void @memop_signed_int_clrbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
define void @memop_signed_int_setbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
define void @memop_unsigned_int_add5_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
define void @memop_unsigned_int_add_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}+={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
define void @memop_unsigned_int_sub_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}-={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%sub = sub nsw i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
define void @memop_unsigned_int_or_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
define void @memop_unsigned_int_and_index(i32* nocapture %p, i32 %i, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
define void @memop_unsigned_int_clrbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
define void @memop_unsigned_int_setbit_index(i32* nocapture %p, i32 %i) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#0){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 %i
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
define void @memop_unsigned_int_add5_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}#5
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, 5
store i32 %add, i32* %add.ptr, align 4
define void @memop_unsigned_int_add_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}+={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%add = add nsw i32 %0, %x
store i32 %add, i32* %add.ptr, align 4
define void @memop_unsigned_int_sub_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%sub = sub nsw i32 %0, %x
store i32 %sub, i32* %add.ptr, align 4
define void @memop_unsigned_int_or_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}|={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, %x
store i32 %or, i32* %add.ptr, align 4
define void @memop_unsigned_int_and_index5(i32* nocapture %p, i32 %x) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}&={{ *}}r{{[0-9]+}}
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, %x
store i32 %and, i32* %add.ptr, align 4
define void @memop_unsigned_int_clrbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}clrbit({{ *}}#5{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%and = and i32 %0, -33
store i32 %and, i32* %add.ptr, align 4
define void @memop_unsigned_int_setbit_index5(i32* nocapture %p) nounwind {
entry:
; CHECK: memw(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}={{ *}}setbit({{ *}}#7{{ *}})
- %add.ptr = getelementptr inbounds i32* %p, i32 5
+ %add.ptr = getelementptr inbounds i32, i32* %p, i32 5
%0 = load i32* %add.ptr, align 4
%or = or i32 %0, 128
store i32 %or, i32* %add.ptr, align 4
%p.addr = alloca i32*, align 4
store i32* %p, i32** %p.addr, align 4
%0 = load i32** %p.addr, align 4
- %add.ptr = getelementptr inbounds i32* %0, i32 10
+ %add.ptr = getelementptr inbounds i32, i32* %0, i32 10
%1 = load i32* %add.ptr, align 4
%sub = sub nsw i32 %1, 1
store i32 %sub, i32* %add.ptr, align 4
store i32 %i, i32* %i.addr, align 4
%0 = load i32** %p.addr, align 4
%1 = load i32* %i.addr, align 4
- %add.ptr = getelementptr inbounds i32* %0, i32 %1
- %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 10
+ %add.ptr = getelementptr inbounds i32, i32* %0, i32 %1
+ %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 10
%2 = load i32* %add.ptr1, align 4
%sub = sub nsw i32 %2, 1
store i32 %sub, i32* %add.ptr1, align 4
define void @f(i16* nocapture %p) nounwind {
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
- %add.ptr = getelementptr inbounds i16* %p, i32 10
+ %add.ptr = getelementptr inbounds i16, i16* %p, i32 10
%0 = load i16* %add.ptr, align 2
%conv2 = zext i16 %0 to i32
%sub = add nsw i32 %conv2, 65535
entry:
; CHECK: memh(r{{[0-9]+}}{{ *}}+{{ *}}#20){{ *}}-={{ *}}#1
%add.ptr.sum = add i32 %i, 10
- %add.ptr1 = getelementptr inbounds i16* %p, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i16, i16* %p, i32 %add.ptr.sum
%0 = load i16* %add.ptr1, align 2
%conv3 = zext i16 %0 to i32
%sub = add nsw i32 %conv3, 65535
define void @f(i8* nocapture %p) nounwind {
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
- %add.ptr = getelementptr inbounds i8* %p, i32 10
+ %add.ptr = getelementptr inbounds i8, i8* %p, i32 10
%0 = load i8* %add.ptr, align 1
%conv = zext i8 %0 to i32
%sub = add nsw i32 %conv, 255
entry:
; CHECK: memb(r{{[0-9]+}}{{ *}}+{{ *}}#10){{ *}}-={{ *}}#1
%add.ptr.sum = add i32 %i, 10
- %add.ptr1 = getelementptr inbounds i8* %p, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i8, i8* %p, i32 %add.ptr.sum
%0 = load i8* %add.ptr1, align 1
%conv = zext i8 %0 to i32
%sub = add nsw i32 %conv, 255
%conv = sext i16 %1 to i32
%add = add i32 %0, %sum.03
%add2 = add i32 %add, %conv
- %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
- %arrayidx1.inc = getelementptr i16* %arrayidx1.phi, i32 1
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+ %arrayidx1.inc = getelementptr i16, i16* %arrayidx1.phi, i32 1
%lsr.iv.next = add i32 %lsr.iv, -1
%exitcond = icmp eq i32 %lsr.iv.next, 0
br i1 %exitcond, label %for.end, label %for.body
%add3 = add i32 %factor, %conv
store i32 %add3, i32* %arrayidx.phi, align 4
- %arrayidx.inc = getelementptr i32* %arrayidx.phi, i32 1
- %arrayidx1.inc = getelementptr i16* %arrayidx1.phi, i32 1
+ %arrayidx.inc = getelementptr i32, i32* %arrayidx.phi, i32 1
+ %arrayidx1.inc = getelementptr i16, i16* %arrayidx1.phi, i32 1
%lsr.iv.next = add i32 %lsr.iv, -1
%exitcond = icmp eq i32 %lsr.iv.next, 0
br i1 %exitcond, label %for.end, label %for.body
i8* nocapture %scr_s_even_code_ptr, i8* nocapture %scr_s_odd_code_ptr)
nounwind {
entry:
- %scevgep = getelementptr %union.vect64* %sss_extracted_bit_rx_data_ptr, i32 1
- %scevgep28 = getelementptr %union.vect32* %s_odd, i32 1
- %scevgep32 = getelementptr %union.vect32* %s_even, i32 1
- %scevgep36 = getelementptr i8* %scr_s_odd_code_ptr, i32 1
- %scevgep39 = getelementptr i8* %scr_s_even_code_ptr, i32 1
+ %scevgep = getelementptr %union.vect64, %union.vect64* %sss_extracted_bit_rx_data_ptr, i32 1
+ %scevgep28 = getelementptr %union.vect32, %union.vect32* %s_odd, i32 1
+ %scevgep32 = getelementptr %union.vect32, %union.vect32* %s_even, i32 1
+ %scevgep36 = getelementptr i8, i8* %scr_s_odd_code_ptr, i32 1
+ %scevgep39 = getelementptr i8, i8* %scr_s_even_code_ptr, i32 1
br label %for.body
for.body: ; preds = %for.body, %entry
%10 = load i8* %lsr.iv37, align 1
%lftr.wideiv = trunc i32 %lsr.iv42 to i8
%exitcond = icmp eq i8 %lftr.wideiv, 32
- %scevgep26 = getelementptr %union.vect64* %lsr.iv, i32 1
- %scevgep30 = getelementptr %union.vect32* %lsr.iv29, i32 1
- %scevgep34 = getelementptr %union.vect32* %lsr.iv33, i32 1
- %scevgep38 = getelementptr i8* %lsr.iv37, i32 1
- %scevgep41 = getelementptr i8* %lsr.iv40, i32 1
+ %scevgep26 = getelementptr %union.vect64, %union.vect64* %lsr.iv, i32 1
+ %scevgep30 = getelementptr %union.vect32, %union.vect32* %lsr.iv29, i32 1
+ %scevgep34 = getelementptr %union.vect32, %union.vect32* %lsr.iv33, i32 1
+ %scevgep38 = getelementptr i8, i8* %lsr.iv37, i32 1
+ %scevgep41 = getelementptr i8, i8* %lsr.iv40, i32 1
%lsr.iv.next = add i32 %lsr.iv42, 1
br i1 %exitcond, label %for.end, label %for.body
entry:
%0 = load i32* %a, align 4
%1 = zext i32 %0 to i64
- %add.ptr = getelementptr inbounds i32* %a, i32 1
+ %add.ptr = getelementptr inbounds i32, i32* %a, i32 1
%2 = load i32* %add.ptr, align 4
%3 = zext i32 %2 to i64
%4 = shl nuw i64 %3, 32
while.body41.i: ; preds = %while.cond36.i
%tmp43.i = load i8** @foo ; <i8*> [#uses=2]
%tmp44.i = load i8* %tmp43.i ; <i8> [#uses=1]
- %ptrincdec50.i = getelementptr inbounds i8* %tmp43.i, i16 1 ; <i8*> [#uses=1]
+ %ptrincdec50.i = getelementptr inbounds i8, i8* %tmp43.i, i16 1 ; <i8*> [#uses=1]
store i8* %ptrincdec50.i, i8** @foo
%cmp55.i = icmp eq i8 %tmp44.i, %c ; <i1> [#uses=1]
br i1 %cmp55.i, label %do.end41, label %while.cond36.i
define i16 @main() noreturn nounwind {
entry:
%0 = tail call i8* asm "", "=r,0"(i8* getelementptr inbounds ([10 x i8]* @buf, i16 0, i16 0)) nounwind ; <i8*> [#uses=1]
- %sub.ptr = getelementptr inbounds i8* %0, i16 1 ; <i8*> [#uses=1]
+ %sub.ptr = getelementptr inbounds i8, i8* %0, i16 1 ; <i8*> [#uses=1]
%sub.ptr.lhs.cast = ptrtoint i8* %sub.ptr to i16 ; <i16> [#uses=1]
%sub.ptr.sub = sub i16 %sub.ptr.lhs.cast, ptrtoint ([10 x i8]* @buf to i16) ; <i16> [#uses=1]
%cmp = icmp eq i16 %sub.ptr.sub, 1 ; <i1> [#uses=1]
@bar = internal constant [2 x i8] [ i8 32, i8 64 ]
define i8 @am3(i8 %x, i16 %n) nounwind {
- %1 = getelementptr [2 x i8]* @bar, i16 0, i16 %n
+ %1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %n
%2 = load i8* %1
%3 = or i8 %2,%x
ret i8 %3
; CHECK: bis.w &32, r15
define i16 @am5(i16 %x, i16* %a) nounwind {
- %1 = getelementptr i16* %a, i16 2
+ %1 = getelementptr i16, i16* %a, i16 2
%2 = load i16* %1
%3 = or i16 %2,%x
ret i16 %3
@duh = internal constant %T { i16 16, [2 x i8][i8 32, i8 64 ] }
define i8 @am7(i8 %x, i16 %n) nounwind {
- %1 = getelementptr %T* @duh, i32 0, i32 1
- %2 = getelementptr [2 x i8]* %1, i16 0, i16 %n
+ %1 = getelementptr %T, %T* @duh, i32 0, i32 1
+ %2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
%3= load i8* %2
%4 = or i8 %3,%x
ret i8 %4
@bar = external global [2 x i8]
define void @am3(i16 %i, i8 %x) nounwind {
- %1 = getelementptr [2 x i8]* @bar, i16 0, i16 %i
+ %1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %i
%2 = load i8* %1
%3 = or i8 %x, %2
store i8 %3, i8* %1
; CHECK: bis.w r15, &32
define void @am5(i16* %a, i16 %x) readonly {
- %1 = getelementptr inbounds i16* %a, i16 2
+ %1 = getelementptr inbounds i16, i16* %a, i16 2
%2 = load i16* %1
%3 = or i16 %x, %2
store i16 %3, i16* %1
@duh = external global %T
define void @am7(i16 %n, i8 %x) nounwind {
- %1 = getelementptr %T* @duh, i32 0, i32 1
- %2 = getelementptr [2 x i8]* %1, i16 0, i16 %n
+ %1 = getelementptr %T, %T* @duh, i32 0, i32 1
+ %2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
%3 = load i8* %2
%4 = or i8 %x, %3
store i8 %4, i8* %2
@bar = internal constant [2 x i8] [ i8 32, i8 64 ]
define i8 @am3(i16 %n) nounwind {
- %1 = getelementptr [2 x i8]* @bar, i16 0, i16 %n
+ %1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %n
%2 = load i8* %1
ret i8 %2
}
; CHECK: mov.w &32, r15
define i16 @am5(i16* %a) nounwind {
- %1 = getelementptr i16* %a, i16 2
+ %1 = getelementptr i16, i16* %a, i16 2
%2 = load i16* %1
ret i16 %2
}
@duh = internal constant %T { i16 16, [2 x i8][i8 32, i8 64 ] }
define i8 @am7(i16 %n) nounwind {
- %1 = getelementptr %T* @duh, i32 0, i32 1
- %2 = getelementptr [2 x i8]* %1, i16 0, i16 %n
+ %1 = getelementptr %T, %T* @duh, i32 0, i32 1
+ %2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
%3= load i8* %2
ret i8 %3
}
@bar = external global [2 x i8]
define void @am3(i16 %i, i8 %a) nounwind {
- %1 = getelementptr [2 x i8]* @bar, i16 0, i16 %i
+ %1 = getelementptr [2 x i8], [2 x i8]* @bar, i16 0, i16 %i
store i8 %a, i8* %1
ret void
}
; CHECK: mov.w r15, &32
define void @am5(i16* nocapture %p, i16 %a) nounwind readonly {
- %1 = getelementptr inbounds i16* %p, i16 2
+ %1 = getelementptr inbounds i16, i16* %p, i16 2
store i16 %a, i16* %1
ret void
}
@duh = external global %T
define void @am7(i16 %n, i8 %a) nounwind {
- %1 = getelementptr %T* @duh, i32 0, i32 1
- %2 = getelementptr [2 x i8]* %1, i16 0, i16 %n
+ %1 = getelementptr %T, %T* @duh, i32 0, i32 1
+ %2 = getelementptr [2 x i8], [2 x i8]* %1, i16 0, i16 %n
store i8 %a, i8* %2
ret void
}
entry:
; CHECK-LABEL: callee:
; CHECK: mov.w 2(r1), r15
- %0 = getelementptr inbounds %struct.Foo* %f, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.Foo, %struct.Foo* %f, i32 0, i32 0
%1 = load i16* %0, align 2
ret i16 %1
}
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
%gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
br label %bb2
define internal i16 @foo(i16 %i) nounwind {
entry:
- %tmp1 = getelementptr inbounds [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
+ %tmp1 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i16 0, i16 %i ; <i8**> [#uses=1]
%gotovar.4.0 = load i8** %tmp1, align 4 ; <i8*> [#uses=1]
; CHECK: br .LC.0.2070(r12)
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: add:
; CHECK: add.w @r{{[0-9]+}}+, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: sub:
; CHECK: sub.w @r{{[0-9]+}}+, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: or:
; CHECK: bis.w @r{{[0-9]+}}+, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: xor:
; CHECK: xor.w @r{{[0-9]+}}+, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
for.body: ; preds = %for.body, %entry
%i.010 = phi i16 [ 0, %entry ], [ %inc, %for.body ] ; <i16> [#uses=2]
%sum.09 = phi i16 [ 0, %entry ], [ %add, %for.body ] ; <i16> [#uses=1]
- %arrayidx = getelementptr i16* %a, i16 %i.010 ; <i16*> [#uses=1]
+ %arrayidx = getelementptr i16, i16* %a, i16 %i.010 ; <i16*> [#uses=1]
; CHECK-LABEL: and:
; CHECK: and.w @r{{[0-9]+}}+, r{{[0-9]+}}
%tmp4 = load i16* %arrayidx ; <i16> [#uses=1]
; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
; CHECK: sw ${{[0-9]+}}, {{[0-9]+}}($4)
- getelementptr %struct.sret0* %agg.result, i32 0, i32 0 ; <i32*>:0 [#uses=1]
+ getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 0 ; <i32*>:0 [#uses=1]
store i32 %dummy, i32* %0, align 4
- getelementptr %struct.sret0* %agg.result, i32 0, i32 1 ; <i32*>:1 [#uses=1]
+ getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 1 ; <i32*>:1 [#uses=1]
store i32 %dummy, i32* %1, align 4
- getelementptr %struct.sret0* %agg.result, i32 0, i32 2 ; <i32*>:2 [#uses=1]
+ getelementptr %struct.sret0, %struct.sret0* %agg.result, i32 0, i32 2 ; <i32*>:2 [#uses=1]
store i32 %dummy, i32* %2, align 4
ret void
}
continue.outer: ; preds = %case4, %entry
%p.0.ph.rec = phi i32 [ 0, %entry ], [ %indvar.next, %case4 ] ; <i32> [#uses=2]
- %p.0.ph = getelementptr i8* %0, i32 %p.0.ph.rec ; <i8*> [#uses=1]
+ %p.0.ph = getelementptr i8, i8* %0, i32 %p.0.ph.rec ; <i8*> [#uses=1]
%1 = load i8* %p.0.ph ; <i8> [#uses=1]
switch i8 %1, label %infloop [
i8 0, label %return.split
load i16* %xseed, align 2 ; <i16>:0 [#uses=1]
uitofp i16 %0 to double ; <double>:1 [#uses=1]
tail call double @ldexp( double %1, i32 -48 ) nounwind ; <double>:2 [#uses=1]
- getelementptr i16* %xseed, i32 1 ; <i16*>:3 [#uses=1]
+ getelementptr i16, i16* %xseed, i32 1 ; <i16*>:3 [#uses=1]
load i16* %3, align 2 ; <i16>:4 [#uses=1]
uitofp i16 %4 to double ; <double>:5 [#uses=1]
tail call double @ldexp( double %5, i32 -32 ) nounwind ; <double>:6 [#uses=1]
fadd double %2, %6 ; <double>:7 [#uses=1]
- getelementptr i16* %xseed, i32 2 ; <i16*>:8 [#uses=1]
+ getelementptr i16, i16* %xseed, i32 2 ; <i16*>:8 [#uses=1]
load i16* %8, align 2 ; <i16>:9 [#uses=1]
uitofp i16 %9 to double ; <double>:10 [#uses=1]
tail call double @ldexp( double %10, i32 -16 ) nounwind ; <double>:11 [#uses=1]
load i16* %xseed, align 2 ; <i16>:1 [#uses=1]
uitofp i16 %1 to double ; <double>:2 [#uses=1]
tail call double @ldexp( double %2, i32 -48 ) nounwind ; <double>:3 [#uses=1]
- getelementptr i16* %xseed, i32 1 ; <i16*>:4 [#uses=1]
+ getelementptr i16, i16* %xseed, i32 1 ; <i16*>:4 [#uses=1]
load i16* %4, align 2 ; <i16>:5 [#uses=1]
uitofp i16 %5 to double ; <double>:6 [#uses=1]
tail call double @ldexp( double %6, i32 -32 ) nounwind ; <double>:7 [#uses=1]
fadd double %3, %7 ; <double>:8 [#uses=1]
- getelementptr i16* %xseed, i32 2 ; <i16*>:9 [#uses=1]
+ getelementptr i16, i16* %xseed, i32 2 ; <i16*>:9 [#uses=1]
load i16* %9, align 2 ; <i16>:10 [#uses=1]
uitofp i16 %10 to double ; <double>:11 [#uses=1]
tail call double @ldexp( double %11, i32 -16 ) nounwind ; <double>:12 [#uses=1]
entry:
; CHECK-LABEL: .ent foo
%0 = load float** @y, align 4
- %arrayidx = getelementptr inbounds float* %0, i32 64000
+ %arrayidx = getelementptr inbounds float, float* %0, i32 64000
store float 5.500000e+00, float* %arrayidx, align 4
; CHECK: lui $[[REG_FPCONST_INT:[0-9]+]], 16560
; CHECK: mtc1 $[[REG_FPCONST_INT]], $f[[REG_FPCONST:[0-9]+]]
entry:
; CHECK-LABEL: .ent goo
%0 = load float** @y, align 4
- %arrayidx = getelementptr inbounds float* %0, i32 64000
+ %arrayidx = getelementptr inbounds float, float* %0, i32 64000
%1 = load float* %arrayidx, align 4
store float %1, float* @result, align 4
; CHECK-DAG: lw $[[REG_RESULT:[0-9]+]], %got(result)(${{[0-9]+}})
for.body3:
%s.120 = phi i32 [ %s.022, %for.cond1.preheader ], [ %add7, %for.body3 ]
%j.019 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
- %arrayidx4 = getelementptr inbounds [256 x i32]* %a, i32 %i.021, i32 %j.019
+ %arrayidx4 = getelementptr inbounds [256 x i32], [256 x i32]* %a, i32 %i.021, i32 %j.019
%0 = load i32* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds [256 x i32]* %b, i32 %i.021, i32 %j.019
+ %arrayidx6 = getelementptr inbounds [256 x i32], [256 x i32]* %b, i32 %i.021, i32 %j.019
%1 = load i32* %arrayidx6, align 4
%add = add i32 %0, %s.120
%add7 = add i32 %add, %1
%zz = alloca i32, align 4
%z = alloca i32, align 4
%0 = load i32* @i, align 4
- %arrayidx = getelementptr inbounds [512 x i32]* %y, i32 0, i32 10
+ %arrayidx = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
store i32 %0, i32* %arrayidx, align 4
%1 = load i32* @i, align 4
store i32 %1, i32* %x, align 8
call void @p(i32* %x)
- %arrayidx1 = getelementptr inbounds [512 x i32]* %y, i32 0, i32 10
+ %arrayidx1 = getelementptr inbounds [512 x i32], [512 x i32]* %y, i32 0, i32 10
call void @p(i32* %arrayidx1)
ret void
}
; CHECK: move $4, $[[T0]]
; CHECK: move $4, $[[T2]]
%tmp1 = alloca i8, i32 %size, align 4
- %add.ptr = getelementptr inbounds i8* %tmp1, i32 5
+ %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 5
store i8 97, i8* %add.ptr, align 1
%tmp4 = alloca i8, i32 %size, align 4
call void @foo2(double 1.000000e+00, double 2.000000e+00, i32 3) nounwind
if.then: ; preds = %entry
; CHECK: addiu $4, $[[T0]], 40
- %add.ptr = getelementptr inbounds i8* %tmp1, i32 40
+ %add.ptr = getelementptr inbounds i8, i8* %tmp1, i32 40
%1 = bitcast i8* %add.ptr to i32*
call void @foo3(i32* %1) nounwind
- %arrayidx15.pre = getelementptr inbounds i8* %tmp1, i32 12
+ %arrayidx15.pre = getelementptr inbounds i8, i8* %tmp1, i32 12
%.pre = bitcast i8* %arrayidx15.pre to i32*
br label %if.end
if.else: ; preds = %entry
; CHECK: addiu $4, $[[T0]], 12
- %add.ptr5 = getelementptr inbounds i8* %tmp1, i32 12
+ %add.ptr5 = getelementptr inbounds i8, i8* %tmp1, i32 12
%2 = bitcast i8* %add.ptr5 to i32*
call void @foo3(i32* %2) nounwind
br label %if.end
%.pre-phi = phi i32* [ %2, %if.else ], [ %.pre, %if.then ]
%tmp7 = load i32* %0, align 4
- %arrayidx9 = getelementptr inbounds i8* %tmp1, i32 4
+ %arrayidx9 = getelementptr inbounds i8, i8* %tmp1, i32 4
%3 = bitcast i8* %arrayidx9 to i32*
%tmp10 = load i32* %3, align 4
- %arrayidx12 = getelementptr inbounds i8* %tmp1, i32 8
+ %arrayidx12 = getelementptr inbounds i8, i8* %tmp1, i32 8
%4 = bitcast i8* %arrayidx12 to i32*
%tmp13 = load i32* %4, align 4
%tmp16 = load i32* %.pre-phi, align 4
- %arrayidx18 = getelementptr inbounds i8* %tmp1, i32 16
+ %arrayidx18 = getelementptr inbounds i8, i8* %tmp1, i32 16
%5 = bitcast i8* %arrayidx18 to i32*
%tmp19 = load i32* %5, align 4
- %arrayidx21 = getelementptr inbounds i8* %tmp1, i32 20
+ %arrayidx21 = getelementptr inbounds i8, i8* %tmp1, i32 20
%6 = bitcast i8* %arrayidx21 to i32*
%tmp22 = load i32* %6, align 4
- %arrayidx24 = getelementptr inbounds i8* %tmp1, i32 24
+ %arrayidx24 = getelementptr inbounds i8, i8* %tmp1, i32 24
%7 = bitcast i8* %arrayidx24 to i32*
%tmp25 = load i32* %7, align 4
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([22 x i8]* @.str, i32 0, i32 0), i32 %tmp7, i32 %tmp10, i32 %tmp13, i32 %tmp16, i32 %tmp19, i32 %tmp22, i32 %tmp25) nounwind
%5 = load i32* @jjjj, align 4
%6 = load i32* @iiii, align 4
%7 = load i32** %ip, align 4
- %arrayidx = getelementptr inbounds i32* %7, i32 %6
+ %arrayidx = getelementptr inbounds i32, i32* %7, i32 %6
store i32 %5, i32* %arrayidx, align 4
%8 = load i32* @kkkk, align 4
%9 = load i32* @jjjj, align 4
%10 = load i32** %ip, align 4
- %arrayidx1 = getelementptr inbounds i32* %10, i32 %9
+ %arrayidx1 = getelementptr inbounds i32, i32* %10, i32 %9
store i32 %8, i32* %arrayidx1, align 4
%11 = load i32* @iiii, align 4
%12 = load i32* @kkkk, align 4
%13 = load i32** %ip, align 4
- %arrayidx2 = getelementptr inbounds i32* %13, i32 %12
+ %arrayidx2 = getelementptr inbounds i32, i32* %13, i32 %12
store i32 %11, i32* %arrayidx2, align 4
%14 = load i32** %ip, align 4
- %arrayidx3 = getelementptr inbounds i32* %14, i32 25
+ %arrayidx3 = getelementptr inbounds i32, i32* %14, i32 25
%15 = load i32* %arrayidx3, align 4
store i32 %15, i32* @riii, align 4
%16 = load i32** %ip, align 4
- %arrayidx4 = getelementptr inbounds i32* %16, i32 35
+ %arrayidx4 = getelementptr inbounds i32, i32* %16, i32 35
%17 = load i32* %arrayidx4, align 4
store i32 %17, i32* @rjjj, align 4
%18 = load i32** %ip, align 4
- %arrayidx5 = getelementptr inbounds i32* %18, i32 100
+ %arrayidx5 = getelementptr inbounds i32, i32* %18, i32 100
%19 = load i32* %arrayidx5, align 4
store i32 %19, i32* @rkkk, align 4
%20 = load i32* @t, align 4
%21 = load i32** %ip, align 4
- %arrayidx6 = getelementptr inbounds i32* %21, i32 %20
+ %arrayidx6 = getelementptr inbounds i32, i32* %21, i32 %20
%22 = load i32* %arrayidx6, align 4
; 16: addiu $sp, -16
call void @temp(i32 %22)
for.body: ; preds = %entry, %for.body
%s.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%i.05 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %i.05
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.05
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %s.06
%inc = add nsw i32 %i.05, 1
%i.2 = phi i32 [ %i.1, %L2 ], [ %inc, %L3 ]
%puts7 = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str7, i32 0, i32 0))
%inc = add i32 %i.2, 1
- %arrayidx = getelementptr inbounds [5 x i8*]* @main.L, i32 0, i32 %i.2
+ %arrayidx = getelementptr inbounds [5 x i8*], [5 x i8*]* @main.L, i32 0, i32 %i.2
%0 = load i8** %arrayidx, align 4
indirectbr i8* %0, [label %L1, label %L2, label %L3, label %L4]
; 16: jrc ${{[0-9]+}}
define void @double_args(double %a, double %b, double %c, double %d, double %e,
double %f, double %g, double %h, double %i) nounwind {
entry:
- %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
store volatile double %a, double* %0
- %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+ %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
store volatile double %b, double* %1
- %2 = getelementptr [11 x double]* @doubles, i32 0, i32 3
+ %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
store volatile double %c, double* %2
- %3 = getelementptr [11 x double]* @doubles, i32 0, i32 4
+ %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
store volatile double %d, double* %3
- %4 = getelementptr [11 x double]* @doubles, i32 0, i32 5
+ %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
store volatile double %e, double* %4
- %5 = getelementptr [11 x double]* @doubles, i32 0, i32 6
+ %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
store volatile double %f, double* %5
- %6 = getelementptr [11 x double]* @doubles, i32 0, i32 7
+ %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
store volatile double %g, double* %6
- %7 = getelementptr [11 x double]* @doubles, i32 0, i32 8
+ %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
store volatile double %h, double* %7
- %8 = getelementptr [11 x double]* @doubles, i32 0, i32 9
+ %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
store volatile double %i, double* %8
ret void
}
float %f, float %g, float %h, float %i, float %j)
nounwind {
entry:
- %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
store volatile float %a, float* %0
- %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+ %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
store volatile float %b, float* %1
- %2 = getelementptr [11 x float]* @floats, i32 0, i32 3
+ %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
store volatile float %c, float* %2
- %3 = getelementptr [11 x float]* @floats, i32 0, i32 4
+ %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
store volatile float %d, float* %3
- %4 = getelementptr [11 x float]* @floats, i32 0, i32 5
+ %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
store volatile float %e, float* %4
- %5 = getelementptr [11 x float]* @floats, i32 0, i32 6
+ %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
store volatile float %f, float* %5
- %6 = getelementptr [11 x float]* @floats, i32 0, i32 7
+ %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
store volatile float %g, float* %6
- %7 = getelementptr [11 x float]* @floats, i32 0, i32 8
+ %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
store volatile float %h, float* %7
- %8 = getelementptr [11 x float]* @floats, i32 0, i32 9
+ %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
store volatile float %i, float* %8
- %9 = getelementptr [11 x float]* @floats, i32 0, i32 10
+ %9 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 10
store volatile float %j, float* %9
ret void
}
define void @double_arg2(i8 %a, double %b) nounwind {
entry:
- %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
store volatile double %b, double* %1
ret void
}
define void @float_arg2(i8 signext %a, float %b) nounwind {
entry:
- %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
store volatile float %b, float* %1
ret void
}
define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
entry:
- %0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
+ %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
store volatile fp128 %a, fp128* %0
- %1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
+ %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
store volatile fp128 %b, fp128* %1
- %2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
+ %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
store volatile fp128 %c, fp128* %2
- %3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
+ %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
store volatile fp128 %d, fp128* %3
- %4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
+ %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
store volatile fp128 %e, fp128* %4
ret void
}
define void @double_args(double %a, ...)
nounwind {
entry:
- %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
store volatile double %a, double* %0
%ap = alloca i8*
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
%b = va_arg i8** %ap, double
- %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+ %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
store volatile double %b, double* %1
call void @llvm.va_end(i8* %ap2)
ret void
define void @float_args(float %a, ...) nounwind {
entry:
- %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
store volatile float %a, float* %0
%ap = alloca i8*
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
%b = va_arg i8** %ap, float
- %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+ %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
store volatile float %b, float* %1
call void @llvm.va_end(i8* %ap2)
ret void
define void @double_args(double %a, double %b, double %c, double %d, double %e,
double %f, double %g, double %h, double %i) nounwind {
entry:
- %0 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ %0 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
store volatile double %a, double* %0
- %1 = getelementptr [11 x double]* @doubles, i32 0, i32 2
+ %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 2
store volatile double %b, double* %1
- %2 = getelementptr [11 x double]* @doubles, i32 0, i32 3
+ %2 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 3
store volatile double %c, double* %2
- %3 = getelementptr [11 x double]* @doubles, i32 0, i32 4
+ %3 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 4
store volatile double %d, double* %3
- %4 = getelementptr [11 x double]* @doubles, i32 0, i32 5
+ %4 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 5
store volatile double %e, double* %4
- %5 = getelementptr [11 x double]* @doubles, i32 0, i32 6
+ %5 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 6
store volatile double %f, double* %5
- %6 = getelementptr [11 x double]* @doubles, i32 0, i32 7
+ %6 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 7
store volatile double %g, double* %6
- %7 = getelementptr [11 x double]* @doubles, i32 0, i32 8
+ %7 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 8
store volatile double %h, double* %7
- %8 = getelementptr [11 x double]* @doubles, i32 0, i32 9
+ %8 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 9
store volatile double %i, double* %8
ret void
}
define void @float_args(float %a, float %b, float %c, float %d, float %e,
float %f, float %g, float %h, float %i) nounwind {
entry:
- %0 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ %0 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
store volatile float %a, float* %0
- %1 = getelementptr [11 x float]* @floats, i32 0, i32 2
+ %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 2
store volatile float %b, float* %1
- %2 = getelementptr [11 x float]* @floats, i32 0, i32 3
+ %2 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 3
store volatile float %c, float* %2
- %3 = getelementptr [11 x float]* @floats, i32 0, i32 4
+ %3 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 4
store volatile float %d, float* %3
- %4 = getelementptr [11 x float]* @floats, i32 0, i32 5
+ %4 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 5
store volatile float %e, float* %4
- %5 = getelementptr [11 x float]* @floats, i32 0, i32 6
+ %5 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 6
store volatile float %f, float* %5
- %6 = getelementptr [11 x float]* @floats, i32 0, i32 7
+ %6 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 7
store volatile float %g, float* %6
- %7 = getelementptr [11 x float]* @floats, i32 0, i32 8
+ %7 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 8
store volatile float %h, float* %7
- %8 = getelementptr [11 x float]* @floats, i32 0, i32 9
+ %8 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 9
store volatile float %i, float* %8
ret void
}
define void @double_arg2(i8 %a, double %b) nounwind {
entry:
- %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x double]* @doubles, i32 0, i32 1
+ %1 = getelementptr [11 x double], [11 x double]* @doubles, i32 0, i32 1
store volatile double %b, double* %1
ret void
}
define void @float_arg2(i8 %a, float %b) nounwind {
entry:
- %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x float]* @floats, i32 0, i32 1
+ %1 = getelementptr [11 x float], [11 x float]* @floats, i32 0, i32 1
store volatile float %b, float* %1
ret void
}
define void @ldouble_args(fp128 %a, fp128 %b, fp128 %c, fp128 %d, fp128 %e) nounwind {
entry:
- %0 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 1
+ %0 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 1
store volatile fp128 %a, fp128* %0
- %1 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 2
+ %1 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 2
store volatile fp128 %b, fp128* %1
- %2 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 3
+ %2 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 3
store volatile fp128 %c, fp128* %2
- %3 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 4
+ %3 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 4
store volatile fp128 %d, fp128* %3
- %4 = getelementptr [11 x fp128]* @ldoubles, i32 0, i32 5
+ %4 = getelementptr [11 x fp128], [11 x fp128]* @ldoubles, i32 0, i32 5
store volatile fp128 %e, fp128* %4
ret void
}
store %struct.SmallStruct_1b* %ss, %struct.SmallStruct_1b** %ss.addr, align 8
%0 = load %struct.SmallStruct_1b** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
- %2 = getelementptr { i8 }* %1, i32 0, i32 0
+ %2 = getelementptr { i8 }, { i8 }* %1, i32 0, i32 0
%3 = load i8* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %3)
ret void
store %struct.SmallStruct_2b* %ss, %struct.SmallStruct_2b** %ss.addr, align 8
%0 = load %struct.SmallStruct_2b** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_2b* %0 to { i16 }*
- %2 = getelementptr { i16 }* %1, i32 0, i32 0
+ %2 = getelementptr { i16 }, { i16 }* %1, i32 0, i32 0
%3 = load i16* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i16 inreg %3)
ret void
%1 = bitcast { i24 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_3b* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 3, i32 0, i1 false)
- %3 = getelementptr { i24 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i24 }, { i24 }* %.coerce, i32 0, i32 0
%4 = load i24* %3, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i24 inreg %4)
ret void
store %struct.SmallStruct_4b* %ss, %struct.SmallStruct_4b** %ss.addr, align 8
%0 = load %struct.SmallStruct_4b** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_4b* %0 to { i32 }*
- %2 = getelementptr { i32 }* %1, i32 0, i32 0
+ %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
%3 = load i32* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
ret void
%1 = bitcast { i40 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_5b* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 5, i32 0, i1 false)
- %3 = getelementptr { i40 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i40 }, { i40 }* %.coerce, i32 0, i32 0
%4 = load i40* %3, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i40 inreg %4)
ret void
%1 = bitcast { i48 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_6b* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
- %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
%4 = load i48* %3, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
ret void
%1 = bitcast { i56 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_7b* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 7, i32 0, i1 false)
- %3 = getelementptr { i56 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i56 }, { i56 }* %.coerce, i32 0, i32 0
%4 = load i56* %3, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i56 inreg %4)
ret void
store %struct.SmallStruct_8b* %ss, %struct.SmallStruct_8b** %ss.addr, align 8
%0 = load %struct.SmallStruct_8b** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_8b* %0 to { i64 }*
- %2 = getelementptr { i64 }* %1, i32 0, i32 0
+ %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
%3 = load i64* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
ret void
%1 = bitcast { i64, i8 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_9b* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 9, i32 0, i1 false)
- %3 = getelementptr { i64, i8 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 0
%4 = load i64* %3, align 1
- %5 = getelementptr { i64, i8 }* %.coerce, i32 0, i32 1
+ %5 = getelementptr { i64, i8 }, { i64, i8 }* %.coerce, i32 0, i32 1
%6 = load i8* %5, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %4, i8 inreg %6)
ret void
store %struct.SmallStruct_1b1s* %ss, %struct.SmallStruct_1b1s** %ss.addr, align 8
%0 = load %struct.SmallStruct_1b1s** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_1b1s* %0 to { i32 }*
- %2 = getelementptr { i32 }* %1, i32 0, i32 0
+ %2 = getelementptr { i32 }, { i32 }* %1, i32 0, i32 0
%3 = load i32* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i32 inreg %3)
ret void
store %struct.SmallStruct_1b1i* %ss, %struct.SmallStruct_1b1i** %ss.addr, align 8
%0 = load %struct.SmallStruct_1b1i** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_1b1i* %0 to { i64 }*
- %2 = getelementptr { i64 }* %1, i32 0, i32 0
+ %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
%3 = load i64* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
ret void
%1 = bitcast { i48 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_1b1s1b* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
- %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
%4 = load i48* %3, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
ret void
store %struct.SmallStruct_1s1i* %ss, %struct.SmallStruct_1s1i** %ss.addr, align 8
%0 = load %struct.SmallStruct_1s1i** %ss.addr, align 8
%1 = bitcast %struct.SmallStruct_1s1i* %0 to { i64 }*
- %2 = getelementptr { i64 }* %1, i32 0, i32 0
+ %2 = getelementptr { i64 }, { i64 }* %1, i32 0, i32 0
%3 = load i64* %2, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 inreg %3)
ret void
%1 = bitcast { i48 }* %.coerce to i8*
%2 = bitcast %struct.SmallStruct_3b1s* %0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 6, i32 0, i1 false)
- %3 = getelementptr { i48 }* %.coerce, i32 0, i32 0
+ %3 = getelementptr { i48 }, { i48 }* %.coerce, i32 0, i32 0
%4 = load i48* %3, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i48 inreg %4)
ret void
%7 = load %struct.SmallStruct_1b** %ss8.addr, align 8
%8 = load %struct.SmallStruct_1b** %ss9.addr, align 8
%9 = bitcast %struct.SmallStruct_1b* %0 to { i8 }*
- %10 = getelementptr { i8 }* %9, i32 0, i32 0
+ %10 = getelementptr { i8 }, { i8 }* %9, i32 0, i32 0
%11 = load i8* %10, align 1
%12 = bitcast %struct.SmallStruct_1b* %1 to { i8 }*
- %13 = getelementptr { i8 }* %12, i32 0, i32 0
+ %13 = getelementptr { i8 }, { i8 }* %12, i32 0, i32 0
%14 = load i8* %13, align 1
%15 = bitcast %struct.SmallStruct_1b* %2 to { i8 }*
- %16 = getelementptr { i8 }* %15, i32 0, i32 0
+ %16 = getelementptr { i8 }, { i8 }* %15, i32 0, i32 0
%17 = load i8* %16, align 1
%18 = bitcast %struct.SmallStruct_1b* %3 to { i8 }*
- %19 = getelementptr { i8 }* %18, i32 0, i32 0
+ %19 = getelementptr { i8 }, { i8 }* %18, i32 0, i32 0
%20 = load i8* %19, align 1
%21 = bitcast %struct.SmallStruct_1b* %4 to { i8 }*
- %22 = getelementptr { i8 }* %21, i32 0, i32 0
+ %22 = getelementptr { i8 }, { i8 }* %21, i32 0, i32 0
%23 = load i8* %22, align 1
%24 = bitcast %struct.SmallStruct_1b* %5 to { i8 }*
- %25 = getelementptr { i8 }* %24, i32 0, i32 0
+ %25 = getelementptr { i8 }, { i8 }* %24, i32 0, i32 0
%26 = load i8* %25, align 1
%27 = bitcast %struct.SmallStruct_1b* %6 to { i8 }*
- %28 = getelementptr { i8 }* %27, i32 0, i32 0
+ %28 = getelementptr { i8 }, { i8 }* %27, i32 0, i32 0
%29 = load i8* %28, align 1
%30 = bitcast %struct.SmallStruct_1b* %7 to { i8 }*
- %31 = getelementptr { i8 }* %30, i32 0, i32 0
+ %31 = getelementptr { i8 }, { i8 }* %30, i32 0, i32 0
%32 = load i8* %31, align 1
%33 = bitcast %struct.SmallStruct_1b* %8 to { i8 }*
- %34 = getelementptr { i8 }* %33, i32 0, i32 0
+ %34 = getelementptr { i8 }, { i8 }* %33, i32 0, i32 0
%35 = load i8* %34, align 1
call void (i8*, ...)* @varArgF_SmallStruct(i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i8 inreg %11, i8 inreg %14, i8 inreg %17, i8 inreg %20, i8 inreg %23, i8 inreg %26, i8 inreg %29, i8 inreg %32, i8 inreg %35)
ret void
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i16
- %e1 = getelementptr [3 x i16]* @hwords, i32 0, i32 1
+ %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
store volatile i16 %arg1, i16* %e1, align 2
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i16
- %e2 = getelementptr [3 x i16]* @hwords, i32 0, i32 2
+ %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
store volatile i16 %arg2, i16* %e2, align 2
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i32
- %e1 = getelementptr [3 x i32]* @words, i32 0, i32 1
+ %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
store volatile i32 %arg1, i32* %e1, align 4
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i32
- %e2 = getelementptr [3 x i32]* @words, i32 0, i32 2
+ %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
store volatile i32 %arg2, i32* %e2, align 4
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i64
- %e1 = getelementptr [3 x i64]* @dwords, i32 0, i32 1
+ %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
store volatile i64 %arg1, i64* %e1, align 8
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i64
- %e2 = getelementptr [3 x i64]* @dwords, i32 0, i32 2
+ %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
store volatile i64 %arg2, i64* %e2, align 8
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i16
- %e1 = getelementptr [3 x i16]* @hwords, i32 0, i32 1
+ %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
store volatile i16 %arg1, i16* %e1, align 2
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i16
- %e2 = getelementptr [3 x i16]* @hwords, i32 0, i32 2
+ %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
store volatile i16 %arg2, i16* %e2, align 2
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i32
- %e1 = getelementptr [3 x i32]* @words, i32 0, i32 1
+ %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
store volatile i32 %arg1, i32* %e1, align 4
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i32
- %e2 = getelementptr [3 x i32]* @words, i32 0, i32 2
+ %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
store volatile i32 %arg2, i32* %e2, align 4
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i64
- %e1 = getelementptr [3 x i64]* @dwords, i32 0, i32 1
+ %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
store volatile i64 %arg1, i64* %e1, align 8
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i64
- %e2 = getelementptr [3 x i64]* @dwords, i32 0, i32 2
+ %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
store volatile i64 %arg2, i64* %e2, align 8
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i16
- %e1 = getelementptr [3 x i16]* @hwords, i32 0, i32 1
+ %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
store volatile i16 %arg1, i16* %e1, align 2
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i16
- %e2 = getelementptr [3 x i16]* @hwords, i32 0, i32 2
+ %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
store volatile i16 %arg2, i16* %e2, align 2
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i32
- %e1 = getelementptr [3 x i32]* @words, i32 0, i32 1
+ %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
store volatile i32 %arg1, i32* %e1, align 4
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i32
- %e2 = getelementptr [3 x i32]* @words, i32 0, i32 2
+ %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
store volatile i32 %arg2, i32* %e2, align 4
call void @llvm.va_end(i8* %ap2)
call void asm sideeffect "# ANCHOR1", ""()
%arg1 = va_arg i8** %ap, i64
- %e1 = getelementptr [3 x i64]* @dwords, i32 0, i32 1
+ %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
store volatile i64 %arg1, i64* %e1, align 8
call void asm sideeffect "# ANCHOR2", ""()
%arg2 = va_arg i8** %ap, i64
- %e2 = getelementptr [3 x i64]* @dwords, i32 0, i32 2
+ %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
store volatile i64 %arg2, i64* %e2, align 8
call void @llvm.va_end(i8* %ap2)
i8 signext %g, i8 signext %h, i8 signext %i,
i8 signext %j) nounwind {
entry:
- %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x i8]* @bytes, i32 0, i32 2
+ %1 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2
store volatile i8 %b, i8* %1
- %2 = getelementptr [11 x i8]* @bytes, i32 0, i32 3
+ %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3
store volatile i8 %c, i8* %2
- %3 = getelementptr [11 x i8]* @bytes, i32 0, i32 4
+ %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4
store volatile i8 %d, i8* %3
- %4 = getelementptr [11 x i8]* @bytes, i32 0, i32 5
+ %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5
store volatile i8 %e, i8* %4
- %5 = getelementptr [11 x i8]* @bytes, i32 0, i32 6
+ %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6
store volatile i8 %f, i8* %5
- %6 = getelementptr [11 x i8]* @bytes, i32 0, i32 7
+ %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7
store volatile i8 %g, i8* %6
- %7 = getelementptr [11 x i8]* @bytes, i32 0, i32 8
+ %7 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 8
store volatile i8 %h, i8* %7
- %8 = getelementptr [11 x i8]* @bytes, i32 0, i32 9
+ %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 9
store volatile i8 %i, i8* %8
- %9 = getelementptr [11 x i8]* @bytes, i32 0, i32 10
+ %9 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 10
store volatile i8 %j, i8* %9
ret void
}
i8 signext %d, i8 signext %e, i8 signext %f,
i8 signext %g, i64 signext %i, i8 signext %j) nounwind {
entry:
- %0 = getelementptr [11 x i8]* @bytes, i32 0, i32 1
+ %0 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 1
store volatile i8 %a, i8* %0
- %1 = getelementptr [11 x i64]* @dwords, i32 0, i32 1
+ %1 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 1
store volatile i64 %b, i64* %1
- %2 = getelementptr [11 x i8]* @bytes, i32 0, i32 2
+ %2 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 2
store volatile i8 %c, i8* %2
- %3 = getelementptr [11 x i8]* @bytes, i32 0, i32 3
+ %3 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 3
store volatile i8 %d, i8* %3
- %4 = getelementptr [11 x i8]* @bytes, i32 0, i32 4
+ %4 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 4
store volatile i8 %e, i8* %4
- %5 = getelementptr [11 x i8]* @bytes, i32 0, i32 5
+ %5 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 5
store volatile i8 %f, i8* %5
- %6 = getelementptr [11 x i8]* @bytes, i32 0, i32 6
+ %6 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 6
store volatile i8 %g, i8* %6
- %7 = getelementptr [11 x i64]* @dwords, i32 0, i32 2
+ %7 = getelementptr [11 x i64], [11 x i64]* @dwords, i32 0, i32 2
store volatile i64 %i, i64* %7
- %8 = getelementptr [11 x i8]* @bytes, i32 0, i32 7
+ %8 = getelementptr [11 x i8], [11 x i8]* @bytes, i32 0, i32 7
store volatile i8 %j, i8* %8
ret void
}
define void @getSubImagesLuma(%struct.StorablePicture* nocapture %s) #0 {
entry:
- %size_y = getelementptr inbounds %struct.StorablePicture* %s, i32 0, i32 1
+ %size_y = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %s, i32 0, i32 1
%0 = load i32* %size_y, align 4
%sub = add nsw i32 %0, -1
%add5 = add nsw i32 %0, 20
define zeroext i8 @test_lbux(i8* nocapture %b, i32 %i) {
entry:
- %add.ptr = getelementptr inbounds i8* %b, i32 %i
+ %add.ptr = getelementptr inbounds i8, i8* %b, i32 %i
%0 = load i8* %add.ptr, align 1
ret i8 %0
}
define signext i16 @test_lhx(i16* nocapture %b, i32 %i) {
entry:
- %add.ptr = getelementptr inbounds i16* %b, i32 %i
+ %add.ptr = getelementptr inbounds i16, i16* %b, i32 %i
%0 = load i16* %add.ptr, align 2
ret i16 %0
}
define i32 @test_lwx(i32* nocapture %b, i32 %i) {
entry:
- %add.ptr = getelementptr inbounds i32* %b, i32 %i
+ %add.ptr = getelementptr inbounds i32, i32* %b, i32 %i
%0 = load i32* %add.ptr, align 4
ret i32 %0
}
; CHECK-NACL-NOT: lwxc1
- %arrayidx = getelementptr inbounds float* %b, i32 %o
+ %arrayidx = getelementptr inbounds float, float* %b, i32 %o
%0 = load float* %arrayidx, align 4
ret float %0
}
; CHECK-NACL-NOT: ldxc1
- %arrayidx = getelementptr inbounds double* %b, i32 %o
+ %arrayidx = getelementptr inbounds double, double* %b, i32 %o
%0 = load double* %arrayidx, align 8
ret double %0
}
; luxc1 was removed in MIPS64r6
; MIPS64R6-NOT: luxc1
- %arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
%0 = load float* %arrayidx1, align 1
ret float %0
}
; CHECK-NACL-NOT: swxc1
%0 = load float* @gf, align 4
- %arrayidx = getelementptr inbounds float* %b, i32 %o
+ %arrayidx = getelementptr inbounds float, float* %b, i32 %o
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK-NACL-NOT: sdxc1
%0 = load double* @gd, align 8
- %arrayidx = getelementptr inbounds double* %b, i32 %o
+ %arrayidx = getelementptr inbounds double, double* %b, i32 %o
store double %0, double* %arrayidx, align 8
ret void
}
; MIPS64R6-NOT: suxc1
%0 = load float* @gf, align 4
- %arrayidx1 = getelementptr inbounds [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S], [4 x %struct.S]* @s, i32 0, i32 %b, i32 0, i32 %c
store float %0, float* %arrayidx1, align 1
ret void
}
; MIPS64R6-NOT: luxc1
- %arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
%0 = load double* %arrayidx1, align 1
ret double %0
}
; MIPS64R6-NOT: suxc1
%0 = load double* @gd, align 8
- %arrayidx1 = getelementptr inbounds [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
+ %arrayidx1 = getelementptr inbounds [4 x %struct.S2], [4 x %struct.S2]* @s2, i32 0, i32 %b, i32 0, i32 %c
store double %0, double* %arrayidx1, align 1
ret void
}
; CHECK: sw $fp
; CHECK: lw $fp
%0 = load i32* %b, align 4
- %arrayidx.1 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx.1 = getelementptr inbounds i32, i32* %b, i32 1
%1 = load i32* %arrayidx.1, align 4
%add.1 = add nsw i32 %1, 1
- %arrayidx.2 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx.2 = getelementptr inbounds i32, i32* %b, i32 2
%2 = load i32* %arrayidx.2, align 4
%add.2 = add nsw i32 %2, 2
- %arrayidx.3 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx.3 = getelementptr inbounds i32, i32* %b, i32 3
%3 = load i32* %arrayidx.3, align 4
%add.3 = add nsw i32 %3, 3
- %arrayidx.4 = getelementptr inbounds i32* %b, i32 4
+ %arrayidx.4 = getelementptr inbounds i32, i32* %b, i32 4
%4 = load i32* %arrayidx.4, align 4
%add.4 = add nsw i32 %4, 4
- %arrayidx.5 = getelementptr inbounds i32* %b, i32 5
+ %arrayidx.5 = getelementptr inbounds i32, i32* %b, i32 5
%5 = load i32* %arrayidx.5, align 4
%add.5 = add nsw i32 %5, 5
- %arrayidx.6 = getelementptr inbounds i32* %b, i32 6
+ %arrayidx.6 = getelementptr inbounds i32, i32* %b, i32 6
%6 = load i32* %arrayidx.6, align 4
%add.6 = add nsw i32 %6, 6
- %arrayidx.7 = getelementptr inbounds i32* %b, i32 7
+ %arrayidx.7 = getelementptr inbounds i32, i32* %b, i32 7
%7 = load i32* %arrayidx.7, align 4
%add.7 = add nsw i32 %7, 7
call void @foo2(i32 %0, i32 %add.1, i32 %add.2, i32 %add.3, i32 %add.4, i32 %add.5, i32 %add.6, i32 %add.7) nounwind
define { float, float } @scv() #0 {
entry:
%retval = alloca { float, float }, align 4
- %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float 5.000000e+00, float* %real
store float 9.900000e+01, float* %imag
%0 = load { float, float }* %retval
define { double, double } @dcv() #0 {
entry:
%retval = alloca { double, double }, align 8
- %real = getelementptr inbounds { double, double }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { double, double }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
store double 0x416BC8B0A0000000, double* %real
store double 0x41CDCCB763800000, double* %imag
%0 = load { double, double }* %retval
define void @f() nounwind {
entry:
%a1 = alloca [1073741824 x i8], align 1
- %arrayidx = getelementptr inbounds [1073741824 x i8]* %a1, i32 0, i32 1048676
+ %arrayidx = getelementptr inbounds [1073741824 x i8], [1073741824 x i8]* %a1, i32 0, i32 1048676
call void @f2(i8* %arrayidx) nounwind
ret void
}
; 64: sd $ra, 24($[[R1]])
%agg.tmp = alloca %struct.S1, align 1
- %tmp = getelementptr inbounds %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
+ %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i32 1, i1 false)
call void @f2(%struct.S1* byval %agg.tmp) nounwind
ret void
entry:
; CHECK-NOT: call16(memcpy
- %arraydecay = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1, i32 0
+ %arraydecay = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 0
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %arraydecay, i8* getelementptr inbounds ([31 x i8]* @.str, i32 0, i32 0), i32 31, i32 1, i1 false)
- %arrayidx = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1, i32 40
+ %arrayidx = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1, i32 40
store i8 %n, i8* %arrayidx, align 1
ret void
}
%i.0 = phi i32 [ 0, %entry ], [ %inc, %L1 ]
%puts = tail call i32 @puts(i8* getelementptr inbounds ([2 x i8]* @str, i32 0, i32 0))
%inc = add i32 %i.0, 1
- %arrayidx = getelementptr inbounds [3 x i8*]* @main.L, i32 0, i32 %i.0
+ %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @main.L, i32 0, i32 %i.0
%0 = load i8** %arrayidx, align 4, !tbaa !1
indirectbr i8* %0, [label %L1, label %L2]
@bar_ary = common global [4 x %struct.barstruct] zeroinitializer, align 4
define float* @spooky(i32 signext %i) #0 {
- %safe = getelementptr inbounds [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1
+ %safe = getelementptr inbounds [4 x %struct.barstruct], [4 x %struct.barstruct]* @bar_ary, i32 0, i32 %i, i32 1
store float 1.420000e+02, float* %safe, align 4, !tbaa !1
ret float* %safe
}
%2 = load i32** %p.addr, align 4
store i32 %add, i32* %2, align 4
%3 = load i32** %p.addr, align 4
- %add.ptr = getelementptr inbounds i32* %3, i32 1
+ %add.ptr = getelementptr inbounds i32, i32* %3, i32 1
%4 = load i32* %add.ptr, align 4
%add1 = add nsw i32 7, %4
%5 = load i32** %p.addr, align 4
- %add.ptr2 = getelementptr inbounds i32* %5, i32 1
+ %add.ptr2 = getelementptr inbounds i32, i32* %5, i32 1
store i32 %add1, i32* %add.ptr2, align 4
ret void
}
%retval = alloca { float, float }, align 4
%cx.real = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 0)
%cx.imag = load float* getelementptr inbounds ({ float, float }* @cx, i32 0, i32 1)
- %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float %cx.real, float* %real
store float %cx.imag, float* %imag
%0 = load { float, float }* %retval
%retval = alloca { double, double }, align 8
%dcx.real = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 0)
%dcx.imag = load double* getelementptr inbounds ({ double, double }* @dcx, i32 0, i32 1)
- %real = getelementptr inbounds { double, double }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { double, double }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { double, double }, { double, double }* %retval, i32 0, i32 1
store double %dcx.real, double* %real
store double %dcx.imag, double* %imag
%0 = load { double, double }* %retval
%1 = phi i8 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%from.addr.09 = phi i8* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i8* %from.addr.09, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %from.addr.09, i32 1
%2 = load i8* %from.addr.09, align 1
%conv27 = zext i8 %2 to i32
%conv36 = zext i8 %1 to i32
%1 = phi i16 [ %.pre, %for.body.lr.ph ], [ %conv4, %for.body ]
%i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%from.addr.09 = phi i16* [ %from, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
- %incdec.ptr = getelementptr inbounds i16* %from.addr.09, i32 1
+ %incdec.ptr = getelementptr inbounds i16, i16* %from.addr.09, i32 1
%2 = load i16* %from.addr.09, align 2
%conv27 = zext i16 %2 to i32
%conv36 = zext i16 %1 to i32
define double @test_ldxc1(double* nocapture readonly %a, i32 %i) {
entry:
- %arrayidx = getelementptr inbounds double* %a, i32 %i
+ %arrayidx = getelementptr inbounds double, double* %a, i32 %i
%0 = load double* %arrayidx, align 8
ret double %0
}
define void @test_sdxc1(double %b, double* nocapture %a, i32 %i) {
entry:
- %arrayidx = getelementptr inbounds double* %a, i32 %i
+ %arrayidx = getelementptr inbounds double, double* %a, i32 %i
store double %b, double* %arrayidx, align 8
ret void
}
%1 = alloca [2 x <8 x i16>]
%2 = bitcast [2 x <8 x i16>]* %1 to i8*
- %3 = getelementptr i8* %2, i32 1
+ %3 = getelementptr i8, i8* %2, i32 1
%4 = bitcast i8* %3 to [2 x <8 x i16>]*
- %5 = getelementptr [2 x <8 x i16>]* %4, i32 0, i32 0
+ %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0
%6 = load volatile <8 x i16>* %5
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
%1 = alloca [2 x <4 x i32>]
%2 = bitcast [2 x <4 x i32>]* %1 to i8*
- %3 = getelementptr i8* %2, i32 1
+ %3 = getelementptr i8, i8* %2, i32 1
%4 = bitcast i8* %3 to [2 x <4 x i32>]*
- %5 = getelementptr [2 x <4 x i32>]* %4, i32 0, i32 0
+ %5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0
%6 = load volatile <4 x i32>* %5
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
%1 = alloca [2 x <2 x i64>]
%2 = bitcast [2 x <2 x i64>]* %1 to i8*
- %3 = getelementptr i8* %2, i32 1
+ %3 = getelementptr i8, i8* %2, i32 1
%4 = bitcast i8* %3 to [2 x <2 x i64>]*
- %5 = getelementptr [2 x <2 x i64>]* %4, i32 0, i32 0
+ %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0
%6 = load volatile <2 x i64>* %5
; MIPS32-AE: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
define i32 @test_i8(<16 x i8>* %p0, <16 x i8>* %q1) nounwind {
entry:
- %p1 = getelementptr <16 x i8>* %p0, i32 1
- %p2 = getelementptr <16 x i8>* %p0, i32 2
- %p3 = getelementptr <16 x i8>* %p0, i32 3
- %p4 = getelementptr <16 x i8>* %p0, i32 4
- %p5 = getelementptr <16 x i8>* %p0, i32 5
- %p6 = getelementptr <16 x i8>* %p0, i32 6
- %p7 = getelementptr <16 x i8>* %p0, i32 7
- %p8 = getelementptr <16 x i8>* %p0, i32 8
- %p9 = getelementptr <16 x i8>* %p0, i32 9
- %p10 = getelementptr <16 x i8>* %p0, i32 10
- %p11 = getelementptr <16 x i8>* %p0, i32 11
- %p12 = getelementptr <16 x i8>* %p0, i32 12
- %p13 = getelementptr <16 x i8>* %p0, i32 13
- %p14 = getelementptr <16 x i8>* %p0, i32 14
- %p15 = getelementptr <16 x i8>* %p0, i32 15
- %p16 = getelementptr <16 x i8>* %p0, i32 16
- %p17 = getelementptr <16 x i8>* %p0, i32 17
- %p18 = getelementptr <16 x i8>* %p0, i32 18
- %p19 = getelementptr <16 x i8>* %p0, i32 19
- %p20 = getelementptr <16 x i8>* %p0, i32 20
- %p21 = getelementptr <16 x i8>* %p0, i32 21
- %p22 = getelementptr <16 x i8>* %p0, i32 22
- %p23 = getelementptr <16 x i8>* %p0, i32 23
- %p24 = getelementptr <16 x i8>* %p0, i32 24
- %p25 = getelementptr <16 x i8>* %p0, i32 25
- %p26 = getelementptr <16 x i8>* %p0, i32 26
- %p27 = getelementptr <16 x i8>* %p0, i32 27
- %p28 = getelementptr <16 x i8>* %p0, i32 28
- %p29 = getelementptr <16 x i8>* %p0, i32 29
- %p30 = getelementptr <16 x i8>* %p0, i32 30
- %p31 = getelementptr <16 x i8>* %p0, i32 31
- %p32 = getelementptr <16 x i8>* %p0, i32 32
- %p33 = getelementptr <16 x i8>* %p0, i32 33
+ %p1 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 1
+ %p2 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 2
+ %p3 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 3
+ %p4 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 4
+ %p5 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 5
+ %p6 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 6
+ %p7 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 7
+ %p8 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 8
+ %p9 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 9
+ %p10 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 10
+ %p11 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 11
+ %p12 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 12
+ %p13 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 13
+ %p14 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 14
+ %p15 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 15
+ %p16 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 16
+ %p17 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 17
+ %p18 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 18
+ %p19 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 19
+ %p20 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 20
+ %p21 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 21
+ %p22 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 22
+ %p23 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 23
+ %p24 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 24
+ %p25 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 25
+ %p26 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 26
+ %p27 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 27
+ %p28 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 28
+ %p29 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 29
+ %p30 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 30
+ %p31 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 31
+ %p32 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 32
+ %p33 = getelementptr <16 x i8>, <16 x i8>* %p0, i32 33
%0 = load <16 x i8>* %p0, align 16
%1 = load <16 x i8>* %p1, align 16
%2 = load <16 x i8>* %p2, align 16
define i32 @test_i16(<8 x i16>* %p0, <8 x i16>* %q1) nounwind {
entry:
- %p1 = getelementptr <8 x i16>* %p0, i32 1
- %p2 = getelementptr <8 x i16>* %p0, i32 2
- %p3 = getelementptr <8 x i16>* %p0, i32 3
- %p4 = getelementptr <8 x i16>* %p0, i32 4
- %p5 = getelementptr <8 x i16>* %p0, i32 5
- %p6 = getelementptr <8 x i16>* %p0, i32 6
- %p7 = getelementptr <8 x i16>* %p0, i32 7
- %p8 = getelementptr <8 x i16>* %p0, i32 8
- %p9 = getelementptr <8 x i16>* %p0, i32 9
- %p10 = getelementptr <8 x i16>* %p0, i32 10
- %p11 = getelementptr <8 x i16>* %p0, i32 11
- %p12 = getelementptr <8 x i16>* %p0, i32 12
- %p13 = getelementptr <8 x i16>* %p0, i32 13
- %p14 = getelementptr <8 x i16>* %p0, i32 14
- %p15 = getelementptr <8 x i16>* %p0, i32 15
- %p16 = getelementptr <8 x i16>* %p0, i32 16
- %p17 = getelementptr <8 x i16>* %p0, i32 17
- %p18 = getelementptr <8 x i16>* %p0, i32 18
- %p19 = getelementptr <8 x i16>* %p0, i32 19
- %p20 = getelementptr <8 x i16>* %p0, i32 20
- %p21 = getelementptr <8 x i16>* %p0, i32 21
- %p22 = getelementptr <8 x i16>* %p0, i32 22
- %p23 = getelementptr <8 x i16>* %p0, i32 23
- %p24 = getelementptr <8 x i16>* %p0, i32 24
- %p25 = getelementptr <8 x i16>* %p0, i32 25
- %p26 = getelementptr <8 x i16>* %p0, i32 26
- %p27 = getelementptr <8 x i16>* %p0, i32 27
- %p28 = getelementptr <8 x i16>* %p0, i32 28
- %p29 = getelementptr <8 x i16>* %p0, i32 29
- %p30 = getelementptr <8 x i16>* %p0, i32 30
- %p31 = getelementptr <8 x i16>* %p0, i32 31
- %p32 = getelementptr <8 x i16>* %p0, i32 32
- %p33 = getelementptr <8 x i16>* %p0, i32 33
+ %p1 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 1
+ %p2 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 2
+ %p3 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 3
+ %p4 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 4
+ %p5 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 5
+ %p6 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 6
+ %p7 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 7
+ %p8 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 8
+ %p9 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 9
+ %p10 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 10
+ %p11 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 11
+ %p12 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 12
+ %p13 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 13
+ %p14 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 14
+ %p15 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 15
+ %p16 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 16
+ %p17 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 17
+ %p18 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 18
+ %p19 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 19
+ %p20 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 20
+ %p21 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 21
+ %p22 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 22
+ %p23 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 23
+ %p24 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 24
+ %p25 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 25
+ %p26 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 26
+ %p27 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 27
+ %p28 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 28
+ %p29 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 29
+ %p30 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 30
+ %p31 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 31
+ %p32 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 32
+ %p33 = getelementptr <8 x i16>, <8 x i16>* %p0, i32 33
%0 = load <8 x i16>* %p0, align 16
%1 = load <8 x i16>* %p1, align 16
%2 = load <8 x i16>* %p2, align 16
define i32 @test_i32(<4 x i32>* %p0, <4 x i32>* %q1) nounwind {
entry:
- %p1 = getelementptr <4 x i32>* %p0, i32 1
- %p2 = getelementptr <4 x i32>* %p0, i32 2
- %p3 = getelementptr <4 x i32>* %p0, i32 3
- %p4 = getelementptr <4 x i32>* %p0, i32 4
- %p5 = getelementptr <4 x i32>* %p0, i32 5
- %p6 = getelementptr <4 x i32>* %p0, i32 6
- %p7 = getelementptr <4 x i32>* %p0, i32 7
- %p8 = getelementptr <4 x i32>* %p0, i32 8
- %p9 = getelementptr <4 x i32>* %p0, i32 9
- %p10 = getelementptr <4 x i32>* %p0, i32 10
- %p11 = getelementptr <4 x i32>* %p0, i32 11
- %p12 = getelementptr <4 x i32>* %p0, i32 12
- %p13 = getelementptr <4 x i32>* %p0, i32 13
- %p14 = getelementptr <4 x i32>* %p0, i32 14
- %p15 = getelementptr <4 x i32>* %p0, i32 15
- %p16 = getelementptr <4 x i32>* %p0, i32 16
- %p17 = getelementptr <4 x i32>* %p0, i32 17
- %p18 = getelementptr <4 x i32>* %p0, i32 18
- %p19 = getelementptr <4 x i32>* %p0, i32 19
- %p20 = getelementptr <4 x i32>* %p0, i32 20
- %p21 = getelementptr <4 x i32>* %p0, i32 21
- %p22 = getelementptr <4 x i32>* %p0, i32 22
- %p23 = getelementptr <4 x i32>* %p0, i32 23
- %p24 = getelementptr <4 x i32>* %p0, i32 24
- %p25 = getelementptr <4 x i32>* %p0, i32 25
- %p26 = getelementptr <4 x i32>* %p0, i32 26
- %p27 = getelementptr <4 x i32>* %p0, i32 27
- %p28 = getelementptr <4 x i32>* %p0, i32 28
- %p29 = getelementptr <4 x i32>* %p0, i32 29
- %p30 = getelementptr <4 x i32>* %p0, i32 30
- %p31 = getelementptr <4 x i32>* %p0, i32 31
- %p32 = getelementptr <4 x i32>* %p0, i32 32
- %p33 = getelementptr <4 x i32>* %p0, i32 33
+ %p1 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 1
+ %p2 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 2
+ %p3 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 3
+ %p4 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 4
+ %p5 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 5
+ %p6 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 6
+ %p7 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 7
+ %p8 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 8
+ %p9 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 9
+ %p10 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 10
+ %p11 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 11
+ %p12 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 12
+ %p13 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 13
+ %p14 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 14
+ %p15 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 15
+ %p16 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 16
+ %p17 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 17
+ %p18 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 18
+ %p19 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 19
+ %p20 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 20
+ %p21 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 21
+ %p22 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 22
+ %p23 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 23
+ %p24 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 24
+ %p25 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 25
+ %p26 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 26
+ %p27 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 27
+ %p28 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 28
+ %p29 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 29
+ %p30 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 30
+ %p31 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 31
+ %p32 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 32
+ %p33 = getelementptr <4 x i32>, <4 x i32>* %p0, i32 33
%0 = load <4 x i32>* %p0, align 16
%1 = load <4 x i32>* %p1, align 16
%2 = load <4 x i32>* %p2, align 16
define i32 @test_i64(<2 x i64>* %p0, <2 x i64>* %q1) nounwind {
entry:
- %p1 = getelementptr <2 x i64>* %p0, i32 1
- %p2 = getelementptr <2 x i64>* %p0, i32 2
- %p3 = getelementptr <2 x i64>* %p0, i32 3
- %p4 = getelementptr <2 x i64>* %p0, i32 4
- %p5 = getelementptr <2 x i64>* %p0, i32 5
- %p6 = getelementptr <2 x i64>* %p0, i32 6
- %p7 = getelementptr <2 x i64>* %p0, i32 7
- %p8 = getelementptr <2 x i64>* %p0, i32 8
- %p9 = getelementptr <2 x i64>* %p0, i32 9
- %p10 = getelementptr <2 x i64>* %p0, i32 10
- %p11 = getelementptr <2 x i64>* %p0, i32 11
- %p12 = getelementptr <2 x i64>* %p0, i32 12
- %p13 = getelementptr <2 x i64>* %p0, i32 13
- %p14 = getelementptr <2 x i64>* %p0, i32 14
- %p15 = getelementptr <2 x i64>* %p0, i32 15
- %p16 = getelementptr <2 x i64>* %p0, i32 16
- %p17 = getelementptr <2 x i64>* %p0, i32 17
- %p18 = getelementptr <2 x i64>* %p0, i32 18
- %p19 = getelementptr <2 x i64>* %p0, i32 19
- %p20 = getelementptr <2 x i64>* %p0, i32 20
- %p21 = getelementptr <2 x i64>* %p0, i32 21
- %p22 = getelementptr <2 x i64>* %p0, i32 22
- %p23 = getelementptr <2 x i64>* %p0, i32 23
- %p24 = getelementptr <2 x i64>* %p0, i32 24
- %p25 = getelementptr <2 x i64>* %p0, i32 25
- %p26 = getelementptr <2 x i64>* %p0, i32 26
- %p27 = getelementptr <2 x i64>* %p0, i32 27
- %p28 = getelementptr <2 x i64>* %p0, i32 28
- %p29 = getelementptr <2 x i64>* %p0, i32 29
- %p30 = getelementptr <2 x i64>* %p0, i32 30
- %p31 = getelementptr <2 x i64>* %p0, i32 31
- %p32 = getelementptr <2 x i64>* %p0, i32 32
- %p33 = getelementptr <2 x i64>* %p0, i32 33
+ %p1 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 1
+ %p2 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 2
+ %p3 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 3
+ %p4 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 4
+ %p5 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 5
+ %p6 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 6
+ %p7 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 7
+ %p8 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 8
+ %p9 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 9
+ %p10 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 10
+ %p11 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 11
+ %p12 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 12
+ %p13 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 13
+ %p14 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 14
+ %p15 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 15
+ %p16 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 16
+ %p17 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 17
+ %p18 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 18
+ %p19 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 19
+ %p20 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 20
+ %p21 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 21
+ %p22 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 22
+ %p23 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 23
+ %p24 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 24
+ %p25 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 25
+ %p26 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 26
+ %p27 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 27
+ %p28 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 28
+ %p29 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 29
+ %p30 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 30
+ %p31 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 31
+ %p32 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 32
+ %p33 = getelementptr <2 x i64>, <2 x i64>* %p0, i32 33
%0 = load <2 x i64>* %p0, align 16
%1 = load <2 x i64>* %p1, align 16
%2 = load <2 x i64>* %p2, align 16
define i32 @test2(i32 %i) {
entry:
- %elementptr = getelementptr inbounds [2 x i8*]* @bb_array, i32 0, i32 %i
+ %elementptr = getelementptr inbounds [2 x i8*], [2 x i8*]* @bb_array, i32 0, i32 %i
%0 = load i8** %elementptr, align 4
indirectbr i8* %0, [label %bb1, label %bb2]
%agg.tmp10 = alloca %struct.S3, align 4
call void @callee1(float 2.000000e+01, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
call void @callee2(%struct.S2* byval @f1.s2) nounwind
- %tmp11 = getelementptr inbounds %struct.S3* %agg.tmp10, i32 0, i32 0
+ %tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0
store i8 11, i8* %tmp11, align 4
call void @callee3(float 2.100000e+01, %struct.S3* byval %agg.tmp10, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind
ret void
; CHECK: sw $[[R3]], 16($sp)
; CHECK: mfc1 $6, $f[[F0]]
- %i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
+ %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
%tmp = load i32* %i2, align 4
- %d = getelementptr inbounds %struct.S1* %s1, i32 0, i32 4
+ %d = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 4
%tmp1 = load double* %d, align 8
- %ll = getelementptr inbounds %struct.S1* %s1, i32 0, i32 3
+ %ll = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 3
%tmp2 = load i64* %ll, align 8
- %i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
+ %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
%tmp3 = load i32* %i, align 4
- %s = getelementptr inbounds %struct.S1* %s1, i32 0, i32 1
+ %s = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 1
%tmp4 = load i16* %s, align 2
- %c = getelementptr inbounds %struct.S1* %s1, i32 0, i32 0
+ %c = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 0
%tmp5 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double %tmp1, i64 %tmp2, i32 %tmp3, i16 signext %tmp4, i8 signext %tmp5, float %f) nounwind
ret void
; CHECK: lw $[[R0:[0-9]+]], 60($sp)
; CHECK: sw $[[R0]], 24($sp)
- %arrayidx = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 0
+ %arrayidx = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 0
%tmp = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds %struct.S2* %s2, i32 0, i32 0, i32 3
+ %arrayidx2 = getelementptr inbounds %struct.S2, %struct.S2* %s2, i32 0, i32 0, i32 3
%tmp3 = load i32* %arrayidx2, align 4
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp3, i16 signext 4, i8 signext 5, float 6.000000e+00) nounwind
ret void
; CHECK: sw $[[R0]], 32($sp)
; CHECK: sw $[[R1]], 24($sp)
- %i = getelementptr inbounds %struct.S1* %s1, i32 0, i32 2
+ %i = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 2
%tmp = load i32* %i, align 4
- %i2 = getelementptr inbounds %struct.S1* %s1, i32 0, i32 5
+ %i2 = getelementptr inbounds %struct.S1, %struct.S1* %s1, i32 0, i32 5
%tmp1 = load i32* %i2, align 4
- %c = getelementptr inbounds %struct.S3* %s3, i32 0, i32 0
+ %c = getelementptr inbounds %struct.S3, %struct.S3* %s3, i32 0, i32 0
%tmp2 = load i8* %c, align 1
tail call void @callee4(i32 %tmp, double 2.000000e+00, i64 3, i32 %tmp1, i16 signext 4, i8 signext %tmp2, float 6.000000e+00) nounwind
ret void
; <label>:9 ; preds = %8
%10 = and i32 %b8, 1
%11 = shl nuw nsw i32 %10, 3
- %12 = getelementptr inbounds %struct.Slice* null, i32 0, i32 9
+ %12 = getelementptr inbounds %struct.Slice, %struct.Slice* null, i32 0, i32 9
br i1 undef, label %.preheader, label %.preheader11
.preheader11: ; preds = %21, %9
br label %15
; <label>:15 ; preds = %14, %13
- %16 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %16 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
%17 = load i32* %16, align 4
- %18 = getelementptr inbounds %struct.datapartition* null, i32 %17, i32 2
+ %18 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %17, i32 2
%19 = load i32 (%struct.syntaxelement*, %struct.img_par*, %struct.datapartition*)** %18, align 4
%20 = call i32 %19(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* undef)
br i1 false, label %.loopexit, label %21
; <label>:21 ; preds = %15
%22 = add i32 %coef_ctr.013, 1
%23 = add i32 %22, 0
- %24 = getelementptr inbounds [2 x i8]* %7, i32 %23, i32 0
+ %24 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %23, i32 0
%25 = add nsw i32 0, %11
- %26 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
+ %26 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %25
store i32 0, i32* %26, align 4
%27 = add nsw i32 %k.014, 1
%28 = icmp slt i32 %27, 65
br label %31
; <label>:31 ; preds = %30, %29
- %32 = getelementptr inbounds [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
+ %32 = getelementptr inbounds [0 x [20 x i32]], [0 x [20 x i32]]* @assignSE2partition, i32 0, i32 %1, i32 undef
%33 = load i32* %32, align 4
- %34 = getelementptr inbounds %struct.datapartition* null, i32 %33
+ %34 = getelementptr inbounds %struct.datapartition, %struct.datapartition* null, i32 %33
%35 = call i32 undef(%struct.syntaxelement* undef, %struct.img_par* %img, %struct.datapartition* %34)
br i1 false, label %.loopexit, label %36
%37 = load i32* undef, align 4
%38 = add i32 %coef_ctr.29, 1
%39 = add i32 %38, %37
- %40 = getelementptr inbounds [2 x i8]* %7, i32 %39, i32 0
+ %40 = getelementptr inbounds [2 x i8], [2 x i8]* %7, i32 %39, i32 0
%41 = load i8* %40, align 1
%42 = zext i8 %41 to i32
%43 = add nsw i32 %42, %11
- %44 = getelementptr inbounds %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
+ %44 = getelementptr inbounds %struct.img_par, %struct.img_par* %img, i32 0, i32 27, i32 undef, i32 %43
store i32 0, i32* %44, align 4
%45 = add nsw i32 %k.110, 1
%46 = icmp slt i32 %45, 65
define void @foo1() #0 {
entry:
%c = alloca [10 x i8], align 1
- %arraydecay = getelementptr inbounds [10 x i8]* %c, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0
call void @x(i8* %arraydecay)
- %arraydecay1 = getelementptr inbounds [10 x i8]* %c, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [10 x i8], [10 x i8]* %c, i32 0, i32 0
call void @x(i8* %arraydecay1)
ret void
; CHECK: .ent foo1
define void @foo2() #0 {
entry:
%c = alloca [150 x i8], align 1
- %arraydecay = getelementptr inbounds [150 x i8]* %c, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0
call void @x(i8* %arraydecay)
- %arraydecay1 = getelementptr inbounds [150 x i8]* %c, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [150 x i8], [150 x i8]* %c, i32 0, i32 0
call void @x(i8* %arraydecay1)
ret void
; CHECK: .ent foo2
%b = alloca [16 x i32], align 4
%0 = bitcast [16 x i32]* %b to i8*
call void @llvm.lifetime.start(i64 64, i8* %0)
- %arraydecay = getelementptr inbounds [16 x i32]* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i32], [16 x i32]* %b, i32 0, i32 0
br label %for.body
for.body: ; preds = %for.body, %entry
%i.05 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%v.04 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%1 = load i32** @g1, align 4
- %arrayidx = getelementptr inbounds i32* %1, i32 %i.05
+ %arrayidx = getelementptr inbounds i32, i32* %1, i32 %i.05
%2 = load i32* %arrayidx, align 4
%call = call i32 @foo2(i32 %2, i32* %arraydecay)
%add = add nsw i32 %call, %v.04
entry:
; CHECK: swl $zero
; CHECK: swr $zero
- %x = getelementptr inbounds %struct.unaligned* %p, i32 0, i32 0
+ %x = getelementptr inbounds %struct.unaligned, %struct.unaligned* %p, i32 0, i32 0
store i32 0, i32* %x, align 1
ret void
}
; PTX: bar.sync 0;
; gep cast; load
- %5 = getelementptr inbounds [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
+ %5 = getelementptr inbounds [10 x float], [10 x float]* addrspacecast ([10 x float] addrspace(3)* @array to [10 x float]*), i32 0, i32 5
%6 = load float* %5, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [array+20];
; gep cast; store
; cast; gep; load
%7 = addrspacecast [10 x float] addrspace(3)* @array to [10 x float]*
- %8 = getelementptr inbounds [10 x float]* %7, i32 0, i32 %i
+ %8 = getelementptr inbounds [10 x float], [10 x float]* %7, i32 0, i32 %i
%9 = load float* %8, align 4
; PTX: ld.shared.f32 %f{{[0-9]+}}, [%{{(r|rl|rd)[0-9]+}}];
; cast; gep; store
; CHECK-LABEL @_Z22TakesStruct1SPi
; CHECK: bitcast %struct.S* %input to i8*
; CHECK: call i8 addrspace(101)* @llvm.nvvm.ptr.gen.to.param.p101i8.p0i8
- %b = getelementptr inbounds %struct.S* %input, i64 0, i32 1
+ %b = getelementptr inbounds %struct.S, %struct.S* %input, i64 0, i32 1
%0 = load i32* %b, align 4
store i32 %0, i32* %output, align 4
ret void
%ret_vec.sroa.8.i.val = load float* %ret_vec.sroa.8.i, align 4
%11 = select i1 %10, float 0.000000e+00, float %ret_vec.sroa.8.i.val
call void @llvm.lifetime.end(i64 4, i8* %6)
- %12 = getelementptr inbounds %class.float3* %dst, i64 %5, i32 0
+ %12 = getelementptr inbounds %class.float3, %class.float3* %dst, i64 %5, i32 0
store float 0.000000e+00, float* %12, align 4
- %13 = getelementptr inbounds %class.float3* %dst, i64 %5, i32 1
+ %13 = getelementptr inbounds %class.float3, %class.float3* %dst, i64 %5, i32 1
store float %11, float* %13, align 4
- %14 = getelementptr inbounds %class.float3* %dst, i64 %5, i32 2
+ %14 = getelementptr inbounds %class.float3, %class.float3* %dst, i64 %5, i32 2
store float 0.000000e+00, float* %14, align 4
ret void
}
%0 = load float* %a, align 4
%1 = bitcast [16 x i8]* %buf to float*
store float %0, float* %1, align 4
- %arrayidx2 = getelementptr inbounds float* %a, i64 1
+ %arrayidx2 = getelementptr inbounds float, float* %a, i64 1
%2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 1
+ %arrayidx3 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 1
%3 = bitcast i8* %arrayidx3 to float*
store float %2, float* %3, align 4
- %arrayidx4 = getelementptr inbounds float* %a, i64 2
+ %arrayidx4 = getelementptr inbounds float, float* %a, i64 2
%4 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 2
+ %arrayidx5 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 2
%5 = bitcast i8* %arrayidx5 to float*
store float %4, float* %5, align 4
- %arrayidx6 = getelementptr inbounds float* %a, i64 3
+ %arrayidx6 = getelementptr inbounds float, float* %a, i64 3
%6 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 3
+ %arrayidx7 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 3
%7 = bitcast i8* %arrayidx7 to float*
store float %6, float* %7, align 4
; CHECK-NEXT: call.uni
; CHECK-NEXT: callee,
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 0
call void @callee(float* %a, i8* %arraydecay) #2
ret void
}
define void @reg_plus_offset(i32* %a) {
; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+32];
; CHECK: ldu.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}+36];
- %p2 = getelementptr i32* %a, i32 8
+ %p2 = getelementptr i32, i32* %a, i32 8
%t1 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p2, i32 4)
- %p3 = getelementptr i32* %a, i32 9
+ %p3 = getelementptr i32, i32* %a, i32 9
%t2 = call i32 @llvm.nvvm.ldu.global.i.i32.p0i32(i32* %p3, i32 4)
%t3 = mul i32 %t1, %t2
store i32 %t3, i32* %a
define void @main(i1* %a1, i32 %a2, i32* %arg3) {
; CHECK: ld.u8
; CHECK-NOT: ld.u1
- %t1 = getelementptr i1* %a1, i32 %a2
+ %t1 = getelementptr i1, i1* %a1, i32 %a2
%t2 = load i1* %t1
%t3 = sext i1 %t2 to i32
store i32 %t3, i32* %arg3
%output.addr = alloca float*, align 8
store float* %output, float** %output.addr, align 8
%0 = load float** %output.addr, align 8
- %arrayidx = getelementptr inbounds float* %0, i64 0
+ %arrayidx = getelementptr inbounds float, float* %0, i64 0
%1 = load float* %arrayidx, align 4
%conv = fpext float %1 to double
%cmp = fcmp olt double %conv, 1.000000e+01
if.end: ; preds = %if.else, %if.then
call void @llvm.cuda.syncthreads()
%6 = load float** %output.addr, align 8
- %arrayidx6 = getelementptr inbounds float* %6, i64 0
+ %arrayidx6 = getelementptr inbounds float, float* %6, i64 0
%7 = load float* %arrayidx6, align 4
%conv7 = fpext float %7 to double
%cmp8 = fcmp olt double %conv7, 1.000000e+01
; CHECK: .pragma "nounroll"
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.06 to i64
- %arrayidx = getelementptr inbounds float* %input, i64 %idxprom
+ %arrayidx = getelementptr inbounds float, float* %input, i64 %idxprom
%0 = load float* %arrayidx, align 4
; CHECK: ld.f32
- %arrayidx2 = getelementptr inbounds float* %output, i64 %idxprom
+ %arrayidx2 = getelementptr inbounds float, float* %output, i64 %idxprom
store float %0, float* %arrayidx2, align 4
; CHECK: st.f32
%inc = add nuw nsw i32 %i.06, 1
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ %index.next, %vector.body ], [ 0, %entry ]
- %scevgep9 = getelementptr i8* %dst, i64 %index
+ %scevgep9 = getelementptr i8, i8* %dst, i64 %index
%scevgep910 = bitcast i8* %scevgep9 to <4 x i8>*
store <4 x i8> undef, <4 x i8>* %scevgep910, align 1
%index.next = add i64 %index, 4
br i1 undef, label %for.end, label %for.body.preheader1
for.body.preheader1: ; preds = %middle.block
- %scevgep2 = getelementptr i8* %dst, i64 0
+ %scevgep2 = getelementptr i8, i8* %dst, i64 0
br label %for.body
for.body: ; preds = %for.body, %for.body.preheader1
%lsr.iv3 = phi i8* [ %scevgep2, %for.body.preheader1 ], [ %scevgep4, %for.body ]
store i8 undef, i8* %lsr.iv3, align 1
- %scevgep4 = getelementptr i8* %lsr.iv3, i64 1
+ %scevgep4 = getelementptr i8, i8* %lsr.iv3, i64 1
br label %for.body
for.end: ; preds = %middle.block, %entry
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
- %ptr0 = getelementptr i32* %a, i32 0
+ %ptr0 = getelementptr i32, i32* %a, i32 0
%val0 = load i32* %ptr0
- %ptr1 = getelementptr i32* %a, i32 1
+ %ptr1 = getelementptr i32, i32* %a, i32 1
%val1 = load i32* %ptr1
- %ptr2 = getelementptr i32* %a, i32 2
+ %ptr2 = getelementptr i32, i32* %a, i32 2
%val2 = load i32* %ptr2
- %ptr3 = getelementptr i32* %a, i32 3
+ %ptr3 = getelementptr i32, i32* %a, i32 3
%val3 = load i32* %ptr3
%t0 = add i32 %val0, %val1
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
; CHECK-NEXT: add.s32
- %ptr0 = getelementptr <2 x i32>* %a, i32 0
+ %ptr0 = getelementptr <2 x i32>, <2 x i32>* %a, i32 0
%val0 = load <2 x i32>* %ptr0
- %ptr1 = getelementptr <2 x i32>* %a, i32 1
+ %ptr1 = getelementptr <2 x i32>, <2 x i32>* %a, i32 1
%val1 = load <2 x i32>* %ptr1
- %ptr2 = getelementptr <2 x i32>* %a, i32 2
+ %ptr2 = getelementptr <2 x i32>, <2 x i32>* %a, i32 2
%val2 = load <2 x i32>* %ptr2
- %ptr3 = getelementptr <2 x i32>* %a, i32 3
+ %ptr3 = getelementptr <2 x i32>, <2 x i32>* %a, i32 3
%val3 = load <2 x i32>* %ptr3
%t0 = add <2 x i32> %val0, %val1
cond_true68: ; preds = %bb30
ret void
cond_next92: ; preds = %bb30
- %tmp173 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp173 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
%tmp174 = load i32* %tmp173 ; <i32> [#uses=1]
%tmp177 = and i32 %tmp174, -9 ; <i32> [#uses=1]
store i32 %tmp177, i32* %tmp173
- %tmp180 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp180 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
%tmp181 = load i32* %tmp180 ; <i32> [#uses=1]
- %tmp185 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp185 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
%tmp186 = load i32* %tmp185 ; <i32> [#uses=1]
%tmp183187 = shl i32 %tmp181, 1 ; <i32> [#uses=1]
%tmp188 = and i32 %tmp183187, 16 ; <i32> [#uses=1]
%tmp190 = and i32 %tmp186, -17 ; <i32> [#uses=1]
%tmp191 = or i32 %tmp190, %tmp188 ; <i32> [#uses=1]
store i32 %tmp191, i32* %tmp185
- %tmp193 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp193 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
%tmp194 = load i32* %tmp193 ; <i32> [#uses=1]
- %tmp198 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp198 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
%tmp199 = load i32* %tmp198 ; <i32> [#uses=1]
%tmp196200 = shl i32 %tmp194, 2 ; <i32> [#uses=1]
%tmp201 = and i32 %tmp196200, 64 ; <i32> [#uses=1]
%tmp203 = and i32 %tmp199, -65 ; <i32> [#uses=1]
%tmp204 = or i32 %tmp203, %tmp201 ; <i32> [#uses=1]
store i32 %tmp204, i32* %tmp198
- %tmp206 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp206 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=1]
%tmp207 = load i32* %tmp206 ; <i32> [#uses=1]
- %tmp211 = getelementptr %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
+ %tmp211 = getelementptr %struct.attr_desc, %struct.attr_desc* null, i32 0, i32 4 ; <i32*> [#uses=2]
%tmp212 = load i32* %tmp211 ; <i32> [#uses=1]
%tmp209213 = shl i32 %tmp207, 1 ; <i32> [#uses=1]
%tmp214 = and i32 %tmp209213, 128 ; <i32> [#uses=1]
define i32 @test(i32 %i) {
%tmp = load i8** @lens ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8* %tmp, i32 %i ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
%tmp.upgrd.1 = load i8* %tmp1 ; <i8> [#uses=1]
%tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
%tmp3 = load i32** @vals ; <i32*> [#uses=1]
%tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
- %tmp6 = getelementptr i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
+ %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
%tmp7 = load i32* %tmp6 ; <i32> [#uses=1]
ret i32 %tmp7
}
bb12.i: ; preds = %bb12.i, %bb19
%i.0.i = phi i32 [ %tmp11.i, %bb12.i ], [ 0, %bb19 ] ; <i32> [#uses=2]
%gep.upgrd.1 = zext i32 %i.0.i to i64 ; <i64> [#uses=1]
- %tmp9.i = getelementptr [256 x i32]* %RMask.i, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp9.i = getelementptr [256 x i32], [256 x i32]* %RMask.i, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp9.i
%tmp11.i = add i32 %i.0.i, 1 ; <i32> [#uses=1]
br label %bb12.i
define void @test1() {
entry:
%Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A* %Out, i32 0, i32 1
+ %tmp2 = getelementptr %struct.A, %struct.A* %Out, i32 0, i32 1
%tmp5 = call i32 asm "lwbrx $0, $1", "=r,m"(i32* %tmp2 )
ret void
}
define void @test2() {
entry:
%Out = alloca %struct.A, align 4 ; <%struct.A*> [#uses=1]
- %tmp2 = getelementptr %struct.A* %Out, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr %struct.A, %struct.A* %Out, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp5 = call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,m"( i8* null, i32 0, i32* %tmp2 ) ; <i32> [#uses=0]
ret void
}
define void @test(<4 x float>*, { { i16, i16, i32 } }*) {
xOperationInitMasks.exit:
- %.sub7896 = getelementptr [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#uses=24]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ; <<4 x float>*>:2 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ; <<4 x float>*>:3 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ; <<4 x float>*>:4 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ; <<4 x float>*>:5 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ; <<4 x float>*>:6 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ; <<4 x float>*>:7 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ; <<4 x float>*>:8 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ; <<4 x float>*>:9 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 3 ; <<4 x float>*>:10 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 1 ; <<4 x float>*>:11 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 2 ; <<4 x float>*>:12 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 3 ; <<4 x float>*>:13 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 1 ; <<4 x float>*>:14 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 2 ; <<4 x float>*>:15 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 3 ; <<4 x float>*>:16 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 1 ; <<4 x float>*>:17 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 2 ; <<4 x float>*>:18 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 3 ; <<4 x float>*>:19 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 1 ; <<4 x float>*>:20 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 2 ; <<4 x float>*>:21 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 3 ; <<4 x float>*>:22 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 1 ; <<4 x float>*>:23 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 2 ; <<4 x float>*>:24 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 3 ; <<4 x float>*>:25 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 1 ; <<4 x float>*>:26 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 2 ; <<4 x float>*>:27 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 3 ; <<4 x float>*>:28 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 1 ; <<4 x float>*>:29 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 2 ; <<4 x float>*>:30 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 3 ; <<4 x float>*>:31 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 1 ; <<4 x float>*>:32 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 2 ; <<4 x float>*>:33 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 3 ; <<4 x float>*>:34 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 1 ; <<4 x float>*>:35 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 2 ; <<4 x float>*>:36 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 3 ; <<4 x float>*>:37 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 1 ; <<4 x float>*>:38 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 2 ; <<4 x float>*>:39 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 3 ; <<4 x float>*>:40 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 1 ; <<4 x float>*>:41 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 2 ; <<4 x float>*>:42 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 3 ; <<4 x float>*>:43 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 1 ; <<4 x float>*>:44 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 2 ; <<4 x float>*>:45 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 3 ; <<4 x float>*>:46 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 1 ; <<4 x float>*>:47 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 2 ; <<4 x float>*>:48 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 3 ; <<4 x float>*>:49 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 1 ; <<4 x float>*>:50 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 2 ; <<4 x float>*>:51 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 3 ; <<4 x float>*>:52 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 1 ; <<4 x float>*>:53 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 2 ; <<4 x float>*>:54 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 3 ; <<4 x float>*>:55 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 1 ; <<4 x float>*>:56 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 2 ; <<4 x float>*>:57 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 3 ; <<4 x float>*>:58 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 1 ; <<4 x float>*>:59 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 2 ; <<4 x float>*>:60 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 3 ; <<4 x float>*>:61 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 1 ; <<4 x float>*>:62 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 2 ; <<4 x float>*>:63 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 3 ; <<4 x float>*>:64 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 1 ; <<4 x float>*>:65 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 2 ; <<4 x float>*>:66 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 3 ; <<4 x float>*>:67 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 1 ; <<4 x float>*>:68 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 2 ; <<4 x float>*>:69 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 3 ; <<4 x float>*>:70 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 1 ; <<4 x float>*>:71 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 2 ; <<4 x float>*>:72 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 3 ; <<4 x float>*>:73 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 1 ; <<4 x float>*>:74 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 2 ; <<4 x float>*>:75 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 3 ; <<4 x float>*>:76 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 1 ; <<4 x float>*>:77 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 2 ; <<4 x float>*>:78 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 3 ; <<4 x float>*>:79 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 1 ; <<4 x float>*>:80 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 2 ; <<4 x float>*>:81 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 3 ; <<4 x float>*>:82 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 1 ; <<4 x float>*>:83 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 2 ; <<4 x float>*>:84 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 3 ; <<4 x float>*>:85 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 1 ; <<4 x float>*>:86 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 2 ; <<4 x float>*>:87 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 3 ; <<4 x float>*>:88 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 1 ; <<4 x float>*>:89 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 2 ; <<4 x float>*>:90 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 3 ; <<4 x float>*>:91 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 1 ; <<4 x float>*>:92 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 2 ; <<4 x float>*>:93 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 3 ; <<4 x float>*>:94 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 1 ; <<4 x float>*>:95 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 2 ; <<4 x float>*>:96 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 3 ; <<4 x float>*>:97 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 1 ; <<4 x float>*>:98 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 2 ; <<4 x float>*>:99 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 3 ; <<4 x float>*>:100 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 1 ; <<4 x float>*>:101 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 2 ; <<4 x float>*>:102 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 3 ; <<4 x float>*>:103 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 1 ; <<4 x float>*>:104 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 2 ; <<4 x float>*>:105 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 3 ; <<4 x float>*>:106 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 1 ; <<4 x float>*>:107 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 2 ; <<4 x float>*>:108 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 3 ; <<4 x float>*>:109 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 1 ; <<4 x float>*>:110 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 2 ; <<4 x float>*>:111 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 3 ; <<4 x float>*>:112 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 1 ; <<4 x float>*>:113 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 2 ; <<4 x float>*>:114 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 3 ; <<4 x float>*>:115 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 1 ; <<4 x float>*>:116 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 2 ; <<4 x float>*>:117 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 3 ; <<4 x float>*>:118 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 1 ; <<4 x float>*>:119 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 2 ; <<4 x float>*>:120 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 3 ; <<4 x float>*>:121 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 1 ; <<4 x float>*>:122 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 2 ; <<4 x float>*>:123 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 3 ; <<4 x float>*>:124 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 1 ; <<4 x float>*>:125 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 2 ; <<4 x float>*>:126 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 3 ; <<4 x float>*>:127 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 1 ; <<4 x float>*>:128 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 2 ; <<4 x float>*>:129 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 3 ; <<4 x float>*>:130 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 1 ; <<4 x float>*>:131 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 2 ; <<4 x float>*>:132 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 3 ; <<4 x float>*>:133 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 1 ; <<4 x float>*>:134 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 2 ; <<4 x float>*>:135 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 3 ; <<4 x float>*>:136 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 1 ; <<4 x float>*>:137 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 2 ; <<4 x float>*>:138 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 3 ; <<4 x float>*>:139 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 1 ; <<4 x float>*>:140 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 2 ; <<4 x float>*>:141 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 3 ; <<4 x float>*>:142 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 1 ; <<4 x float>*>:143 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 2 ; <<4 x float>*>:144 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 3 ; <<4 x float>*>:145 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 1 ; <<4 x float>*>:146 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 2 ; <<4 x float>*>:147 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 3 ; <<4 x float>*>:148 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 1 ; <<4 x float>*>:149 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 2 ; <<4 x float>*>:150 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 3 ; <<4 x float>*>:151 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 1 ; <<4 x float>*>:152 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 2 ; <<4 x float>*>:153 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 3 ; <<4 x float>*>:154 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 1 ; <<4 x float>*>:155 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 2 ; <<4 x float>*>:156 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 3 ; <<4 x float>*>:157 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 1 ; <<4 x float>*>:158 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 2 ; <<4 x float>*>:159 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 3 ; <<4 x float>*>:160 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 1 ; <<4 x float>*>:161 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 2 ; <<4 x float>*>:162 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 3 ; <<4 x float>*>:163 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 1 ; <<4 x float>*>:164 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 2 ; <<4 x float>*>:165 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 3 ; <<4 x float>*>:166 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 1 ; <<4 x float>*>:167 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 2 ; <<4 x float>*>:168 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 3 ; <<4 x float>*>:169 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 1 ; <<4 x float>*>:170 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 2 ; <<4 x float>*>:171 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 3 ; <<4 x float>*>:172 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 1 ; <<4 x float>*>:173 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 2 ; <<4 x float>*>:174 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 3 ; <<4 x float>*>:175 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 1 ; <<4 x float>*>:176 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 2 ; <<4 x float>*>:177 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 3 ; <<4 x float>*>:178 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 1 ; <<4 x float>*>:179 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 2 ; <<4 x float>*>:180 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 3 ; <<4 x float>*>:181 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 1 ; <<4 x float>*>:182 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 2 ; <<4 x float>*>:183 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 3 ; <<4 x float>*>:184 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 1 ; <<4 x float>*>:185 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 2 ; <<4 x float>*>:186 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 3 ; <<4 x float>*>:187 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 1 ; <<4 x float>*>:188 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 2 ; <<4 x float>*>:189 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 3 ; <<4 x float>*>:190 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 1 ; <<4 x float>*>:191 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 2 ; <<4 x float>*>:192 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 3 ; <<4 x float>*>:193 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 1 ; <<4 x float>*>:194 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 2 ; <<4 x float>*>:195 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 3 ; <<4 x float>*>:196 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 1 ; <<4 x float>*>:197 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 2 ; <<4 x float>*>:198 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 3 ; <<4 x float>*>:199 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 1 ; <<4 x float>*>:200 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 2 ; <<4 x float>*>:201 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 3 ; <<4 x float>*>:202 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 1 ; <<4 x float>*>:203 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 2 ; <<4 x float>*>:204 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 3 ; <<4 x float>*>:205 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 1 ; <<4 x float>*>:206 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 2 ; <<4 x float>*>:207 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 3 ; <<4 x float>*>:208 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 1 ; <<4 x float>*>:209 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 2 ; <<4 x float>*>:210 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 3 ; <<4 x float>*>:211 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 1 ; <<4 x float>*>:212 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 2 ; <<4 x float>*>:213 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 3 ; <<4 x float>*>:214 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 1 ; <<4 x float>*>:215 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 2 ; <<4 x float>*>:216 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 3 ; <<4 x float>*>:217 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 1 ; <<4 x float>*>:218 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 2 ; <<4 x float>*>:219 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 3 ; <<4 x float>*>:220 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 1 ; <<4 x float>*>:221 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 2 ; <<4 x float>*>:222 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 3 ; <<4 x float>*>:223 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 1 ; <<4 x float>*>:224 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 2 ; <<4 x float>*>:225 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 3 ; <<4 x float>*>:226 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 1 ; <<4 x float>*>:227 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 2 ; <<4 x float>*>:228 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 3 ; <<4 x float>*>:229 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 1 ; <<4 x float>*>:230 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 2 ; <<4 x float>*>:231 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 3 ; <<4 x float>*>:232 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 1 ; <<4 x float>*>:233 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 2 ; <<4 x float>*>:234 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 3 ; <<4 x float>*>:235 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 1 ; <<4 x float>*>:236 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 2 ; <<4 x float>*>:237 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 3 ; <<4 x float>*>:238 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 1 ; <<4 x float>*>:239 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 2 ; <<4 x float>*>:240 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 3 ; <<4 x float>*>:241 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 1 ; <<4 x float>*>:242 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 2 ; <<4 x float>*>:243 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 3 ; <<4 x float>*>:244 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 1 ; <<4 x float>*>:245 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 2 ; <<4 x float>*>:246 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 3 ; <<4 x float>*>:247 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 1 ; <<4 x float>*>:248 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 2 ; <<4 x float>*>:249 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 3 ; <<4 x float>*>:250 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 1 ; <<4 x float>*>:251 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 2 ; <<4 x float>*>:252 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 3 ; <<4 x float>*>:253 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 1 ; <<4 x float>*>:254 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 2 ; <<4 x float>*>:255 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 3 ; <<4 x float>*>:256 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 1 ; <<4 x float>*>:257 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 2 ; <<4 x float>*>:258 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 3 ; <<4 x float>*>:259 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 1 ; <<4 x float>*>:260 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 2 ; <<4 x float>*>:261 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 3 ; <<4 x float>*>:262 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 1 ; <<4 x float>*>:263 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 2 ; <<4 x float>*>:264 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 3 ; <<4 x float>*>:265 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 1 ; <<4 x float>*>:266 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 2 ; <<4 x float>*>:267 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 3 ; <<4 x float>*>:268 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 1 ; <<4 x float>*>:269 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 2 ; <<4 x float>*>:270 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 3 ; <<4 x float>*>:271 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 1 ; <<4 x float>*>:272 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 2 ; <<4 x float>*>:273 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 3 ; <<4 x float>*>:274 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 1 ; <<4 x float>*>:275 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 2 ; <<4 x float>*>:276 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 3 ; <<4 x float>*>:277 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 1 ; <<4 x float>*>:278 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 2 ; <<4 x float>*>:279 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 3 ; <<4 x float>*>:280 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 1 ; <<4 x float>*>:281 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 2 ; <<4 x float>*>:282 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 3 ; <<4 x float>*>:283 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 1 ; <<4 x float>*>:284 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 2 ; <<4 x float>*>:285 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 3 ; <<4 x float>*>:286 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 1 ; <<4 x float>*>:287 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 2 ; <<4 x float>*>:288 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 3 ; <<4 x float>*>:289 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 1 ; <<4 x float>*>:290 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 2 ; <<4 x float>*>:291 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 3 ; <<4 x float>*>:292 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 1 ; <<4 x float>*>:293 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 2 ; <<4 x float>*>:294 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 3 ; <<4 x float>*>:295 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 1 ; <<4 x float>*>:296 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 2 ; <<4 x float>*>:297 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 3 ; <<4 x float>*>:298 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 1 ; <<4 x float>*>:299 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 2 ; <<4 x float>*>:300 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 3 ; <<4 x float>*>:301 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 1 ; <<4 x float>*>:302 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 2 ; <<4 x float>*>:303 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 3 ; <<4 x float>*>:304 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 1 ; <<4 x float>*>:305 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 2 ; <<4 x float>*>:306 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 3 ; <<4 x float>*>:307 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 1 ; <<4 x float>*>:308 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 2 ; <<4 x float>*>:309 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 3 ; <<4 x float>*>:310 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 1 ; <<4 x float>*>:311 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 2 ; <<4 x float>*>:312 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 3 ; <<4 x float>*>:313 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 1 ; <<4 x float>*>:314 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 2 ; <<4 x float>*>:315 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 3 ; <<4 x float>*>:316 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 1 ; <<4 x float>*>:317 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 2 ; <<4 x float>*>:318 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 3 ; <<4 x float>*>:319 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 1 ; <<4 x float>*>:320 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 2 ; <<4 x float>*>:321 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 3 ; <<4 x float>*>:322 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 1 ; <<4 x float>*>:323 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 2 ; <<4 x float>*>:324 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 3 ; <<4 x float>*>:325 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 1 ; <<4 x float>*>:326 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 2 ; <<4 x float>*>:327 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 3 ; <<4 x float>*>:328 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 1 ; <<4 x float>*>:329 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 2 ; <<4 x float>*>:330 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 3 ; <<4 x float>*>:331 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 1 ; <<4 x float>*>:332 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 2 ; <<4 x float>*>:333 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 3 ; <<4 x float>*>:334 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 1 ; <<4 x float>*>:335 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 2 ; <<4 x float>*>:336 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 3 ; <<4 x float>*>:337 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 1 ; <<4 x float>*>:338 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 2 ; <<4 x float>*>:339 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 3 ; <<4 x float>*>:340 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 1 ; <<4 x float>*>:341 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 2 ; <<4 x float>*>:342 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 3 ; <<4 x float>*>:343 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 1 ; <<4 x float>*>:344 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 2 ; <<4 x float>*>:345 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 3 ; <<4 x float>*>:346 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 1 ; <<4 x float>*>:347 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 2 ; <<4 x float>*>:348 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 3 ; <<4 x float>*>:349 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 1 ; <<4 x float>*>:350 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 2 ; <<4 x float>*>:351 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 3 ; <<4 x float>*>:352 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 1 ; <<4 x float>*>:353 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 2 ; <<4 x float>*>:354 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 3 ; <<4 x float>*>:355 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 1 ; <<4 x float>*>:356 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 2 ; <<4 x float>*>:357 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 3 ; <<4 x float>*>:358 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 1 ; <<4 x float>*>:359 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 2 ; <<4 x float>*>:360 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 3 ; <<4 x float>*>:361 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 1 ; <<4 x float>*>:362 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 2 ; <<4 x float>*>:363 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 3 ; <<4 x float>*>:364 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 1 ; <<4 x float>*>:365 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 2 ; <<4 x float>*>:366 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 3 ; <<4 x float>*>:367 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 1 ; <<4 x float>*>:368 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 2 ; <<4 x float>*>:369 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 3 ; <<4 x float>*>:370 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 1 ; <<4 x float>*>:371 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 2 ; <<4 x float>*>:372 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 3 ; <<4 x float>*>:373 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 1 ; <<4 x float>*>:374 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 2 ; <<4 x float>*>:375 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 3 ; <<4 x float>*>:376 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 1 ; <<4 x float>*>:377 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 2 ; <<4 x float>*>:378 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 3 ; <<4 x float>*>:379 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 1 ; <<4 x float>*>:380 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 2 ; <<4 x float>*>:381 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 3 ; <<4 x float>*>:382 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 1 ; <<4 x float>*>:383 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 2 ; <<4 x float>*>:384 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 3 ; <<4 x float>*>:385 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 1 ; <<4 x float>*>:386 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 2 ; <<4 x float>*>:387 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 3 ; <<4 x float>*>:388 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 1 ; <<4 x float>*>:389 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 2 ; <<4 x float>*>:390 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 3 ; <<4 x float>*>:391 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 1 ; <<4 x float>*>:392 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 2 ; <<4 x float>*>:393 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 3 ; <<4 x float>*>:394 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 1 ; <<4 x float>*>:395 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 2 ; <<4 x float>*>:396 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 3 ; <<4 x float>*>:397 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 1 ; <<4 x float>*>:398 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 2 ; <<4 x float>*>:399 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 3 ; <<4 x float>*>:400 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 1 ; <<4 x float>*>:401 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 2 ; <<4 x float>*>:402 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 3 ; <<4 x float>*>:403 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 1 ; <<4 x float>*>:404 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 2 ; <<4 x float>*>:405 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 3 ; <<4 x float>*>:406 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 1 ; <<4 x float>*>:407 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 2 ; <<4 x float>*>:408 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 3 ; <<4 x float>*>:409 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 1 ; <<4 x float>*>:410 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 2 ; <<4 x float>*>:411 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 3 ; <<4 x float>*>:412 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 1 ; <<4 x float>*>:413 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 2 ; <<4 x float>*>:414 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 3 ; <<4 x float>*>:415 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 1 ; <<4 x float>*>:416 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 2 ; <<4 x float>*>:417 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 3 ; <<4 x float>*>:418 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 1 ; <<4 x float>*>:419 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 2 ; <<4 x float>*>:420 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 3 ; <<4 x float>*>:421 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 1 ; <<4 x float>*>:422 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 2 ; <<4 x float>*>:423 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 3 ; <<4 x float>*>:424 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 1 ; <<4 x float>*>:425 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 2 ; <<4 x float>*>:426 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 3 ; <<4 x float>*>:427 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 1 ; <<4 x float>*>:428 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 2 ; <<4 x float>*>:429 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 3 ; <<4 x float>*>:430 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 1 ; <<4 x float>*>:431 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 2 ; <<4 x float>*>:432 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 3 ; <<4 x float>*>:433 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 1 ; <<4 x float>*>:434 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 2 ; <<4 x float>*>:435 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 3 ; <<4 x float>*>:436 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 1 ; <<4 x float>*>:437 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 2 ; <<4 x float>*>:438 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 3 ; <<4 x float>*>:439 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 1 ; <<4 x float>*>:440 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 2 ; <<4 x float>*>:441 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 3 ; <<4 x float>*>:442 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 1 ; <<4 x float>*>:443 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 2 ; <<4 x float>*>:444 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 3 ; <<4 x float>*>:445 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 1 ; <<4 x float>*>:446 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 2 ; <<4 x float>*>:447 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 3 ; <<4 x float>*>:448 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 1 ; <<4 x float>*>:449 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 2 ; <<4 x float>*>:450 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 3 ; <<4 x float>*>:451 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 1 ; <<4 x float>*>:452 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 2 ; <<4 x float>*>:453 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 3 ; <<4 x float>*>:454 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 1 ; <<4 x float>*>:455 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 2 ; <<4 x float>*>:456 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 3 ; <<4 x float>*>:457 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 1 ; <<4 x float>*>:458 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 2 ; <<4 x float>*>:459 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 3 ; <<4 x float>*>:460 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 1 ; <<4 x float>*>:461 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 2 ; <<4 x float>*>:462 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 3 ; <<4 x float>*>:463 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 1 ; <<4 x float>*>:464 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 2 ; <<4 x float>*>:465 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 3 ; <<4 x float>*>:466 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 1 ; <<4 x float>*>:467 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 2 ; <<4 x float>*>:468 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 3 ; <<4 x float>*>:469 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 1 ; <<4 x float>*>:470 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 2 ; <<4 x float>*>:471 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 3 ; <<4 x float>*>:472 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 1 ; <<4 x float>*>:473 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 2 ; <<4 x float>*>:474 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 3 ; <<4 x float>*>:475 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 1 ; <<4 x float>*>:476 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 2 ; <<4 x float>*>:477 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 3 ; <<4 x float>*>:478 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 1 ; <<4 x float>*>:479 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 2 ; <<4 x float>*>:480 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 3 ; <<4 x float>*>:481 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 1 ; <<4 x float>*>:482 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 2 ; <<4 x float>*>:483 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 3 ; <<4 x float>*>:484 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:485 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:486 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:487 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:488 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:489 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:490 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 1 ; <<4 x float>*>:491 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 2 ; <<4 x float>*>:492 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 3 ; <<4 x float>*>:493 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 1 ; <<4 x float>*>:494 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 2 ; <<4 x float>*>:495 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 3 ; <<4 x float>*>:496 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 1 ; <<4 x float>*>:497 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 2 ; <<4 x float>*>:498 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 3 ; <<4 x float>*>:499 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 1 ; <<4 x float>*>:500 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 2 ; <<4 x float>*>:501 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 3 ; <<4 x float>*>:502 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 1 ; <<4 x float>*>:503 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 2 ; <<4 x float>*>:504 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 3 ; <<4 x float>*>:505 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 1 ; <<4 x float>*>:506 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 2 ; <<4 x float>*>:507 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 3 ; <<4 x float>*>:508 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 1 ; <<4 x float>*>:509 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 2 ; <<4 x float>*>:510 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 3 ; <<4 x float>*>:511 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 1 ; <<4 x float>*>:512 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 2 ; <<4 x float>*>:513 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 3 ; <<4 x float>*>:514 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 1 ; <<4 x float>*>:515 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 2 ; <<4 x float>*>:516 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 3 ; <<4 x float>*>:517 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 1 ; <<4 x float>*>:518 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 2 ; <<4 x float>*>:519 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 3 ; <<4 x float>*>:520 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 1 ; <<4 x float>*>:521 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 2 ; <<4 x float>*>:522 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 3 ; <<4 x float>*>:523 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 1 ; <<4 x float>*>:524 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 2 ; <<4 x float>*>:525 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 3 ; <<4 x float>*>:526 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:527 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:528 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:529 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:530 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:531 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:532 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:533 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:534 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:535 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 1 ; <<4 x float>*>:536 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 2 ; <<4 x float>*>:537 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 3 ; <<4 x float>*>:538 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 1 ; <<4 x float>*>:539 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 2 ; <<4 x float>*>:540 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 3 ; <<4 x float>*>:541 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:542 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:543 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:544 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 1 ; <<4 x float>*>:545 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 2 ; <<4 x float>*>:546 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 3 ; <<4 x float>*>:547 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 1 ; <<4 x float>*>:548 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 2 ; <<4 x float>*>:549 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 3 ; <<4 x float>*>:550 [#uses=0]
+ %.sub7896 = getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 0 ; <<4 x i32>*> [#uses=24]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 175, i32 3 ; <<4 x float>*>:2 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 2 ; <<4 x float>*>:3 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 174, i32 3 ; <<4 x float>*>:4 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 1 ; <<4 x float>*>:5 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 2 ; <<4 x float>*>:6 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 173, i32 3 ; <<4 x float>*>:7 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 1 ; <<4 x float>*>:8 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 2 ; <<4 x float>*>:9 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 172, i32 3 ; <<4 x float>*>:10 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 1 ; <<4 x float>*>:11 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 2 ; <<4 x float>*>:12 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 171, i32 3 ; <<4 x float>*>:13 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 1 ; <<4 x float>*>:14 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 2 ; <<4 x float>*>:15 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 170, i32 3 ; <<4 x float>*>:16 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 1 ; <<4 x float>*>:17 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 2 ; <<4 x float>*>:18 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 169, i32 3 ; <<4 x float>*>:19 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 1 ; <<4 x float>*>:20 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 2 ; <<4 x float>*>:21 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 168, i32 3 ; <<4 x float>*>:22 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 1 ; <<4 x float>*>:23 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 2 ; <<4 x float>*>:24 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 167, i32 3 ; <<4 x float>*>:25 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 1 ; <<4 x float>*>:26 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 2 ; <<4 x float>*>:27 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 166, i32 3 ; <<4 x float>*>:28 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 1 ; <<4 x float>*>:29 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 2 ; <<4 x float>*>:30 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 165, i32 3 ; <<4 x float>*>:31 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 1 ; <<4 x float>*>:32 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 2 ; <<4 x float>*>:33 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 164, i32 3 ; <<4 x float>*>:34 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 1 ; <<4 x float>*>:35 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 2 ; <<4 x float>*>:36 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 163, i32 3 ; <<4 x float>*>:37 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 1 ; <<4 x float>*>:38 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 2 ; <<4 x float>*>:39 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 162, i32 3 ; <<4 x float>*>:40 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 1 ; <<4 x float>*>:41 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 2 ; <<4 x float>*>:42 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 161, i32 3 ; <<4 x float>*>:43 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 1 ; <<4 x float>*>:44 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 2 ; <<4 x float>*>:45 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 160, i32 3 ; <<4 x float>*>:46 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 1 ; <<4 x float>*>:47 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 2 ; <<4 x float>*>:48 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 159, i32 3 ; <<4 x float>*>:49 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 1 ; <<4 x float>*>:50 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 2 ; <<4 x float>*>:51 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 158, i32 3 ; <<4 x float>*>:52 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 1 ; <<4 x float>*>:53 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 2 ; <<4 x float>*>:54 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 157, i32 3 ; <<4 x float>*>:55 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 1 ; <<4 x float>*>:56 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 2 ; <<4 x float>*>:57 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 156, i32 3 ; <<4 x float>*>:58 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 1 ; <<4 x float>*>:59 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 2 ; <<4 x float>*>:60 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 155, i32 3 ; <<4 x float>*>:61 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 1 ; <<4 x float>*>:62 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 2 ; <<4 x float>*>:63 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 154, i32 3 ; <<4 x float>*>:64 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 1 ; <<4 x float>*>:65 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 2 ; <<4 x float>*>:66 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 153, i32 3 ; <<4 x float>*>:67 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 1 ; <<4 x float>*>:68 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 2 ; <<4 x float>*>:69 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 152, i32 3 ; <<4 x float>*>:70 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 1 ; <<4 x float>*>:71 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 2 ; <<4 x float>*>:72 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 151, i32 3 ; <<4 x float>*>:73 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 1 ; <<4 x float>*>:74 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 2 ; <<4 x float>*>:75 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 150, i32 3 ; <<4 x float>*>:76 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 1 ; <<4 x float>*>:77 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 2 ; <<4 x float>*>:78 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 149, i32 3 ; <<4 x float>*>:79 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 1 ; <<4 x float>*>:80 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 2 ; <<4 x float>*>:81 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 148, i32 3 ; <<4 x float>*>:82 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 1 ; <<4 x float>*>:83 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 2 ; <<4 x float>*>:84 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 147, i32 3 ; <<4 x float>*>:85 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 1 ; <<4 x float>*>:86 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 2 ; <<4 x float>*>:87 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 146, i32 3 ; <<4 x float>*>:88 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 1 ; <<4 x float>*>:89 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 2 ; <<4 x float>*>:90 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 145, i32 3 ; <<4 x float>*>:91 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 1 ; <<4 x float>*>:92 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 2 ; <<4 x float>*>:93 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 144, i32 3 ; <<4 x float>*>:94 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 1 ; <<4 x float>*>:95 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 2 ; <<4 x float>*>:96 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 143, i32 3 ; <<4 x float>*>:97 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 1 ; <<4 x float>*>:98 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 2 ; <<4 x float>*>:99 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 142, i32 3 ; <<4 x float>*>:100 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 1 ; <<4 x float>*>:101 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 2 ; <<4 x float>*>:102 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 141, i32 3 ; <<4 x float>*>:103 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 1 ; <<4 x float>*>:104 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 2 ; <<4 x float>*>:105 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 140, i32 3 ; <<4 x float>*>:106 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 1 ; <<4 x float>*>:107 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 2 ; <<4 x float>*>:108 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 139, i32 3 ; <<4 x float>*>:109 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 1 ; <<4 x float>*>:110 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 2 ; <<4 x float>*>:111 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 138, i32 3 ; <<4 x float>*>:112 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 1 ; <<4 x float>*>:113 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 2 ; <<4 x float>*>:114 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 137, i32 3 ; <<4 x float>*>:115 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 1 ; <<4 x float>*>:116 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 2 ; <<4 x float>*>:117 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 136, i32 3 ; <<4 x float>*>:118 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 1 ; <<4 x float>*>:119 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 2 ; <<4 x float>*>:120 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 135, i32 3 ; <<4 x float>*>:121 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 1 ; <<4 x float>*>:122 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 2 ; <<4 x float>*>:123 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 134, i32 3 ; <<4 x float>*>:124 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 1 ; <<4 x float>*>:125 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 2 ; <<4 x float>*>:126 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 133, i32 3 ; <<4 x float>*>:127 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 1 ; <<4 x float>*>:128 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 2 ; <<4 x float>*>:129 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 132, i32 3 ; <<4 x float>*>:130 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 1 ; <<4 x float>*>:131 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 2 ; <<4 x float>*>:132 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 131, i32 3 ; <<4 x float>*>:133 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 1 ; <<4 x float>*>:134 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 2 ; <<4 x float>*>:135 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 130, i32 3 ; <<4 x float>*>:136 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 1 ; <<4 x float>*>:137 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 2 ; <<4 x float>*>:138 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 129, i32 3 ; <<4 x float>*>:139 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 1 ; <<4 x float>*>:140 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 2 ; <<4 x float>*>:141 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 128, i32 3 ; <<4 x float>*>:142 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 1 ; <<4 x float>*>:143 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 2 ; <<4 x float>*>:144 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 127, i32 3 ; <<4 x float>*>:145 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 1 ; <<4 x float>*>:146 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 2 ; <<4 x float>*>:147 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 126, i32 3 ; <<4 x float>*>:148 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 1 ; <<4 x float>*>:149 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 2 ; <<4 x float>*>:150 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 125, i32 3 ; <<4 x float>*>:151 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 1 ; <<4 x float>*>:152 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 2 ; <<4 x float>*>:153 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 124, i32 3 ; <<4 x float>*>:154 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 1 ; <<4 x float>*>:155 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 2 ; <<4 x float>*>:156 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 123, i32 3 ; <<4 x float>*>:157 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 1 ; <<4 x float>*>:158 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 2 ; <<4 x float>*>:159 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 122, i32 3 ; <<4 x float>*>:160 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 1 ; <<4 x float>*>:161 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 2 ; <<4 x float>*>:162 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 121, i32 3 ; <<4 x float>*>:163 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 1 ; <<4 x float>*>:164 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 2 ; <<4 x float>*>:165 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 120, i32 3 ; <<4 x float>*>:166 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 1 ; <<4 x float>*>:167 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 2 ; <<4 x float>*>:168 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 119, i32 3 ; <<4 x float>*>:169 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 1 ; <<4 x float>*>:170 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 2 ; <<4 x float>*>:171 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 118, i32 3 ; <<4 x float>*>:172 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 1 ; <<4 x float>*>:173 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 2 ; <<4 x float>*>:174 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 117, i32 3 ; <<4 x float>*>:175 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 1 ; <<4 x float>*>:176 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 2 ; <<4 x float>*>:177 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 116, i32 3 ; <<4 x float>*>:178 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 1 ; <<4 x float>*>:179 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 2 ; <<4 x float>*>:180 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 115, i32 3 ; <<4 x float>*>:181 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 1 ; <<4 x float>*>:182 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 2 ; <<4 x float>*>:183 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 114, i32 3 ; <<4 x float>*>:184 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 1 ; <<4 x float>*>:185 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 2 ; <<4 x float>*>:186 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 113, i32 3 ; <<4 x float>*>:187 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 1 ; <<4 x float>*>:188 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 2 ; <<4 x float>*>:189 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 112, i32 3 ; <<4 x float>*>:190 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 1 ; <<4 x float>*>:191 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 2 ; <<4 x float>*>:192 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 111, i32 3 ; <<4 x float>*>:193 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 1 ; <<4 x float>*>:194 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 2 ; <<4 x float>*>:195 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 110, i32 3 ; <<4 x float>*>:196 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 1 ; <<4 x float>*>:197 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 2 ; <<4 x float>*>:198 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 109, i32 3 ; <<4 x float>*>:199 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 1 ; <<4 x float>*>:200 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 2 ; <<4 x float>*>:201 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 108, i32 3 ; <<4 x float>*>:202 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 1 ; <<4 x float>*>:203 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 2 ; <<4 x float>*>:204 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 107, i32 3 ; <<4 x float>*>:205 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 1 ; <<4 x float>*>:206 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 2 ; <<4 x float>*>:207 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 106, i32 3 ; <<4 x float>*>:208 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 1 ; <<4 x float>*>:209 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 2 ; <<4 x float>*>:210 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 105, i32 3 ; <<4 x float>*>:211 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 1 ; <<4 x float>*>:212 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 2 ; <<4 x float>*>:213 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 104, i32 3 ; <<4 x float>*>:214 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 1 ; <<4 x float>*>:215 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 2 ; <<4 x float>*>:216 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 103, i32 3 ; <<4 x float>*>:217 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 1 ; <<4 x float>*>:218 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 2 ; <<4 x float>*>:219 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 102, i32 3 ; <<4 x float>*>:220 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 1 ; <<4 x float>*>:221 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 2 ; <<4 x float>*>:222 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 101, i32 3 ; <<4 x float>*>:223 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 1 ; <<4 x float>*>:224 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 2 ; <<4 x float>*>:225 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 100, i32 3 ; <<4 x float>*>:226 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 1 ; <<4 x float>*>:227 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 2 ; <<4 x float>*>:228 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 99, i32 3 ; <<4 x float>*>:229 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 1 ; <<4 x float>*>:230 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 2 ; <<4 x float>*>:231 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 98, i32 3 ; <<4 x float>*>:232 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 1 ; <<4 x float>*>:233 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 2 ; <<4 x float>*>:234 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 97, i32 3 ; <<4 x float>*>:235 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 1 ; <<4 x float>*>:236 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 2 ; <<4 x float>*>:237 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 96, i32 3 ; <<4 x float>*>:238 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 1 ; <<4 x float>*>:239 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 2 ; <<4 x float>*>:240 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 95, i32 3 ; <<4 x float>*>:241 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 1 ; <<4 x float>*>:242 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 2 ; <<4 x float>*>:243 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 94, i32 3 ; <<4 x float>*>:244 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 1 ; <<4 x float>*>:245 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 2 ; <<4 x float>*>:246 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 93, i32 3 ; <<4 x float>*>:247 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 1 ; <<4 x float>*>:248 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 2 ; <<4 x float>*>:249 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 92, i32 3 ; <<4 x float>*>:250 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 1 ; <<4 x float>*>:251 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 2 ; <<4 x float>*>:252 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 91, i32 3 ; <<4 x float>*>:253 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 1 ; <<4 x float>*>:254 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 2 ; <<4 x float>*>:255 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 90, i32 3 ; <<4 x float>*>:256 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 1 ; <<4 x float>*>:257 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 2 ; <<4 x float>*>:258 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 89, i32 3 ; <<4 x float>*>:259 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 1 ; <<4 x float>*>:260 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 2 ; <<4 x float>*>:261 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 88, i32 3 ; <<4 x float>*>:262 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 1 ; <<4 x float>*>:263 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 2 ; <<4 x float>*>:264 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 87, i32 3 ; <<4 x float>*>:265 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 1 ; <<4 x float>*>:266 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 2 ; <<4 x float>*>:267 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 86, i32 3 ; <<4 x float>*>:268 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 1 ; <<4 x float>*>:269 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 2 ; <<4 x float>*>:270 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 85, i32 3 ; <<4 x float>*>:271 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 1 ; <<4 x float>*>:272 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 2 ; <<4 x float>*>:273 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 84, i32 3 ; <<4 x float>*>:274 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 1 ; <<4 x float>*>:275 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 2 ; <<4 x float>*>:276 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 83, i32 3 ; <<4 x float>*>:277 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 1 ; <<4 x float>*>:278 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 2 ; <<4 x float>*>:279 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 82, i32 3 ; <<4 x float>*>:280 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 1 ; <<4 x float>*>:281 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 2 ; <<4 x float>*>:282 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 81, i32 3 ; <<4 x float>*>:283 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 1 ; <<4 x float>*>:284 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 2 ; <<4 x float>*>:285 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 80, i32 3 ; <<4 x float>*>:286 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 1 ; <<4 x float>*>:287 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 2 ; <<4 x float>*>:288 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 79, i32 3 ; <<4 x float>*>:289 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 1 ; <<4 x float>*>:290 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 2 ; <<4 x float>*>:291 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 78, i32 3 ; <<4 x float>*>:292 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 1 ; <<4 x float>*>:293 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 2 ; <<4 x float>*>:294 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 77, i32 3 ; <<4 x float>*>:295 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 1 ; <<4 x float>*>:296 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 2 ; <<4 x float>*>:297 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 76, i32 3 ; <<4 x float>*>:298 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 1 ; <<4 x float>*>:299 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 2 ; <<4 x float>*>:300 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 75, i32 3 ; <<4 x float>*>:301 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 1 ; <<4 x float>*>:302 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 2 ; <<4 x float>*>:303 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 74, i32 3 ; <<4 x float>*>:304 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 1 ; <<4 x float>*>:305 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 2 ; <<4 x float>*>:306 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 73, i32 3 ; <<4 x float>*>:307 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 1 ; <<4 x float>*>:308 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 2 ; <<4 x float>*>:309 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 72, i32 3 ; <<4 x float>*>:310 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 1 ; <<4 x float>*>:311 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 2 ; <<4 x float>*>:312 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 71, i32 3 ; <<4 x float>*>:313 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 1 ; <<4 x float>*>:314 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 2 ; <<4 x float>*>:315 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 70, i32 3 ; <<4 x float>*>:316 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 1 ; <<4 x float>*>:317 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 2 ; <<4 x float>*>:318 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 69, i32 3 ; <<4 x float>*>:319 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 1 ; <<4 x float>*>:320 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 2 ; <<4 x float>*>:321 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 68, i32 3 ; <<4 x float>*>:322 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 1 ; <<4 x float>*>:323 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 2 ; <<4 x float>*>:324 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 67, i32 3 ; <<4 x float>*>:325 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 1 ; <<4 x float>*>:326 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 2 ; <<4 x float>*>:327 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 66, i32 3 ; <<4 x float>*>:328 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 1 ; <<4 x float>*>:329 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 2 ; <<4 x float>*>:330 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 65, i32 3 ; <<4 x float>*>:331 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 1 ; <<4 x float>*>:332 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 2 ; <<4 x float>*>:333 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 64, i32 3 ; <<4 x float>*>:334 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 1 ; <<4 x float>*>:335 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 2 ; <<4 x float>*>:336 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 63, i32 3 ; <<4 x float>*>:337 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 1 ; <<4 x float>*>:338 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 2 ; <<4 x float>*>:339 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 62, i32 3 ; <<4 x float>*>:340 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 1 ; <<4 x float>*>:341 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 2 ; <<4 x float>*>:342 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 61, i32 3 ; <<4 x float>*>:343 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 1 ; <<4 x float>*>:344 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 2 ; <<4 x float>*>:345 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 60, i32 3 ; <<4 x float>*>:346 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 1 ; <<4 x float>*>:347 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 2 ; <<4 x float>*>:348 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 59, i32 3 ; <<4 x float>*>:349 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 1 ; <<4 x float>*>:350 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 2 ; <<4 x float>*>:351 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 58, i32 3 ; <<4 x float>*>:352 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 1 ; <<4 x float>*>:353 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 2 ; <<4 x float>*>:354 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 57, i32 3 ; <<4 x float>*>:355 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 1 ; <<4 x float>*>:356 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 2 ; <<4 x float>*>:357 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 56, i32 3 ; <<4 x float>*>:358 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 1 ; <<4 x float>*>:359 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 2 ; <<4 x float>*>:360 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 55, i32 3 ; <<4 x float>*>:361 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 1 ; <<4 x float>*>:362 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 2 ; <<4 x float>*>:363 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 54, i32 3 ; <<4 x float>*>:364 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 1 ; <<4 x float>*>:365 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 2 ; <<4 x float>*>:366 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 53, i32 3 ; <<4 x float>*>:367 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 1 ; <<4 x float>*>:368 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 2 ; <<4 x float>*>:369 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 52, i32 3 ; <<4 x float>*>:370 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 1 ; <<4 x float>*>:371 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 2 ; <<4 x float>*>:372 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 51, i32 3 ; <<4 x float>*>:373 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 1 ; <<4 x float>*>:374 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 2 ; <<4 x float>*>:375 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 50, i32 3 ; <<4 x float>*>:376 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 1 ; <<4 x float>*>:377 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 2 ; <<4 x float>*>:378 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 49, i32 3 ; <<4 x float>*>:379 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 1 ; <<4 x float>*>:380 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 2 ; <<4 x float>*>:381 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 48, i32 3 ; <<4 x float>*>:382 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 1 ; <<4 x float>*>:383 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 2 ; <<4 x float>*>:384 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 47, i32 3 ; <<4 x float>*>:385 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 1 ; <<4 x float>*>:386 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 2 ; <<4 x float>*>:387 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 46, i32 3 ; <<4 x float>*>:388 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 1 ; <<4 x float>*>:389 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 2 ; <<4 x float>*>:390 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 45, i32 3 ; <<4 x float>*>:391 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 1 ; <<4 x float>*>:392 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 2 ; <<4 x float>*>:393 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 44, i32 3 ; <<4 x float>*>:394 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 1 ; <<4 x float>*>:395 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 2 ; <<4 x float>*>:396 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 43, i32 3 ; <<4 x float>*>:397 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 1 ; <<4 x float>*>:398 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 2 ; <<4 x float>*>:399 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 42, i32 3 ; <<4 x float>*>:400 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 1 ; <<4 x float>*>:401 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 2 ; <<4 x float>*>:402 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 41, i32 3 ; <<4 x float>*>:403 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 1 ; <<4 x float>*>:404 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 2 ; <<4 x float>*>:405 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 40, i32 3 ; <<4 x float>*>:406 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 1 ; <<4 x float>*>:407 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 2 ; <<4 x float>*>:408 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 39, i32 3 ; <<4 x float>*>:409 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 1 ; <<4 x float>*>:410 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 2 ; <<4 x float>*>:411 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 38, i32 3 ; <<4 x float>*>:412 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 1 ; <<4 x float>*>:413 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 2 ; <<4 x float>*>:414 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 37, i32 3 ; <<4 x float>*>:415 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 1 ; <<4 x float>*>:416 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 2 ; <<4 x float>*>:417 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 36, i32 3 ; <<4 x float>*>:418 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 1 ; <<4 x float>*>:419 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 2 ; <<4 x float>*>:420 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 35, i32 3 ; <<4 x float>*>:421 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 1 ; <<4 x float>*>:422 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 2 ; <<4 x float>*>:423 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 34, i32 3 ; <<4 x float>*>:424 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 1 ; <<4 x float>*>:425 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 2 ; <<4 x float>*>:426 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 33, i32 3 ; <<4 x float>*>:427 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 1 ; <<4 x float>*>:428 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 2 ; <<4 x float>*>:429 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 32, i32 3 ; <<4 x float>*>:430 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 1 ; <<4 x float>*>:431 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 2 ; <<4 x float>*>:432 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 31, i32 3 ; <<4 x float>*>:433 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 1 ; <<4 x float>*>:434 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 2 ; <<4 x float>*>:435 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 30, i32 3 ; <<4 x float>*>:436 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 1 ; <<4 x float>*>:437 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 2 ; <<4 x float>*>:438 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 29, i32 3 ; <<4 x float>*>:439 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 1 ; <<4 x float>*>:440 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 2 ; <<4 x float>*>:441 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 28, i32 3 ; <<4 x float>*>:442 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 1 ; <<4 x float>*>:443 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 2 ; <<4 x float>*>:444 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 27, i32 3 ; <<4 x float>*>:445 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 1 ; <<4 x float>*>:446 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 2 ; <<4 x float>*>:447 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 26, i32 3 ; <<4 x float>*>:448 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 1 ; <<4 x float>*>:449 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 2 ; <<4 x float>*>:450 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 25, i32 3 ; <<4 x float>*>:451 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 1 ; <<4 x float>*>:452 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 2 ; <<4 x float>*>:453 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 24, i32 3 ; <<4 x float>*>:454 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 1 ; <<4 x float>*>:455 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 2 ; <<4 x float>*>:456 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 23, i32 3 ; <<4 x float>*>:457 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 1 ; <<4 x float>*>:458 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 2 ; <<4 x float>*>:459 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 22, i32 3 ; <<4 x float>*>:460 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 1 ; <<4 x float>*>:461 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 2 ; <<4 x float>*>:462 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 21, i32 3 ; <<4 x float>*>:463 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 1 ; <<4 x float>*>:464 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 2 ; <<4 x float>*>:465 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 20, i32 3 ; <<4 x float>*>:466 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 1 ; <<4 x float>*>:467 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 2 ; <<4 x float>*>:468 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 19, i32 3 ; <<4 x float>*>:469 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 1 ; <<4 x float>*>:470 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 2 ; <<4 x float>*>:471 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 18, i32 3 ; <<4 x float>*>:472 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 1 ; <<4 x float>*>:473 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 2 ; <<4 x float>*>:474 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 17, i32 3 ; <<4 x float>*>:475 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 1 ; <<4 x float>*>:476 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 2 ; <<4 x float>*>:477 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 16, i32 3 ; <<4 x float>*>:478 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 1 ; <<4 x float>*>:479 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 2 ; <<4 x float>*>:480 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 15, i32 3 ; <<4 x float>*>:481 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 1 ; <<4 x float>*>:482 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 2 ; <<4 x float>*>:483 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 14, i32 3 ; <<4 x float>*>:484 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:485 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:486 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:487 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:488 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:489 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:490 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 1 ; <<4 x float>*>:491 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 2 ; <<4 x float>*>:492 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 180, i32 3 ; <<4 x float>*>:493 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 1 ; <<4 x float>*>:494 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 2 ; <<4 x float>*>:495 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 181, i32 3 ; <<4 x float>*>:496 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 1 ; <<4 x float>*>:497 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 2 ; <<4 x float>*>:498 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 182, i32 3 ; <<4 x float>*>:499 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 1 ; <<4 x float>*>:500 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 2 ; <<4 x float>*>:501 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 183, i32 3 ; <<4 x float>*>:502 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 1 ; <<4 x float>*>:503 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 2 ; <<4 x float>*>:504 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 184, i32 3 ; <<4 x float>*>:505 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 1 ; <<4 x float>*>:506 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 2 ; <<4 x float>*>:507 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 185, i32 3 ; <<4 x float>*>:508 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 1 ; <<4 x float>*>:509 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 2 ; <<4 x float>*>:510 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 190, i32 3 ; <<4 x float>*>:511 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 1 ; <<4 x float>*>:512 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 2 ; <<4 x float>*>:513 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 9, i32 3 ; <<4 x float>*>:514 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 1 ; <<4 x float>*>:515 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 2 ; <<4 x float>*>:516 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 10, i32 3 ; <<4 x float>*>:517 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 1 ; <<4 x float>*>:518 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 2 ; <<4 x float>*>:519 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 11, i32 3 ; <<4 x float>*>:520 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 1 ; <<4 x float>*>:521 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 2 ; <<4 x float>*>:522 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 12, i32 3 ; <<4 x float>*>:523 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 1 ; <<4 x float>*>:524 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 2 ; <<4 x float>*>:525 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 13, i32 3 ; <<4 x float>*>:526 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:527 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:528 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:529 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:530 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:531 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:532 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:533 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:534 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:535 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 1 ; <<4 x float>*>:536 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 2 ; <<4 x float>*>:537 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 6, i32 3 ; <<4 x float>*>:538 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 1 ; <<4 x float>*>:539 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 2 ; <<4 x float>*>:540 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 7, i32 3 ; <<4 x float>*>:541 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:542 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:543 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:544 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 1 ; <<4 x float>*>:545 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 2 ; <<4 x float>*>:546 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 187, i32 3 ; <<4 x float>*>:547 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 1 ; <<4 x float>*>:548 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 2 ; <<4 x float>*>:549 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 8, i32 3 ; <<4 x float>*>:550 [#uses=0]
load <4 x float>* null ; <<4 x float>>:551 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:552 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:553 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:552 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:553 [#uses=1]
load <4 x float>* %553 ; <<4 x float>>:554 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 3 ; <<4 x float>*>:555 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 3 ; <<4 x float>*>:555 [#uses=0]
shufflevector <4 x float> %554, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:556 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vcmpgtfp( <4 x float> zeroinitializer, <4 x float> %556 ) ; <<4 x i32>>:557 [#uses=0]
bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:558 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:559 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:560 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:559 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:560 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %560
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:561 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:562 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:563 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:561 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:562 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 2 ; <<4 x float>*>:563 [#uses=0]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:564 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:565 [#uses=1]
store <4 x float> %565, <4 x float>* null
br i1 %566, label %.critedge, label %xPIF.exit
.critedge: ; preds = %xOperationInitMasks.exit
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:567 [#uses=0]
+ getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:567 [#uses=0]
and <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:568 [#uses=0]
or <4 x i32> zeroinitializer, zeroinitializer ; <<4 x i32>>:569 [#uses=0]
icmp eq i32 0, 0 ; <i1>:570 [#uses=1]
br label %xPIF.exit
xPIF.exit: ; preds = %.critedge7898, %xOperationInitMasks.exit
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:571 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 1 ; <<4 x float>*>:571 [#uses=0]
load <4 x float>* null ; <<4 x float>>:572 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:573 [#uses=0]
icmp eq i32 0, 0 ; <i1>:574 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:575 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:575 [#uses=0]
load <4 x float>* %0 ; <<4 x float>>:576 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:577 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 0 ; <<4 x float>*>:578 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:579 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:580 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:581 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:582 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 0 ; <<4 x float>*>:578 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:579 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:580 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:581 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:582 [#uses=0]
load <4 x float>* null ; <<4 x float>>:583 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:584 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:584 [#uses=1]
load <4 x float>* %584 ; <<4 x float>>:585 [#uses=1]
load <4 x float>* null ; <<4 x float>>:586 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:587 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:587 [#uses=1]
load <4 x float>* %587 ; <<4 x float>>:588 [#uses=1]
shufflevector <4 x float> %583, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:589 [#uses=1]
shufflevector <4 x float> %585, <4 x float> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x float>>:590 [#uses=1]
fmul <4 x float> zeroinitializer, %590 ; <<4 x float>>:593 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:594 [#uses=1]
fmul <4 x float> zeroinitializer, %591 ; <<4 x float>>:595 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:596 [#uses=2]
load <4 x float>* %596 ; <<4 x float>>:597 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %596
load <4 x float>* null ; <<4 x float>>:598 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:599 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:599 [#uses=0]
shufflevector <4 x float> %594, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:600 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:601 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:601 [#uses=2]
load <4 x float>* %601 ; <<4 x float>>:602 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %601
load <4 x float>* null ; <<4 x float>>:603 [#uses=0]
load <4 x float>* null ; <<4 x float>>:604 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:605 [#uses=1]
load <4 x float>* %605 ; <<4 x float>>:606 [#uses=1]
fsub <4 x float> zeroinitializer, %604 ; <<4 x float>>:607 [#uses=2]
fsub <4 x float> zeroinitializer, %606 ; <<4 x float>>:608 [#uses=2]
; <label>:610 ; preds = %xPIF.exit
load <4 x float>* null ; <<4 x float>>:611 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:612 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:612 [#uses=2]
load <4 x float>* %612 ; <<4 x float>>:613 [#uses=1]
shufflevector <4 x float> %607, <4 x float> %613, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:614 [#uses=1]
store <4 x float> %614, <4 x float>* %612
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:615 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:615 [#uses=2]
load <4 x float>* %615 ; <<4 x float>>:616 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %615
br label %xST.exit400
br i1 %621, label %625, label %622
; <label>:622 ; preds = %617
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:623 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:623 [#uses=0]
shufflevector <4 x float> %607, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:624 [#uses=0]
br label %625
; <label>:625 ; preds = %622, %617
load <4 x i32>* %.sub7896 ; <<4 x i32>>:626 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:627 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:628 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:628 [#uses=1]
load <4 x float>* %628 ; <<4 x float>>:629 [#uses=0]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:630 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:631 [#uses=1]
xST.exit400: ; preds = %633, %625, %610
%.17218 = phi <4 x float> [ zeroinitializer, %610 ], [ %608, %633 ], [ %608, %625 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:636 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:636 [#uses=1]
load <4 x float>* %636 ; <<4 x float>>:637 [#uses=0]
load <4 x float>* null ; <<4 x float>>:638 [#uses=2]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:639 [#uses=0]
load <4 x float>* null ; <<4 x float>>:640 [#uses=2]
fmul <4 x float> %638, %638 ; <<4 x float>>:641 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:642 [#uses=0]
br i1 %656, label %665, label %657
; <label>:657 ; preds = %xST.exit400
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:658 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:658 [#uses=0]
shufflevector <4 x float> %653, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:659 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:660 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:660 [#uses=1]
load <4 x float>* %660 ; <<4 x float>>:661 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:662 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:663 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:662 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:663 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:664 [#uses=0]
br label %xST.exit402
br label %xST.exit402
xST.exit402: ; preds = %669, %657
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:671 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:671 [#uses=0]
load <4 x float>* null ; <<4 x float>>:672 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:673 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:674 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:673 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:674 [#uses=1]
load <4 x float>* %674 ; <<4 x float>>:675 [#uses=1]
load <4 x float>* null ; <<4 x float>>:676 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:677 [#uses=1]
br i1 %682, label %689, label %683
; <label>:683 ; preds = %xST.exit402
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:684 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:684 [#uses=1]
load <4 x float>* %684 ; <<4 x float>>:685 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:686 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:687 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:686 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:687 [#uses=0]
shufflevector <4 x float> %681, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:688 [#uses=0]
br label %xST.exit405
br label %xST.exit405
xST.exit405: ; preds = %689, %683
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:695 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:695 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:696 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:697 [#uses=0]
load <4 x float>* null ; <<4 x float>>:698 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:699 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:700 [#uses=1]
fadd <4 x float> zeroinitializer, %700 ; <<4 x float>>:701 [#uses=0]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:702 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %702, <4 x i32> zeroinitializer ) ; <i32>:703 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:704 [#uses=2]
load <4 x float>* %704 ; <<4 x float>>:705 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %704
load <4 x float>* null ; <<4 x float>>:706 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:707 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:707 [#uses=2]
load <4 x float>* %707 ; <<4 x float>>:708 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %707
load <4 x float>* null ; <<4 x float>>:709 [#uses=0]
load <4 x float>* null ; <<4 x float>>:710 [#uses=0]
load <4 x float>* null ; <<4 x float>>:711 [#uses=1]
shufflevector <4 x float> %711, <4 x float> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x float>>:712 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:713 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:713 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:714 [#uses=1]
load <4 x float>* %714 ; <<4 x float>>:715 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:716 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:717 [#uses=1]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:718 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 0 ; <<4 x float>*>:719 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %719
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:720 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 1 ; <<4 x float>*>:720 [#uses=1]
shufflevector <4 x float> %717, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:721 [#uses=1]
store <4 x float> %721, <4 x float>* %720
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:722 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:722 [#uses=1]
load <4 x float>* %722 ; <<4 x float>>:723 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %723, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:724 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:725 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:725 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %725
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:726 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 2 ; <<4 x float>*>:726 [#uses=1]
load <4 x float>* %726 ; <<4 x float>>:727 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:728 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 5, i32 3 ; <<4 x float>*>:728 [#uses=1]
load <4 x float>* %728 ; <<4 x float>>:729 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:730 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:730 [#uses=1]
load <4 x float>* %730 ; <<4 x float>>:731 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:732 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:732 [#uses=1]
load <4 x float>* %732 ; <<4 x float>>:733 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:734 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:735 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:736 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:737 [#uses=1]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:739 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:740 [#uses=1]
icmp eq i32 %740, 0 ; <i1>:741 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 0 ; <<4 x float>*>:742 [#uses=2]
load <4 x float>* %742 ; <<4 x float>>:743 [#uses=1]
shufflevector <4 x float> %736, <4 x float> %743, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:744 [#uses=1]
store <4 x float> %744, <4 x float>* %742
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:745 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:745 [#uses=1]
load <4 x float>* %745 ; <<4 x float>>:746 [#uses=1]
shufflevector <4 x float> %737, <4 x float> %746, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:747 [#uses=0]
shufflevector <4 x float> %738, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:748 [#uses=1]
store <4 x float> %748, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:749 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:749 [#uses=1]
load <4 x float>* %749 ; <<4 x float>>:750 [#uses=1]
shufflevector <4 x float> %739, <4 x float> %750, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:751 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:752 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:753 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:752 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:753 [#uses=1]
load <4 x float>* %753 ; <<4 x float>>:754 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:755 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:755 [#uses=0]
load <4 x float>* null ; <<4 x float>>:756 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:757 [#uses=1]
shufflevector <4 x float> %756, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:758 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:759 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:759 [#uses=1]
load <4 x float>* %759 ; <<4 x float>>:760 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:761 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:762 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:763 [#uses=1]
fadd <4 x float> %757, zeroinitializer ; <<4 x float>>:764 [#uses=0]
br i1 false, label %773, label %767
; <label>:767 ; preds = %xST.exit405
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:768 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:768 [#uses=0]
load <4 x float>* null ; <<4 x float>>:769 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %769, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:770 [#uses=1]
store <4 x float> %770, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:771 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:771 [#uses=1]
load <4 x float>* %771 ; <<4 x float>>:772 [#uses=0]
br label %xST.exit422
xST.exit422: ; preds = %773, %767
%.07267 = phi <4 x float> [ %766, %767 ], [ undef, %773 ] ; <<4 x float>> [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:774 [#uses=0]
fmul <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:775 [#uses=0]
icmp eq i32 0, 0 ; <i1>:776 [#uses=1]
br i1 %776, label %780, label %777
; <label>:777 ; preds = %xST.exit422
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:778 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:779 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:778 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:779 [#uses=0]
br label %xST.exit431
; <label>:780 ; preds = %xST.exit422
load <4 x i32>* %.sub7896 ; <<4 x i32>>:781 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:782 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:782 [#uses=2]
load <4 x float>* %782 ; <<4 x float>>:783 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %782
load <4 x i32>* %.sub7896 ; <<4 x i32>>:784 [#uses=1]
br label %xST.exit431
xST.exit431: ; preds = %780, %777
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:787 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:787 [#uses=0]
load <4 x float>* null ; <<4 x float>>:788 [#uses=0]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:789 [#uses=2]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %789, <4 x i32> zeroinitializer ) ; <i32>:790 [#uses=1]
shufflevector <4 x i32> %801, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:802 [#uses=0]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:803 [#uses=0]
icmp eq i32 0, 0 ; <i1>:804 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:805 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 0 ; <<4 x float>*>:805 [#uses=1]
load <4 x float>* %805 ; <<4 x float>>:806 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:807 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:807 [#uses=1]
load <4 x float>* %807 ; <<4 x float>>:808 [#uses=0]
load <4 x float>* null ; <<4 x float>>:809 [#uses=0]
load <4 x float>* null ; <<4 x float>>:810 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:811 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:812 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:811 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:812 [#uses=1]
load <4 x float>* %812 ; <<4 x float>>:813 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:814 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:814 [#uses=1]
load <4 x float>* %814 ; <<4 x float>>:815 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:816 [#uses=0]
unreachable
xPBRK.exit: ; preds = %.critedge
store <4 x i32> < i32 -1, i32 -1, i32 -1, i32 -1 >, <4 x i32>* %.sub7896
store <4 x i32> zeroinitializer, <4 x i32>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:817 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:817 [#uses=1]
load <4 x float>* %817 ; <<4 x float>>:818 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:819 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:819 [#uses=1]
load <4 x float>* %819 ; <<4 x float>>:820 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:821 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:821 [#uses=1]
load <4 x float>* %821 ; <<4 x float>>:822 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:823 [#uses=1]
shufflevector <4 x float> %818, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:824 [#uses=1]
store <4 x float> %828, <4 x float>* null
load <4 x float>* null ; <<4 x float>>:829 [#uses=1]
shufflevector <4 x float> %825, <4 x float> %829, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:830 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:831 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:831 [#uses=2]
load <4 x float>* %831 ; <<4 x float>>:832 [#uses=1]
shufflevector <4 x float> %826, <4 x float> %832, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:833 [#uses=1]
store <4 x float> %833, <4 x float>* %831
%.17731 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07730, %1215 ] ; <<4 x float>> [#uses=2]
%.17735 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07734, %1215 ] ; <<4 x float>> [#uses=2]
%.17770 = phi <4 x float> [ undef, %xPBRK.exit ], [ %.07769, %1215 ] ; <<4 x float>> [#uses=2]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:834 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:834 [#uses=0]
load <4 x float>* null ; <<4 x float>>:835 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:836 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:837 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:836 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:837 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:838 [#uses=0]
shufflevector <4 x float> %835, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:839 [#uses=1]
- getelementptr <4 x float>* null, i32 878 ; <<4 x float>*>:840 [#uses=1]
+ getelementptr <4 x float>, <4 x float>* null, i32 878 ; <<4 x float>*>:840 [#uses=1]
load <4 x float>* %840 ; <<4 x float>>:841 [#uses=0]
call <4 x float> @llvm.ppc.altivec.vcfsx( <4 x i32> zeroinitializer, i32 0 ) ; <<4 x float>>:842 [#uses=1]
shufflevector <4 x float> %842, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:843 [#uses=2]
; <label>:849 ; preds = %xLS.exit449
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:850 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:851 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:851 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %851
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:852 [#uses=1]
store <4 x float> %852, <4 x float>* null
br i1 false, label %859, label %856
; <label>:856 ; preds = %854
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:857 [#uses=2]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:857 [#uses=2]
load <4 x float>* %857 ; <<4 x float>>:858 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %857
br label %859
br i1 false, label %864, label %861
; <label>:861 ; preds = %859
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:862 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:862 [#uses=1]
shufflevector <4 x float> %845, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:863 [#uses=1]
store <4 x float> %863, <4 x float>* %862
br label %864
br label %xST.exit451
xST.exit451: ; preds = %868, %849
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:870 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:871 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:870 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:871 [#uses=0]
load <4 x float>* null ; <<4 x float>>:872 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:873 [#uses=1]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:874 [#uses=1]
br i1 %882, label %888, label %883
; <label>:883 ; preds = %xST.exit451
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:884 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 0 ; <<4 x float>*>:884 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %884
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:885 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:885 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:886 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:887 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:887 [#uses=0]
br label %xST.exit453
; <label>:888 ; preds = %xST.exit451
br i1 false, label %894, label %891
; <label>:891 ; preds = %888
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:892 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:892 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:893 [#uses=1]
store <4 x float> %893, <4 x float>* %892
br label %894
br i1 false, label %xST.exit453, label %900
; <label>:900 ; preds = %898
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:901 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:901 [#uses=1]
load <4 x float>* %901 ; <<4 x float>>:902 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %902, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:903 [#uses=0]
br label %xST.exit453
xST.exit453: ; preds = %900, %898, %883
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:904 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 1 ; <<4 x float>*>:904 [#uses=0]
load <4 x float>* null ; <<4 x float>>:905 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:906 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 189, i32 3 ; <<4 x float>*>:906 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:907 [#uses=1]
shufflevector <4 x float> %905, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:908 [#uses=1]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:909 [#uses=0]
; <label>:915 ; preds = %xST.exit453
load <4 x i32>* %.sub7896 ; <<4 x i32>>:916 [#uses=0]
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:917 [#uses=1]
+ getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 3 ; <<4 x i32>*>:917 [#uses=1]
store <4 x i32> zeroinitializer, <4 x i32>* %917
load <4 x i32>* %.sub7896 ; <<4 x i32>>:918 [#uses=1]
and <4 x i32> %918, zeroinitializer ; <<4 x i32>>:919 [#uses=0]
unreachable
xPIF.exit455: ; preds = %xST.exit453
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:922 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 0 ; <<4 x float>*>:922 [#uses=1]
load <4 x float>* %922 ; <<4 x float>>:923 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:924 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 1 ; <<4 x float>*>:924 [#uses=1]
load <4 x float>* %924 ; <<4 x float>>:925 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:926 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:927 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 2 ; <<4 x float>*>:926 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 186, i32 3 ; <<4 x float>*>:927 [#uses=0]
shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:928 [#uses=0]
bitcast { { i16, i16, i32 } }* %1 to <4 x float>* ; <<4 x float>*>:929 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:930 [#uses=0]
xST.exit459: ; preds = %937, %934
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 2, i32 2, i32 2, i32 2 > ; <<4 x i32>>:938 [#uses=1]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> %938, <4 x i32> zeroinitializer ) ; <i32>:939 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:940 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 188, i32 2 ; <<4 x float>*>:940 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %940
load <4 x float>* null ; <<4 x float>>:941 [#uses=1]
shufflevector <4 x float> zeroinitializer, <4 x float> %941, <4 x i32> < i32 0, i32 5, i32 6, i32 7 > ; <<4 x float>>:942 [#uses=1]
br i1 false, label %955, label %953
; <label>:953 ; preds = %952
- getelementptr [4 x <4 x i32>]* null, i32 0, i32 2 ; <<4 x i32>*>:954 [#uses=0]
+ getelementptr [4 x <4 x i32>], [4 x <4 x i32>]* null, i32 0, i32 2 ; <<4 x i32>*>:954 [#uses=0]
br label %955
; <label>:955 ; preds = %953, %952
xStoreDestAddressWithMask.exit461: ; preds = %958, %955
load <4 x float>* %0 ; <<4 x float>>:960 [#uses=0]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:961 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 0 ; <<4 x float>*>:962 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 0 ; <<4 x float>*>:962 [#uses=0]
br i1 false, label %968, label %xST.exit463
xST.exit463: ; preds = %xStoreDestAddressWithMask.exit461
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:963 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:964 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:965 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 1 ; <<4 x float>*>:963 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 2 ; <<4 x float>*>:964 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 3, i32 3 ; <<4 x float>*>:965 [#uses=0]
load <4 x float>* %0 ; <<4 x float>>:966 [#uses=3]
call i32 @llvm.ppc.altivec.vcmpequw.p( i32 0, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32>:967 [#uses=0]
br i1 false, label %972, label %969
unreachable
; <label>:969 ; preds = %xST.exit463
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:970 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:971 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 1 ; <<4 x float>*>:970 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 2 ; <<4 x float>*>:971 [#uses=1]
store <4 x float> %966, <4 x float>* %971
store <4 x float> %966, <4 x float>* null
br label %xST.exit465
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> zeroinitializer, <4 x i32> %975, <4 x i32> zeroinitializer ) ; <<4 x i32>>:976 [#uses=1]
bitcast <4 x i32> %976 to <4 x float> ; <<4 x float>>:977 [#uses=1]
store <4 x float> %977, <4 x float>* null
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:978 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 2, i32 3 ; <<4 x float>*>:978 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:979 [#uses=1]
call <4 x i32> @llvm.ppc.altivec.vsel( <4 x i32> %979, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <<4 x i32>>:980 [#uses=1]
bitcast <4 x i32> %980 to <4 x float> ; <<4 x float>>:981 [#uses=0]
xST.exit465: ; preds = %972, %969
load <4 x float>* %0 ; <<4 x float>>:982 [#uses=3]
icmp eq i32 0, 0 ; <i1>:983 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:984 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 0 ; <<4 x float>*>:984 [#uses=1]
br i1 %983, label %989, label %985
; <label>:985 ; preds = %xST.exit465
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:986 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:987 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:986 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:987 [#uses=1]
store <4 x float> %982, <4 x float>* %987
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:988 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:988 [#uses=0]
br label %xST.exit467
; <label>:989 ; preds = %xST.exit465
bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:990 [#uses=0]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> zeroinitializer ; <<4 x i32>>:991 [#uses=0]
store <4 x float> zeroinitializer, <4 x float>* %984
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:992 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 1 ; <<4 x float>*>:992 [#uses=0]
load <4 x i32>* %.sub7896 ; <<4 x i32>>:993 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:994 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 2 ; <<4 x float>*>:994 [#uses=0]
bitcast <4 x i32> zeroinitializer to <4 x float> ; <<4 x float>>:995 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:996 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 1, i32 3 ; <<4 x float>*>:996 [#uses=0]
bitcast <4 x float> zeroinitializer to <4 x i32> ; <<4 x i32>>:997 [#uses=1]
bitcast <4 x float> %982 to <4 x i32> ; <<4 x i32>>:998 [#uses=1]
shufflevector <4 x i32> zeroinitializer, <4 x i32> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 > ; <<4 x i32>>:999 [#uses=1]
; <label>:1005 ; preds = %xST.exit467
load <4 x float>* null ; <<4 x float>>:1006 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1007 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1007 [#uses=1]
load <4 x float>* %1007 ; <<4 x float>>:1008 [#uses=0]
load <4 x float>* null ; <<4 x float>>:1009 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1010 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1010 [#uses=0]
br label %xST.exit469
; <label>:1011 ; preds = %xST.exit467
br i1 %1017, label %1021, label %1018
; <label>:1018 ; preds = %1015
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1019 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 1 ; <<4 x float>*>:1019 [#uses=0]
shufflevector <4 x float> %1002, <4 x float> zeroinitializer, <4 x i32> < i32 0, i32 1, i32 2, i32 7 > ; <<4 x float>>:1020 [#uses=0]
br label %1021
br i1 %1022, label %1025, label %1023
; <label>:1023 ; preds = %1021
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1024 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1024 [#uses=1]
store <4 x float> zeroinitializer, <4 x float>* %1024
br label %1025
br i1 %1026, label %xST.exit469, label %1027
; <label>:1027 ; preds = %1025
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1028 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1028 [#uses=0]
br label %xST.exit469
xST.exit469: ; preds = %1027, %1025, %1005
; <label>:1032 ; preds = %xST.exit469
load <4 x float>* null ; <<4 x float>>:1033 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:1034 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 2 ; <<4 x float>*>:1034 [#uses=1]
load <4 x float>* %1034 ; <<4 x float>>:1035 [#uses=0]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:1036 [#uses=0]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 3 ; <<4 x float>*>:1036 [#uses=0]
br label %xST.exit472
; <label>:1037 ; preds = %xST.exit469
br i1 %1041, label %1045, label %1042
; <label>:1042 ; preds = %1040
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:1043 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 4, i32 1 ; <<4 x float>*>:1043 [#uses=1]
load <4 x float>* %1043 ; <<4 x float>>:1044 [#uses=0]
br label %1045
%.07617 = phi <4 x float> [ %1133, %1134 ], [ %.17618, %1142 ], [ %.17618, %1141 ] ; <<4 x float>> [#uses=1]
%.07621 = phi <4 x float> [ %1132, %1134 ], [ %.17622, %1142 ], [ %.17622, %1141 ] ; <<4 x float>> [#uses=1]
load <4 x float>* null ; <<4 x float>>:1143 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1144 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1144 [#uses=1]
load <4 x float>* %1144 ; <<4 x float>>:1145 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1146 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1146 [#uses=1]
load <4 x float>* %1146 ; <<4 x float>>:1147 [#uses=1]
shufflevector <4 x float> %1143, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1148 [#uses=1]
shufflevector <4 x float> %1145, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:1149 [#uses=1]
%.07660 = phi <4 x float> [ %1152, %1155 ], [ %.17661, %1163 ], [ %.17661, %1162 ] ; <<4 x float>> [#uses=1]
%.07664 = phi <4 x float> [ %1151, %1155 ], [ %.17665, %1163 ], [ %.17665, %1162 ] ; <<4 x float>> [#uses=1]
load <4 x float>* null ; <<4 x float>>:1164 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1165 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 2 ; <<4 x float>*>:1165 [#uses=1]
load <4 x float>* %1165 ; <<4 x float>>:1166 [#uses=1]
- getelementptr [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
+ getelementptr [193 x [4 x <4 x float>]], [193 x [4 x <4 x float>]]* null, i32 0, i32 0, i32 3 ; <<4 x float>*>:1167 [#uses=1]
load <4 x float>* %1167 ; <<4 x float>>:1168 [#uses=1]
fadd <4 x float> zeroinitializer, zeroinitializer ; <<4 x float>>:1169 [#uses=1]
fadd <4 x float> zeroinitializer, %1164 ; <<4 x float>>:1170 [#uses=1]
bb: ; preds = %bb, %entry
%i.035.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %tmp8 = getelementptr float* %tmp56, i32 %i.035.0 ; <float*> [#uses=2]
+ %tmp8 = getelementptr float, float* %tmp56, i32 %i.035.0 ; <float*> [#uses=2]
%tmp101112 = bitcast float* %tmp8 to i8* ; <i8*> [#uses=1]
%tmp1617 = bitcast float* %tmp8 to i32* ; <i32*> [#uses=1]
%tmp21 = tail call i32 asm "lwbrx $0, $2, $1", "=r,r,bO,*m"( i8* %tmp101112, i32 0, i32* %tmp1617 ) ; <i32> [#uses=0]
define void @test(%struct.XState* %gldst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._GVMConstants* %cnstn, %struct.PPSToken* %pstrm, %struct.GVMFPContext* %vmctx, %struct.GVMTs* %txtrs, %struct.GVMFPStack* %fpstk, %struct.GVMFGAttrib* %start, %struct.GVMFGAttrib* %deriv, i32 %fragx, i32 %fragy) {
bb58.i:
- %tmp3405.i = getelementptr %struct.XTRec* null, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp3405.i = getelementptr %struct.XTRec, %struct.XTRec* null, i32 0, i32 1 ; <float*> [#uses=1]
%tmp34053406.i = bitcast float* %tmp3405.i to i8* ; <i8*> [#uses=1]
%tmp3407.i = call <4 x i32> @llvm.ppc.altivec.lvewx( i8* %tmp34053406.i ) ; <<4 x i32>> [#uses=0]
%tmp4146.i = call i32 @llvm.ppc.altivec.vcmpequw.p( i32 3, <4 x i32> zeroinitializer, <4 x i32> zeroinitializer ) ; <i32> [#uses=1]
cond_next30: ; preds = %cond_true28, %cond_false, %cond_true
%iftmp.0.043.1 = phi %struct._obstack_chunk* [ %iftmp.0.043.0, %cond_true28 ], [ null, %cond_true ], [ %tmp22, %cond_false ] ; <%struct._obstack_chunk*> [#uses=1]
- %tmp41 = getelementptr %struct._obstack_chunk* %iftmp.0.043.1, i32 0, i32 0 ; <i8**> [#uses=1]
+ %tmp41 = getelementptr %struct._obstack_chunk, %struct._obstack_chunk* %iftmp.0.043.1, i32 0, i32 0 ; <i8**> [#uses=1]
store i8* null, i8** %tmp41, align 8
ret i32 undef
}
entry:
%retval = alloca i32, align 4 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = getelementptr %struct.anon* @s, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp = getelementptr %struct.anon, %struct.anon* @s, i32 0, i32 1 ; <float*> [#uses=1]
%tmp1 = load float* %tmp, align 1 ; <float> [#uses=1]
- %tmp2 = getelementptr %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp2 = getelementptr %struct.anon, %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
store float %tmp1, float* %tmp2, align 1
- %tmp3 = getelementptr <{ i8, double }>* @u, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp3 = getelementptr <{ i8, double }>, <{ i8, double }>* @u, i32 0, i32 1 ; <double*> [#uses=1]
%tmp4 = load double* %tmp3, align 1 ; <double> [#uses=1]
- %tmp5 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp5 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
store double %tmp4, double* %tmp5, align 1
br label %return
%retval = alloca i32, align 4 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = call i32 @foo( ) ; <i32> [#uses=0]
- %tmp1 = getelementptr %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp1 = getelementptr %struct.anon, %struct.anon* @t, i32 0, i32 1 ; <float*> [#uses=1]
%tmp2 = load float* %tmp1, align 1 ; <float> [#uses=1]
%tmp23 = fpext float %tmp2 to double ; <double> [#uses=1]
- %tmp4 = getelementptr <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp4 = getelementptr <{ i8, double }>, <{ i8, double }>* @v, i32 0, i32 1 ; <double*> [#uses=1]
%tmp5 = load double* %tmp4, align 1 ; <double> [#uses=1]
- %tmp6 = getelementptr [8 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp6 = getelementptr [8 x i8], [8 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp7 = call i32 (i8*, ...)* @printf( i8* %tmp6, double %tmp23, double %tmp5 ) ; <i32> [#uses=0]
br label %return
to label %bb30.preheader unwind label %unwind
bb30.preheader: ; preds = %entry
- %tmp26 = getelementptr %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
+ %tmp26 = getelementptr %struct.Range, %struct.Range* %effectiveRange, i64 0, i32 1 ; <i64*> [#uses=1]
br label %bb30
unwind: ; preds = %cond_true, %entry
bb103: ; preds = %bb79
ret i32 0
bb130.preheader: ; preds = %bb94
- %tmp134 = getelementptr %struct.cpp_string* %from, i32 0, i32 1 ; <i8**> [#uses=0]
+ %tmp134 = getelementptr %struct.cpp_string, %struct.cpp_string* %from, i32 0, i32 1 ; <i8**> [#uses=0]
ret i32 0
bb729: ; preds = %bb94
call fastcc void @emit_numeric_escape( i32 %pfile, i32 0, %struct._cpp_strbuf* null, i32 %wide ) nounwind
%tmp2627 = ptrtoint i8* %rec to i64 ; <i64> [#uses=2]
%tmp28 = and i64 %tmp2627, -16384 ; <i64> [#uses=2]
%tmp2829 = inttoptr i64 %tmp28 to i8* ; <i8*> [#uses=1]
- %tmp37 = getelementptr i8* %tmp2829, i64 42 ; <i8*> [#uses=1]
+ %tmp37 = getelementptr i8, i8* %tmp2829, i64 42 ; <i8*> [#uses=1]
%tmp40 = load i8* %tmp37, align 1 ; <i8> [#uses=1]
%tmp4041 = zext i8 %tmp40 to i64 ; <i64> [#uses=1]
%tmp42 = shl i64 %tmp4041, 8 ; <i64> [#uses=1]
%tmp114115 = bitcast i8* %buffer to i16* ; <i16*> [#uses=1]
%tmp256 = lshr i64 %bufferSize, 1 ; <i64> [#uses=1]
%tmp256257 = trunc i64 %tmp256 to i32 ; <i32> [#uses=1]
- %tmp258 = getelementptr i16* %tmp114115, i32 %tmp256257 ; <i16*> [#uses=0]
+ %tmp258 = getelementptr i16, i16* %tmp114115, i32 %tmp256257 ; <i16*> [#uses=0]
ret i32 0
}
bb16: ; preds = %entry
bitcast %struct.PerMacroblockBoundaryStrengths* null to i32* ; <i32*>:1 [#uses=3]
- getelementptr i32* %1, i32 1 ; <i32*>:2 [#uses=0]
- getelementptr i32* %1, i32 2 ; <i32*>:3 [#uses=0]
- getelementptr i32* %1, i32 3 ; <i32*>:4 [#uses=0]
+ getelementptr i32, i32* %1, i32 1 ; <i32*>:2 [#uses=0]
+ getelementptr i32, i32* %1, i32 2 ; <i32*>:3 [#uses=0]
+ getelementptr i32, i32* %1, i32 3 ; <i32*>:4 [#uses=0]
bitcast [16 x i8]* null to i32* ; <i32*>:5 [#uses=3]
- getelementptr i32* %5, i32 1 ; <i32*>:6 [#uses=0]
- getelementptr i32* %5, i32 2 ; <i32*>:7 [#uses=0]
- getelementptr i32* %5, i32 3 ; <i32*>:8 [#uses=0]
+ getelementptr i32, i32* %5, i32 1 ; <i32*>:6 [#uses=0]
+ getelementptr i32, i32* %5, i32 2 ; <i32*>:7 [#uses=0]
+ getelementptr i32, i32* %5, i32 3 ; <i32*>:8 [#uses=0]
icmp eq i32 0, 0 ; <i1>:9 [#uses=0]
lshr i32 0, 30 ; <i32>:10 [#uses=0]
and i32 0, 268435455 ; <i32>:11 [#uses=0]
%.not658 = icmp ne i32 0, 0 ; <i1> [#uses=1]
and i32 0, 268369920 ; <i32>:20 [#uses=1]
icmp eq i32 %20, 268369920 ; <i1>:21 [#uses=2]
- getelementptr %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2 ; <[4 x i8]*>:22 [#uses=1]
- getelementptr %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2, i32 0 ; <i8*>:23 [#uses=0]
+ getelementptr %struct.PerMacroblockBoundaryStrengths, %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2 ; <[4 x i8]*>:22 [#uses=1]
+ getelementptr %struct.PerMacroblockBoundaryStrengths, %struct.PerMacroblockBoundaryStrengths* null, i32 0, i32 2, i32 0 ; <i8*>:23 [#uses=0]
and i32 0, -2 ; <i32>:24 [#uses=1]
add i32 %24, -1 ; <i32>:25 [#uses=0]
bitcast [4 x i8]* %22 to i32* ; <i32*>:26 [#uses=3]
- getelementptr i32* %26, i32 1 ; <i32*>:27 [#uses=0]
- getelementptr i32* %26, i32 2 ; <i32*>:28 [#uses=0]
- getelementptr i32* %26, i32 3 ; <i32*>:29 [#uses=0]
+ getelementptr i32, i32* %26, i32 1 ; <i32*>:27 [#uses=0]
+ getelementptr i32, i32* %26, i32 2 ; <i32*>:28 [#uses=0]
+ getelementptr i32, i32* %26, i32 3 ; <i32*>:29 [#uses=0]
br label %bb144
bb144: ; preds = %bb395, %bb16
%boundaryStrengthsV.1771 = phi i8* [ null, %bb16 ], [ %158, %bb395 ] ; <i8*> [#uses=2]
%numEdgesToTest.1770 = phi i32 [ 4, %bb16 ], [ %numEdgesToTest.2, %bb395 ] ; <i32> [#uses=1]
icmp eq i32 %idxEachField11.0773, 0 ; <i1>:30 [#uses=0]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %mbIndexLeft.2772 ; <%struct.BiPartSrcDescriptor**>:31 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %mbIndexLeft.2772 ; <%struct.BiPartSrcDescriptor**>:31 [#uses=1]
load %struct.BiPartSrcDescriptor** %31, align 4 ; <%struct.BiPartSrcDescriptor*>:32 [#uses=0]
%fMacroblockHasNonZeroBS.4 = select i1 %21, i32 1, i32 0 ; <i32> [#uses=1]
%numEdgesToTest.2 = select i1 %21, i32 1, i32 %numEdgesToTest.1770 ; <i32> [#uses=2]
add i32 %52, %42 ; <i32>:53 [#uses=1]
mul i32 %51, 0 ; <i32>:54 [#uses=1]
add i32 %46, %54 ; <i32>:55 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %53 ; <%struct.BiPartSrcDescriptor**>:56 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %53 ; <%struct.BiPartSrcDescriptor**>:56 [#uses=1]
load %struct.BiPartSrcDescriptor** %56, align 4 ; <%struct.BiPartSrcDescriptor*>:57 [#uses=7]
- getelementptr %struct.BiPartSrcDescriptor** null, i32 %55 ; <%struct.BiPartSrcDescriptor**>:58 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor*, %struct.BiPartSrcDescriptor** null, i32 %55 ; <%struct.BiPartSrcDescriptor**>:58 [#uses=1]
load %struct.BiPartSrcDescriptor** %58, align 4 ; <%struct.BiPartSrcDescriptor*>:59 [#uses=5]
icmp slt i32 %159, 0 ; <i1>:60 [#uses=0]
icmp eq %struct.BiPartSrcDescriptor* %57, %59 ; <i1>:61 [#uses=0]
bitcast %struct.BiPartSrcDescriptor* %57 to i16* ; <i16*>:62 [#uses=5]
load i16* %62, align 2 ; <i16>:63 [#uses=2]
- getelementptr i16* %62, i32 1 ; <i16*>:64 [#uses=1]
+ getelementptr i16, i16* %62, i32 1 ; <i16*>:64 [#uses=1]
load i16* %64, align 2 ; <i16>:65 [#uses=2]
- getelementptr i16* %62, i32 2 ; <i16*>:66 [#uses=1]
+ getelementptr i16, i16* %62, i32 2 ; <i16*>:66 [#uses=1]
load i16* %66, align 2 ; <i16>:67 [#uses=2]
- getelementptr i16* %62, i32 3 ; <i16*>:68 [#uses=1]
+ getelementptr i16, i16* %62, i32 3 ; <i16*>:68 [#uses=1]
load i16* %68, align 2 ; <i16>:69 [#uses=2]
- getelementptr i16* %62, i32 6 ; <i16*>:70 [#uses=1]
+ getelementptr i16, i16* %62, i32 6 ; <i16*>:70 [#uses=1]
load i16* %70, align 2 ; <i16>:71 [#uses=2]
bitcast %struct.BiPartSrcDescriptor* %59 to i16* ; <i16*>:72 [#uses=5]
load i16* %72, align 2 ; <i16>:73 [#uses=2]
- getelementptr i16* %72, i32 1 ; <i16*>:74 [#uses=1]
+ getelementptr i16, i16* %72, i32 1 ; <i16*>:74 [#uses=1]
load i16* %74, align 2 ; <i16>:75 [#uses=2]
- getelementptr i16* %72, i32 2 ; <i16*>:76 [#uses=1]
+ getelementptr i16, i16* %72, i32 2 ; <i16*>:76 [#uses=1]
load i16* %76, align 2 ; <i16>:77 [#uses=2]
- getelementptr i16* %72, i32 3 ; <i16*>:78 [#uses=1]
+ getelementptr i16, i16* %72, i32 3 ; <i16*>:78 [#uses=1]
load i16* %78, align 2 ; <i16>:79 [#uses=2]
- getelementptr i16* %72, i32 6 ; <i16*>:80 [#uses=1]
+ getelementptr i16, i16* %72, i32 6 ; <i16*>:80 [#uses=1]
load i16* %80, align 2 ; <i16>:81 [#uses=2]
sub i16 %63, %73 ; <i16>:82 [#uses=3]
sub i16 %65, %75 ; <i16>:83 [#uses=3]
sub i16 0, %86 ; <i16>:95 [#uses=1]
icmp slt i16 %86, 0 ; <i1>:96 [#uses=1]
%.663 = select i1 %96, i16 %95, i16 %86 ; <i16> [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <i8*>:97 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 0 ; <i8*>:97 [#uses=1]
load i8* %97, align 1 ; <i8>:98 [#uses=1]
zext i8 %98 to i32 ; <i32>:99 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <i8*>:100 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 1, i32 1 ; <i8*>:100 [#uses=1]
load i8* %100, align 1 ; <i8>:101 [#uses=1]
zext i8 %101 to i32 ; <i32>:102 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:103 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:103 [#uses=1]
load i8* %103, align 1 ; <i8>:104 [#uses=2]
zext i8 %104 to i32 ; <i32>:105 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:106 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 0 ; <i8*>:106 [#uses=1]
load i8* %106, align 1 ; <i8>:107 [#uses=2]
zext i8 %107 to i32 ; <i32>:108 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:109 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %57, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:109 [#uses=1]
load i8* %109, align 1 ; <i8>:110 [#uses=1]
zext i8 %110 to i32 ; <i32>:111 [#uses=1]
- getelementptr %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:112 [#uses=1]
+ getelementptr %struct.BiPartSrcDescriptor, %struct.BiPartSrcDescriptor* %59, i32 0, i32 0, i32 0, i32 3, i32 1 ; <i8*>:112 [#uses=1]
load i8* %112, align 1 ; <i8>:113 [#uses=1]
zext i8 %113 to i32 ; <i32>:114 [#uses=1]
lshr i32 %99, 4 ; <i32>:115 [#uses=1]
xor i8 %155, 32 ; <i8>:156 [#uses=1]
or i8 %153, %156 ; <i8>:157 [#uses=1]
store i8 %157, i8* %boundaryStrengthsV.3, align 1
- getelementptr i8* %boundaryStrengthsV.3, i32 4 ; <i8*>:158 [#uses=4]
+ getelementptr i8, i8* %boundaryStrengthsV.3, i32 4 ; <i8*>:158 [#uses=4]
shl i32 %bfNZ12.2, 4 ; <i32>:159 [#uses=4]
add i32 %ixEdge.1, 1 ; <i32>:160 [#uses=6]
icmp ult i32 %160, %numEdgesToTest.2 ; <i1>:161 [#uses=1]
bb.nph4945: ; preds = %entry
%2 = bitcast [2 x %struct.vv_t]* null to i64* ; <i64*> [#uses=6]
- %3 = getelementptr [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=6]
+ %3 = getelementptr [2 x i64], [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=6]
%4 = bitcast %struct.vv_t* null to i64* ; <i64*> [#uses=5]
- %5 = getelementptr [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=3]
+ %5 = getelementptr [2 x i64], [2 x i64]* null, i32 0, i32 1 ; <i64*> [#uses=3]
br label %bb2326
bb2217: ; preds = %bb2326
%6 = or i64 0, 0 ; <i64> [#uses=2]
%7 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
%8 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %9 = getelementptr float* null, i32 2 ; <float*> [#uses=1]
+ %9 = getelementptr float, float* null, i32 2 ; <float*> [#uses=1]
%10 = load float* %9, align 4 ; <float> [#uses=1]
- %11 = getelementptr float* null, i32 3 ; <float*> [#uses=1]
+ %11 = getelementptr float, float* null, i32 3 ; <float*> [#uses=1]
%12 = load float* %11, align 4 ; <float> [#uses=1]
%13 = fmul float %10, 6.553500e+04 ; <float> [#uses=1]
%14 = fadd float %13, 5.000000e-01 ; <float> [#uses=1]
%34 = and i64 %33, 281470681743360 ; <i64> [#uses=1]
store i64 %6, i64* %2, align 16
store i64 %31, i64* %3, align 8
- %35 = getelementptr i8* null, i32 0 ; <i8*> [#uses=1]
+ %35 = getelementptr i8, i8* null, i32 0 ; <i8*> [#uses=1]
%36 = bitcast i8* %35 to float* ; <float*> [#uses=4]
%37 = load float* %36, align 4 ; <float> [#uses=1]
- %38 = getelementptr float* %36, i32 1 ; <float*> [#uses=1]
+ %38 = getelementptr float, float* %36, i32 1 ; <float*> [#uses=1]
%39 = load float* %38, align 4 ; <float> [#uses=1]
%40 = fmul float %37, 6.553500e+04 ; <float> [#uses=1]
%41 = fadd float %40, 5.000000e-01 ; <float> [#uses=1]
%f1582.0 = phi float [ 0.000000e+00, %bb2265 ], [ %43, %bb2274 ] ; <float> [#uses=1]
%47 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
%48 = fptosi float %f1582.0 to i32 ; <i32> [#uses=1]
- %49 = getelementptr float* %36, i32 2 ; <float*> [#uses=1]
+ %49 = getelementptr float, float* %36, i32 2 ; <float*> [#uses=1]
%50 = load float* %49, align 4 ; <float> [#uses=1]
- %51 = getelementptr float* %36, i32 3 ; <float*> [#uses=1]
+ %51 = getelementptr float, float* %36, i32 3 ; <float*> [#uses=1]
%52 = load float* %51, align 4 ; <float> [#uses=1]
%53 = fmul float %50, 6.553500e+04 ; <float> [#uses=1]
%54 = fadd float %53, 5.000000e-01 ; <float> [#uses=1]
%68 = or i64 %64, %62 ; <i64> [#uses=1]
%69 = or i64 %68, %66 ; <i64> [#uses=1]
%70 = or i64 %69, %67 ; <i64> [#uses=2]
- %71 = getelementptr i8* null, i32 0 ; <i8*> [#uses=1]
+ %71 = getelementptr i8, i8* null, i32 0 ; <i8*> [#uses=1]
%72 = bitcast i8* %71 to float* ; <float*> [#uses=4]
%73 = load float* %72, align 4 ; <float> [#uses=1]
- %74 = getelementptr float* %72, i32 1 ; <float*> [#uses=1]
+ %74 = getelementptr float, float* %72, i32 1 ; <float*> [#uses=1]
%75 = load float* %74, align 4 ; <float> [#uses=1]
%76 = fmul float %73, 6.553500e+04 ; <float> [#uses=1]
%77 = fadd float %76, 5.000000e-01 ; <float> [#uses=3]
%82 = fcmp olt float %79, 0.000000e+00 ; <i1> [#uses=0]
%83 = fptosi float %f0569.0 to i32 ; <i32> [#uses=1]
%84 = fptosi float 0.000000e+00 to i32 ; <i32> [#uses=1]
- %85 = getelementptr float* %72, i32 2 ; <float*> [#uses=1]
+ %85 = getelementptr float, float* %72, i32 2 ; <float*> [#uses=1]
%86 = load float* %85, align 4 ; <float> [#uses=1]
- %87 = getelementptr float* %72, i32 3 ; <float*> [#uses=1]
+ %87 = getelementptr float, float* %72, i32 3 ; <float*> [#uses=1]
%88 = load float* %87, align 4 ; <float> [#uses=1]
%89 = fmul float %86, 6.553500e+04 ; <float> [#uses=1]
%90 = fadd float %89, 5.000000e-01 ; <float> [#uses=1]
unreachable
bb2318: ; preds = %bb2315
- %126 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 8 ; <%struct.vv_t*> [#uses=1]
+ %126 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 8 ; <%struct.vv_t*> [#uses=1]
%127 = bitcast %struct.vv_t* %126 to i64* ; <i64*> [#uses=1]
%128 = load i64* %127, align 8 ; <i64> [#uses=1]
%129 = trunc i64 %128 to i32 ; <i32> [#uses=4]
unreachable
bb2319: ; preds = %bb2326
- %141 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 2 ; <i8**> [#uses=1]
+ %141 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 2 ; <i8**> [#uses=1]
%142 = load i8** %141, align 4 ; <i8*> [#uses=4]
- %143 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
+ %143 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
%144 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %143 ) nounwind ; <i32> [#uses=1]
%145 = sext i32 %144 to i64 ; <i64> [#uses=2]
- %146 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
+ %146 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
%147 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %146 ) nounwind ; <i32> [#uses=1]
%148 = sext i32 %147 to i64 ; <i64> [#uses=2]
%149 = shl i64 %145, 48 ; <i64> [#uses=0]
%151 = and i64 %150, 281470681743360 ; <i64> [#uses=0]
store i64 %145, i64* %2, align 16
store i64 %148, i64* %3, align 8
- %152 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
+ %152 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
%153 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %152 ) nounwind ; <i32> [#uses=1]
%154 = sext i32 %153 to i64 ; <i64> [#uses=0]
- %155 = getelementptr i8* %142, i32 0 ; <i8*> [#uses=1]
+ %155 = getelementptr i8, i8* %142, i32 0 ; <i8*> [#uses=1]
%156 = call i32 (...)* @_u16_sf32( double 0.000000e+00, double 6.553500e+04, double 5.000000e-01, i8* %155 ) nounwind ; <i32> [#uses=0]
unreachable
bb2326: ; preds = %bb2325, %bb.nph4945
%indvar5021 = phi i32 [ 0, %bb.nph4945 ], [ %indvar.next5145, %bb2325 ] ; <i32> [#uses=6]
%157 = icmp slt i32 %indvar5021, %n ; <i1> [#uses=0]
- %158 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 10 ; <%struct.xx_t**> [#uses=1]
+ %158 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 10 ; <%struct.xx_t**> [#uses=1]
%159 = load %struct.xx_t** %158, align 4 ; <%struct.xx_t*> [#uses=5]
- %160 = getelementptr %struct.CGLSI* %src, i32 %indvar5021, i32 1 ; <i32*> [#uses=1]
+ %160 = getelementptr %struct.CGLSI, %struct.CGLSI* %src, i32 %indvar5021, i32 1 ; <i32*> [#uses=1]
%161 = load i32* %160, align 4 ; <i32> [#uses=1]
%162 = and i32 %161, 255 ; <i32> [#uses=1]
switch i32 %162, label %bb2325 [
ret void
bb21: ; preds = %entry
- %0 = getelementptr i8* %a, i32 0 ; <i8*> [#uses=2]
+ %0 = getelementptr i8, i8* %a, i32 0 ; <i8*> [#uses=2]
br label %bb35
bb29: ; preds = %bb35
bb7.i252: ; preds = %bb7.i252, %bb29
%pj.0.rec.i247 = phi i32 [ %indvar.next488, %bb7.i252 ], [ 0, %bb29 ] ; <i32> [#uses=2]
- %pi.0.i248 = getelementptr i8* %pa.1, i32 %pj.0.rec.i247 ; <i8*> [#uses=0]
+ %pi.0.i248 = getelementptr i8, i8* %pa.1, i32 %pj.0.rec.i247 ; <i8*> [#uses=0]
%indvar.next488 = add i32 %pj.0.rec.i247, 1 ; <i32> [#uses=1]
br i1 false, label %bb34, label %bb7.i252
%pj.0.rec.i156 = phi i32 [ %indvar.next394, %bb7.i161 ], [ 0, %bb50 ] ; <i32> [#uses=2]
%.sum279 = sub i32 %pj.0.rec.i156, %min ; <i32> [#uses=1]
%pb.0.sum542 = add i32 %pb.0.rec, %.sum279 ; <i32> [#uses=1]
- %pj.0.i158 = getelementptr i8* %0, i32 %pb.0.sum542 ; <i8*> [#uses=0]
+ %pj.0.i158 = getelementptr i8, i8* %0, i32 %pb.0.sum542 ; <i8*> [#uses=0]
%indvar.next394 = add i32 %pj.0.rec.i156, 1 ; <i32> [#uses=1]
br label %bb7.i161
}
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %y, i32* %y_addr
%0 = load i32* %y_addr, align 4 ; <i32> [#uses=1]
- %1 = getelementptr inbounds [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds [0 x i32], [0 x i32]* @x, i32 0, i32 %0 ; <i32*> [#uses=1]
call void asm sideeffect "isync\0A\09eieio\0A\09stw $1, $0", "=*o,r,~{memory}"(i32* %1, i32 0) nounwind
br label %return
br label %bb49.3
bb48.4: ; preds = %bb49.3
- %0 = getelementptr inbounds [5 x i64*]* undef, i32 0, i32 %c_ix.0.3 ; <i64**> [#uses=0]
+ %0 = getelementptr inbounds [5 x i64*], [5 x i64*]* undef, i32 0, i32 %c_ix.0.3 ; <i64**> [#uses=0]
br label %bb51
}
%sub5.us = sub i64 31999, %indvars.iv20
%sext = shl i64 %sub5.us, 32
%idxprom.us = ashr exact i64 %sext, 32
- %arrayidx.us = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us
+ %arrayidx.us = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us
%2 = load float* %arrayidx.us, align 4
- %arrayidx7.us = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv
+ %arrayidx7.us = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
%3 = load float* %arrayidx7.us, align 4
%add8.us = fadd float %3, %2
store float %add8.us, float* %arrayidx7.us, align 4
%sub5.us.1 = sub i64 31999, %indvars.iv20.1
%sext23 = shl i64 %sub5.us.1, 32
%idxprom.us.1 = ashr exact i64 %sext23, 32
- %arrayidx.us.1 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.1
+ %arrayidx.us.1 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.1
%5 = load float* %arrayidx.us.1, align 4
- %arrayidx7.us.1 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.1
+ %arrayidx7.us.1 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.1
%6 = load float* %arrayidx7.us.1, align 4
%add8.us.1 = fadd float %6, %5
store float %add8.us.1, float* %arrayidx7.us.1, align 4
%sub5.us.2 = sub i64 31999, %indvars.iv20.2
%sext24 = shl i64 %sub5.us.2, 32
%idxprom.us.2 = ashr exact i64 %sext24, 32
- %arrayidx.us.2 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.2
+ %arrayidx.us.2 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.2
%8 = load float* %arrayidx.us.2, align 4
- %arrayidx7.us.2 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.2
+ %arrayidx7.us.2 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.2
%9 = load float* %arrayidx7.us.2, align 4
%add8.us.2 = fadd float %9, %8
store float %add8.us.2, float* %arrayidx7.us.2, align 4
%sub5.us.3 = sub i64 31999, %indvars.iv20.3
%sext25 = shl i64 %sub5.us.3, 32
%idxprom.us.3 = ashr exact i64 %sext25, 32
- %arrayidx.us.3 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.3
+ %arrayidx.us.3 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.3
%11 = load float* %arrayidx.us.3, align 4
- %arrayidx7.us.3 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.3
+ %arrayidx7.us.3 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.3
%12 = load float* %arrayidx7.us.3, align 4
%add8.us.3 = fadd float %12, %11
store float %add8.us.3, float* %arrayidx7.us.3, align 4
%sub5.us.4 = sub i64 31999, %indvars.iv20.4
%sext26 = shl i64 %sub5.us.4, 32
%idxprom.us.4 = ashr exact i64 %sext26, 32
- %arrayidx.us.4 = getelementptr inbounds [32000 x float]* @b, i64 0, i64 %idxprom.us.4
+ %arrayidx.us.4 = getelementptr inbounds [32000 x float], [32000 x float]* @b, i64 0, i64 %idxprom.us.4
%14 = load float* %arrayidx.us.4, align 4
- %arrayidx7.us.4 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.4
+ %arrayidx7.us.4 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.4
%15 = load float* %arrayidx7.us.4, align 4
%add8.us.4 = fadd float %15, %14
store float %add8.us.4, float* %arrayidx7.us.4, align 4
%max.235 = phi float [ %max.139, %for.cond5.preheader ], [ %max.3.15, %for.body7 ]
%xindex.234 = phi i32 [ %xindex.138, %for.cond5.preheader ], [ %xindex.3.15, %for.body7 ]
%yindex.233 = phi i32 [ %yindex.137, %for.cond5.preheader ], [ %yindex.3.15, %for.body7 ]
- %arrayidx9 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
+ %arrayidx9 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv
%1 = load float* %arrayidx9, align 16
%cmp10 = fcmp ogt float %1, %max.235
%2 = trunc i64 %indvars.iv to i32
%xindex.3 = select i1 %cmp10, i32 %3, i32 %xindex.234
%max.3 = select i1 %cmp10, float %1, float %max.235
%indvars.iv.next45 = or i64 %indvars.iv, 1
- %arrayidx9.1 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
+ %arrayidx9.1 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next45
%4 = load float* %arrayidx9.1, align 4
%cmp10.1 = fcmp ogt float %4, %max.3
%5 = trunc i64 %indvars.iv.next45 to i32
%xindex.3.1 = select i1 %cmp10.1, i32 %3, i32 %xindex.3
%max.3.1 = select i1 %cmp10.1, float %4, float %max.3
%indvars.iv.next.146 = or i64 %indvars.iv, 2
- %arrayidx9.2 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
+ %arrayidx9.2 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.146
%6 = load float* %arrayidx9.2, align 8
%cmp10.2 = fcmp ogt float %6, %max.3.1
%7 = trunc i64 %indvars.iv.next.146 to i32
%xindex.3.2 = select i1 %cmp10.2, i32 %3, i32 %xindex.3.1
%max.3.2 = select i1 %cmp10.2, float %6, float %max.3.1
%indvars.iv.next.247 = or i64 %indvars.iv, 3
- %arrayidx9.3 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
+ %arrayidx9.3 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.247
%8 = load float* %arrayidx9.3, align 4
%cmp10.3 = fcmp ogt float %8, %max.3.2
%9 = trunc i64 %indvars.iv.next.247 to i32
%xindex.3.3 = select i1 %cmp10.3, i32 %3, i32 %xindex.3.2
%max.3.3 = select i1 %cmp10.3, float %8, float %max.3.2
%indvars.iv.next.348 = or i64 %indvars.iv, 4
- %arrayidx9.4 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
+ %arrayidx9.4 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.348
%10 = load float* %arrayidx9.4, align 16
%cmp10.4 = fcmp ogt float %10, %max.3.3
%11 = trunc i64 %indvars.iv.next.348 to i32
%xindex.3.4 = select i1 %cmp10.4, i32 %3, i32 %xindex.3.3
%max.3.4 = select i1 %cmp10.4, float %10, float %max.3.3
%indvars.iv.next.449 = or i64 %indvars.iv, 5
- %arrayidx9.5 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
+ %arrayidx9.5 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.449
%12 = load float* %arrayidx9.5, align 4
%cmp10.5 = fcmp ogt float %12, %max.3.4
%13 = trunc i64 %indvars.iv.next.449 to i32
%xindex.3.5 = select i1 %cmp10.5, i32 %3, i32 %xindex.3.4
%max.3.5 = select i1 %cmp10.5, float %12, float %max.3.4
%indvars.iv.next.550 = or i64 %indvars.iv, 6
- %arrayidx9.6 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
+ %arrayidx9.6 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.550
%14 = load float* %arrayidx9.6, align 8
%cmp10.6 = fcmp ogt float %14, %max.3.5
%15 = trunc i64 %indvars.iv.next.550 to i32
%xindex.3.6 = select i1 %cmp10.6, i32 %3, i32 %xindex.3.5
%max.3.6 = select i1 %cmp10.6, float %14, float %max.3.5
%indvars.iv.next.651 = or i64 %indvars.iv, 7
- %arrayidx9.7 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
+ %arrayidx9.7 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.651
%16 = load float* %arrayidx9.7, align 4
%cmp10.7 = fcmp ogt float %16, %max.3.6
%17 = trunc i64 %indvars.iv.next.651 to i32
%xindex.3.7 = select i1 %cmp10.7, i32 %3, i32 %xindex.3.6
%max.3.7 = select i1 %cmp10.7, float %16, float %max.3.6
%indvars.iv.next.752 = or i64 %indvars.iv, 8
- %arrayidx9.8 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
+ %arrayidx9.8 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.752
%18 = load float* %arrayidx9.8, align 16
%cmp10.8 = fcmp ogt float %18, %max.3.7
%19 = trunc i64 %indvars.iv.next.752 to i32
%xindex.3.8 = select i1 %cmp10.8, i32 %3, i32 %xindex.3.7
%max.3.8 = select i1 %cmp10.8, float %18, float %max.3.7
%indvars.iv.next.853 = or i64 %indvars.iv, 9
- %arrayidx9.9 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
+ %arrayidx9.9 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.853
%20 = load float* %arrayidx9.9, align 4
%cmp10.9 = fcmp ogt float %20, %max.3.8
%21 = trunc i64 %indvars.iv.next.853 to i32
%xindex.3.9 = select i1 %cmp10.9, i32 %3, i32 %xindex.3.8
%max.3.9 = select i1 %cmp10.9, float %20, float %max.3.8
%indvars.iv.next.954 = or i64 %indvars.iv, 10
- %arrayidx9.10 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
+ %arrayidx9.10 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.954
%22 = load float* %arrayidx9.10, align 8
%cmp10.10 = fcmp ogt float %22, %max.3.9
%23 = trunc i64 %indvars.iv.next.954 to i32
%xindex.3.10 = select i1 %cmp10.10, i32 %3, i32 %xindex.3.9
%max.3.10 = select i1 %cmp10.10, float %22, float %max.3.9
%indvars.iv.next.1055 = or i64 %indvars.iv, 11
- %arrayidx9.11 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
+ %arrayidx9.11 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1055
%24 = load float* %arrayidx9.11, align 4
%cmp10.11 = fcmp ogt float %24, %max.3.10
%25 = trunc i64 %indvars.iv.next.1055 to i32
%xindex.3.11 = select i1 %cmp10.11, i32 %3, i32 %xindex.3.10
%max.3.11 = select i1 %cmp10.11, float %24, float %max.3.10
%indvars.iv.next.1156 = or i64 %indvars.iv, 12
- %arrayidx9.12 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
+ %arrayidx9.12 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1156
%26 = load float* %arrayidx9.12, align 16
%cmp10.12 = fcmp ogt float %26, %max.3.11
%27 = trunc i64 %indvars.iv.next.1156 to i32
%xindex.3.12 = select i1 %cmp10.12, i32 %3, i32 %xindex.3.11
%max.3.12 = select i1 %cmp10.12, float %26, float %max.3.11
%indvars.iv.next.1257 = or i64 %indvars.iv, 13
- %arrayidx9.13 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
+ %arrayidx9.13 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1257
%28 = load float* %arrayidx9.13, align 4
%cmp10.13 = fcmp ogt float %28, %max.3.12
%29 = trunc i64 %indvars.iv.next.1257 to i32
%xindex.3.13 = select i1 %cmp10.13, i32 %3, i32 %xindex.3.12
%max.3.13 = select i1 %cmp10.13, float %28, float %max.3.12
%indvars.iv.next.1358 = or i64 %indvars.iv, 14
- %arrayidx9.14 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
+ %arrayidx9.14 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1358
%30 = load float* %arrayidx9.14, align 8
%cmp10.14 = fcmp ogt float %30, %max.3.13
%31 = trunc i64 %indvars.iv.next.1358 to i32
%xindex.3.14 = select i1 %cmp10.14, i32 %3, i32 %xindex.3.13
%max.3.14 = select i1 %cmp10.14, float %30, float %max.3.13
%indvars.iv.next.1459 = or i64 %indvars.iv, 15
- %arrayidx9.15 = getelementptr inbounds [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
+ %arrayidx9.15 = getelementptr inbounds [256 x [256 x float]], [256 x [256 x float]]* @aa, i64 0, i64 %indvars.iv42, i64 %indvars.iv.next.1459
%32 = load float* %arrayidx9.15, align 4
%cmp10.15 = fcmp ogt float %32, %max.3.14
%33 = trunc i64 %indvars.iv.next.1459 to i32
define i8* @test(i8* %base, i8 %val) {
entry:
- %arrayidx = getelementptr inbounds i8* %base, i32 -1
+ %arrayidx = getelementptr inbounds i8, i8* %base, i32 -1
store i8 %val, i8* %arrayidx, align 1
- %arrayidx2 = getelementptr inbounds i8* %base, i32 1
+ %arrayidx2 = getelementptr inbounds i8, i8* %base, i32 1
store i8 %val, i8* %arrayidx2, align 1
ret i8* %arrayidx
}
define i64* @test64(i64* %base, i64 %val) {
entry:
- %arrayidx = getelementptr inbounds i64* %base, i32 -1
+ %arrayidx = getelementptr inbounds i64, i64* %base, i32 -1
store i64 %val, i64* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds i64* %base, i32 1
+ %arrayidx2 = getelementptr inbounds i64, i64* %base, i32 1
store i64 %val, i64* %arrayidx2, align 8
ret i64* %arrayidx
}
; Function Attrs: nounwind
define fastcc void @func_7() #0 {
entry:
- %arrayidx638 = getelementptr inbounds [3 x [1 x i32]]* undef, i64 0, i64 1, i64 0
+ %arrayidx638 = getelementptr inbounds [3 x [1 x i32]], [3 x [1 x i32]]* undef, i64 0, i64 1, i64 0
br i1 undef, label %for.cond940, label %if.end1018
for.cond940: ; preds = %for.cond940, %if.else876
define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
entry:
- %a.realp = getelementptr inbounds %0* %a, i32 0, i32 0
+ %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
%a.real = load double* %a.realp
- %a.imagp = getelementptr inbounds %0* %a, i32 0, i32 1
+ %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
%a.imag = load double* %a.imagp
- %b.realp = getelementptr inbounds %0* %b, i32 0, i32 0
+ %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
%b.real = load double* %b.realp
- %b.imagp = getelementptr inbounds %0* %b, i32 0, i32 1
+ %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
%b.imag = load double* %b.imagp
%mul.rl = fmul double %a.real, %b.real
%mul.rr = fmul double %a.imag, %b.imag
%mul.il = fmul double %a.imag, %b.real
%mul.ir = fmul double %a.real, %b.imag
%mul.i = fadd double %mul.il, %mul.ir
- %c.realp = getelementptr inbounds %0* %c, i32 0, i32 0
+ %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
%c.real = load double* %c.realp
- %c.imagp = getelementptr inbounds %0* %c, i32 0, i32 1
+ %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
%c.imag = load double* %c.imagp
%add.r = fadd double %mul.r, %c.real
%add.i = fadd double %mul.i, %c.imag
- %real = getelementptr inbounds %0* %agg.result, i32 0, i32 0
- %imag = getelementptr inbounds %0* %agg.result, i32 0, i32 1
+ %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
+ %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
store double %add.r, double* %real
store double %add.i, double* %imag
ret void
define i32* @test1() {
%X = alloca { i32, i32 }
- %Y = getelementptr {i32,i32}* %X, i32 0, i32 1
+ %Y = getelementptr {i32,i32}, {i32,i32}* %X, i32 0, i32 1
ret i32* %Y
; CHECK-LABEL: @test1
define i32* @test2() {
%X = alloca { i32, i32, i32, i32 }
- %Y = getelementptr {i32,i32,i32,i32}* %X, i32 0, i32 3
+ %Y = getelementptr {i32,i32,i32,i32}, {i32,i32,i32,i32}* %X, i32 0, i32 3
ret i32* %Y
; CHECK-LABEL: @test2
for.body.i: ; preds = %for.body.i.preheader, %for.body.i
%accumulator.09.i = phi double [ %add.i, %for.body.i ], [ 0.000000e+00, %entry ]
%i.08.i = phi i64 [ %inc.i, %for.body.i ], [ 0, %entry ]
- %arrayidx.i = getelementptr inbounds [2048 x float]* %x, i64 0, i64 %i.08.i
+ %arrayidx.i = getelementptr inbounds [2048 x float], [2048 x float]* %x, i64 0, i64 %i.08.i
%v14 = load float* %arrayidx.i, align 4
%conv.i = fpext float %v14 to double
- %arrayidx1.i = getelementptr inbounds [2048 x float]* %y, i64 0, i64 %i.08.i
+ %arrayidx1.i = getelementptr inbounds [2048 x float], [2048 x float]* %y, i64 0, i64 %i.08.i
%v15 = load float* %arrayidx1.i, align 4
%conv2.i = fpext float %v15 to double
%mul.i = fmul double %conv.i, %conv2.i
define i32 @test1([4 x i32]* %P, i32 %i) {
%tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.4 = getelementptr [4 x i32]* %P, i32 %tmp.2, i32 1 ; <i32*> [#uses=1]
+ %tmp.4 = getelementptr [4 x i32], [4 x i32]* %P, i32 %tmp.2, i32 1 ; <i32*> [#uses=1]
%tmp.5 = load i32* %tmp.4 ; <i32> [#uses=1]
ret i32 %tmp.5
}
define i32 @test2(%struct.X* %P, i32 %i) {
%tmp.2 = add i32 %i, 2 ; <i32> [#uses=1]
- %tmp.5 = getelementptr %struct.X* %P, i32 %tmp.2, i32 0, i32 1 ; <i8*> [#uses=1]
+ %tmp.5 = getelementptr %struct.X, %struct.X* %P, i32 %tmp.2, i32 0, i32 1 ; <i8*> [#uses=1]
%tmp.6 = load i8* %tmp.5 ; <i8> [#uses=1]
%tmp.7 = sext i8 %tmp.6 to i32 ; <i32> [#uses=1]
ret i32 %tmp.7
define i8* @func2({ i64, i8* } %array1, %tarray* byval %array2) {
entry:
%array1_ptr = extractvalue {i64, i8* } %array1, 1
- %tmp = getelementptr inbounds %tarray* %array2, i32 0, i32 1
+ %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
%array2_ptr = load i8** %tmp
%cond = icmp eq i8* %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
define i8* @func3({ i64, i8* }* byval %array1, %tarray* byval %array2) {
entry:
- %tmp1 = getelementptr inbounds { i64, i8* }* %array1, i32 0, i32 1
+ %tmp1 = getelementptr inbounds { i64, i8* }, { i64, i8* }* %array1, i32 0, i32 1
%array1_ptr = load i8** %tmp1
- %tmp2 = getelementptr inbounds %tarray* %array2, i32 0, i32 1
+ %tmp2 = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
%array2_ptr = load i8** %tmp2
%cond = icmp eq i8* %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
{ i64, i8* } %array1, %tarray* byval %array2) {
entry:
%array1_ptr = extractvalue {i64, i8* } %array1, 1
- %tmp = getelementptr inbounds %tarray* %array2, i32 0, i32 1
+ %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1
%array2_ptr = load i8** %tmp
%cond = icmp eq i8* %array1_ptr, %array2_ptr
br i1 %cond, label %equal, label %unequal
; CHECK: sync 0
; CHECK: lbzx
; CHECK: sync 1
- %ptr = getelementptr inbounds [100000 x i8]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
%val = load atomic i8* %ptr seq_cst, align 1
ret i8 %val
}
; CHECK-LABEL: load_x_i16_acquire
; CHECK: lhzx
; CHECK: sync 1
- %ptr = getelementptr inbounds [100000 x i16]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
%val = load atomic i16* %ptr acquire, align 2
ret i16 %val
}
; CHECK-LABEL: load_x_i32_monotonic
; CHECK: lwzx
; CHECK-NOT: sync
- %ptr = getelementptr inbounds [100000 x i32]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
%val = load atomic i32* %ptr monotonic, align 4
ret i32 %val
}
; PPC64-NOT: __sync_
; PPC64: ldx
; CHECK-NOT: sync
- %ptr = getelementptr inbounds [100000 x i64]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
%val = load atomic i64* %ptr unordered, align 8
ret i64 %val
}
; CHECK-LABEL: store_x_i8_seq_cst
; CHECK: sync 0
; CHECK: stbx
- %ptr = getelementptr inbounds [100000 x i8]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
store atomic i8 42, i8* %ptr seq_cst, align 1
ret void
}
; CHECK-LABEL: store_x_i16_release
; CHECK: sync 1
; CHECK: sthx
- %ptr = getelementptr inbounds [100000 x i16]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
store atomic i16 42, i16* %ptr release, align 2
ret void
}
; CHECK-LABEL: store_x_i32_monotonic
; CHECK-NOT: sync
; CHECK: stwx
- %ptr = getelementptr inbounds [100000 x i32]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
store atomic i32 42, i32* %ptr monotonic, align 4
ret void
}
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: stdx
- %ptr = getelementptr inbounds [100000 x i64]* %mem, i64 0, i64 90000
+ %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
store atomic i64 42, i64* %ptr unordered, align 8
ret void
}
for.body: ; preds = %for.body.for.body_crit_edge, %for.body.lr.ph
%0 = phi %struct.lua_TValue.17.692* [ undef, %for.body.lr.ph ], [ %.pre, %for.body.for.body_crit_edge ]
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body.for.body_crit_edge ]
- %tt = getelementptr inbounds %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
+ %tt = getelementptr inbounds %struct.lua_TValue.17.692, %struct.lua_TValue.17.692* %0, i64 %indvars.iv, i32 1
%1 = load i32* %tt, align 4
store i32 %1, i32* undef, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
define void @STWBRX(i32 %i, i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.1 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
%tmp13 = tail call i32 @llvm.bswap.i32( i32 %i ) ; <i32> [#uses=1]
store i32 %tmp13, i32* %tmp1.upgrd.1
}
define i32 @LWBRX(i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.2 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
%tmp = load i32* %tmp1.upgrd.2 ; <i32> [#uses=1]
%tmp14 = tail call i32 @llvm.bswap.i32( i32 %tmp ) ; <i32> [#uses=1]
}
define void @STHBRX(i16 %s, i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.3 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1]
%tmp5 = call i16 @llvm.bswap.i16( i16 %s ) ; <i16> [#uses=1]
store i16 %tmp5, i16* %tmp1.upgrd.3
}
define i16 @LHBRX(i8* %ptr, i32 %off) {
- %tmp1 = getelementptr i8* %ptr, i32 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i32 %off ; <i8*> [#uses=1]
%tmp1.upgrd.4 = bitcast i8* %tmp1 to i16* ; <i16*> [#uses=1]
%tmp = load i16* %tmp1.upgrd.4 ; <i16> [#uses=1]
%tmp6 = call i16 @llvm.bswap.i16( i16 %tmp ) ; <i16> [#uses=1]
}
define void @STDBRX(i64 %i, i8* %ptr, i64 %off) {
- %tmp1 = getelementptr i8* %ptr, i64 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i64 %off ; <i8*> [#uses=1]
%tmp1.upgrd.1 = bitcast i8* %tmp1 to i64* ; <i64*> [#uses=1]
%tmp13 = tail call i64 @llvm.bswap.i64( i64 %i ) ; <i64> [#uses=1]
store i64 %tmp13, i64* %tmp1.upgrd.1
}
define i64 @LDBRX(i8* %ptr, i64 %off) {
- %tmp1 = getelementptr i8* %ptr, i64 %off ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %ptr, i64 %off ; <i8*> [#uses=1]
%tmp1.upgrd.2 = bitcast i8* %tmp1 to i64* ; <i64*> [#uses=1]
%tmp = load i64* %tmp1.upgrd.2 ; <i64> [#uses=1]
%tmp14 = tail call i64 @llvm.bswap.i64( i64 %tmp ) ; <i64> [#uses=1]
; Function Attrs: nounwind ssp
define void @foo(%struct.sm* byval %s) #0 {
entry:
- %a = getelementptr inbounds %struct.sm* %s, i32 0, i32 0
+ %a = getelementptr inbounds %struct.sm, %struct.sm* %s, i32 0, i32 0
%0 = load i8* %a, align 1
%conv2 = zext i8 %0 to i32
%add = add nuw nsw i32 %conv2, 3
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%induction45 = or i64 %index, 1
- %0 = getelementptr inbounds i32* %a, i64 %index
- %1 = getelementptr inbounds i32* %a, i64 %induction45
+ %0 = getelementptr inbounds i32, i32* %a, i64 %index
+ %1 = getelementptr inbounds i32, i32* %a, i64 %induction45
%2 = load i32* %0, align 4
%3 = load i32* %1, align 4
%4 = add nsw i32 %2, 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, 4
%mul = mul nsw i32 %add, 3
entry:
%retval = alloca { ppc_fp128, ppc_fp128 }, align 16
%x = alloca { ppc_fp128, ppc_fp128 }, align 16
- %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
- %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
+ %real = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
+ %imag = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
store ppc_fp128 0xM400C0000000000000000000000000000, ppc_fp128* %real
store ppc_fp128 0xMC00547AE147AE1483CA47AE147AE147A, ppc_fp128* %imag
- %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
+ %x.realp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 0
%x.real = load ppc_fp128* %x.realp
- %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
+ %x.imagp = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %x, i32 0, i32 1
%x.imag = load ppc_fp128* %x.imagp
- %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0
- %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1
+ %real1 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 0
+ %imag2 = getelementptr inbounds { ppc_fp128, ppc_fp128 }, { ppc_fp128, ppc_fp128 }* %retval, i32 0, i32 1
store ppc_fp128 %x.real, ppc_fp128* %real1
store ppc_fp128 %x.imag, ppc_fp128* %imag2
%0 = load { ppc_fp128, ppc_fp128 }* %retval
entry:
%retval = alloca { float, float }, align 4
%x = alloca { float, float }, align 4
- %real = getelementptr inbounds { float, float }* %x, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }* %x, i32 0, i32 1
+ %real = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1
store float 3.500000e+00, float* %real
store float 0xC00547AE20000000, float* %imag
- %x.realp = getelementptr inbounds { float, float }* %x, i32 0, i32 0
+ %x.realp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 0
%x.real = load float* %x.realp
- %x.imagp = getelementptr inbounds { float, float }* %x, i32 0, i32 1
+ %x.imagp = getelementptr inbounds { float, float }, { float, float }* %x, i32 0, i32 1
%x.imag = load float* %x.imagp
- %real1 = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
- %imag2 = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ %real1 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+ %imag2 = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float %x.real, float* %real1
store float %x.imag, float* %imag2
%0 = load { float, float }* %retval
%conv300 = zext i16 %7 to i32
%sub301 = sub nsw i32 %cond.i5.i1516, %conv300
%idxprom302 = sext i32 %sub301 to i64
- %arrayidx303 = getelementptr inbounds i32* %cond, i64 %idxprom302
+ %arrayidx303 = getelementptr inbounds i32, i32* %cond, i64 %idxprom302
%8 = load i32* %arrayidx303, align 4
%add304 = add nsw i32 %8, %LineSadBlk0.01588
%9 = load i32* undef, align 4
%cond.i5.i1508 = select i1 %cmp.i4.i1507, i32 %cond.i.i1506, i32 %1
%sub329 = sub nsw i32 %cond.i5.i1508, 0
%idxprom330 = sext i32 %sub329 to i64
- %arrayidx331 = getelementptr inbounds i32* %cond, i64 %idxprom330
+ %arrayidx331 = getelementptr inbounds i32, i32* %cond, i64 %idxprom330
%11 = load i32* %arrayidx331, align 4
%add332 = add nsw i32 %add318, %11
%cmp.i.i1501 = icmp sgt i32 undef, 0
%cond.i.i1502 = select i1 %cmp.i.i1501, i32 undef, i32 0
%cmp.i4.i1503 = icmp slt i32 %cond.i.i1502, %1
%cond.i5.i1504 = select i1 %cmp.i4.i1503, i32 %cond.i.i1502, i32 %1
- %incdec.ptr341 = getelementptr inbounds i16* %srcptr.41591, i64 4
+ %incdec.ptr341 = getelementptr inbounds i16, i16* %srcptr.41591, i64 4
%12 = load i16* null, align 2
%conv342 = zext i16 %12 to i32
%sub343 = sub nsw i32 %cond.i5.i1504, %conv342
%idxprom344 = sext i32 %sub343 to i64
- %arrayidx345 = getelementptr inbounds i32* %cond, i64 %idxprom344
+ %arrayidx345 = getelementptr inbounds i32, i32* %cond, i64 %idxprom344
%13 = load i32* %arrayidx345, align 4
%add346 = add nsw i32 %add332, %13
- %incdec.ptr348 = getelementptr inbounds i16* %refptr.11590, i64 5
+ %incdec.ptr348 = getelementptr inbounds i16, i16* %refptr.11590, i64 5
%14 = load i16* null, align 2
%conv349 = zext i16 %14 to i32
%mul350 = mul nsw i32 %conv349, %2
%cond.i.i1498 = select i1 %cmp.i.i1497, i32 %add353, i32 0
%cmp.i4.i1499 = icmp slt i32 %cond.i.i1498, %1
%cond.i5.i1500 = select i1 %cmp.i4.i1499, i32 %cond.i.i1498, i32 %1
- %incdec.ptr355 = getelementptr inbounds i16* %srcptr.41591, i64 5
+ %incdec.ptr355 = getelementptr inbounds i16, i16* %srcptr.41591, i64 5
%15 = load i16* %incdec.ptr341, align 2
%conv356 = zext i16 %15 to i32
%sub357 = sub nsw i32 %cond.i5.i1500, %conv356
%idxprom358 = sext i32 %sub357 to i64
- %arrayidx359 = getelementptr inbounds i32* %cond, i64 %idxprom358
+ %arrayidx359 = getelementptr inbounds i32, i32* %cond, i64 %idxprom358
%16 = load i32* %arrayidx359, align 4
%add360 = add nsw i32 %16, %LineSadBlk1.01587
- %incdec.ptr362 = getelementptr inbounds i16* %refptr.11590, i64 6
+ %incdec.ptr362 = getelementptr inbounds i16, i16* %refptr.11590, i64 6
%17 = load i16* %incdec.ptr348, align 2
%conv363 = zext i16 %17 to i32
%mul364 = mul nsw i32 %conv363, %2
%cond.i.i1494 = select i1 %cmp.i.i1493, i32 %add367, i32 0
%cmp.i4.i1495 = icmp slt i32 %cond.i.i1494, %1
%cond.i5.i1496 = select i1 %cmp.i4.i1495, i32 %cond.i.i1494, i32 %1
- %incdec.ptr369 = getelementptr inbounds i16* %srcptr.41591, i64 6
+ %incdec.ptr369 = getelementptr inbounds i16, i16* %srcptr.41591, i64 6
%18 = load i16* %incdec.ptr355, align 2
%conv370 = zext i16 %18 to i32
%sub371 = sub nsw i32 %cond.i5.i1496, %conv370
%idxprom372 = sext i32 %sub371 to i64
- %arrayidx373 = getelementptr inbounds i32* %cond, i64 %idxprom372
+ %arrayidx373 = getelementptr inbounds i32, i32* %cond, i64 %idxprom372
%19 = load i32* %arrayidx373, align 4
%add374 = add nsw i32 %add360, %19
- %incdec.ptr376 = getelementptr inbounds i16* %refptr.11590, i64 7
+ %incdec.ptr376 = getelementptr inbounds i16, i16* %refptr.11590, i64 7
%20 = load i16* %incdec.ptr362, align 2
%conv377 = zext i16 %20 to i32
%mul378 = mul nsw i32 %conv377, %2
%cond.i.i1490 = select i1 %cmp.i.i1489, i32 %add381, i32 0
%cmp.i4.i1491 = icmp slt i32 %cond.i.i1490, %1
%cond.i5.i1492 = select i1 %cmp.i4.i1491, i32 %cond.i.i1490, i32 %1
- %incdec.ptr383 = getelementptr inbounds i16* %srcptr.41591, i64 7
+ %incdec.ptr383 = getelementptr inbounds i16, i16* %srcptr.41591, i64 7
%21 = load i16* %incdec.ptr369, align 2
%conv384 = zext i16 %21 to i32
%sub385 = sub nsw i32 %cond.i5.i1492, %conv384
%idxprom386 = sext i32 %sub385 to i64
- %arrayidx387 = getelementptr inbounds i32* %cond, i64 %idxprom386
+ %arrayidx387 = getelementptr inbounds i32, i32* %cond, i64 %idxprom386
%22 = load i32* %arrayidx387, align 4
%add388 = add nsw i32 %add374, %22
%23 = load i16* %incdec.ptr376, align 2
%cond.i.i1486 = select i1 %cmp.i.i1485, i32 %add395, i32 0
%cmp.i4.i1487 = icmp slt i32 %cond.i.i1486, %1
%cond.i5.i1488 = select i1 %cmp.i4.i1487, i32 %cond.i.i1486, i32 %1
- %incdec.ptr397 = getelementptr inbounds i16* %srcptr.41591, i64 8
+ %incdec.ptr397 = getelementptr inbounds i16, i16* %srcptr.41591, i64 8
%24 = load i16* %incdec.ptr383, align 2
%conv398 = zext i16 %24 to i32
%sub399 = sub nsw i32 %cond.i5.i1488, %conv398
%idxprom400 = sext i32 %sub399 to i64
- %arrayidx401 = getelementptr inbounds i32* %cond, i64 %idxprom400
+ %arrayidx401 = getelementptr inbounds i32, i32* %cond, i64 %idxprom400
%25 = load i32* %arrayidx401, align 4
%add402 = add nsw i32 %add388, %25
- %incdec.ptr404 = getelementptr inbounds i16* %refptr.11590, i64 9
+ %incdec.ptr404 = getelementptr inbounds i16, i16* %refptr.11590, i64 9
%cmp.i4.i1483 = icmp slt i32 undef, %1
%cond.i5.i1484 = select i1 %cmp.i4.i1483, i32 undef, i32 %1
%26 = load i16* %incdec.ptr397, align 2
%conv412 = zext i16 %26 to i32
%sub413 = sub nsw i32 %cond.i5.i1484, %conv412
%idxprom414 = sext i32 %sub413 to i64
- %arrayidx415 = getelementptr inbounds i32* %cond, i64 %idxprom414
+ %arrayidx415 = getelementptr inbounds i32, i32* %cond, i64 %idxprom414
%27 = load i32* %arrayidx415, align 4
%add416 = add nsw i32 %27, %LineSadBlk2.01585
- %incdec.ptr418 = getelementptr inbounds i16* %refptr.11590, i64 10
+ %incdec.ptr418 = getelementptr inbounds i16, i16* %refptr.11590, i64 10
%28 = load i16* %incdec.ptr404, align 2
%conv419 = zext i16 %28 to i32
%mul420 = mul nsw i32 %conv419, %2
%cond.i.i1478 = select i1 %cmp.i.i1477, i32 %add423, i32 0
%cmp.i4.i1479 = icmp slt i32 %cond.i.i1478, %1
%cond.i5.i1480 = select i1 %cmp.i4.i1479, i32 %cond.i.i1478, i32 %1
- %incdec.ptr425 = getelementptr inbounds i16* %srcptr.41591, i64 10
+ %incdec.ptr425 = getelementptr inbounds i16, i16* %srcptr.41591, i64 10
%sub427 = sub nsw i32 %cond.i5.i1480, 0
%idxprom428 = sext i32 %sub427 to i64
- %arrayidx429 = getelementptr inbounds i32* %cond, i64 %idxprom428
+ %arrayidx429 = getelementptr inbounds i32, i32* %cond, i64 %idxprom428
%29 = load i32* %arrayidx429, align 4
%add430 = add nsw i32 %add416, %29
- %incdec.ptr432 = getelementptr inbounds i16* %refptr.11590, i64 11
+ %incdec.ptr432 = getelementptr inbounds i16, i16* %refptr.11590, i64 11
%30 = load i16* %incdec.ptr418, align 2
%conv433 = zext i16 %30 to i32
%mul434 = mul nsw i32 %conv433, %2
%conv440 = zext i16 %31 to i32
%sub441 = sub nsw i32 %cond.i5.i1476, %conv440
%idxprom442 = sext i32 %sub441 to i64
- %arrayidx443 = getelementptr inbounds i32* %cond, i64 %idxprom442
+ %arrayidx443 = getelementptr inbounds i32, i32* %cond, i64 %idxprom442
%32 = load i32* %arrayidx443, align 4
%add444 = add nsw i32 %add430, %32
- %incdec.ptr446 = getelementptr inbounds i16* %refptr.11590, i64 12
+ %incdec.ptr446 = getelementptr inbounds i16, i16* %refptr.11590, i64 12
%33 = load i16* %incdec.ptr432, align 2
%conv447 = zext i16 %33 to i32
%mul448 = mul nsw i32 %conv447, %2
%cond.i.i1470 = select i1 %cmp.i.i1469, i32 %add451, i32 0
%cmp.i4.i1471 = icmp slt i32 %cond.i.i1470, %1
%cond.i5.i1472 = select i1 %cmp.i4.i1471, i32 %cond.i.i1470, i32 %1
- %incdec.ptr453 = getelementptr inbounds i16* %srcptr.41591, i64 12
+ %incdec.ptr453 = getelementptr inbounds i16, i16* %srcptr.41591, i64 12
%34 = load i16* undef, align 2
%conv454 = zext i16 %34 to i32
%sub455 = sub nsw i32 %cond.i5.i1472, %conv454
%idxprom456 = sext i32 %sub455 to i64
- %arrayidx457 = getelementptr inbounds i32* %cond, i64 %idxprom456
+ %arrayidx457 = getelementptr inbounds i32, i32* %cond, i64 %idxprom456
%35 = load i32* %arrayidx457, align 4
%add458 = add nsw i32 %add444, %35
- %incdec.ptr460 = getelementptr inbounds i16* %refptr.11590, i64 13
+ %incdec.ptr460 = getelementptr inbounds i16, i16* %refptr.11590, i64 13
%36 = load i16* %incdec.ptr446, align 2
%conv461 = zext i16 %36 to i32
%mul462 = mul nsw i32 %conv461, %2
%cond.i.i1466 = select i1 %cmp.i.i1465, i32 %add465, i32 0
%cmp.i4.i1467 = icmp slt i32 %cond.i.i1466, %1
%cond.i5.i1468 = select i1 %cmp.i4.i1467, i32 %cond.i.i1466, i32 %1
- %incdec.ptr467 = getelementptr inbounds i16* %srcptr.41591, i64 13
+ %incdec.ptr467 = getelementptr inbounds i16, i16* %srcptr.41591, i64 13
%37 = load i16* %incdec.ptr453, align 2
%conv468 = zext i16 %37 to i32
%sub469 = sub nsw i32 %cond.i5.i1468, %conv468
%idxprom470 = sext i32 %sub469 to i64
- %arrayidx471 = getelementptr inbounds i32* %cond, i64 %idxprom470
+ %arrayidx471 = getelementptr inbounds i32, i32* %cond, i64 %idxprom470
%38 = load i32* %arrayidx471, align 4
%add472 = add nsw i32 %38, %LineSadBlk3.01586
- %incdec.ptr474 = getelementptr inbounds i16* %refptr.11590, i64 14
+ %incdec.ptr474 = getelementptr inbounds i16, i16* %refptr.11590, i64 14
%add477 = add nsw i32 0, %3
%shr478 = ashr i32 %add477, %4
%add479 = add nsw i32 %shr478, %5
%cond.i.i1462 = select i1 %cmp.i.i1461, i32 %add479, i32 0
%cmp.i4.i1463 = icmp slt i32 %cond.i.i1462, %1
%cond.i5.i1464 = select i1 %cmp.i4.i1463, i32 %cond.i.i1462, i32 %1
- %incdec.ptr481 = getelementptr inbounds i16* %srcptr.41591, i64 14
+ %incdec.ptr481 = getelementptr inbounds i16, i16* %srcptr.41591, i64 14
%39 = load i16* %incdec.ptr467, align 2
%conv482 = zext i16 %39 to i32
%sub483 = sub nsw i32 %cond.i5.i1464, %conv482
%idxprom484 = sext i32 %sub483 to i64
- %arrayidx485 = getelementptr inbounds i32* %cond, i64 %idxprom484
+ %arrayidx485 = getelementptr inbounds i32, i32* %cond, i64 %idxprom484
%40 = load i32* %arrayidx485, align 4
%add486 = add nsw i32 %add472, %40
- %incdec.ptr488 = getelementptr inbounds i16* %refptr.11590, i64 15
+ %incdec.ptr488 = getelementptr inbounds i16, i16* %refptr.11590, i64 15
%41 = load i16* %incdec.ptr474, align 2
%conv489 = zext i16 %41 to i32
%mul490 = mul nsw i32 %conv489, %2
%cond.i.i1458 = select i1 %cmp.i.i1457, i32 %add493, i32 0
%cmp.i4.i1459 = icmp slt i32 %cond.i.i1458, %1
%cond.i5.i1460 = select i1 %cmp.i4.i1459, i32 %cond.i.i1458, i32 %1
- %incdec.ptr495 = getelementptr inbounds i16* %srcptr.41591, i64 15
+ %incdec.ptr495 = getelementptr inbounds i16, i16* %srcptr.41591, i64 15
%42 = load i16* %incdec.ptr481, align 2
%conv496 = zext i16 %42 to i32
%sub497 = sub nsw i32 %cond.i5.i1460, %conv496
%idxprom498 = sext i32 %sub497 to i64
- %arrayidx499 = getelementptr inbounds i32* %cond, i64 %idxprom498
+ %arrayidx499 = getelementptr inbounds i32, i32* %cond, i64 %idxprom498
%43 = load i32* %arrayidx499, align 4
%add500 = add nsw i32 %add486, %43
%44 = load i16* %incdec.ptr488, align 2
%conv510 = zext i16 %45 to i32
%sub511 = sub nsw i32 %cond.i5.i1456, %conv510
%idxprom512 = sext i32 %sub511 to i64
- %arrayidx513 = getelementptr inbounds i32* %cond, i64 %idxprom512
+ %arrayidx513 = getelementptr inbounds i32, i32* %cond, i64 %idxprom512
%46 = load i32* %arrayidx513, align 4
%add514 = add nsw i32 %add500, %46
- %add.ptr517 = getelementptr inbounds i16* %refptr.11590, i64 %incdec.ptr502.sum
+ %add.ptr517 = getelementptr inbounds i16, i16* %refptr.11590, i64 %incdec.ptr502.sum
%exitcond1692 = icmp eq i32 undef, 4
br i1 %exitcond1692, label %for.end520, label %for.body293
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi ppc_fp128 [ %d, %entry ], [ %conv, %for.body ]
- %arrayidx = getelementptr inbounds ppc_fp128* %n, i32 %i.06
+ %arrayidx = getelementptr inbounds ppc_fp128, ppc_fp128* %n, i32 %i.06
%0 = load ppc_fp128* %arrayidx, align 8
%conv = tail call ppc_fp128 @copysignl(ppc_fp128 %x.05, ppc_fp128 %d) nounwind readonly
%inc = add nsw i32 %i.06, 1
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds double* %n, i32 %i.06
+ %arrayidx = getelementptr inbounds double, double* %n, i32 %i.06
%0 = load double* %arrayidx, align 8
%conv = sitofp i64 %x.05 to double
%add = fadd double %conv, %0
vector.body.i: ; preds = %vector.body.i, %entry
%index.i = phi i32 [ 0, %entry ], [ %index.next.i, %vector.body.i ]
- %next.gep.i = getelementptr [8000 x i64]* @data64, i32 0, i32 %index.i
+ %next.gep.i = getelementptr [8000 x i64], [8000 x i64]* @data64, i32 0, i32 %index.i
%1 = bitcast i64* %next.gep.i to <2 x i64>*
store <2 x i64> %broadcast.splat.i, <2 x i64>* %1, align 8
%next.gep.sum24.i = or i32 %index.i, 2
- %2 = getelementptr [8000 x i64]* @data64, i32 0, i32 %next.gep.sum24.i
+ %2 = getelementptr [8000 x i64], [8000 x i64]* @data64, i32 0, i32 %next.gep.sum24.i
%3 = bitcast i64* %2 to <2 x i64>*
store <2 x i64> %broadcast.splat.i, <2 x i64>* %3, align 8
%index.next.i = add i32 %index.i, 4
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
%0 = load i64* %arrayidx, align 8
%conv = udiv i64 %x.05, %d
%conv1 = add i64 %conv, %0
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
%0 = load i64* %arrayidx, align 8
%conv = sdiv i64 %x.05, %d
%conv1 = add i64 %conv, %0
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
%0 = load i64* %arrayidx, align 8
%conv = urem i64 %x.05, %d
%conv1 = add i64 %conv, %0
for.body: ; preds = %for.body, %entry
%i.06 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%x.05 = phi i64 [ 0, %entry ], [ %conv1, %for.body ]
- %arrayidx = getelementptr inbounds i64* %n, i32 %i.06
+ %arrayidx = getelementptr inbounds i64, i64* %n, i32 %i.06
%0 = load i64* %arrayidx, align 8
%conv = srem i64 %x.05, %d
%conv1 = add i64 %conv, %0
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 28395, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9073, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 21956, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 16782, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 19097, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 8531, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9152, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 18851, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 25466, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 9295, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32623, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 29554, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 15692, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 10449, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ 32087, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i32 [ %a, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8* %p, i32 %i.04
+ %arrayidx = getelementptr inbounds i8, i8* %p, i32 %i.04
%0 = load i8* %arrayidx, align 1
%conv = zext i8 %0 to i32
%add = add nsw i32 %conv, 1
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next.15, %for.body3 ]
- %arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
%0 = load double* %arrayidx, align 32
%add = fadd double %0, 1.000000e+00
- %arrayidx5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
store double %add, double* %arrayidx5, align 32
%indvars.iv.next11 = or i64 %indvars.iv, 1
- %arrayidx.1 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
+ %arrayidx.1 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
%1 = load double* %arrayidx.1, align 8
%add.1 = fadd double %1, 1.000000e+00
- %arrayidx5.1 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
+ %arrayidx5.1 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
store double %add.1, double* %arrayidx5.1, align 8
%indvars.iv.next.112 = or i64 %indvars.iv, 2
- %arrayidx.2 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
+ %arrayidx.2 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
%2 = load double* %arrayidx.2, align 16
%add.2 = fadd double %2, 1.000000e+00
- %arrayidx5.2 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
+ %arrayidx5.2 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
store double %add.2, double* %arrayidx5.2, align 16
%indvars.iv.next.213 = or i64 %indvars.iv, 3
- %arrayidx.3 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
+ %arrayidx.3 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
%3 = load double* %arrayidx.3, align 8
%add.3 = fadd double %3, 1.000000e+00
- %arrayidx5.3 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
+ %arrayidx5.3 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
store double %add.3, double* %arrayidx5.3, align 8
%indvars.iv.next.314 = or i64 %indvars.iv, 4
- %arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
+ %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
%4 = load double* %arrayidx.4, align 32
%add.4 = fadd double %4, 1.000000e+00
- %arrayidx5.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
+ %arrayidx5.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
store double %add.4, double* %arrayidx5.4, align 32
%indvars.iv.next.415 = or i64 %indvars.iv, 5
- %arrayidx.5 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
+ %arrayidx.5 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
%5 = load double* %arrayidx.5, align 8
%add.5 = fadd double %5, 1.000000e+00
- %arrayidx5.5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
+ %arrayidx5.5 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
store double %add.5, double* %arrayidx5.5, align 8
%indvars.iv.next.516 = or i64 %indvars.iv, 6
- %arrayidx.6 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
+ %arrayidx.6 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
%6 = load double* %arrayidx.6, align 16
%add.6 = fadd double %6, 1.000000e+00
- %arrayidx5.6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
+ %arrayidx5.6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
store double %add.6, double* %arrayidx5.6, align 16
%indvars.iv.next.617 = or i64 %indvars.iv, 7
- %arrayidx.7 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
+ %arrayidx.7 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
%7 = load double* %arrayidx.7, align 8
%add.7 = fadd double %7, 1.000000e+00
- %arrayidx5.7 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
+ %arrayidx5.7 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
store double %add.7, double* %arrayidx5.7, align 8
%indvars.iv.next.718 = or i64 %indvars.iv, 8
- %arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
+ %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
%8 = load double* %arrayidx.8, align 32
%add.8 = fadd double %8, 1.000000e+00
- %arrayidx5.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
+ %arrayidx5.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
store double %add.8, double* %arrayidx5.8, align 32
%indvars.iv.next.819 = or i64 %indvars.iv, 9
- %arrayidx.9 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
+ %arrayidx.9 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
%9 = load double* %arrayidx.9, align 8
%add.9 = fadd double %9, 1.000000e+00
- %arrayidx5.9 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
+ %arrayidx5.9 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
store double %add.9, double* %arrayidx5.9, align 8
%indvars.iv.next.920 = or i64 %indvars.iv, 10
- %arrayidx.10 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
+ %arrayidx.10 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
%10 = load double* %arrayidx.10, align 16
%add.10 = fadd double %10, 1.000000e+00
- %arrayidx5.10 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
+ %arrayidx5.10 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
store double %add.10, double* %arrayidx5.10, align 16
%indvars.iv.next.1021 = or i64 %indvars.iv, 11
- %arrayidx.11 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
+ %arrayidx.11 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
%11 = load double* %arrayidx.11, align 8
%add.11 = fadd double %11, 1.000000e+00
- %arrayidx5.11 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
+ %arrayidx5.11 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
store double %add.11, double* %arrayidx5.11, align 8
%indvars.iv.next.1122 = or i64 %indvars.iv, 12
- %arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
+ %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
%12 = load double* %arrayidx.12, align 32
%add.12 = fadd double %12, 1.000000e+00
- %arrayidx5.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
+ %arrayidx5.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
store double %add.12, double* %arrayidx5.12, align 32
%indvars.iv.next.1223 = or i64 %indvars.iv, 13
- %arrayidx.13 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
+ %arrayidx.13 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
%13 = load double* %arrayidx.13, align 8
%add.13 = fadd double %13, 1.000000e+00
- %arrayidx5.13 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
+ %arrayidx5.13 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
store double %add.13, double* %arrayidx5.13, align 8
%indvars.iv.next.1324 = or i64 %indvars.iv, 14
- %arrayidx.14 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
+ %arrayidx.14 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
%14 = load double* %arrayidx.14, align 16
%add.14 = fadd double %14, 1.000000e+00
- %arrayidx5.14 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
+ %arrayidx5.14 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
store double %add.14, double* %arrayidx5.14, align 16
%indvars.iv.next.1425 = or i64 %indvars.iv, 15
- %arrayidx.15 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
+ %arrayidx.15 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
%15 = load double* %arrayidx.15, align 8
%add.15 = fadd double %15, 1.000000e+00
- %arrayidx5.15 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
+ %arrayidx5.15 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
store double %add.15, double* %arrayidx5.15, align 8
%indvars.iv.next.15 = add i64 %indvars.iv, 16
%lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
for.body3.us: ; preds = %for.body3.us, %for.body3.lr.ph.us
%indvars.iv = phi i64 [ 0, %for.body3.lr.ph.us ], [ %indvars.iv.next, %for.body3.us ]
%Result.111.us = phi i32 [ %Result.014.us, %for.body3.lr.ph.us ], [ %add.us, %for.body3.us ]
- %arrayidx5.us = getelementptr inbounds [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
+ %arrayidx5.us = getelementptr inbounds [100 x i32], [100 x i32]* %Array, i64 %indvars.iv16, i64 %indvars.iv
%0 = load i32* %arrayidx5.us, align 4
%add.us = add nsw i32 %0, %Result.111.us
%indvars.iv.next = add i64 %indvars.iv, 1
%indvars.iv33 = phi i64 [ 0, %entry ], [ %indvars.iv.next34, %for.body ]
%0 = trunc i64 %indvars.iv33 to i32
%sub = sub i32 0, %0
- %arrayidx2 = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv33, i64 %indvars.iv33
store i32 %sub, i32* %arrayidx2, align 4
%indvars.iv.next34 = add i64 %indvars.iv33, 1
%lftr.wideiv35 = trunc i64 %indvars.iv.next34 to i32
if.then: ; preds = %for.body8
%3 = add i64 %indvars.iv, %indvars.iv29
- %arrayidx13 = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
+ %arrayidx13 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv29, i64 %indvars.iv
%4 = trunc i64 %3 to i32
store i32 %4, i32* %arrayidx13, align 4
br label %for.inc14
for.body3.us.i: ; preds = %for.body3.lr.ph.us.i, %for.body3.us.i
%indvars.iv.i = phi i64 [ 0, %for.body3.lr.ph.us.i ], [ %indvars.iv.next.i, %for.body3.us.i ]
%Result.111.us.i = phi i32 [ %Result.014.us.i, %for.body3.lr.ph.us.i ], [ %add.us.i, %for.body3.us.i ]
- %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
+ %arrayidx5.us.i = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %Array, i64 0, i64 %indvars.iv16.i, i64 %indvars.iv.i
%5 = load i32* %arrayidx5.us.i, align 4
%add.us.i = add nsw i32 %5, %Result.111.us.i
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%0 = load i16* null, align 2 ; <i16> [#uses=1]
%1 = ashr i16 %0, 4 ; <i16> [#uses=1]
%2 = sext i16 %1 to i32 ; <i32> [#uses=1]
- %3 = getelementptr i8* null, i32 %2 ; <i8*> [#uses=1]
+ %3 = getelementptr i8, i8* null, i32 %2 ; <i8*> [#uses=1]
%4 = load i8* %3, align 1 ; <i8> [#uses=1]
%5 = zext i8 %4 to i32 ; <i32> [#uses=1]
%6 = shl i32 %5, 24 ; <i32> [#uses=1]
%0 = zext i32 %n to i64
%vla = alloca i32, i64 %0, align 128
%vla1 = alloca i32, i64 %0, align 128
- %a2 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a2 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%1 = load i32* %a2, align 4
store i32 %1, i32* %vla1, align 128
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%2 = load i32* %b, align 4
- %arrayidx3 = getelementptr inbounds i32* %vla1, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %vla1, i64 1
store i32 %2, i32* %arrayidx3, align 4
call void @bar(i32* %vla1, i32* %vla) #0
ret void
; discovering a bug on PowerPC as well.)
define i32 @f(i32* %x) nounwind ssp {
- %y = getelementptr inbounds i32* %x, i32 5000
+ %y = getelementptr inbounds i32, i32* %x, i32 5000
%tmp103 = load i32* %y, align 4
ret i32 %tmp103
}
; CHECK-LABEL: gep_promotion:
; CHECK: lbz {{[0-9]+}}, 0({{.*}})
- %arrayidx = getelementptr inbounds i8* %0, i8 %add
+ %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
%1 = load i8* %arrayidx, align 1
ret i8 %1
define float @tf(float* nocapture readonly %i, i32 signext %o) #0 {
entry:
%idx.ext = sext i32 %o to i64
- %add.ptr = getelementptr inbounds float* %i, i64 %idx.ext
+ %add.ptr = getelementptr inbounds float, float* %i, i64 %idx.ext
%0 = load float* %add.ptr, align 4
%add.ptr.sum = add nsw i64 %idx.ext, 1
- %add.ptr3 = getelementptr inbounds float* %i, i64 %add.ptr.sum
+ %add.ptr3 = getelementptr inbounds float, float* %i, i64 %add.ptr.sum
%1 = load float* %add.ptr3, align 4
%add = fadd float %0, %1
ret float %add
define double @td(double* nocapture readonly %i, i32 signext %o) #0 {
entry:
%idx.ext = sext i32 %o to i64
- %add.ptr = getelementptr inbounds double* %i, i64 %idx.ext
+ %add.ptr = getelementptr inbounds double, double* %i, i64 %idx.ext
%0 = load double* %add.ptr, align 8
%add.ptr.sum = add nsw i64 %idx.ext, 1
- %add.ptr3 = getelementptr inbounds double* %i, i64 %add.ptr.sum
+ %add.ptr3 = getelementptr inbounds double, double* %i, i64 %add.ptr.sum
%1 = load double* %add.ptr3, align 8
%add = fadd double %0, %1
ret double %add
%ref.tmp = alloca %"class.std::__exception_ptr::exception_ptr", align 8
%tmp = alloca { i64, i64 }, align 8
%agg.tmp = alloca %"class.std::__exception_ptr::exception_ptr", align 8
- %__mut_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 2
- %__m_.i.i = getelementptr inbounds %"class.std::__1::unique_lock"* %__lk, i64 0, i32 0
+ %__mut_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 2
+ %__m_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", %"class.std::__1::unique_lock"* %__lk, i64 0, i32 0
store %"class.std::__1::mutex"* %__mut_, %"class.std::__1::mutex"** %__m_.i.i, align 8, !tbaa !5
- %__owns_.i.i = getelementptr inbounds %"class.std::__1::unique_lock"* %__lk, i64 0, i32 1
+ %__owns_.i.i = getelementptr inbounds %"class.std::__1::unique_lock", %"class.std::__1::unique_lock"* %__lk, i64 0, i32 1
store i8 1, i8* %__owns_.i.i, align 8, !tbaa !6
call void @_ZNSt3__15mutex4lockEv(%"class.std::__1::mutex"* %__mut_) #4
invoke void @_ZNSt3__117__assoc_sub_state10__sub_waitERNS_11unique_lockINS_5mutexEEE(%"class.std::__1::__assoc_sub_state"* %this, %"class.std::__1::unique_lock"* %__lk) #4
to label %invoke.cont unwind label %lpad
invoke.cont: ; preds = %entry
- %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1
+ %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1
%0 = bitcast { i64, i64 }* %tmp to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 16, i32 8, i1 false)
call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval %tmp) #5
%1 = bitcast [18 x i64]* %regs to i64*
call void asm sideeffect "std 14, $0", "=*m"(i64* %1)
%2 = bitcast [18 x i64]* %regs to i8*
- %3 = getelementptr i8* %2, i32 8
+ %3 = getelementptr i8, i8* %2, i32 8
%4 = bitcast i8* %3 to i64*
call void asm sideeffect "std 15, $0", "=*m"(i64* %4)
%5 = bitcast [18 x i64]* %regs to i8*
- %6 = getelementptr i8* %5, i32 16
+ %6 = getelementptr i8, i8* %5, i32 16
%7 = bitcast i8* %6 to i64*
call void asm sideeffect "std 16, $0", "=*m"(i64* %7)
%8 = bitcast [18 x i64]* %regs to i8*
- %9 = getelementptr i8* %8, i32 24
+ %9 = getelementptr i8, i8* %8, i32 24
%10 = bitcast i8* %9 to i64*
call void asm sideeffect "std 17, $0", "=*m"(i64* %10)
%11 = bitcast [18 x i64]* %regs to i8*
- %12 = getelementptr i8* %11, i32 32
+ %12 = getelementptr i8, i8* %11, i32 32
%13 = bitcast i8* %12 to i64*
call void asm sideeffect "std 18, $0", "=*m"(i64* %13)
%14 = bitcast [18 x i64]* %regs to i8*
- %15 = getelementptr i8* %14, i32 40
+ %15 = getelementptr i8, i8* %14, i32 40
%16 = bitcast i8* %15 to i64*
call void asm sideeffect "std 19, $0", "=*m"(i64* %16)
%17 = bitcast [18 x i64]* %regs to i8*
- %18 = getelementptr i8* %17, i32 48
+ %18 = getelementptr i8, i8* %17, i32 48
%19 = bitcast i8* %18 to i64*
call void asm sideeffect "std 20, $0", "=*m"(i64* %19)
%20 = bitcast [18 x i64]* %regs to i8*
- %21 = getelementptr i8* %20, i32 56
+ %21 = getelementptr i8, i8* %20, i32 56
%22 = bitcast i8* %21 to i64*
call void asm sideeffect "std 21, $0", "=*m"(i64* %22)
%23 = bitcast [18 x i64]* %regs to i8*
- %24 = getelementptr i8* %23, i32 64
+ %24 = getelementptr i8, i8* %23, i32 64
%25 = bitcast i8* %24 to i64*
call void asm sideeffect "std 22, $0", "=*m"(i64* %25)
%26 = bitcast [18 x i64]* %regs to i8*
- %27 = getelementptr i8* %26, i32 72
+ %27 = getelementptr i8, i8* %26, i32 72
%28 = bitcast i8* %27 to i64*
call void asm sideeffect "std 23, $0", "=*m"(i64* %28)
%29 = bitcast [18 x i64]* %regs to i8*
- %30 = getelementptr i8* %29, i32 80
+ %30 = getelementptr i8, i8* %29, i32 80
%31 = bitcast i8* %30 to i64*
call void asm sideeffect "std 24, $0", "=*m"(i64* %31)
%32 = bitcast [18 x i64]* %regs to i8*
- %33 = getelementptr i8* %32, i32 88
+ %33 = getelementptr i8, i8* %32, i32 88
%34 = bitcast i8* %33 to i64*
call void asm sideeffect "std 25, $0", "=*m"(i64* %34)
%35 = bitcast [18 x i64]* %regs to i8*
- %36 = getelementptr i8* %35, i32 96
+ %36 = getelementptr i8, i8* %35, i32 96
%37 = bitcast i8* %36 to i64*
call void asm sideeffect "std 26, $0", "=*m"(i64* %37)
%38 = bitcast [18 x i64]* %regs to i8*
- %39 = getelementptr i8* %38, i32 104
+ %39 = getelementptr i8, i8* %38, i32 104
%40 = bitcast i8* %39 to i64*
call void asm sideeffect "std 27, $0", "=*m"(i64* %40)
%41 = bitcast [18 x i64]* %regs to i8*
- %42 = getelementptr i8* %41, i32 112
+ %42 = getelementptr i8, i8* %41, i32 112
%43 = bitcast i8* %42 to i64*
call void asm sideeffect "std 28, $0", "=*m"(i64* %43)
%44 = bitcast [18 x i64]* %regs to i8*
- %45 = getelementptr i8* %44, i32 120
+ %45 = getelementptr i8, i8* %44, i32 120
%46 = bitcast i8* %45 to i64*
call void asm sideeffect "std 29, $0", "=*m"(i64* %46)
%47 = bitcast [18 x i64]* %regs to i8*
- %48 = getelementptr i8* %47, i32 128
+ %48 = getelementptr i8, i8* %47, i32 128
%49 = bitcast i8* %48 to i64*
call void asm sideeffect "std 30, $0", "=*m"(i64* %49)
%50 = bitcast [18 x i64]* %regs to i8*
- %51 = getelementptr i8* %50, i32 136
+ %51 = getelementptr i8, i8* %50, i32 136
%52 = bitcast i8* %51 to i64*
call void asm sideeffect "std 31, $0", "=*m"(i64* %52)
- %53 = getelementptr { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
+ %53 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 1
%.funcptr = load void (i8*, i8*)** %53
- %54 = getelementptr { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
+ %54 = getelementptr { i8*, void (i8*, i8*)* }, { i8*, void (i8*, i8*)* }* %fn, i32 0, i32 0
%.ptr = load i8** %54
%55 = load i8** %sp
call void %.funcptr(i8* %.ptr, i8* %55)
; CHECK-NOT: stwx {{[0-9]+}}, {{[0-9]+}}, 64
define void @f(%class.test* %this) {
entry:
- %Subminor.i.i = getelementptr inbounds %class.test* %this, i64 0, i32 1
+ %Subminor.i.i = getelementptr inbounds %class.test, %class.test* %this, i64 0, i32 1
%0 = bitcast [5 x i8]* %Subminor.i.i to i40*
%bf.load2.i.i = load i40* %0, align 4
%bf.clear7.i.i = and i40 %bf.load2.i.i, -8589934592
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
%gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
br label %bb2
while.cond: ; preds = %while.body, %if.then15
%idxprom17 = sext i32 0 to i64
- %arrayidx18 = getelementptr inbounds i8* %0, i64 %idxprom17
+ %arrayidx18 = getelementptr inbounds i8, i8* %0, i64 %idxprom17
%or = or i32 undef, undef
br i1 %cond1, label %if.end71, label %while.body
if.then45: ; preds = %while.body
%idxprom48139 = zext i32 %or to i64
- %arrayidx49 = getelementptr inbounds i8* %0, i64 %idxprom48139
+ %arrayidx49 = getelementptr inbounds i8, i8* %0, i64 %idxprom48139
%1 = bitcast i8* %arrayidx49 to i16*
%2 = bitcast i8* %arrayidx18 to i16*
%3 = load i16* %1, align 1
; Function Attrs: nounwind
define i32* @test4(i32* readonly %X, i32* nocapture %dest) #0 {
- %Y = getelementptr i32* %X, i64 4
+ %Y = getelementptr i32, i32* %X, i64 4
%A = load i32* %Y, align 4
store i32 %A, i32* %dest, align 4
ret i32* %Y
%shl1 = shl i32 %0, %step_size
%idxprom2 = sext i32 %shl1 to i64
%arrayidx.sum = add nsw i64 %idxprom2, %idxprom
- %arrayidx3 = getelementptr inbounds [4096 x i32]* @phasor, i64 0, i64 %arrayidx.sum
+ %arrayidx3 = getelementptr inbounds [4096 x i32], [4096 x i32]* @phasor, i64 0, i64 %arrayidx.sum
%1 = load i32* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds i32* %out, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds i32, i32* %out, i64 %indvars.iv
store i32 %1, i32* %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%cmp = icmp slt i64 %indvars.iv.next, 1020
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %b, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%add = fadd double %0, 1.000000e+00
- %arrayidx2 = getelementptr inbounds double* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %a, i64 %indvars.iv
store double %add, double* %arrayidx2, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
call void @llvm.lifetime.start(i64 32800, i8* %1) #0
%2 = bitcast [8200 x i32]* %q to i8*
call void @llvm.lifetime.start(i64 32800, i8* %2) #0
- %arraydecay = getelementptr inbounds [8200 x i32]* %q, i64 0, i64 0
- %arraydecay1 = getelementptr inbounds [8200 x i32]* %v, i64 0, i64 0
- %arraydecay2 = getelementptr inbounds [8200 x i32]* %w, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [8200 x i32], [8200 x i32]* %q, i64 0, i64 0
+ %arraydecay1 = getelementptr inbounds [8200 x i32], [8200 x i32]* %v, i64 0, i64 0
+ %arraydecay2 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 0
call void @bar(i32* %arraydecay, i32* %arraydecay1, i32* %arraydecay2) #0
%3 = load i32* %arraydecay2, align 4
- %arrayidx3 = getelementptr inbounds [8200 x i32]* %w, i64 0, i64 1
+ %arrayidx3 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 1
%4 = load i32* %arrayidx3, align 4
; CHECK: @foo
; scevgep needs to be inserted in %bb so that it is dominated by %t.
; CHECK: %t = load i8** undef
-; CHECK: %scevgep = getelementptr i8* %t, i32 %lsr.iv.next
+; CHECK: %scevgep = getelementptr i8, i8* %t, i32 %lsr.iv.next
; CHECK: %c1 = icmp ult i8* %scevgep, undef
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
bb:
%t = load i8** undef, align 16 ; <i8*> [#uses=1]
- %p = getelementptr i8* %t, i32 %ii ; <i8*> [#uses=1]
+ %p = getelementptr i8, i8* %t, i32 %ii ; <i8*> [#uses=1]
%c1 = icmp ult i8* %p, undef ; <i1> [#uses=1]
%i.next = add i32 %i, 1 ; <i32> [#uses=1]
br i1 %c1, label %bb11, label %bb13
define signext i8 @test_avext() nounwind {
entry:
- %0 = getelementptr inbounds [13 x i8]* @x, i32 0, i32 0
+ %0 = getelementptr inbounds [13 x i8], [13 x i8]* @x, i32 0, i32 0
%1 = load i8* %0, align 1
ret i8 %1
}
; This shares the 16 between the two loads.
define void @func(<4 x float>* %a, <4 x float>* %b) {
- %tmp1 = getelementptr <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
+ %tmp1 = getelementptr <4 x float>, <4 x float>* %b, i32 1 ; <<4 x float>*> [#uses=1]
%tmp = load <4 x float>* %tmp1 ; <<4 x float>> [#uses=1]
- %tmp3 = getelementptr <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
+ %tmp3 = getelementptr <4 x float>, <4 x float>* %a, i32 1 ; <<4 x float>*> [#uses=1]
%tmp4 = load <4 x float>* %tmp3 ; <<4 x float>> [#uses=1]
%tmp5 = fmul <4 x float> %tmp, %tmp4 ; <<4 x float>> [#uses=1]
%tmp8 = load <4 x float>* %b ; <<4 x float>> [#uses=1]
@Glob = global i64 4
define i32* @test0(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32* %X, i32 4
+ %Y = getelementptr i32, i32* %X, i32 4
%A = load i32* %Y
store i32 %A, i32* %dest
ret i32* %Y
}
define i32* @test1(i32* %X, i32* %dest) nounwind {
- %Y = getelementptr i32* %X, i32 4
+ %Y = getelementptr i32, i32* %X, i32 4
%A = load i32* %Y
store i32 %A, i32* %dest
ret i32* %Y
}
define i16* @test2(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
+ %Y = getelementptr i16, i16* %X, i32 4
%A = load i16* %Y
%B = sext i16 %A to i32
store i32 %B, i32* %dest
}
define i16* @test3(i16* %X, i32* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
+ %Y = getelementptr i16, i16* %X, i32 4
%A = load i16* %Y
%B = zext i16 %A to i32
store i32 %B, i32* %dest
}
define i16* @test3a(i16* %X, i64* %dest) nounwind {
- %Y = getelementptr i16* %X, i32 4
+ %Y = getelementptr i16, i16* %X, i32 4
%A = load i16* %Y
%B = sext i16 %A to i64
store i64 %B, i64* %dest
}
define i64* @test4(i64* %X, i64* %dest) nounwind {
- %Y = getelementptr i64* %X, i32 4
+ %Y = getelementptr i64, i64* %X, i32 4
%A = load i64* %Y
store i64 %A, i64* %dest
ret i64* %Y
}
define i16* @test5(i16* %X) nounwind {
- %Y = getelementptr i16* %X, i32 4
+ %Y = getelementptr i16, i16* %X, i32 4
store i16 7, i16* %Y
ret i16* %Y
}
define i64* @test6(i64* %X, i64 %A) nounwind {
- %Y = getelementptr i64* %X, i32 4
+ %Y = getelementptr i64, i64* %X, i32 4
store i64 %A, i64* %Y
ret i64* %Y
}
br i1 %p, label %true, label %end
true:
%sum2 = add i32 %sum1, 1
- %ptr2 = getelementptr i32* %ptr, i32 1
+ %ptr2 = getelementptr i32, i32* %ptr, i32 1
%val = load i32* %ptr2
%val2 = add i32 %val1, %val
br label %end
br i1 undef, label %for.body24.i58, label %for.body24.i
for.body24.i58: ; preds = %for.body24.i58, %for.body24.i
- %arrayidx26.i55.1 = getelementptr inbounds [16000 x double]* @b, i64 0, i64 undef
+ %arrayidx26.i55.1 = getelementptr inbounds [16000 x double], [16000 x double]* @b, i64 0, i64 undef
store double 1.000000e+00, double* %arrayidx26.i55.1, align 8
br i1 undef, label %for.body24.i64, label %for.body24.i58
%1 = load i32* null, align 4
%add = add i32 %1, %conv.i
store i32 %add, i32* null, align 4
- %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
+ %counter.i.i = getelementptr inbounds %struct.task_struct.4.16.124, %struct.task_struct.4.16.124* %call1.i, i64 0, i32 1, i32 0
%2 = tail call i32 asm sideeffect "\09lwsync\0A1:\09lwarx\09$0,0,$1\09\09# atomic_dec_return\0A\09addic\09$0,$0,-1\0A\09stwcx.\09$0,0,$1\0A\09bne-\091b\0A\09sync\0A", "=&r,r,~{cr0},~{xer},~{memory}"(i32* %counter.i.i) #0
%cmp.i = icmp eq i32 %2, 0
br i1 %cmp.i, label %if.then.i, label %if.end
define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind {
entry:
- %a.realp = getelementptr inbounds %0* %a, i32 0, i32 0
+ %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0
%a.real = load double* %a.realp
- %a.imagp = getelementptr inbounds %0* %a, i32 0, i32 1
+ %a.imagp = getelementptr inbounds %0, %0* %a, i32 0, i32 1
%a.imag = load double* %a.imagp
- %b.realp = getelementptr inbounds %0* %b, i32 0, i32 0
+ %b.realp = getelementptr inbounds %0, %0* %b, i32 0, i32 0
%b.real = load double* %b.realp
- %b.imagp = getelementptr inbounds %0* %b, i32 0, i32 1
+ %b.imagp = getelementptr inbounds %0, %0* %b, i32 0, i32 1
%b.imag = load double* %b.imagp
%mul.rl = fmul double %a.real, %b.real
%mul.rr = fmul double %a.imag, %b.imag
%mul.il = fmul double %a.imag, %b.real
%mul.ir = fmul double %a.real, %b.imag
%mul.i = fadd double %mul.il, %mul.ir
- %c.realp = getelementptr inbounds %0* %c, i32 0, i32 0
+ %c.realp = getelementptr inbounds %0, %0* %c, i32 0, i32 0
%c.real = load double* %c.realp
- %c.imagp = getelementptr inbounds %0* %c, i32 0, i32 1
+ %c.imagp = getelementptr inbounds %0, %0* %c, i32 0, i32 1
%c.imag = load double* %c.imagp
%add.r = fadd double %mul.r, %c.real
%add.i = fadd double %mul.i, %c.imag
- %real = getelementptr inbounds %0* %agg.result, i32 0, i32 0
- %imag = getelementptr inbounds %0* %agg.result, i32 0, i32 1
+ %real = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 0
+ %imag = getelementptr inbounds %0, %0* %agg.result, i32 0, i32 1
store double %add.r, double* %real
store double %add.i, double* %imag
ret void
define ppc_fp128 @test(%struct.S* byval %x) nounwind {
entry:
- %b = getelementptr inbounds %struct.S* %x, i32 0, i32 1
+ %b = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1
%0 = load ppc_fp128* %b, align 16
ret ppc_fp128 %0
}
define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) {
entry:
- %x1 = getelementptr inbounds %struct.test* %z, i64 0, i32 0
+ %x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0
%0 = load i64* %x1, align 16
ret i64 %0
}
define void @caller2(i64 %z) {
entry:
%tmp = alloca %struct.test, align 16
- %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test* %tmp, i64 0, i32 0
+ %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test, %struct.test* %tmp, i64 0, i32 0
store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16
%call = call i64 @test2(%struct.pad* byval @gp, i32 signext 0, %struct.test* byval align 16 %tmp)
ret void
; Check that when two complex GEPs are used in two basic blocks, LLVM can
; elimilate the common subexpression for the second use.
define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
- %liberties = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
+ %liberties = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
%1 = load i32* %liberties, align 4
%cmp = icmp eq i32 %1, %lib
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
- %origin = getelementptr [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
+ %origin = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
%2 = load i32* %origin, align 4
store i32 %2, i32* %adj, align 4
br label %if.end
; CHECK-UseAA-LABEL: @test_GEP_CSE(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = bitcast [240 x %struct]* %string to i8*
; CHECK-UseAA: [[IDX:%[a-zA-Z0-9]+]] = mul i64 %idxprom, 96
-; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8* [[PTR0]], i64 [[IDX]]
-; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23052
+; CHECK-UseAA: [[PTR1:%[a-zA-Z0-9]+]] = getelementptr i8, i8* [[PTR0]], i64 [[IDX]]
+; CHECK-UseAA: getelementptr i8, i8* [[PTR1]], i64 23052
; CHECK-UseAA: bitcast
; CHECK-UseAA: if.then:
-; CHECK-UseAA: getelementptr i8* [[PTR1]], i64 23048
+; CHECK-UseAA: getelementptr i8, i8* [[PTR1]], i64 23048
; CHECK-UseAA: bitcast
%class.my = type { i32, [128 x i32], i32, [256 x %struct.pt]}
; calculation and code gen can generate a better addressing mode for the second
; use.
define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
- %1 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
+ %1 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
%2 = load i32* %1, align 4
- %3 = getelementptr %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
+ %3 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
%4 = load i32* %3, align 4
%5 = icmp eq i32 %2, %4
br i1 %5, label %if.true, label %exit
; CHECK-UseAA-LABEL: test_GEP_across_BB(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
-; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 528
-; CHECK-UseAA: getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: getelementptr i8, i8* [[PTR0]], i64 528
+; CHECK-UseAA: getelementptr i8, i8* [[PTR0]], i64 532
; CHECK-UseAA: if.true:
-; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 532
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* [[PTR0]], i64 532
; CHECK-UseAA: exit:
-; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8* [[PTR0]], i64 528
+; CHECK-UseAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* [[PTR0]], i64 528
%struct.S = type { float, double }
@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
entry:
%add = add nsw i32 %i, 5
%idxprom = sext i32 %add to i64
- %p = getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ %p = getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
ret double* %p
}
; CHECK-NoAA-LABEL: @test-struct_1(
; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, 88
; CHECK-UseAA-LABEL: @test-struct_1(
-; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 88
+; CHECK-UseAA: getelementptr i8, i8* %{{[a-zA-Z0-9]+}}, i64 88
%struct3 = type { i64, i32 }
%struct2 = type { %struct3, i32 }
define %struct2* @test-struct_2(%struct0* %ptr, i64 %idx) {
entry:
%arrayidx = add nsw i64 %idx, -2
- %ptr2 = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+ %ptr2 = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
ret %struct2* %ptr2
}
; CHECK-NoAA-LABEL: @test-struct_2(
; CHECK-NoAA: add i64 %{{[a-zA-Z0-9]+}}, -40
; CHECK-UseAA-LABEL: @test-struct_2(
-; CHECK-UseAA: getelementptr i8* %{{[a-zA-Z0-9]+}}, i64 -40
+; CHECK-UseAA: getelementptr i8, i8* %{{[a-zA-Z0-9]+}}, i64 -40
; Test that when a index is added from two constant, SeparateConstOffsetFromGEP
; pass does not generate incorrect result.
define void @test_const_add([3 x i32]* %in) {
%inc = add nsw i32 2, 1
%idxprom = sext i32 %inc to i64
- %arrayidx = getelementptr [3 x i32]* %in, i64 %idxprom, i64 2
+ %arrayidx = getelementptr [3 x i32], [3 x i32]* %in, i64 %idxprom, i64 2
store i32 0, i32* %arrayidx, align 4
ret void
}
entry:
; CHECK-LABEL: access_double_array:
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds [32 x double]* @double_array, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [32 x double], [32 x double]* @double_array, i64 0, i64 %idxprom
%0 = load double* %arrayidx, align 8
; CHECK: ld {{[0-9]+}}, .LC{{[0-9]+}}@toc(2)
%cmp = fcmp oeq double %0, %a
define void @_ZN4llvm14MachineOperand12substPhysRegEjRKNS_18TargetRegisterInfoE(%"class.llvm::MachineOperand"* %this, i32 zeroext %Reg, %"class.llvm::TargetRegisterInfo"* %TRI) align 2 {
entry:
- %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 1
+ %SubReg_TargetFlags.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 1
%0 = bitcast [3 x i8]* %SubReg_TargetFlags.i to i24*
%bf.load.i = load i24* %0, align 1
%bf.lshr.i = lshr i24 %bf.load.i, 12
if.then: ; preds = %entry
%bf.cast.i = zext i24 %bf.lshr.i to i32
- %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
+ %add.ptr = getelementptr inbounds %"class.llvm::TargetRegisterInfo", %"class.llvm::TargetRegisterInfo"* %TRI, i64 0, i32 1
%call3 = tail call zeroext i32 @_ZNK4llvm14MCRegisterInfo9getSubRegEjj(%"class.llvm::MCRegisterInfo"* %add.ptr, i32 zeroext %Reg, i32 zeroext %bf.cast.i)
%bf.load.i10 = load i24* %0, align 1
%bf.clear.i = and i24 %bf.load.i10, 4095
if.end: ; preds = %entry, %if.then
%Reg.addr.0 = phi i32 [ %call3, %if.then ], [ %Reg, %entry ]
- %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
+ %RegNo.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 2, i32 0
%1 = load i32* %RegNo.i.i, align 4
%cmp.i = icmp eq i32 %1, %Reg.addr.0
br i1 %cmp.i, label %_ZN4llvm14MachineOperand6setRegEj.exit, label %if.end.i
if.end.i: ; preds = %if.end
- %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand"* %this, i64 0, i32 3
+ %ParentMI.i.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %this, i64 0, i32 3
%2 = load %"class.llvm::MachineInstr"** %ParentMI.i.i, align 8
%tobool.i = icmp eq %"class.llvm::MachineInstr"* %2, null
br i1 %tobool.i, label %if.end13.i, label %if.then3.i
if.then3.i: ; preds = %if.end.i
- %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr"* %2, i64 0, i32 2
+ %Parent.i.i = getelementptr inbounds %"class.llvm::MachineInstr", %"class.llvm::MachineInstr"* %2, i64 0, i32 2
%3 = load %"class.llvm::MachineBasicBlock"** %Parent.i.i, align 8
%tobool5.i = icmp eq %"class.llvm::MachineBasicBlock"* %3, null
br i1 %tobool5.i, label %if.end13.i, label %if.then6.i
if.then6.i: ; preds = %if.then3.i
- %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
+ %xParent.i.i = getelementptr inbounds %"class.llvm::MachineBasicBlock", %"class.llvm::MachineBasicBlock"* %3, i64 0, i32 4
%4 = load %"class.llvm::MachineFunction"** %xParent.i.i, align 8
%tobool8.i = icmp eq %"class.llvm::MachineFunction"* %4, null
br i1 %tobool8.i, label %if.end13.i, label %if.then9.i
if.then9.i: ; preds = %if.then6.i
- %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction"* %4, i64 0, i32 5
+ %RegInfo.i.i = getelementptr inbounds %"class.llvm::MachineFunction", %"class.llvm::MachineFunction"* %4, i64 0, i32 5
%5 = load %"class.llvm::MachineRegisterInfo"** %RegInfo.i.i, align 8
tail call void @_ZN4llvm19MachineRegisterInfo27removeRegOperandFromUseListEPNS_14MachineOperandE(%"class.llvm::MachineRegisterInfo"* %5, %"class.llvm::MachineOperand"* %this)
store i32 %Reg.addr.0, i32* %RegNo.i.i, align 4
unreachable
noassert: ; preds = %entry
- %tmp2 = getelementptr %core.time.TickDuration* %.this_arg, i32 0, i32 0
+ %tmp2 = getelementptr %core.time.TickDuration, %core.time.TickDuration* %.this_arg, i32 0, i32 0
%tmp3 = load i64* %tmp2
%tmp4 = sitofp i64 %tmp3 to ppc_fp128
%tmp5 = load i64* @_D4core4time12TickDuration11ticksPerSecyl
; Function Attrs: nounwind
define void @_Z4funcv(%struct.CS* noalias sret %agg.result) #0 {
entry:
- %a_ = getelementptr inbounds %struct.CS* %agg.result, i32 0, i32 0
+ %a_ = getelementptr inbounds %struct.CS, %struct.CS* %agg.result, i32 0, i32 0
store i32 0, i32* %a_, align 4
ret void
}
br i1 %cmp, label %for.bodythread-pre-split, label %if.end8
for.bodythread-pre-split: ; preds = %entry
- %aclass = getelementptr inbounds %struct.anon.0* %2, i32 0, i32 0
+ %aclass = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 0, i32 0
%.pr = load i32* %aclass, align 4
br label %for.body
while.body: ; preds = %while.body.lr.ph, %while.cond
%j.110 = phi i32 [ %j.1.ph13, %while.body.lr.ph ], [ %inc7, %while.cond ]
- %aclass_index = getelementptr inbounds %struct.anon* %0, i32 %j.110, i32 0
+ %aclass_index = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 %j.110, i32 0
%3 = load i32* %aclass_index, align 4
- %aclass5 = getelementptr inbounds %struct.anon.0* %2, i32 %3, i32 0
+ %aclass5 = getelementptr inbounds %struct.anon.0, %struct.anon.0* %2, i32 %3, i32 0
%4 = load i32* %aclass5, align 4
%tobool = icmp eq i32 %4, 0
%inc7 = add nsw i32 %j.110, 1
for.cond.i.i30: ; preds = %for.cond.i.i30, %invoke.cont4
%indvars.iv.i.i26 = phi i64 [ %indvars.iv.next.i.i29, %for.cond.i.i30 ], [ 0, %invoke.cont4 ]
- %arrayidx.i.i27 = getelementptr inbounds i8* %call7, i64 %indvars.iv.i.i26
+ %arrayidx.i.i27 = getelementptr inbounds i8, i8* %call7, i64 %indvars.iv.i.i26
%0 = load i8* %arrayidx.i.i27, align 1
%indvars.iv.next.i.i29 = add nuw nsw i64 %indvars.iv.i.i26, 1
br label %for.cond.i.i30
define <4 x double> @bar(<4 x double>* %a) {
entry:
%r = load <4 x double>* %a, align 8
- %b = getelementptr <4 x double>* %a, i32 16
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 16
%s = load <4 x double>* %b, align 32
%t = fadd <4 x double> %r, %s
ret <4 x double> %t
define <4 x double> @bar1(<4 x double>* %a) {
entry:
%r = load <4 x double>* %a, align 8
- %b = getelementptr <4 x double>* %a, i32 16
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 16
%s = load <4 x double>* %b, align 8
%t = fadd <4 x double> %r, %s
ret <4 x double> %t
define <4 x double> @bar2(<4 x double>* %a) {
entry:
%r = load <4 x double>* %a, align 8
- %b = getelementptr <4 x double>* %a, i32 1
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
%s = load <4 x double>* %b, align 32
%t = fadd <4 x double> %r, %s
ret <4 x double> %t
define <4 x double> @bar3(<4 x double>* %a) {
entry:
%r = load <4 x double>* %a, align 8
- %b = getelementptr <4 x double>* %a, i32 1
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
%s = load <4 x double>* %b, align 8
%t = fadd <4 x double> %r, %s
ret <4 x double> %t
define <4 x double> @bar4(<4 x double>* %a) {
entry:
%r = load <4 x double>* %a, align 8
- %b = getelementptr <4 x double>* %a, i32 1
+ %b = getelementptr <4 x double>, <4 x double>* %a, i32 1
%s = load <4 x double>* %b, align 8
- %c = getelementptr <4 x double>* %b, i32 1
+ %c = getelementptr <4 x double>, <4 x double>* %b, i32 1
%t = load <4 x double>* %c, align 8
%u = fadd <4 x double> %r, %s
%v = fadd <4 x double> %u, %t
%struct.foo = type { i32, i32, [0 x i8] }
define i32 @test(%struct.foo* %X) nounwind {
- %tmp1 = getelementptr %struct.foo* %X, i32 0, i32 2, i32 100 ; <i8*> [#uses=1]
+ %tmp1 = getelementptr %struct.foo, %struct.foo* %X, i32 0, i32 2, i32 100 ; <i8*> [#uses=1]
%tmp = load i8* %tmp1 ; <i8> [#uses=1]
%tmp2 = zext i8 %tmp to i32 ; <i32> [#uses=1]
ret i32 %tmp2
store i32 0, i32* %j, align 4
%2 = load i32* %i, align 4
%idxprom = sext i32 %2 to i64
- %arrayidx = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom
store i8* bitcast (i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1) to i8*), i8** %arrayidx, align 8
%3 = load i32* %i, align 4
%idxprom1 = sext i32 %3 to i64
- %arrayidx2 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom1
store i64 8, i64* %arrayidx2, align 8
%4 = load i32* %i, align 4
%idxprom3 = sext i32 %4 to i64
- %arrayidx4 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom3
store i64 8, i64* %arrayidx4, align 8
store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 190), i32** getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 0, i64 1), align 8
store i32* getelementptr inbounds ([256 x i32]* @intarray, i32 0, i64 241), i32** getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 0, i64 1), align 8
store i32 %inc5, i32* %i, align 4
%6 = load i32* %i, align 4
%idxprom6 = sext i32 %6 to i64
- %arrayidx7 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom6
store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1) to i8*), i8** %arrayidx7, align 8
%7 = load i32* %i, align 4
%idxprom8 = sext i32 %7 to i64
- %arrayidx9 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom8
store i64 8, i64* %arrayidx9, align 8
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom10
store i64 8, i64* %arrayidx11, align 8
store i64 -3866974208859106459, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 1), align 8
store i64 -185376695371304091, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 1), align 8
store i32 %inc12, i32* %i, align 4
%10 = load i32* %i, align 4
%idxprom13 = sext i32 %10 to i64
- %arrayidx14 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
+ %arrayidx14 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom13
store i8* bitcast (i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2) to i8*), i8** %arrayidx14, align 8
%11 = load i32* %i, align 4
%idxprom15 = sext i32 %11 to i64
- %arrayidx16 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
+ %arrayidx16 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom15
store i64 8, i64* %arrayidx16, align 8
%12 = load i32* %i, align 4
%idxprom17 = sext i32 %12 to i64
- %arrayidx18 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
+ %arrayidx18 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom17
store i64 8, i64* %arrayidx18, align 8
store i64 -963638028680427187, i64* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 2), align 8
store i64 7510542175772455554, i64* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 2), align 8
store i32 %inc19, i32* %i, align 4
%14 = load i32* %i, align 4
%idxprom20 = sext i32 %14 to i64
- %arrayidx21 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
+ %arrayidx21 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom20
store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3) to i8*), i8** %arrayidx21, align 8
%15 = load i32* %i, align 4
%idxprom22 = sext i32 %15 to i64
- %arrayidx23 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
+ %arrayidx23 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom22
store i64 8, i64* %arrayidx23, align 8
%16 = load i32* %i, align 4
%idxprom24 = sext i32 %16 to i64
- %arrayidx25 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
+ %arrayidx25 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom24
store i64 16, i64* %arrayidx25, align 8
store double 0xC0F8783300000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 3), align 16
store double 0xC10DF3CCC0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 3), align 16
store i32 %inc26, i32* %i, align 4
%18 = load i32* %i, align 4
%idxprom27 = sext i32 %18 to i64
- %arrayidx28 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
+ %arrayidx28 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom27
store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4) to i8*), i8** %arrayidx28, align 8
%19 = load i32* %i, align 4
%idxprom29 = sext i32 %19 to i64
- %arrayidx30 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
+ %arrayidx30 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom29
store i64 2, i64* %arrayidx30, align 8
%20 = load i32* %i, align 4
%idxprom31 = sext i32 %20 to i64
- %arrayidx32 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
+ %arrayidx32 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom31
store i64 2, i64* %arrayidx32, align 8
store i16 -15897, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 4), align 2
store i16 30935, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 4), align 2
store i32 %inc34, i32* %j, align 4
%23 = load i32* %i, align 4
%idxprom35 = sext i32 %23 to i64
- %arrayidx36 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
+ %arrayidx36 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom35
store i8* bitcast (double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0) to i8*), i8** %arrayidx36, align 8
%24 = load i32* %i, align 4
%idxprom37 = sext i32 %24 to i64
- %arrayidx38 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
+ %arrayidx38 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom37
store i64 8, i64* %arrayidx38, align 8
%25 = load i32* %i, align 4
%idxprom39 = sext i32 %25 to i64
- %arrayidx40 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
+ %arrayidx40 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom39
store i64 8, i64* %arrayidx40, align 8
store double 0xC0FC765780000000, double* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 0, i64 0), align 8
store double 0xC1025CD7A0000000, double* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 0, i64 0), align 8
store i32 %inc61, i32* %j, align 4
%31 = load i32* %i, align 4
%idxprom62 = sext i32 %31 to i64
- %arrayidx63 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
+ %arrayidx63 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom62
store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), i8** %arrayidx63, align 8
%32 = load i32* %i, align 4
%idxprom64 = sext i32 %32 to i64
- %arrayidx65 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
+ %arrayidx65 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom64
store i64 1, i64* %arrayidx65, align 8
%33 = load i32* %i, align 4
%idxprom66 = sext i32 %33 to i64
- %arrayidx67 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
+ %arrayidx67 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom66
store i64 1, i64* %arrayidx67, align 8
store i8 -83, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 5), align 1
store i8 -67, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 5), align 1
store i32 %inc68, i32* %i, align 4
%35 = load i32* %i, align 4
%idxprom69 = sext i32 %35 to i64
- %arrayidx70 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
+ %arrayidx70 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom69
store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), i8** %arrayidx70, align 8
%36 = load i32* %i, align 4
%idxprom71 = sext i32 %36 to i64
- %arrayidx72 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
+ %arrayidx72 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom71
store i64 1, i64* %arrayidx72, align 8
%37 = load i32* %i, align 4
%idxprom73 = sext i32 %37 to i64
- %arrayidx74 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
+ %arrayidx74 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom73
store i64 1, i64* %arrayidx74, align 8
store i8 34, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 5, i64 1), align 1
store i8 64, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 5, i64 1), align 1
store i32 %inc75, i32* %i, align 4
%39 = load i32* %i, align 4
%idxprom76 = sext i32 %39 to i64
- %arrayidx77 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
+ %arrayidx77 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom76
store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3) to i8*), i8** %arrayidx77, align 8
%40 = load i32* %i, align 4
%idxprom78 = sext i32 %40 to i64
- %arrayidx79 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
+ %arrayidx79 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom78
store i64 4, i64* %arrayidx79, align 8
%41 = load i32* %i, align 4
%idxprom80 = sext i32 %41 to i64
- %arrayidx81 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
+ %arrayidx81 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom80
store i64 4, i64* %arrayidx81, align 8
store i32 -3, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 6, i64 3), align 4
store i32 -3, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 6, i64 3), align 4
store i32 %inc82, i32* %i, align 4
%43 = load i32* %i, align 4
%idxprom83 = sext i32 %43 to i64
- %arrayidx84 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
+ %arrayidx84 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom83
store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), i8** %arrayidx84, align 8
%44 = load i32* %i, align 4
%idxprom85 = sext i32 %44 to i64
- %arrayidx86 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
+ %arrayidx86 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom85
store i64 1, i64* %arrayidx86, align 8
%45 = load i32* %i, align 4
%idxprom87 = sext i32 %45 to i64
- %arrayidx88 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
+ %arrayidx88 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom87
store i64 1, i64* %arrayidx88, align 8
store i8 106, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 6, i64 4, i32 7), align 1
store i8 -102, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 6, i64 4, i32 7), align 1
store i32 %inc89, i32* %i, align 4
%47 = load i32* %i, align 4
%idxprom90 = sext i32 %47 to i64
- %arrayidx91 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
+ %arrayidx91 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom90
store i8* bitcast (i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7) to i8*), i8** %arrayidx91, align 8
%48 = load i32* %i, align 4
%idxprom92 = sext i32 %48 to i64
- %arrayidx93 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
+ %arrayidx93 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom92
store i64 2, i64* %arrayidx93, align 8
%49 = load i32* %i, align 4
%idxprom94 = sext i32 %49 to i64
- %arrayidx95 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
+ %arrayidx95 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom94
store i64 2, i64* %arrayidx95, align 8
store i16 29665, i16* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 7), align 2
store i16 7107, i16* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 7), align 2
store i32 %inc96, i32* %i, align 4
%51 = load i32* %i, align 4
%idxprom97 = sext i32 %51 to i64
- %arrayidx98 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
+ %arrayidx98 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom97
store i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), i8** %arrayidx98, align 8
%52 = load i32* %i, align 4
%idxprom99 = sext i32 %52 to i64
- %arrayidx100 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
+ %arrayidx100 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom99
store i64 1, i64* %arrayidx100, align 8
%53 = load i32* %i, align 4
%idxprom101 = sext i32 %53 to i64
- %arrayidx102 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
+ %arrayidx102 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom101
store i64 1, i64* %arrayidx102, align 8
store i8 52, i8* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 8), align 1
store i8 -86, i8* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 8), align 1
store i32 %inc103, i32* %i, align 4
%55 = load i32* %i, align 4
%idxprom104 = sext i32 %55 to i64
- %arrayidx105 = getelementptr inbounds [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
+ %arrayidx105 = getelementptr inbounds [32 x i8*], [32 x i8*]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 5), i32 0, i64 %idxprom104
store i8* bitcast (i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9) to i8*), i8** %arrayidx105, align 8
%56 = load i32* %i, align 4
%idxprom106 = sext i32 %56 to i64
- %arrayidx107 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
+ %arrayidx107 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 7), i32 0, i64 %idxprom106
store i64 4, i64* %arrayidx107, align 8
%57 = load i32* %i, align 4
%idxprom108 = sext i32 %57 to i64
- %arrayidx109 = getelementptr inbounds [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
+ %arrayidx109 = getelementptr inbounds [32 x i64], [32 x i64]* getelementptr inbounds (%struct.Info* @info, i32 0, i32 11), i32 0, i64 %idxprom108
store i64 4, i64* %arrayidx109, align 8
store i32 -54118453, i32* getelementptr inbounds (%struct.S1998* @s1998, i32 0, i32 9), align 4
store i32 1668755823, i32* getelementptr inbounds ([5 x %struct.S1998]* @a1998, i32 0, i64 2, i32 9), align 4
call void @llvm.memset.p0i8.i64(i8* %7, i8 0, i64 11104, i32 32, i1 false)
%8 = bitcast %struct.S2760* %b2 to i8*
call void @llvm.memset.p0i8.i64(i8* %8, i8 0, i64 11104, i32 32, i1 false)
- %b = getelementptr inbounds %struct.S2760* %arg0, i32 0, i32 1
- %g = getelementptr inbounds %struct.anon* %b, i32 0, i32 1
+ %b = getelementptr inbounds %struct.S2760, %struct.S2760* %arg0, i32 0, i32 1
+ %g = getelementptr inbounds %struct.anon, %struct.anon* %b, i32 0, i32 1
%9 = load i64* %g, align 8
%10 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
%cmp = icmp ne i64 %9, %10
if.end: ; preds = %if.then, %entry
%12 = load i64* getelementptr inbounds (%struct.S2760* @s2760, i32 0, i32 1, i32 1), align 8
- %b3 = getelementptr inbounds %struct.S2760* %ret, i32 0, i32 1
- %g4 = getelementptr inbounds %struct.anon* %b3, i32 0, i32 1
+ %b3 = getelementptr inbounds %struct.S2760, %struct.S2760* %ret, i32 0, i32 1
+ %g4 = getelementptr inbounds %struct.anon, %struct.anon* %b3, i32 0, i32 1
store i64 %12, i64* %g4, align 8
%13 = bitcast %struct.S2760* %agg.result to i8*
%14 = bitcast %struct.S2760* %ret to i8*
for.body4: ; preds = %for.body4, %for.cond2.preheader
%indvars.iv = phi i64 [ 0, %for.cond2.preheader ], [ %indvars.iv.next.15, %for.body4 ]
- %arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
- %arrayidx6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv
%0 = bitcast double* %arrayidx to <1 x double>*
%1 = load <1 x double>* %0, align 32
%add = fadd <1 x double> %1, <double 1.000000e+00>
%2 = bitcast double* %arrayidx6 to <1 x double>*
store <1 x double> %add, <1 x double>* %2, align 32
%indvars.iv.next.322 = or i64 %indvars.iv, 4
- %arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
- %arrayidx6.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
+ %arrayidx.4 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.322
+ %arrayidx6.4 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.322
%3 = bitcast double* %arrayidx.4 to <1 x double>*
%4 = load <1 x double>* %3, align 32
%add.4 = fadd <1 x double> %4, <double 1.000000e+00>
%5 = bitcast double* %arrayidx6.4 to <1 x double>*
store <1 x double> %add.4, <1 x double>* %5, align 32
%indvars.iv.next.726 = or i64 %indvars.iv, 8
- %arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
- %arrayidx6.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
+ %arrayidx.8 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.726
+ %arrayidx6.8 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.726
%6 = bitcast double* %arrayidx.8 to <1 x double>*
%7 = load <1 x double>* %6, align 32
%add.8 = fadd <1 x double> %7, <double 1.000000e+00>
%8 = bitcast double* %arrayidx6.8 to <1 x double>*
store <1 x double> %add.8, <1 x double>* %8, align 32
%indvars.iv.next.1130 = or i64 %indvars.iv, 12
- %arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
- %arrayidx6.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
+ %arrayidx.12 = getelementptr inbounds [16000 x double], [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1130
+ %arrayidx6.12 = getelementptr inbounds [16000 x double], [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1130
%9 = bitcast double* %arrayidx.12 to <1 x double>*
%10 = load <1 x double>* %9, align 32
%add.12 = fadd <1 x double> %10, <double 1.000000e+00>
unreachable
_ZNK4llvm14MachineOperand6getRegEv.exit: ; preds = %_ZN4llvm12MachineInstr10getOperandEj.exit
- %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
+ %IsDef.i = getelementptr inbounds %"class.llvm::MachineOperand", %"class.llvm::MachineOperand"* %0, i64 undef, i32 1
%1 = bitcast [3 x i8]* %IsDef.i to i24*
%bf.load.i = load i24* %1, align 1
%2 = and i24 %bf.load.i, 128
define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
%2 = load i32* @barbaz, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %2, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
ret void
define void @hoo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [200000 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [200000 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [200000 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [200000 x i32], [200000 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
ret void
define void @loo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx)
call void asm sideeffect "", "~{f30}"() nounwind
define i32 @test1(i64 %add, i64* %ptr) nounwind {
entry:
- %p1 = getelementptr i64* %ptr, i64 144115188075855
+ %p1 = getelementptr i64, i64* %ptr, i64 144115188075855
br label %for.cond2.preheader
for.cond2.preheader:
for.body4:
%lsr.iv = phi i32 [ %lsr.iv.next, %for.body4 ], [ 16000, %for.cond2.preheader ]
%i0 = phi i64* [ %p1, %for.cond2.preheader ], [ %i6, %for.body4 ]
- %i6 = getelementptr i64* %i0, i64 400000
- %i7 = getelementptr i64* %i6, i64 300000
- %i8 = getelementptr i64* %i6, i64 200000
- %i9 = getelementptr i64* %i6, i64 100000
+ %i6 = getelementptr i64, i64* %i0, i64 400000
+ %i7 = getelementptr i64, i64* %i6, i64 300000
+ %i8 = getelementptr i64, i64* %i6, i64 200000
+ %i9 = getelementptr i64, i64* %i6, i64 100000
store i64 %add, i64* %i6, align 32
store i64 %add, i64* %i7, align 32
store i64 %add, i64* %i8, align 32
define void @test2(float %a, i32* %b, i32 %i) nounwind {
; CHECK-LABEL: @test2
; CHECK-LS-LABEL: @test2
- %tmp.2 = getelementptr i32* %b, i32 1 ; <i32*> [#uses=1]
- %tmp.5 = getelementptr i32* %b, i32 %i ; <i32*> [#uses=1]
+ %tmp.2 = getelementptr i32, i32* %b, i32 1 ; <i32*> [#uses=1]
+ %tmp.5 = getelementptr i32, i32* %b, i32 %i ; <i32*> [#uses=1]
%tmp.7 = fptosi float %a to i32 ; <i32> [#uses=3]
store i32 %tmp.7, i32* %tmp.5
store i32 %tmp.7, i32* %tmp.2
define i8* @test_stbu(i8* %base, i8 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i8* %base, i64 16
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
store i8 %val, i8* %arrayidx, align 1
ret i8* %arrayidx
}
define i8* @test_stbux(i8* %base, i8 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i8* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
store i8 %val, i8* %arrayidx, align 1
ret i8* %arrayidx
}
define i16* @test_sthu(i16* %base, i16 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i16* %base, i64 16
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
store i16 %val, i16* %arrayidx, align 2
ret i16* %arrayidx
}
define i16* @test_sthux(i16* %base, i16 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i16* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
store i16 %val, i16* %arrayidx, align 2
ret i16* %arrayidx
}
define i32* @test_stwu(i32* %base, i32 zeroext %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i32* %base, i64 16
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
store i32 %val, i32* %arrayidx, align 4
ret i32* %arrayidx
}
define i32* @test_stwux(i32* %base, i32 zeroext %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i32* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
store i32 %val, i32* %arrayidx, align 4
ret i32* %arrayidx
}
define i8* @test_stbu8(i8* %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i8
- %arrayidx = getelementptr inbounds i8* %base, i64 16
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 16
store i8 %conv, i8* %arrayidx, align 1
ret i8* %arrayidx
}
define i8* @test_stbux8(i8* %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i8
- %arrayidx = getelementptr inbounds i8* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %offset
store i8 %conv, i8* %arrayidx, align 1
ret i8* %arrayidx
}
define i16* @test_sthu8(i16* %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i16
- %arrayidx = getelementptr inbounds i16* %base, i64 16
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 16
store i16 %conv, i16* %arrayidx, align 2
ret i16* %arrayidx
}
define i16* @test_sthux8(i16* %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i16
- %arrayidx = getelementptr inbounds i16* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i16, i16* %base, i64 %offset
store i16 %conv, i16* %arrayidx, align 2
ret i16* %arrayidx
}
define i32* @test_stwu8(i32* %base, i64 %val) nounwind {
entry:
%conv = trunc i64 %val to i32
- %arrayidx = getelementptr inbounds i32* %base, i64 16
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 16
store i32 %conv, i32* %arrayidx, align 4
ret i32* %arrayidx
}
define i32* @test_stwux8(i32* %base, i64 %val, i64 %offset) nounwind {
entry:
%conv = trunc i64 %val to i32
- %arrayidx = getelementptr inbounds i32* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i32, i32* %base, i64 %offset
store i32 %conv, i32* %arrayidx, align 4
ret i32* %arrayidx
}
define i64* @test_stdu(i64* %base, i64 %val) nounwind {
entry:
- %arrayidx = getelementptr inbounds i64* %base, i64 16
+ %arrayidx = getelementptr inbounds i64, i64* %base, i64 16
store i64 %val, i64* %arrayidx, align 8
ret i64* %arrayidx
}
define i64* @test_stdux(i64* %base, i64 %val, i64 %offset) nounwind {
entry:
- %arrayidx = getelementptr inbounds i64* %base, i64 %offset
+ %arrayidx = getelementptr inbounds i64, i64* %base, i64 %offset
store i64 %val, i64* %arrayidx, align 8
ret i64* %arrayidx
}
store i32 %z6, i32* %z6.addr, align 4
store i32 %z7, i32* %z7.addr, align 4
store i32 %z8, i32* %z8.addr, align 4
- %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
+ %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
%0 = load i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
%1 = load i16* %a1, align 2
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
+ %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
%2 = load i16* %a3, align 2
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
+ %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
%3 = load i32* %a6, align 4
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
+ %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
%4 = load i32* %a8, align 4
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
+ %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
%5 = load i32* %a10, align 4
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
+ %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
%6 = load i32* %a12, align 4
%add13 = add nsw i32 %add11, %6
ret i32 %add13
store i32 %z6, i32* %z6.addr, align 4
store i32 %z7, i32* %z7.addr, align 4
store i32 %z8, i32* %z8.addr, align 4
- %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
+ %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
%0 = load i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
%1 = load i16* %a1, align 1
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
+ %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
%2 = load i16* %a3, align 1
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
+ %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
%3 = load i32* %a6, align 1
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
+ %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
%4 = load i32* %a8, align 1
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
+ %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
%5 = load i32* %a10, align 1
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
+ %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
%6 = load i32* %a12, align 1
%add13 = add nsw i32 %add11, %6
ret i32 %add13
define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind {
entry:
- %a = getelementptr inbounds %struct.s1* %v1, i32 0, i32 0
+ %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0
%0 = load i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.s2* %v2, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.s2, %struct.s2* %v2, i32 0, i32 0
%1 = load i16* %a1, align 2
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.s3* %v3, i32 0, i32 0
+ %a3 = getelementptr inbounds %struct.s3, %struct.s3* %v3, i32 0, i32 0
%2 = load i16* %a3, align 2
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.s4* %v4, i32 0, i32 0
+ %a6 = getelementptr inbounds %struct.s4, %struct.s4* %v4, i32 0, i32 0
%3 = load i32* %a6, align 4
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.s5* %v5, i32 0, i32 0
+ %a8 = getelementptr inbounds %struct.s5, %struct.s5* %v5, i32 0, i32 0
%4 = load i32* %a8, align 4
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.s6* %v6, i32 0, i32 0
+ %a10 = getelementptr inbounds %struct.s6, %struct.s6* %v6, i32 0, i32 0
%5 = load i32* %a10, align 4
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.s7* %v7, i32 0, i32 0
+ %a12 = getelementptr inbounds %struct.s7, %struct.s7* %v7, i32 0, i32 0
%6 = load i32* %a12, align 4
%add13 = add nsw i32 %add11, %6
ret i32 %add13
define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind {
entry:
- %a = getelementptr inbounds %struct.t1* %v1, i32 0, i32 0
+ %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0
%0 = load i8* %a, align 1
%conv = zext i8 %0 to i32
- %a1 = getelementptr inbounds %struct.t2* %v2, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.t2, %struct.t2* %v2, i32 0, i32 0
%1 = load i16* %a1, align 1
%conv2 = sext i16 %1 to i32
%add = add nsw i32 %conv, %conv2
- %a3 = getelementptr inbounds %struct.t3* %v3, i32 0, i32 0
+ %a3 = getelementptr inbounds %struct.t3, %struct.t3* %v3, i32 0, i32 0
%2 = load i16* %a3, align 1
%conv4 = sext i16 %2 to i32
%add5 = add nsw i32 %add, %conv4
- %a6 = getelementptr inbounds %struct.t4* %v4, i32 0, i32 0
+ %a6 = getelementptr inbounds %struct.t4, %struct.t4* %v4, i32 0, i32 0
%3 = load i32* %a6, align 1
%add7 = add nsw i32 %add5, %3
- %a8 = getelementptr inbounds %struct.t5* %v5, i32 0, i32 0
+ %a8 = getelementptr inbounds %struct.t5, %struct.t5* %v5, i32 0, i32 0
%4 = load i32* %a8, align 1
%add9 = add nsw i32 %add7, %4
- %a10 = getelementptr inbounds %struct.t6* %v6, i32 0, i32 0
+ %a10 = getelementptr inbounds %struct.t6, %struct.t6* %v6, i32 0, i32 0
%5 = load i32* %a10, align 1
%add11 = add nsw i32 %add9, %5
- %a12 = getelementptr inbounds %struct.t7* %v7, i32 0, i32 0
+ %a12 = getelementptr inbounds %struct.t7, %struct.t7* %v7, i32 0, i32 0
%6 = load i32* %a12, align 1
%add13 = add nsw i32 %add11, %6
ret i32 %add13
define void @test1(%class.spell_checker.21.103.513.538* %this) unnamed_addr align 2 {
entry:
- %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
+ %_M_header.i.i.i.i.i.i = getelementptr inbounds %class.spell_checker.21.103.513.538, %class.spell_checker.21.103.513.538* %this, i64 0, i32 0, i32 0, i32 0, i32 1
%0 = bitcast %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 4, i1 false) nounwind
store %"struct.std::_Rb_tree_node_base.17.99.509.534"* %_M_header.i.i.i.i.i.i, %"struct.std::_Rb_tree_node_base.17.99.509.534"** undef, align 8
if.end15: ; preds = %while.end
%idxprom.i.i230 = sext i32 %i.1 to i64
- %arrayidx18 = getelementptr inbounds [100 x i32]* @multvec_i, i64 0, i64 %idxprom.i.i230
+ %arrayidx18 = getelementptr inbounds [100 x i32], [100 x i32]* @multvec_i, i64 0, i64 %idxprom.i.i230
store i32 0, i32* %arrayidx18, align 4
br i1 undef, label %while.body21, label %while.end90
while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
%0 = load i8** undef, align 8
- %add.ptr399 = getelementptr inbounds i8* %0, i64 -72
+ %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
%b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
%tobool.i1316 = icmp eq i64 undef, 0
br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
while.body392: ; preds = %wait_on_buffer.exit1319, %while.body392.lr.ph
%0 = load i8** undef, align 8
- %add.ptr399 = getelementptr inbounds i8* %0, i64 -72
+ %add.ptr399 = getelementptr inbounds i8, i8* %0, i64 -72
%b_state.i.i1314 = bitcast i8* %add.ptr399 to i64*
%tobool.i1316 = icmp eq i64 undef, 0
br i1 %tobool.i1316, label %wait_on_buffer.exit1319, label %if.then.i1317
%lnot404 = icmp eq i64 %conv.i.i1322, 0
%.err.4 = select i1 %lnot404, i32 -5, i32 undef
%2 = call i64 asm sideeffect "1:.long 0x7c0000a8 $| ((($0) & 0x1f) << 21) $| (((0) & 0x1f) << 16) $| ((($3) & 0x1f) << 11) $| (((0) & 0x1) << 0) \0Aandc $0,$0,$2\0Astdcx. $0,0,$3\0Abne- 1b\0A", "=&r,=*m,r,r,*m,~{cc},~{memory}"(i64* %b_state.i.i1314, i64 262144, i64* %b_state.i.i1314, i64* %b_state.i.i1314) #1
- %prev.i.i.i1325 = getelementptr inbounds i8* %0, i64 8
+ %prev.i.i.i1325 = getelementptr inbounds i8, i8* %0, i64 8
%3 = load i32** null, align 8
store i32* %3, i32** undef, align 8
call void @__brelse(i32* undef) #1
define void @_ZN4llvm21PrettyStackTraceEntryD0Ev(%"class.llvm::PrettyStackTraceEntry"* %this) unnamed_addr align 2 {
entry:
- %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
+ %0 = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 0
store i32 (...)** bitcast (i8** getelementptr inbounds ([5 x i8*]* @_ZTVN4llvm21PrettyStackTraceEntryE, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
%1 = load %"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead, align 8
%cmp.i = icmp eq %"class.llvm::PrettyStackTraceEntry"* %1, %this
unreachable
_ZN4llvm21PrettyStackTraceEntryD2Ev.exit: ; preds = %entry
- %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
+ %NextEntry.i.i = getelementptr inbounds %"class.llvm::PrettyStackTraceEntry", %"class.llvm::PrettyStackTraceEntry"* %this, i64 0, i32 1
%2 = bitcast %"class.llvm::PrettyStackTraceEntry"** %NextEntry.i.i to i64*
%3 = load i64* %2, align 8
store i64 %3, i64* bitcast (%"class.llvm::PrettyStackTraceEntry"** @_ZL20PrettyStackTraceHead to i64*), align 8
%FileOrErr = alloca %"class.llvm::ErrorOr", align 8
%ref.tmp = alloca %"class.llvm::SMDiagnostic", align 8
%ref.tmp5 = alloca %"class.std::basic_string", align 8
- %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
+ %_M_p.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename, i64 0, i32 0, i32 0
%0 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
%1 = ptrtoint i8* %0 to i64
- %arrayidx.i.i.i = getelementptr inbounds i8* %0, i64 -24
+ %arrayidx.i.i.i = getelementptr inbounds i8, i8* %0, i64 -24
%_M_length.i.i = bitcast i8* %arrayidx.i.i.i to i64*
%2 = load i64* %_M_length.i.i, align 8, !tbaa !7
%.fca.0.insert18 = insertvalue [2 x i64] undef, i64 %1, 0
%.fca.1.insert21 = insertvalue [2 x i64] %.fca.0.insert18, i64 %2, 1
call void @_ZN4llvm12MemoryBuffer14getFileOrSTDINENS_9StringRefEl(%"class.llvm::ErrorOr"* sret %FileOrErr, [2 x i64] %.fca.1.insert21, i64 -1) #3
- %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
+ %HasError.i24 = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 1
%bf.load.i25 = load i8* %HasError.i24, align 8
%3 = and i8 %bf.load.i25, 1
%bf.cast.i26 = icmp eq i8 %3, 0
_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit: ; preds = %entry
%retval.sroa.0.0..sroa_cast.i = bitcast %"class.llvm::ErrorOr"* %FileOrErr to i64*
%retval.sroa.0.0.copyload.i = load i64* %retval.sroa.0.0..sroa_cast.i, align 8
- %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
+ %retval.sroa.3.0..sroa_idx.i = getelementptr inbounds %"class.llvm::ErrorOr", %"class.llvm::ErrorOr"* %FileOrErr, i64 0, i32 0, i32 0, i32 0, i32 0, i64 8
%retval.sroa.3.0..sroa_cast.i = bitcast i8* %retval.sroa.3.0..sroa_idx.i to i64*
%retval.sroa.3.0.copyload.i = load i64* %retval.sroa.3.0..sroa_cast.i, align 8
%phitmp = trunc i64 %retval.sroa.0.0.copyload.i to i32
if.then: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
%.c = inttoptr i64 %retval.sroa.3.0.copyload.i to %"class.std::error_category"*
%4 = load i8** %_M_p.i.i.i, align 8, !tbaa !1
- %arrayidx.i.i.i30 = getelementptr inbounds i8* %4, i64 -24
+ %arrayidx.i.i.i30 = getelementptr inbounds i8, i8* %4, i64 -24
%_M_length.i.i31 = bitcast i8* %arrayidx.i.i.i30 to i64*
%5 = load i64* %_M_length.i.i31, align 8, !tbaa !7
%6 = inttoptr i64 %retval.sroa.3.0.copyload.i to void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)***
%vtable.i = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*** %6, align 8, !tbaa !11
- %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
+ %vfn.i = getelementptr inbounds void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)*, void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vtable.i, i64 3
%7 = load void (%"class.std::basic_string"*, %"class.std::error_category"*, i32)** %vfn.i, align 8
call void %7(%"class.std::basic_string"* sret %ref.tmp5, %"class.std::error_category"* %.c, i32 signext %phitmp) #3
%call2.i.i = call dereferenceable(8) %"class.std::basic_string"* @_ZNSs6insertEmPKcm(%"class.std::basic_string"* %ref.tmp5, i64 0, i8* getelementptr inbounds ([28 x i8]* @.str, i64 0, i64 0), i64 27) #3
- %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
+ %_M_p2.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %call2.i.i, i64 0, i32 0, i32 0
%8 = load i8** %_M_p2.i.i.i.i, align 8, !tbaa !13
store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p2.i.i.i.i, align 8, !tbaa !1
- %arrayidx.i.i.i36 = getelementptr inbounds i8* %8, i64 -24
+ %arrayidx.i.i.i36 = getelementptr inbounds i8, i8* %8, i64 -24
%_M_length.i.i37 = bitcast i8* %arrayidx.i.i.i36 to i64*
%9 = load i64* %_M_length.i.i37, align 8, !tbaa !7
- %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
- %10 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
+ %Filename.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 2
+ %10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
%11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
call void @llvm.memset.p0i8.i64(i8* %11, i8 0, i64 16, i32 8, i1 false) #3
call void @llvm.lifetime.start(i64 1, i8* %10) #3
br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i
if.then.i.i6.i: ; preds = %if.then
- %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string"* %Filename.i, i64 0, i32 0, i32 0
+ %_M_p.i.i.i.i.i.i5.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Filename.i, i64 0, i32 0, i32 0
store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i5.i, align 8, !tbaa !13
br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
_ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.then.i.i6.i
call void @llvm.lifetime.end(i64 1, i8* %10) #3
- %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
+ %LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
store i32 -1, i32* %LineNo.i, align 8, !tbaa !14
- %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
+ %ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
store i32 -1, i32* %ColumnNo.i, align 4, !tbaa !21
- %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 5
+ %Kind.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 5
store i32 0, i32* %Kind.i, align 8, !tbaa !22
- %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
- %12 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
+ %Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
+ %12 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
call void @llvm.lifetime.start(i64 1, i8* %12) #3
%tobool.i.i.i = icmp eq i8* %8, null
br i1 %tobool.i.i.i, label %if.then.i.i.i, label %if.end.i.i.i
if.then.i.i.i: ; preds = %_ZNK4llvm9StringRefcvSsEv.exit9.i
- %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string"* %Message.i, i64 0, i32 0, i32 0
+ %_M_p.i.i.i.i.i.i.i = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %Message.i, i64 0, i32 0, i32 0
store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i.i.i, align 8, !tbaa !13
br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds = %if.then.i.i.i, %if.end.i.i.i
call void @llvm.lifetime.end(i64 1, i8* %12) #3
- %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
+ %_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
store i8* bitcast (i64* getelementptr inbounds ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
- %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
+ %Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
%13 = bitcast %"class.std::vector.79"* %Ranges.i to i8*
call void @llvm.memset.p0i8.i64(i8* %13, i8 0, i64 24, i32 8, i1 false) #3
- %14 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
- %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
+ %14 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 0
+ %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 0
store i8* %14, i8** %BeginX.i.i.i.i.i.i, align 8, !tbaa !23
- %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
+ %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 1
store i8* %14, i8** %EndX.i.i.i.i.i.i, align 8, !tbaa !25
- %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
- %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
+ %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 0, i32 2
+ %add.ptr.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0, i64 96
store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 8, !tbaa !26
%15 = bitcast %"class.llvm::SMDiagnostic"* %Err to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %15, i8* %11, i64 16, i32 8, i1 false) #3
- %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2
+ %Filename.i38 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 2
call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Filename.i38, %"class.std::basic_string"* dereferenceable(8) %Filename.i) #3
- %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3
+ %LineNo.i39 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 3
%16 = bitcast i32* %LineNo.i39 to i8*
%17 = bitcast i32* %LineNo.i to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %16, i8* %17, i64 12, i32 4, i1 false) #3
- %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6
+ %Message.i40 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 6
call void @_ZNSs4swapERSs(%"class.std::basic_string"* %Message.i40, %"class.std::basic_string"* dereferenceable(8) %Message.i) #3
- %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7
- %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7
+ %LineContents.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 7
+ %LineContents7.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7
call void @_ZNSs4swapERSs(%"class.std::basic_string"* %LineContents.i, %"class.std::basic_string"* dereferenceable(8) %LineContents7.i) #3
- %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
- %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
+ %Ranges.i41 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8
+ %_M_start.i7.i.i.i = getelementptr inbounds %"class.std::vector.79", %"class.std::vector.79"* %Ranges.i41, i64 0, i32 0, i32 0, i32 0
%18 = load %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
- %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
- %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
- %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
+ %_M_finish.i9.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 1
+ %_M_end_of_storage.i11.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 8, i32 0, i32 0, i32 2
+ %_M_start2.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 0
%19 = bitcast %"class.std::vector.79"* %Ranges.i41 to i8*
call void @llvm.memset.p0i8.i64(i8* %19, i8 0, i64 16, i32 8, i1 false) #3
%20 = load %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* %20, %"struct.std::pair"** %_M_start.i7.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* null, %"struct.std::pair"** %_M_start2.i.i.i.i, align 8, !tbaa !27
- %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
+ %_M_finish3.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 1
%21 = load %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* %21, %"struct.std::pair"** %_M_finish.i9.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* null, %"struct.std::pair"** %_M_finish3.i.i.i.i, align 8, !tbaa !27
- %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
+ %_M_end_of_storage4.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8, i32 0, i32 0, i32 2
%22 = load %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* %22, %"struct.std::pair"** %_M_end_of_storage.i11.i.i.i, align 8, !tbaa !27
store %"struct.std::pair"* null, %"struct.std::pair"** %_M_end_of_storage4.i.i.i.i, align 8, !tbaa !27
br label %_ZN4llvm12SMDiagnosticaSEOS0_.exit
_ZN4llvm12SMDiagnosticaSEOS0_.exit: ; preds = %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit, %if.then.i.i.i.i.i.i
- %24 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 9, i32 0
- %25 = getelementptr inbounds %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0
+ %24 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %Err, i64 0, i32 9, i32 0
+ %25 = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 9, i32 0
%call2.i.i42 = call dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %24, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %25) #3
call void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* %ref.tmp) #3
- %26 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
+ %26 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
call void @llvm.lifetime.start(i64 1, i8* %26) #3
%27 = bitcast i8* %arrayidx.i.i.i36 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
%cmp.i.i.i = icmp eq i8* %arrayidx.i.i.i36, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
br i1 %cmp.i.i.i, label %_ZNSsD1Ev.exit, label %if.then.i.i.i45, !prof !28
if.then.i.i.i45: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit
- %_M_refcount.i.i.i = getelementptr inbounds i8* %8, i64 -8
+ %_M_refcount.i.i.i = getelementptr inbounds i8, i8* %8, i64 -8
%28 = bitcast i8* %_M_refcount.i.i.i to i32*
br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i, label %if.else.i.i.i.i
_ZNSsD1Ev.exit: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i, %if.then4.i.i.i
call void @llvm.lifetime.end(i64 1, i8* %26) #3
- %31 = getelementptr inbounds %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
+ %31 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
call void @llvm.lifetime.start(i64 1, i8* %31) #3
- %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
+ %_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
%32 = load i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
- %arrayidx.i.i.i49 = getelementptr inbounds i8* %32, i64 -24
+ %arrayidx.i.i.i49 = getelementptr inbounds i8, i8* %32, i64 -24
%33 = bitcast i8* %arrayidx.i.i.i49 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
%cmp.i.i.i50 = icmp eq i8* %arrayidx.i.i.i49, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
br i1 %cmp.i.i.i50, label %_ZNSsD1Ev.exit62, label %if.then.i.i.i52, !prof !28
if.then.i.i.i52: ; preds = %_ZNSsD1Ev.exit
- %_M_refcount.i.i.i51 = getelementptr inbounds i8* %32, i64 -8
+ %_M_refcount.i.i.i51 = getelementptr inbounds i8, i8* %32, i64 -8
%34 = bitcast i8* %_M_refcount.i.i.i51 to i32*
br i1 icmp ne (i8* bitcast (i32 (i32*, void (i8*)*)* @__pthread_key_create to i8*), i8* null), label %if.then.i.i.i.i55, label %if.else.i.i.i.i57
_ZNKSt14default_deleteIN4llvm12MemoryBufferEEclEPS1_.exit.i.i: ; preds = %_ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE10getStorageEv.exit.i
%40 = bitcast %"class.llvm::MemoryBuffer"* %39 to void (%"class.llvm::MemoryBuffer"*)***
%vtable.i.i.i = load void (%"class.llvm::MemoryBuffer"*)*** %40, align 8, !tbaa !11
- %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
+ %vfn.i.i.i = getelementptr inbounds void (%"class.llvm::MemoryBuffer"*)*, void (%"class.llvm::MemoryBuffer"*)** %vtable.i.i.i, i64 1
%41 = load void (%"class.llvm::MemoryBuffer"*)** %vfn.i.i.i, align 8
call void %41(%"class.llvm::MemoryBuffer"* %39) #3
br label %_ZNSt10unique_ptrIN4llvm12MemoryBufferESt14default_deleteIS1_EED2Ev.exit.i
store %struct.NSBitmapImageRep* %self, %struct.NSBitmapImageRep** %self_addr
store %struct.objc_selector* %_cmd, %struct.objc_selector** %_cmd_addr
store %struct.NSZone* %zone, %struct.NSZone** %zone_addr
- %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %3 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
%4 = load %struct.NSBitmapImageRep** %self_addr, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.NSBitmapImageRep* %4, %struct.NSBitmapImageRep** %3, align 4
%TRAMP.91 = bitcast %struct.__builtin_trampoline* %TRAMP.9 to i8* ; <i8*> [#uses=1]
call void @llvm.init.trampoline(i8* %TRAMP.91, i8* bitcast (void (%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*, %struct.__block_1*, %struct.CGImage*)* @__helper_1.1632 to i8*), i8* %FRAME.72) ; <i8*> [#uses=1]
%tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.91)
store i8* %tramp, i8** %0, align 4
- %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
+ %5 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
%6 = load i8** %0, align 4 ; <i8*> [#uses=1]
%7 = bitcast i8* %6 to void (%struct.__block_1*, %struct.CGImage*)* ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
store void (%struct.__block_1*, %struct.CGImage*)* %7, void (%struct.__block_1*, %struct.CGImage*)** %5, align 4
store %struct.NSBitmapImageRep* null, %struct.NSBitmapImageRep** %new, align 4
- %8 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %9 = getelementptr %struct.__invoke_impl* %8, i32 0, i32 0 ; <i8**> [#uses=1]
+ %8 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %9 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %8, i32 0, i32 0 ; <i8**> [#uses=1]
store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %9, align 4
- %10 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %11 = getelementptr %struct.__invoke_impl* %10, i32 0, i32 1 ; <i32*> [#uses=1]
+ %10 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %11 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %10, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 67108864, i32* %11, align 4
- %12 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %13 = getelementptr %struct.__invoke_impl* %12, i32 0, i32 2 ; <i32*> [#uses=1]
+ %12 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %13 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %12, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 24, i32* %13, align 4
- %14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
+ %14 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 1 ; <void (%struct.__block_1*, %struct.CGImage*)**> [#uses=1]
%15 = load void (%struct.__block_1*, %struct.CGImage*)** %14, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
store void (%struct.__block_1*, %struct.CGImage*)* %15, void (%struct.__block_1*, %struct.CGImage*)** %1, align 4
- %16 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
- %17 = getelementptr %struct.__invoke_impl* %16, i32 0, i32 3 ; <i8**> [#uses=1]
+ %16 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 0 ; <%struct.__invoke_impl*> [#uses=1]
+ %17 = getelementptr %struct.__invoke_impl, %struct.__invoke_impl* %16, i32 0, i32 3 ; <i8**> [#uses=1]
%18 = load void (%struct.__block_1*, %struct.CGImage*)** %1, align 4 ; <void (%struct.__block_1*, %struct.CGImage*)*> [#uses=1]
%19 = bitcast void (%struct.__block_1*, %struct.CGImage*)* %18 to i8* ; <i8*> [#uses=1]
store i8* %19, i8** %17, align 4
- %20 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
+ %20 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
%21 = load %struct.NSZone** %zone_addr, align 4 ; <%struct.NSZone*> [#uses=1]
store %struct.NSZone* %21, %struct.NSZone** %20, align 4
- %22 = getelementptr %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
+ %22 = getelementptr %struct.__block_1, %struct.__block_1* %__block_holder_tmp_1.0, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
store %struct.NSBitmapImageRep** %new, %struct.NSBitmapImageRep*** %22, align 4
- %23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %23 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %FRAME.7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
%24 = load %struct.NSBitmapImageRep** %23, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.NSBitmapImageRep* %24, %struct.NSBitmapImageRep** %2, align 4
%25 = load %struct.NSBitmapImageRep** %2, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.__block_1* %_self, %struct.__block_1** %_self_addr
store %struct.CGImage* %cgImage, %struct.CGImage** %cgImage_addr
%1 = load %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %2 = getelementptr %struct.__block_1* %1, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
+ %2 = getelementptr %struct.__block_1, %struct.__block_1* %1, i32 0, i32 2 ; <%struct.NSBitmapImageRep***> [#uses=1]
%3 = load %struct.NSBitmapImageRep*** %2, align 4 ; <%struct.NSBitmapImageRep**> [#uses=1]
store %struct.NSBitmapImageRep** %3, %struct.NSBitmapImageRep*** %new, align 4
%4 = load %struct.__block_1** %_self_addr, align 4 ; <%struct.__block_1*> [#uses=1]
- %5 = getelementptr %struct.__block_1* %4, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
+ %5 = getelementptr %struct.__block_1, %struct.__block_1* %4, i32 0, i32 1 ; <%struct.NSZone**> [#uses=1]
%6 = load %struct.NSZone** %5, align 4 ; <%struct.NSZone*> [#uses=1]
store %struct.NSZone* %6, %struct.NSZone** %zone, align 4
%7 = load %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"** %CHAIN.8_addr, align 4 ; <%"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"*> [#uses=1]
- %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
+ %8 = getelementptr %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]", %"struct.FRAME.-[NSBitmapImageRep copyWithZone:]"* %7, i32 0, i32 0 ; <%struct.NSBitmapImageRep**> [#uses=1]
%9 = load %struct.NSBitmapImageRep** %8, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
store %struct.NSBitmapImageRep* %9, %struct.NSBitmapImageRep** %0, align 4
%10 = load %struct.NSBitmapImageRep** %0, align 4 ; <%struct.NSBitmapImageRep*> [#uses=1]
%11 = bitcast %struct.NSBitmapImageRep* %10 to %struct.objc_object* ; <%struct.objc_object*> [#uses=1]
- %12 = getelementptr %struct._objc_super* %objc_super, i32 0, i32 0 ; <%struct.objc_object**> [#uses=1]
+ %12 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 0 ; <%struct.objc_object**> [#uses=1]
store %struct.objc_object* %11, %struct.objc_object** %12, align 4
%13 = load %struct._objc_class** getelementptr (%struct._objc_class* @"\01L_OBJC_CLASS_NSBitmapImageRep", i32 0, i32 1), align 4 ; <%struct._objc_class*> [#uses=1]
- %14 = getelementptr %struct._objc_super* %objc_super, i32 0, i32 1 ; <%struct._objc_class**> [#uses=1]
+ %14 = getelementptr %struct._objc_super, %struct._objc_super* %objc_super, i32 0, i32 1 ; <%struct._objc_class**> [#uses=1]
store %struct._objc_class* %13, %struct._objc_class** %14, align 4
%objc_super1 = bitcast %struct._objc_super* %objc_super to %struct.objc_super* ; <%struct.objc_super*> [#uses=1]
store %struct.objc_super* %objc_super1, %struct.objc_super** %objc_super.5, align 4
define <4 x i32> @test1(<4 x i32>* %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
%vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
define <4 x i32> @test2(<4 x i32>* %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds float* %b, i64 %index
+ %0 = getelementptr inbounds float, float* %b, i64 %index
%1 = bitcast float* %0 to <4 x float>*
%wide.load = load <4 x float>* %1, align 4
%.sum11 = or i64 %index, 4
- %2 = getelementptr float* %b, i64 %.sum11
+ %2 = getelementptr float, float* %b, i64 %.sum11
%3 = bitcast float* %2 to <4 x float>*
%wide.load8 = load <4 x float>* %3, align 4
%4 = fadd <4 x float> %wide.load, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
%5 = fadd <4 x float> %wide.load8, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
- %6 = getelementptr inbounds float* %a, i64 %index
+ %6 = getelementptr inbounds float, float* %a, i64 %index
%7 = bitcast float* %6 to <4 x float>*
store <4 x float> %4, <4 x float>* %7, align 4
%.sum12 = or i64 %index, 4
- %8 = getelementptr float* %a, i64 %.sum12
+ %8 = getelementptr float, float* %a, i64 %.sum12
%9 = bitcast float* %8 to <4 x float>*
store <4 x float> %5, <4 x float>* %9, align 4
%index.next = add i64 %index, 8
; CHECK: lvsl
; CHECK: blr
%index = phi i64 [ 0, %entry ], [ %index.next.15, %vector.body ]
- %0 = getelementptr inbounds float* %y, i64 %index
+ %0 = getelementptr inbounds float, float* %y, i64 %index
%1 = bitcast float* %0 to <4 x float>*
%wide.load = load <4 x float>* %1, align 4
%2 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load)
- %3 = getelementptr inbounds float* %x, i64 %index
+ %3 = getelementptr inbounds float, float* %x, i64 %index
%4 = bitcast float* %3 to <4 x float>*
store <4 x float> %2, <4 x float>* %4, align 4
%index.next = add i64 %index, 4
- %5 = getelementptr inbounds float* %y, i64 %index.next
+ %5 = getelementptr inbounds float, float* %y, i64 %index.next
%6 = bitcast float* %5 to <4 x float>*
%wide.load.1 = load <4 x float>* %6, align 4
%7 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.1)
- %8 = getelementptr inbounds float* %x, i64 %index.next
+ %8 = getelementptr inbounds float, float* %x, i64 %index.next
%9 = bitcast float* %8 to <4 x float>*
store <4 x float> %7, <4 x float>* %9, align 4
%index.next.1 = add i64 %index.next, 4
- %10 = getelementptr inbounds float* %y, i64 %index.next.1
+ %10 = getelementptr inbounds float, float* %y, i64 %index.next.1
%11 = bitcast float* %10 to <4 x float>*
%wide.load.2 = load <4 x float>* %11, align 4
%12 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.2)
- %13 = getelementptr inbounds float* %x, i64 %index.next.1
+ %13 = getelementptr inbounds float, float* %x, i64 %index.next.1
%14 = bitcast float* %13 to <4 x float>*
store <4 x float> %12, <4 x float>* %14, align 4
%index.next.2 = add i64 %index.next.1, 4
- %15 = getelementptr inbounds float* %y, i64 %index.next.2
+ %15 = getelementptr inbounds float, float* %y, i64 %index.next.2
%16 = bitcast float* %15 to <4 x float>*
%wide.load.3 = load <4 x float>* %16, align 4
%17 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.3)
- %18 = getelementptr inbounds float* %x, i64 %index.next.2
+ %18 = getelementptr inbounds float, float* %x, i64 %index.next.2
%19 = bitcast float* %18 to <4 x float>*
store <4 x float> %17, <4 x float>* %19, align 4
%index.next.3 = add i64 %index.next.2, 4
- %20 = getelementptr inbounds float* %y, i64 %index.next.3
+ %20 = getelementptr inbounds float, float* %y, i64 %index.next.3
%21 = bitcast float* %20 to <4 x float>*
%wide.load.4 = load <4 x float>* %21, align 4
%22 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.4)
- %23 = getelementptr inbounds float* %x, i64 %index.next.3
+ %23 = getelementptr inbounds float, float* %x, i64 %index.next.3
%24 = bitcast float* %23 to <4 x float>*
store <4 x float> %22, <4 x float>* %24, align 4
%index.next.4 = add i64 %index.next.3, 4
- %25 = getelementptr inbounds float* %y, i64 %index.next.4
+ %25 = getelementptr inbounds float, float* %y, i64 %index.next.4
%26 = bitcast float* %25 to <4 x float>*
%wide.load.5 = load <4 x float>* %26, align 4
%27 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.5)
- %28 = getelementptr inbounds float* %x, i64 %index.next.4
+ %28 = getelementptr inbounds float, float* %x, i64 %index.next.4
%29 = bitcast float* %28 to <4 x float>*
store <4 x float> %27, <4 x float>* %29, align 4
%index.next.5 = add i64 %index.next.4, 4
- %30 = getelementptr inbounds float* %y, i64 %index.next.5
+ %30 = getelementptr inbounds float, float* %y, i64 %index.next.5
%31 = bitcast float* %30 to <4 x float>*
%wide.load.6 = load <4 x float>* %31, align 4
%32 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.6)
- %33 = getelementptr inbounds float* %x, i64 %index.next.5
+ %33 = getelementptr inbounds float, float* %x, i64 %index.next.5
%34 = bitcast float* %33 to <4 x float>*
store <4 x float> %32, <4 x float>* %34, align 4
%index.next.6 = add i64 %index.next.5, 4
- %35 = getelementptr inbounds float* %y, i64 %index.next.6
+ %35 = getelementptr inbounds float, float* %y, i64 %index.next.6
%36 = bitcast float* %35 to <4 x float>*
%wide.load.7 = load <4 x float>* %36, align 4
%37 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.7)
- %38 = getelementptr inbounds float* %x, i64 %index.next.6
+ %38 = getelementptr inbounds float, float* %x, i64 %index.next.6
%39 = bitcast float* %38 to <4 x float>*
store <4 x float> %37, <4 x float>* %39, align 4
%index.next.7 = add i64 %index.next.6, 4
- %40 = getelementptr inbounds float* %y, i64 %index.next.7
+ %40 = getelementptr inbounds float, float* %y, i64 %index.next.7
%41 = bitcast float* %40 to <4 x float>*
%wide.load.8 = load <4 x float>* %41, align 4
%42 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.8)
- %43 = getelementptr inbounds float* %x, i64 %index.next.7
+ %43 = getelementptr inbounds float, float* %x, i64 %index.next.7
%44 = bitcast float* %43 to <4 x float>*
store <4 x float> %42, <4 x float>* %44, align 4
%index.next.8 = add i64 %index.next.7, 4
- %45 = getelementptr inbounds float* %y, i64 %index.next.8
+ %45 = getelementptr inbounds float, float* %y, i64 %index.next.8
%46 = bitcast float* %45 to <4 x float>*
%wide.load.9 = load <4 x float>* %46, align 4
%47 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.9)
- %48 = getelementptr inbounds float* %x, i64 %index.next.8
+ %48 = getelementptr inbounds float, float* %x, i64 %index.next.8
%49 = bitcast float* %48 to <4 x float>*
store <4 x float> %47, <4 x float>* %49, align 4
%index.next.9 = add i64 %index.next.8, 4
- %50 = getelementptr inbounds float* %y, i64 %index.next.9
+ %50 = getelementptr inbounds float, float* %y, i64 %index.next.9
%51 = bitcast float* %50 to <4 x float>*
%wide.load.10 = load <4 x float>* %51, align 4
%52 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.10)
- %53 = getelementptr inbounds float* %x, i64 %index.next.9
+ %53 = getelementptr inbounds float, float* %x, i64 %index.next.9
%54 = bitcast float* %53 to <4 x float>*
store <4 x float> %52, <4 x float>* %54, align 4
%index.next.10 = add i64 %index.next.9, 4
- %55 = getelementptr inbounds float* %y, i64 %index.next.10
+ %55 = getelementptr inbounds float, float* %y, i64 %index.next.10
%56 = bitcast float* %55 to <4 x float>*
%wide.load.11 = load <4 x float>* %56, align 4
%57 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.11)
- %58 = getelementptr inbounds float* %x, i64 %index.next.10
+ %58 = getelementptr inbounds float, float* %x, i64 %index.next.10
%59 = bitcast float* %58 to <4 x float>*
store <4 x float> %57, <4 x float>* %59, align 4
%index.next.11 = add i64 %index.next.10, 4
- %60 = getelementptr inbounds float* %y, i64 %index.next.11
+ %60 = getelementptr inbounds float, float* %y, i64 %index.next.11
%61 = bitcast float* %60 to <4 x float>*
%wide.load.12 = load <4 x float>* %61, align 4
%62 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.12)
- %63 = getelementptr inbounds float* %x, i64 %index.next.11
+ %63 = getelementptr inbounds float, float* %x, i64 %index.next.11
%64 = bitcast float* %63 to <4 x float>*
store <4 x float> %62, <4 x float>* %64, align 4
%index.next.12 = add i64 %index.next.11, 4
- %65 = getelementptr inbounds float* %y, i64 %index.next.12
+ %65 = getelementptr inbounds float, float* %y, i64 %index.next.12
%66 = bitcast float* %65 to <4 x float>*
%wide.load.13 = load <4 x float>* %66, align 4
%67 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.13)
- %68 = getelementptr inbounds float* %x, i64 %index.next.12
+ %68 = getelementptr inbounds float, float* %x, i64 %index.next.12
%69 = bitcast float* %68 to <4 x float>*
store <4 x float> %67, <4 x float>* %69, align 4
%index.next.13 = add i64 %index.next.12, 4
- %70 = getelementptr inbounds float* %y, i64 %index.next.13
+ %70 = getelementptr inbounds float, float* %y, i64 %index.next.13
%71 = bitcast float* %70 to <4 x float>*
%wide.load.14 = load <4 x float>* %71, align 4
%72 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.14)
- %73 = getelementptr inbounds float* %x, i64 %index.next.13
+ %73 = getelementptr inbounds float, float* %x, i64 %index.next.13
%74 = bitcast float* %73 to <4 x float>*
store <4 x float> %72, <4 x float>* %74, align 4
%index.next.14 = add i64 %index.next.13, 4
- %75 = getelementptr inbounds float* %y, i64 %index.next.14
+ %75 = getelementptr inbounds float, float* %y, i64 %index.next.14
%76 = bitcast float* %75 to <4 x float>*
%wide.load.15 = load <4 x float>* %76, align 4
%77 = call <4 x float> @llvm_cos_v4f32(<4 x float> %wide.load.15)
- %78 = getelementptr inbounds float* %x, i64 %index.next.14
+ %78 = getelementptr inbounds float, float* %x, i64 %index.next.14
%79 = bitcast float* %78 to <4 x float>*
store <4 x float> %77, <4 x float>* %79, align 4
%index.next.15 = add i64 %index.next.14, 4
define void @foo(float inreg %s.coerce) nounwind {
entry:
%s = alloca %struct.Sf1, align 4
- %coerce.dive = getelementptr %struct.Sf1* %s, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
store float %s.coerce, float* %coerce.dive, align 1
- %coerce.dive1 = getelementptr %struct.Sf1* %s, i32 0, i32 0
+ %coerce.dive1 = getelementptr %struct.Sf1, %struct.Sf1* %s, i32 0, i32 0
%0 = load float* %coerce.dive1, align 1
call void (i32, ...)* @testvaSf1(i32 1, float inreg %0)
ret void
; Function Attrs: nounwind
define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval nocapture readonly %vs) #0 {
entry:
- %m = getelementptr inbounds %struct.s2* %vs, i64 0, i32 0
+ %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
%0 = load i64* %m, align 8
store i64 %0, i64* @n, align 8
- %v = getelementptr inbounds %struct.s2* %vs, i64 0, i32 1
+ %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
%1 = load <4 x float>* %v, align 16
store <4 x float> %1, <4 x float>* @ve, align 16
ret void
; Function Attrs: nounwind
define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval nocapture readonly %vs) #0 {
entry:
- %m = getelementptr inbounds %struct.s2* %vs, i64 0, i32 0
+ %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0
%0 = load i64* %m, align 8
store i64 %0, i64* @n, align 8
- %v = getelementptr inbounds %struct.s2* %vs, i64 0, i32 1
+ %v = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 1
%1 = load <4 x float>* %v, align 16
store <4 x float> %1, <4 x float>* @ve, align 16
ret void
%tmp = load i8** %ap, align 4 ; <i8*> [#uses=1]
store i8* %tmp, i8** %ap.0, align 4
%tmp2 = load i8** %ap.0, align 4 ; <i8*> [#uses=1]
- %tmp3 = getelementptr i8* %tmp2, i64 16 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr i8, i8* %tmp2, i64 16 ; <i8*> [#uses=1]
store i8* %tmp3, i8** %ap, align 4
%tmp4 = load i8** %ap.0, align 4 ; <i8*> [#uses=1]
%tmp45 = bitcast i8* %tmp4 to %struct.S2203* ; <%struct.S2203*> [#uses=1]
- %tmp6 = getelementptr %struct.S2203* @s, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp7 = getelementptr %struct.S2203* %tmp45, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
- %tmp8 = getelementptr %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
- %tmp9 = getelementptr %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
+ %tmp6 = getelementptr %struct.S2203, %struct.S2203* @s, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
+ %tmp7 = getelementptr %struct.S2203, %struct.S2203* %tmp45, i32 0, i32 0 ; <%struct.u16qi*> [#uses=1]
+ %tmp8 = getelementptr %struct.u16qi, %struct.u16qi* %tmp6, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
+ %tmp9 = getelementptr %struct.u16qi, %struct.u16qi* %tmp7, i32 0, i32 0 ; <<16 x i8>*> [#uses=1]
%tmp10 = load <16 x i8>* %tmp9, align 4 ; <<16 x i8>> [#uses=1]
; CHECK: lvsl
; CHECK: vperm
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx1, align 8
ret void
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx1, align 8
%2 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx2 = getelementptr inbounds double* %d, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
store double %2, double* %arrayidx2, align 8
ret void
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
%2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
- %arrayidx1 = getelementptr inbounds double* %d, i64 3
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 3
store double %2, double* %arrayidx1, align 8
%3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx2 = getelementptr inbounds double* %d, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %d, i64 2
store double %3, double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %d, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx3, align 8
ret void
%0 = tail call double @llvm.fma.f64(double %b, double %c, double %a)
store double %0, double* %d, align 8
%1 = tail call double @llvm.fma.f64(double %b, double %e, double %a)
- %arrayidx1 = getelementptr inbounds double* %d, i64 1
+ %arrayidx1 = getelementptr inbounds double, double* %d, i64 1
store double %1, double* %arrayidx1, align 8
%2 = tail call double @llvm.fma.f64(double %b, double %c, double %1)
- %arrayidx3 = getelementptr inbounds double* %d, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %d, i64 3
store double %2, double* %arrayidx3, align 8
%3 = tail call double @llvm.fma.f64(double %b, double %f, double %a)
- %arrayidx4 = getelementptr inbounds double* %d, i64 2
+ %arrayidx4 = getelementptr inbounds double, double* %d, i64 2
store double %3, double* %arrayidx4, align 8
ret void
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx1, align 8
ret void
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx1, align 8
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx2 = getelementptr inbounds <2 x double>* %d, i64 2
+ %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
store <2 x double> %2, <2 x double>* %arrayidx2, align 8
ret void
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 3
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
store <2 x double> %2, <2 x double>* %arrayidx1, align 8
%3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx2 = getelementptr inbounds <2 x double>* %d, i64 2
+ %arrayidx2 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
store <2 x double> %3, <2 x double>* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx3, align 8
ret void
%0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %a)
store <2 x double> %0, <2 x double>* %d, align 8
%1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %e, <2 x double> %a)
- %arrayidx1 = getelementptr inbounds <2 x double>* %d, i64 1
+ %arrayidx1 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 1
store <2 x double> %1, <2 x double>* %arrayidx1, align 8
%2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %c, <2 x double> %1)
- %arrayidx3 = getelementptr inbounds <2 x double>* %d, i64 3
+ %arrayidx3 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 3
store <2 x double> %2, <2 x double>* %arrayidx3, align 8
%3 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %b, <2 x double> %f, <2 x double> %a)
- %arrayidx4 = getelementptr inbounds <2 x double>* %d, i64 2
+ %arrayidx4 = getelementptr inbounds <2 x double>, <2 x double>* %d, i64 2
store <2 x double> %3, <2 x double>* %arrayidx4, align 8
ret void
%vec.phi30 = phi <4 x i32> [ zeroinitializer, %entry ], [ %53, %vector.body ]
%wide.load32 = load <4 x i32>* null, align 4
%.sum82 = add i64 %index, 24
- %0 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum82
+ %0 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum82
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load36 = load <4 x i32>* %1, align 4
%wide.load37 = load <4 x i32>* undef, align 4
%.sum84 = add i64 %index, 32
- %2 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum84
+ %2 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum84
%3 = bitcast i32* %2 to <4 x i32>*
%wide.load38 = load <4 x i32>* %3, align 4
%.sum85 = add i64 %index, 36
- %4 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum85
+ %4 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum85
%5 = bitcast i32* %4 to <4 x i32>*
%wide.load39 = load <4 x i32>* %5, align 4
- %6 = getelementptr [1024 x i32]* @ub, i64 0, i64 undef
+ %6 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 undef
%7 = bitcast i32* %6 to <4 x i32>*
%wide.load40 = load <4 x i32>* %7, align 4
%.sum87 = add i64 %index, 44
- %8 = getelementptr [1024 x i32]* @ub, i64 0, i64 %.sum87
+ %8 = getelementptr [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %.sum87
%9 = bitcast i32* %8 to <4 x i32>*
%wide.load41 = load <4 x i32>* %9, align 4
- %10 = getelementptr inbounds [1024 x i32]* @uc, i64 0, i64 %index
+ %10 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %index
%11 = bitcast i32* %10 to <4 x i32>*
%wide.load42 = load <4 x i32>* %11, align 4
%.sum8889 = or i64 %index, 4
- %12 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum8889
+ %12 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum8889
%13 = bitcast i32* %12 to <4 x i32>*
%wide.load43 = load <4 x i32>* %13, align 4
%.sum9091 = or i64 %index, 8
- %14 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum9091
+ %14 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum9091
%15 = bitcast i32* %14 to <4 x i32>*
%wide.load44 = load <4 x i32>* %15, align 4
%.sum94 = add i64 %index, 16
- %16 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum94
+ %16 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum94
%17 = bitcast i32* %16 to <4 x i32>*
%wide.load46 = load <4 x i32>* %17, align 4
%.sum95 = add i64 %index, 20
- %18 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum95
+ %18 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum95
%19 = bitcast i32* %18 to <4 x i32>*
%wide.load47 = load <4 x i32>* %19, align 4
- %20 = getelementptr [1024 x i32]* @uc, i64 0, i64 undef
+ %20 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 undef
%21 = bitcast i32* %20 to <4 x i32>*
%wide.load48 = load <4 x i32>* %21, align 4
%.sum97 = add i64 %index, 28
- %22 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum97
+ %22 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum97
%23 = bitcast i32* %22 to <4 x i32>*
%wide.load49 = load <4 x i32>* %23, align 4
%.sum98 = add i64 %index, 32
- %24 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum98
+ %24 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum98
%25 = bitcast i32* %24 to <4 x i32>*
%wide.load50 = load <4 x i32>* %25, align 4
%.sum99 = add i64 %index, 36
- %26 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum99
+ %26 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum99
%27 = bitcast i32* %26 to <4 x i32>*
%wide.load51 = load <4 x i32>* %27, align 4
%.sum100 = add i64 %index, 40
- %28 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum100
+ %28 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum100
%29 = bitcast i32* %28 to <4 x i32>*
%wide.load52 = load <4 x i32>* %29, align 4
%.sum101 = add i64 %index, 44
- %30 = getelementptr [1024 x i32]* @uc, i64 0, i64 %.sum101
+ %30 = getelementptr [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %.sum101
%31 = bitcast i32* %30 to <4 x i32>*
%wide.load53 = load <4 x i32>* %31, align 4
%32 = add <4 x i32> zeroinitializer, %vec.phi
%.sum = add i64 0, 4
%wide.load72 = load <4 x i32>* null, align 4
%.sum109 = add i64 0, 8
- %0 = getelementptr i32* %first, i64 %.sum109
+ %0 = getelementptr i32, i32* %first, i64 %.sum109
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load73 = load <4 x i32>* %1, align 4
%.sum110 = add i64 0, 12
- %2 = getelementptr i32* %first, i64 %.sum110
+ %2 = getelementptr i32, i32* %first, i64 %.sum110
%3 = bitcast i32* %2 to <4 x i32>*
%wide.load74 = load <4 x i32>* %3, align 4
%.sum112 = add i64 0, 20
- %4 = getelementptr i32* %first, i64 %.sum112
+ %4 = getelementptr i32, i32* %first, i64 %.sum112
%5 = bitcast i32* %4 to <4 x i32>*
%wide.load76 = load <4 x i32>* %5, align 4
%.sum114 = add i64 0, 28
- %6 = getelementptr i32* %first, i64 %.sum114
+ %6 = getelementptr i32, i32* %first, i64 %.sum114
%7 = bitcast i32* %6 to <4 x i32>*
%wide.load78 = load <4 x i32>* %7, align 4
%.sum115 = add i64 0, 32
- %8 = getelementptr i32* %first, i64 %.sum115
+ %8 = getelementptr i32, i32* %first, i64 %.sum115
%9 = bitcast i32* %8 to <4 x i32>*
%wide.load79 = load <4 x i32>* %9, align 4
%.sum116 = add i64 0, 36
- %10 = getelementptr i32* %first, i64 %.sum116
+ %10 = getelementptr i32, i32* %first, i64 %.sum116
%11 = bitcast i32* %10 to <4 x i32>*
%wide.load80 = load <4 x i32>* %11, align 4
%.sum117 = add i64 0, 40
- %12 = getelementptr i32* %first, i64 %.sum117
+ %12 = getelementptr i32, i32* %first, i64 %.sum117
%13 = bitcast i32* %12 to <4 x i32>*
%wide.load81 = load <4 x i32>* %13, align 4
%.sum118 = add i64 0, 44
- %14 = getelementptr i32* %first, i64 %.sum118
+ %14 = getelementptr i32, i32* %first, i64 %.sum118
%15 = bitcast i32* %14 to <4 x i32>*
%wide.load82 = load <4 x i32>* %15, align 4
%16 = mul <4 x i32> %wide.load72, <i32 269850533, i32 269850533, i32 269850533, i32 269850533>
%0 = load i32* %__a.addr.i, align 4
%1 = load <4 x i32>** %__b.addr.i, align 8
%2 = bitcast <4 x i32>* %1 to i8*
- %3 = getelementptr i8* %2, i32 %0
+ %3 = getelementptr i8, i8* %2, i32 %0
%4 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %3)
store <4 x i32> %4, <4 x i32>* @res_vsi, align 16
store i32 0, i32* %__a.addr.i31, align 4
%5 = load i32* %__a.addr.i31, align 4
%6 = load <4 x i32>** %__b.addr.i32, align 8
%7 = bitcast <4 x i32>* %6 to i8*
- %8 = getelementptr i8* %7, i32 %5
+ %8 = getelementptr i8, i8* %7, i32 %5
%9 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %8)
store <4 x i32> %9, <4 x i32>* @res_vui, align 16
store i32 0, i32* %__a.addr.i29, align 4
%10 = load i32* %__a.addr.i29, align 4
%11 = load <4 x float>** %__b.addr.i30, align 8
%12 = bitcast <4 x float>* %11 to i8*
- %13 = getelementptr i8* %12, i32 %10
+ %13 = getelementptr i8, i8* %12, i32 %10
%14 = call <4 x i32> @llvm.ppc.vsx.lxvw4x(i8* %13)
%15 = bitcast <4 x i32> %14 to <4 x float>
store <4 x float> %15, <4 x float>* @res_vf, align 16
%16 = load i32* %__a.addr.i27, align 4
%17 = load <2 x i64>** %__b.addr.i28, align 8
%18 = bitcast <2 x i64>* %17 to i8*
- %19 = getelementptr i8* %18, i32 %16
+ %19 = getelementptr i8, i8* %18, i32 %16
%20 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %19)
%21 = bitcast <2 x double> %20 to <2 x i64>
store <2 x i64> %21, <2 x i64>* @res_vsll, align 16
%22 = load i32* %__a.addr.i25, align 4
%23 = load <2 x i64>** %__b.addr.i26, align 8
%24 = bitcast <2 x i64>* %23 to i8*
- %25 = getelementptr i8* %24, i32 %22
+ %25 = getelementptr i8, i8* %24, i32 %22
%26 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %25)
%27 = bitcast <2 x double> %26 to <2 x i64>
store <2 x i64> %27, <2 x i64>* @res_vull, align 16
%28 = load i32* %__a.addr.i23, align 4
%29 = load <2 x double>** %__b.addr.i24, align 8
%30 = bitcast <2 x double>* %29 to i8*
- %31 = getelementptr i8* %30, i32 %28
+ %31 = getelementptr i8, i8* %30, i32 %28
%32 = call <2 x double> @llvm.ppc.vsx.lxvd2x(i8* %31)
store <2 x double> %32, <2 x double>* @res_vd, align 16
%33 = load <4 x i32>* @vsi, align 16
%35 = load i32* %__b.addr.i21, align 4
%36 = load <4 x i32>** %__c.addr.i22, align 8
%37 = bitcast <4 x i32>* %36 to i8*
- %38 = getelementptr i8* %37, i32 %35
+ %38 = getelementptr i8, i8* %37, i32 %35
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %34, i8* %38)
%39 = load <4 x i32>* @vui, align 16
store <4 x i32> %39, <4 x i32>* %__a.addr.i17, align 16
%41 = load i32* %__b.addr.i18, align 4
%42 = load <4 x i32>** %__c.addr.i19, align 8
%43 = bitcast <4 x i32>* %42 to i8*
- %44 = getelementptr i8* %43, i32 %41
+ %44 = getelementptr i8, i8* %43, i32 %41
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %40, i8* %44)
%45 = load <4 x float>* @vf, align 16
store <4 x float> %45, <4 x float>* %__a.addr.i14, align 16
%48 = load i32* %__b.addr.i15, align 4
%49 = load <4 x float>** %__c.addr.i16, align 8
%50 = bitcast <4 x float>* %49 to i8*
- %51 = getelementptr i8* %50, i32 %48
+ %51 = getelementptr i8, i8* %50, i32 %48
call void @llvm.ppc.vsx.stxvw4x(<4 x i32> %47, i8* %51) #1
%52 = load <2 x i64>* @vsll, align 16
store <2 x i64> %52, <2 x i64>* %__a.addr.i11, align 16
%55 = load i32* %__b.addr.i12, align 4
%56 = load <2 x i64>** %__c.addr.i13, align 8
%57 = bitcast <2 x i64>* %56 to i8*
- %58 = getelementptr i8* %57, i32 %55
+ %58 = getelementptr i8, i8* %57, i32 %55
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %54, i8* %58)
%59 = load <2 x i64>* @vull, align 16
store <2 x i64> %59, <2 x i64>* %__a.addr.i8, align 16
%62 = load i32* %__b.addr.i9, align 4
%63 = load <2 x i64>** %__c.addr.i10, align 8
%64 = bitcast <2 x i64>* %63 to i8*
- %65 = getelementptr i8* %64, i32 %62
+ %65 = getelementptr i8, i8* %64, i32 %62
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %61, i8* %65)
%66 = load <2 x double>* @vd, align 16
store <2 x double> %66, <2 x double>* %__a.addr.i6, align 16
%68 = load i32* %__b.addr.i7, align 4
%69 = load <2 x double>** %__c.addr.i, align 8
%70 = bitcast <2 x double>* %69 to i8*
- %71 = getelementptr i8* %70, i32 %68
+ %71 = getelementptr i8, i8* %70, i32 %68
call void @llvm.ppc.vsx.stxvd2x(<2 x double> %67, i8* %71)
ret void
}
%0 = phi i8* [ %.pre, %entry ], [ %.be, %loop.backedge ]
%1 = load i8* %0, align 1
%tobool = icmp eq i8 %1, 0
- %incdec.ptr = getelementptr inbounds i8* %0, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %0, i64 1
store i8* %incdec.ptr, i8** %p, align 8
%2 = load i8* %incdec.ptr, align 1
%tobool2 = icmp ne i8 %2, 0
br i1 %or.cond, label %if.then3, label %loop.backedge
if.then3: ; preds = %loop
- %incdec.ptr4 = getelementptr inbounds i8* %0, i64 2
+ %incdec.ptr4 = getelementptr inbounds i8, i8* %0, i64 2
store i8* %incdec.ptr4, i8** %p, align 8
br label %loop.backedge
; SI: ds_read_b32 [[VPTR]]
define void @local_address_gep(i32 addrspace(1)* %out, i32 addrspace(3)* %in, i32 %offset) {
entry:
- %0 = getelementptr i32 addrspace(3)* %in, i32 %offset
+ %0 = getelementptr i32, i32 addrspace(3)* %in, i32 %offset
%1 = load i32 addrspace(3)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; SI: ds_read_b32 v{{[0-9]+}}, [[VPTR]] offset:4
define void @local_address_gep_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
- %0 = getelementptr i32 addrspace(3)* %in, i32 1
+ %0 = getelementptr i32, i32 addrspace(3)* %in, i32 1
%1 = load i32 addrspace(3)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; SI: ds_read_b32 [[VPTR]]
define void @local_address_gep_large_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
- %0 = getelementptr i32 addrspace(3)* %in, i32 16385
+ %0 = getelementptr i32, i32 addrspace(3)* %in, i32 16385
%1 = load i32 addrspace(3)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; SI-NEXT: s_add_i32
; SI: ds_read_b32
define void @mul_32bit_ptr(float addrspace(1)* %out, [3 x float] addrspace(3)* %lds, i32 %tid) {
- %ptr = getelementptr [3 x float] addrspace(3)* %lds, i32 %tid, i32 0
+ %ptr = getelementptr [3 x float], [3 x float] addrspace(3)* %lds, i32 %tid, i32 0
%val = load float addrspace(3)* %ptr
store float %val, float addrspace(1)* %out
ret void
; SI: v_mov_b32_e32 [[ADDR:v[0-9]+]], [[SADDR]]
; SI: ds_write_b32 [[ADDR]], v{{[0-9]+}}
define void @local_address_gep_store(i32 addrspace(3)* %out, i32, i32 %val, i32 %offset) {
- %gep = getelementptr i32 addrspace(3)* %out, i32 %offset
+ %gep = getelementptr i32, i32 addrspace(3)* %out, i32 %offset
store i32 %val, i32 addrspace(3)* %gep, align 4
ret void
}
; SI: v_mov_b32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
; SI: ds_write_b32 [[VPTR]], [[VAL]] offset:4
define void @local_address_gep_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
- %gep = getelementptr i32 addrspace(3)* %out, i32 1
+ %gep = getelementptr i32, i32 addrspace(3)* %out, i32 1
store i32 %val, i32 addrspace(3)* %gep, align 4
ret void
}
; SI: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; SI: ds_write_b32 [[VPTR]], v{{[0-9]+$}}
define void @local_address_gep_large_const_offset_store(i32 addrspace(3)* %out, i32 %val) {
- %gep = getelementptr i32 addrspace(3)* %out, i32 16385
+ %gep = getelementptr i32, i32 addrspace(3)* %out, i32 16385
store i32 %val, i32 addrspace(3)* %gep, align 4
ret void
}
;SI-NOT: [[REG]]
;SI: buffer_store_dword [[REG]],
define void @test1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%result = add i32 %a, %b
;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1)* %in
%b = load <2 x i32> addrspace(1)* %b_ptr
%result = add <2 x i32> %a, %b
;SI: v_add_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1)* %in
%b = load <4 x i32> addrspace(1)* %b_ptr
%result = add <4 x i32> %a, %b
; SI: v_addc_u32
define void @test_i64_vreg(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %a_ptr = getelementptr i64 addrspace(1)* %inA, i32 %tid
- %b_ptr = getelementptr i64 addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
%a = load i64 addrspace(1)* %a_ptr
%b = load i64 addrspace(1)* %b_ptr
%result = add i64 %a, %b
; SI: v_addc_u32
define void @test_v2i64_vreg(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %a_ptr = getelementptr <2 x i64> addrspace(1)* %inA, i32 %tid
- %b_ptr = getelementptr <2 x i64> addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
%a = load <2 x i64> addrspace(1)* %a_ptr
%b = load <2 x i64> addrspace(1)* %b_ptr
%result = add <2 x i64> %a, %b
; CHECK-DAG: ds_read_b32 v{{[0-9]+}}, [[VREG2]] offset:20
define void @do_as_ptr_calcs(%struct.foo addrspace(3)* nocapture %ptr) nounwind {
entry:
- %x = getelementptr inbounds %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 0
- %y = getelementptr inbounds %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 2
+ %x = getelementptr inbounds %struct.foo, %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 0
+ %y = getelementptr inbounds %struct.foo, %struct.foo addrspace(3)* %ptr, i32 0, i32 1, i32 2
br label %bb32
bb32:
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = and <2 x i32> %a, %b
; SI: v_and_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = and <4 x i32> %a, %b
; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
; alloca to a vector. It currently fails because it does not know how
; to interpret:
-; getelementptr [4 x i32]* %alloca, i32 1, i32 %b
+; getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
; SI-PROMOTE: v_add_i32_e32 [[PTRREG:v[0-9]+]], 16
; SI-PROMOTE: ds_write_b32 [[PTRREG]]
define void @test_private_array_ptr_calc(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
%alloca = alloca [4 x i32], i32 4, align 16
%tid = call i32 @llvm.SI.tid() readnone
- %a_ptr = getelementptr i32 addrspace(1)* %inA, i32 %tid
- %b_ptr = getelementptr i32 addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr i32, i32 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
%a = load i32 addrspace(1)* %a_ptr
%b = load i32 addrspace(1)* %b_ptr
%result = add i32 %a, %b
- %alloca_ptr = getelementptr [4 x i32]* %alloca, i32 1, i32 %b
+ %alloca_ptr = getelementptr [4 x i32], [4 x i32]* %alloca, i32 1, i32 %b
store i32 %result, i32* %alloca_ptr, align 4
; Dummy call
call void @llvm.AMDGPU.barrier.local() nounwind noduplicate
%reload = load i32* %alloca_ptr, align 4
- %out_ptr = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
store i32 %reload, i32 addrspace(1)* %out_ptr, align 4
ret void
}
; SI: v_mul_hi_i32
define void @test_array_ptr_calc(i32 addrspace(1)* noalias %out, [1025 x i32] addrspace(1)* noalias %inA, i32 addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.SI.tid() readnone
- %a_ptr = getelementptr [1025 x i32] addrspace(1)* %inA, i32 %tid, i32 0
- %b_ptr = getelementptr i32 addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr [1025 x i32], [1025 x i32] addrspace(1)* %inA, i32 %tid, i32 0
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %inB, i32 %tid
%a = load i32 addrspace(1)* %a_ptr
%b = load i32 addrspace(1)* %b_ptr
%result = add i32 %a, %b
; GCN: ds_cmpst_rtn_b32 [[RESULT:v[0-9]+]], [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
; GCN: s_endpgm
define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
%result = extractvalue { i32, i1 } %pair, 0
store i32 %result, i32 addrspace(1)* %out, align 4
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
%result = extractvalue { i64, i1 } %pair, 0
store i64 %result, i64 addrspace(1)* %out, align 8
define void @lds_atomic_cmpxchg_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %swap, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
%result = extractvalue { i32, i1 } %pair, 0
store i32 %result, i32 addrspace(1)* %out, align 4
; GCN: ds_cmpst_b32 [[VPTR]], [[VCMP]], [[VSWAP]] offset:16
; GCN: s_endpgm
define void @lds_atomic_cmpxchg_noret_i32_offset(i32 addrspace(3)* %ptr, i32 %swap) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i32 addrspace(3)* %gep, i32 7, i32 %swap seq_cst monotonic
%result = extractvalue { i32, i1 } %pair, 0
ret void
; GCN: ds_cmpst_b64 [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}} offset:32
; GCN: s_endpgm
define void @lds_atomic_cmpxchg_noret_i64_offset(i64 addrspace(3)* %ptr, i64 %swap) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%pair = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic
%result = extractvalue { i64, i1 } %pair, 0
ret void
; R600: LDS_ADD *
; SI: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
define void @atomic_add_local_const_offset(i32 addrspace(3)* %local) {
- %gep = getelementptr i32 addrspace(3)* %local, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %local, i32 4
%val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
ret void
}
; R600: LDS_ADD_RET *
; SI: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
define void @atomic_add_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
- %gep = getelementptr i32 addrspace(3)* %local, i32 5
+ %gep = getelementptr i32, i32 addrspace(3)* %local, i32 5
%val = atomicrmw volatile add i32 addrspace(3)* %gep, i32 5 seq_cst
store i32 %val, i32 addrspace(1)* %out
ret void
; R600: LDS_SUB *
; SI: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
define void @atomic_sub_local_const_offset(i32 addrspace(3)* %local) {
- %gep = getelementptr i32 addrspace(3)* %local, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %local, i32 4
%val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
ret void
}
; R600: LDS_SUB_RET *
; SI: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:20
define void @atomic_sub_ret_local_const_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %local) {
- %gep = getelementptr i32 addrspace(3)* %local, i32 5
+ %gep = getelementptr i32, i32 addrspace(3)* %local, i32 5
%val = atomicrmw volatile sub i32 addrspace(3)* %gep, i32 5 seq_cst
store i32 %val, i32 addrspace(1)* %out
ret void
}
define void @test_call(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%c = call i32 @defined_function(i32 %b) nounwind
}
define void @test_call_external(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%c = call i32 @external_function(i32 %b) nounwind
entry:
%0 = mul nsw i32 %a, 3
%1 = sext i32 %0 to i64
- %2 = getelementptr i8 addrspace(1)* %in, i64 %1
+ %2 = getelementptr i8, i8 addrspace(1)* %in, i64 %1
store i8 %b, i8 addrspace(1)* %2
ret void
}
%tmp13 = add nsw <8 x i8> %tmp9, %tmp12
%tmp16 = shufflevector <32 x i8> %1, <32 x i8> undef, <8 x i32> <i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
%tmp17 = add nsw <8 x i8> %tmp13, %tmp16
- %scevgep = getelementptr <8 x i8> addrspace(1)* %result, i32 %i.01
+ %scevgep = getelementptr <8 x i8>, <8 x i8> addrspace(1)* %result, i32 %i.01
%2 = bitcast <8 x i8> %tmp17 to <2 x i32>
%3 = bitcast <8 x i8> addrspace(1)* %scevgep to <2 x i32> addrspace(1)*
store <2 x i32> %2, <2 x i32> addrspace(1)* %3, align 8
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_add_imm_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float addrspace(1)* %gep.0
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%z = fadd float 2.0, %x.fabs
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_imm_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float addrspace(1)* %gep.0
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%x.fneg.fabs = fsub float -0.000000e+00, %x.fabs
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_imm_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float addrspace(1)* %gep.0
%x.fneg = fsub float -0.000000e+00, %x
%z = fmul float 4.0, %x.fneg
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%x = load float addrspace(1)* %gep.0
%x.fabs = call float @llvm.fabs.f32(float %x) #1
%z = fadd float 1024.0, %x.fabs
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
%y.fabs = call float @llvm.fabs.f32(float %y) #1
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
%y.fneg = fsub float -0.000000e+00, %y
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fabs_fneg_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
%y.fabs = call float @llvm.fabs.f32(float %y) #1
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
%x.fabs = call float @llvm.fabs.f32(float %x) #1
; SI-NEXT: buffer_store_dword [[REG]]
define void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
%x.fabs = call float @llvm.fabs.f32(float %x) #1
; SI: buffer_store_dword [[RESULT]]
define void @fma_a_2.0_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
loop:
%inc = phi i32 [0, %entry], [%inc.i, %loop]
- %ptr = getelementptr [16 x i32]* %alloca, i32 0, i32 %inc
+ %ptr = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 %inc
store i32 %inc, i32* %ptr
%inc.i = add i32 %inc, 1
%cnd = icmp uge i32 %inc.i, 16
br i1 %cnd, label %done, label %loop
done:
- %tmp0 = getelementptr [16 x i32]* %alloca, i32 0, i32 0
+ %tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 0
%tmp1 = load i32* %tmp0
store i32 %tmp1, i32 addrspace(1)* %out
ret void
define void @v_ctpop_i32_add_vvar_inv(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i32 addrspace(1)* noalias %constptr) nounwind {
%val = load i32 addrspace(1)* %in, align 4
%ctpop = call i32 @llvm.ctpop.i32(i32 %val) nounwind readnone
- %gep = getelementptr i32 addrspace(1)* %constptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %constptr, i32 4
%const = load i32 addrspace(1)* %gep, align 4
%add = add i32 %const, %ctpop
store i32 %add, i32 addrspace(1)* %out, align 4
br label %endif
else:
- %tmp3 = getelementptr i32 addrspace(1)* %in, i32 1
+ %tmp3 = getelementptr i32, i32 addrspace(1)* %in, i32 1
%tmp4 = load i32 addrspace(1)* %tmp3
br label %endif
br label %endif
else:
- %tmp3 = getelementptr i64 addrspace(1)* %in, i32 1
+ %tmp3 = getelementptr i64, i64 addrspace(1)* %in, i32 1
%tmp4 = load i64 addrspace(1)* %tmp3
br label %endif
define void @sint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%sint = load i32 addrspace(1) * %in
%conv = sitofp i32 %sint to float
%0 = insertelement <4 x float> undef, float %conv, i32 0
define void @uint(<4 x float> addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%uint = load i32 addrspace(1) * %in
%conv = uitofp i32 %uint to float
%0 = insertelement <4 x float> undef, float %conv, i32 0
%i.07.in = phi i32 [ %i.07, %for.body ], [ %iterations, %entry ]
%ai.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%i.07 = add nsw i32 %i.07.in, -1
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %ai.06
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %ai.06
store i32 %i.07, i32 addrspace(1)* %arrayidx, align 4
%add = add nsw i32 %ai.06, 1
%exitcond = icmp eq i32 %add, %iterations
%offset.02 = phi i32 [ %mul, %entry ], [ %add14, %for.body ]
%k.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
tail call void @llvm.AMDGPU.barrier.local() #1
- %arrayidx = getelementptr inbounds float addrspace(3)* %lptr, i32 %offset.02
+ %arrayidx = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %offset.02
%tmp = load float addrspace(3)* %arrayidx, align 4
%add1 = add nsw i32 %offset.02, 1
- %arrayidx2 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add1
+ %arrayidx2 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add1
%tmp1 = load float addrspace(3)* %arrayidx2, align 4
%add3 = add nsw i32 %offset.02, 32
- %arrayidx4 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add3
+ %arrayidx4 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add3
%tmp2 = load float addrspace(3)* %arrayidx4, align 4
%add5 = add nsw i32 %offset.02, 33
- %arrayidx6 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add5
+ %arrayidx6 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add5
%tmp3 = load float addrspace(3)* %arrayidx6, align 4
%add7 = add nsw i32 %offset.02, 64
- %arrayidx8 = getelementptr inbounds float addrspace(3)* %lptr, i32 %add7
+ %arrayidx8 = getelementptr inbounds float, float addrspace(3)* %lptr, i32 %add7
%tmp4 = load float addrspace(3)* %arrayidx8, align 4
%add9 = fadd float %tmp, %tmp1
%add10 = fadd float %add9, %tmp2
for.end: ; preds = %for.body
%tmp5 = sext i32 %x.i to i64
- %arrayidx15 = getelementptr inbounds float addrspace(1)* %out, i64 %tmp5
+ %arrayidx15 = getelementptr inbounds float, float addrspace(1)* %out, i64 %tmp5
store float %add13, float addrspace(1)* %arrayidx15, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2_f32(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2_f32_max_offset(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 255
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2_f32_too_far(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 257
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2_f32_x2(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 0
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%idx.1 = add nsw i32 %tid.x, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum.0 = fadd float %val0, %val1
%idx.2 = add nsw i32 %tid.x, 11
- %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
%val2 = load float addrspace(3)* %arrayidx2, align 4
%idx.3 = add nsw i32 %tid.x, 27
- %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
+ %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
%val3 = load float addrspace(3)* %arrayidx3, align 4
%sum.1 = fadd float %val2, %val3
%sum = fadd float %sum.0, %sum.1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %idx.0
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %idx.0
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2_f32_x2_barrier(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 0
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%idx.1 = add nsw i32 %tid.x, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum.0 = fadd float %val0, %val1
call void @llvm.AMDGPU.barrier.local() #2
%idx.2 = add nsw i32 %tid.x, 11
- %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
%val2 = load float addrspace(3)* %arrayidx2, align 4
%idx.3 = add nsw i32 %tid.x, 27
- %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
+ %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
%val3 = load float addrspace(3)* %arrayidx3, align 4
%sum.1 = fadd float %val2, %val3
%sum = fadd float %sum.0, %sum.1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %idx.0
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %idx.0
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2_f32_x2_nonzero_base(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%idx.1 = add nsw i32 %tid.x, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum.0 = fadd float %val0, %val1
%idx.2 = add nsw i32 %tid.x, 11
- %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
%val2 = load float addrspace(3)* %arrayidx2, align 4
%idx.3 = add nsw i32 %tid.x, 27
- %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
+ %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
%val3 = load float addrspace(3)* %arrayidx3, align 4
%sum.1 = fadd float %val2, %val3
%sum = fadd float %sum.0, %sum.1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %idx.0
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %idx.0
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
%index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
- %gep = getelementptr inbounds <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
+ %gep = getelementptr inbounds float, <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
%gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0
%gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1
%val0 = load float addrspace(3)* %gep.0, align 4
%val1 = load float addrspace(3)* %gep.1, align 4
%add.x = add nsw i32 %x.i, 8
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
%index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
- %gep = getelementptr inbounds <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
+ %gep = getelementptr inbounds float, <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
%gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0
%gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1
; Apply an additional offset after the vector that will be more obviously folded.
- %gep.1.offset = getelementptr float addrspace(3)* %gep.1, i32 8
+ %gep.1.offset = getelementptr float, float addrspace(3)* %gep.1, i32 8
%val0 = load float addrspace(3)* %gep.0, align 4
%val1 = load float addrspace(3)* %gep.1.offset, align 4
%add.x = add nsw i32 %x.i, 8
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
%x.i.v.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
%x.i.v.1 = insertelement <2 x i32> %x.i.v.0, i32 %x.i, i32 1
%idx = add <2 x i32> %x.i.v.1, <i32 0, i32 8>
- %gep = getelementptr inbounds <2 x [512 x float] addrspace(3)*> %ptr.1, <2 x i32> <i32 0, i32 0>, <2 x i32> %idx
+ %gep = getelementptr inbounds [512 x float], <2 x [512 x float] addrspace(3)*> %ptr.1, <2 x i32> <i32 0, i32 0>, <2 x i32> %idx
%gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0
%gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1
%val0 = load float addrspace(3)* %gep.0, align 4
%val1 = load float addrspace(3)* %gep.1, align 4
%add.x = add nsw i32 %x.i, 8
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2_f32_volatile_0(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load volatile float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2_f32_volatile_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load volatile float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @unaligned_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 1
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 1
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @misaligned_2_simple_read2_f32(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 2
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 2
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2_f64(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI: s_endpgm
define void @simple_read2_f64_max_offset(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 255
- %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI: s_endpgm
define void @simple_read2_f64_too_far(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 257
- %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI: s_endpgm
define void @misaligned_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 7
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 4
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 4
ret void
}
define void @sgemm_inner_loop_read2_sequence(float addrspace(1)* %C, i32 %lda, i32 %ldb) #0 {
%x.i = tail call i32 @llvm.r600.read.tgid.x() #1
%y.i = tail call i32 @llvm.r600.read.tidig.y() #1
- %arrayidx44 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
+ %arrayidx44 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
%tmp16 = load float addrspace(3)* %arrayidx44, align 4
%add47 = add nsw i32 %x.i, 1
- %arrayidx48 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47
+ %arrayidx48 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47
%tmp17 = load float addrspace(3)* %arrayidx48, align 4
%add51 = add nsw i32 %x.i, 16
- %arrayidx52 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51
+ %arrayidx52 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51
%tmp18 = load float addrspace(3)* %arrayidx52, align 4
%add55 = add nsw i32 %x.i, 17
- %arrayidx56 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55
+ %arrayidx56 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55
%tmp19 = load float addrspace(3)* %arrayidx56, align 4
- %arrayidx60 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i
+ %arrayidx60 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i
%tmp20 = load float addrspace(3)* %arrayidx60, align 4
%add63 = add nsw i32 %y.i, 1
- %arrayidx64 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63
+ %arrayidx64 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63
%tmp21 = load float addrspace(3)* %arrayidx64, align 4
%add67 = add nsw i32 %y.i, 32
- %arrayidx68 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67
+ %arrayidx68 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67
%tmp22 = load float addrspace(3)* %arrayidx68, align 4
%add71 = add nsw i32 %y.i, 33
- %arrayidx72 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71
+ %arrayidx72 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71
%tmp23 = load float addrspace(3)* %arrayidx72, align 4
%add75 = add nsw i32 %y.i, 64
- %arrayidx76 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75
+ %arrayidx76 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75
%tmp24 = load float addrspace(3)* %arrayidx76, align 4
%add79 = add nsw i32 %y.i, 65
- %arrayidx80 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79
+ %arrayidx80 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79
%tmp25 = load float addrspace(3)* %arrayidx80, align 4
%sum.0 = fadd float %tmp16, %tmp17
%sum.1 = fadd float %sum.0, %tmp18
define void @offset_order(float addrspace(1)* %out) {
entry:
- %ptr0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 0
+ %ptr0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 0
%val0 = load float addrspace(3)* %ptr0
- %ptr1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 256
+ %ptr1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 256
%val1 = load float addrspace(3)* %ptr1
%add1 = fadd float %val0, %val1
- %ptr2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 3
+ %ptr2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 3
%val2 = load float addrspace(3)* %ptr2
%add2 = fadd float %add1, %val2
- %ptr3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 2
+ %ptr3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 2
%val3 = load float addrspace(3)* %ptr3
%add3 = fadd float %add2, %val3
- %ptr4 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 12
+ %ptr4 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 12
%val4 = load float addrspace(3)* %ptr4
%add4 = fadd float %add3, %val4
- %ptr5 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 14
+ %ptr5 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 14
%val5 = load float addrspace(3)* %ptr5
%add5 = fadd float %add4, %val5
- %ptr6 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 11
+ %ptr6 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 11
%val6 = load float addrspace(3)* %ptr6
%add6 = fadd float %add5, %val6
store float %add6, float addrspace(1)* %out
; SI: s_endpgm
define void @simple_read2st64_f32_0_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 64
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2st64_f32_1_2(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 128
- %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2st64_f32_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 16320
- %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2st64_f32_over_max_offset(float addrspace(1)* %out, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 16384
- %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @odd_invalid_read2st64_f32_0(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 63
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
define void @odd_invalid_read2st64_f32_1(float addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 127
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
- %out.gep = getelementptr inbounds float addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds float, float addrspace(1)* %out, i32 %x.i
store float %sum, float addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_read2st64_f64_0_1(double addrspace(1)* %out) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 64
- %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
define void @simple_read2st64_f64_1_2(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 128
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI: s_endpgm
define void @misaligned_read2st64_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 64
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 4
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 4
ret void
}
define void @simple_read2st64_f64_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 256
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8128
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
define void @simple_read2st64_f64_over_max_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8192
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
define void @invalid_read2st64_f64_odd_offset(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
%add.x.0 = add nsw i32 %x.i, 64
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8129
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 8
ret void
}
; SI: s_endpgm
define void @byte_size_only_divisible_64_read2_f64(double addrspace(1)* %out, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
%val0 = load double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
%val1 = load double addrspace(3)* %arrayidx1, align 8
%sum = fadd double %val0, %val1
- %out.gep = getelementptr inbounds double addrspace(1)* %out, i32 %x.i
+ %out.gep = getelementptr inbounds double, double addrspace(1)* %out, i32 %x.i
store double %sum, double addrspace(1)* %out.gep, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_one_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr float addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
%val = load float addrspace(1)* %in.gep, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
%val0 = load float addrspace(1)* %in.gep.0, align 4
%val1 = load float addrspace(1)* %in.gep.1, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_f32_volatile_0(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i
- %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i
+ %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
+ %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
%val0 = load float addrspace(1)* %in0.gep, align 4
%val1 = load float addrspace(1)* %in1.gep, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store volatile float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_f32_volatile_1(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i
- %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i
+ %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
+ %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
%val0 = load float addrspace(1)* %in0.gep, align 4
%val1 = load float addrspace(1)* %in1.gep, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store volatile float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_subreg2_mixed_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr <2 x float> addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr <2 x float> addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in.gep.0, i32 1
%val0 = load <2 x float> addrspace(1)* %in.gep.0, align 8
%val1 = load <2 x float> addrspace(1)* %in.gep.1, align 8
%val0.0 = extractelement <2 x float> %val0, i32 0
%val1.1 = extractelement <2 x float> %val1, i32 1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0.0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1.1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_subreg2_f32(float addrspace(1)* %C, <2 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr <2 x float> addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr <2 x float>, <2 x float> addrspace(1)* %in, i32 %x.i
%val = load <2 x float> addrspace(1)* %in.gep, align 8
%val0 = extractelement <2 x float> %val, i32 0
%val1 = extractelement <2 x float> %val, i32 1
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_subreg4_f32(float addrspace(1)* %C, <4 x float> addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr <4 x float> addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 %x.i
%val = load <4 x float> addrspace(1)* %in.gep, align 16
%val0 = extractelement <4 x float> %val, i32 0
%val1 = extractelement <4 x float> %val, i32 3
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
%val0 = load float addrspace(1)* %in.gep.0, align 4
%val1 = load float addrspace(1)* %in.gep.1, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 255
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_too_far_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i
- %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i
+ %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
+ %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
%val0 = load float addrspace(1)* %in0.gep, align 4
%val1 = load float addrspace(1)* %in1.gep, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 257
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_f32_x2(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
- %in0.gep = getelementptr float addrspace(1)* %in0, i32 %tid.x
- %in1.gep = getelementptr float addrspace(1)* %in1, i32 %tid.x
+ %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %tid.x
+ %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %tid.x
%val0 = load float addrspace(1)* %in0.gep, align 4
%val1 = load float addrspace(1)* %in1.gep, align 4
%idx.0 = add nsw i32 %tid.x, 0
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
store float %val0, float addrspace(3)* %arrayidx0, align 4
%idx.1 = add nsw i32 %tid.x, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
store float %val1, float addrspace(3)* %arrayidx1, align 4
%idx.2 = add nsw i32 %tid.x, 11
- %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
store float %val0, float addrspace(3)* %arrayidx2, align 4
%idx.3 = add nsw i32 %tid.x, 27
- %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
+ %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
store float %val1, float addrspace(3)* %arrayidx3, align 4
ret void
; SI: s_endpgm
define void @simple_write2_two_val_f32_x2_nonzero_base(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
- %in0.gep = getelementptr float addrspace(1)* %in0, i32 %tid.x
- %in1.gep = getelementptr float addrspace(1)* %in1, i32 %tid.x
+ %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %tid.x
+ %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %tid.x
%val0 = load float addrspace(1)* %in0.gep, align 4
%val1 = load float addrspace(1)* %in1.gep, align 4
%idx.0 = add nsw i32 %tid.x, 3
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
store float %val0, float addrspace(3)* %arrayidx0, align 4
%idx.1 = add nsw i32 %tid.x, 8
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.1
store float %val1, float addrspace(3)* %arrayidx1, align 4
%idx.2 = add nsw i32 %tid.x, 11
- %arrayidx2 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.2
store float %val0, float addrspace(3)* %arrayidx2, align 4
%idx.3 = add nsw i32 %tid.x, 27
- %arrayidx3 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
+ %arrayidx3 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.3
store float %val1, float addrspace(3)* %arrayidx3, align 4
ret void
; SI: s_endpgm
define void @write2_ptr_subreg_arg_two_val_f32(float addrspace(1)* %C, float addrspace(1)* %in0, float addrspace(1)* %in1, <2 x float addrspace(3)*> %lds.ptr) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in0.gep = getelementptr float addrspace(1)* %in0, i32 %x.i
- %in1.gep = getelementptr float addrspace(1)* %in1, i32 %x.i
+ %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
+ %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
%val0 = load float addrspace(1)* %in0.gep, align 4
%val1 = load float addrspace(1)* %in1.gep, align 4
%index.0 = insertelement <2 x i32> undef, i32 %x.i, i32 0
%index.1 = insertelement <2 x i32> %index.0, i32 8, i32 0
- %gep = getelementptr inbounds <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
+ %gep = getelementptr inbounds float, <2 x float addrspace(3)*> %lds.ptr, <2 x i32> %index.1
%gep.0 = extractelement <2 x float addrspace(3)*> %gep, i32 0
%gep.1 = extractelement <2 x float addrspace(3)*> %gep, i32 1
; Apply an additional offset after the vector that will be more obviously folded.
- %gep.1.offset = getelementptr float addrspace(3)* %gep.1, i32 8
+ %gep.1.offset = getelementptr float, float addrspace(3)* %gep.1, i32 8
store float %val0, float addrspace(3)* %gep.0, align 4
%add.x = add nsw i32 %x.i, 8
; SI: s_endpgm
define void @simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr double addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
%val = load double addrspace(1)* %in.gep, align 8
- %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
store double %val, double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
store double %val, double addrspace(3)* %arrayidx1, align 8
ret void
}
; SI: s_endpgm
define void @misaligned_simple_write2_one_val_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr double addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
%val = load double addrspace(1)* %in.gep, align 8
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
store double %val, double addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 7
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
store double %val, double addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2_two_val_f64(double addrspace(1)* %C, double addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr double addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr double addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr double, double addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr double, double addrspace(1)* %in.gep.0, i32 1
%val0 = load double addrspace(1)* %in.gep.0, align 8
%val1 = load double addrspace(1)* %in.gep.1, align 8
- %arrayidx0 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %x.i
store double %val0, double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x double], [512 x double] addrspace(3)* @lds.f64, i32 0, i32 %add.x
store double %val1, double addrspace(3)* %arrayidx1, align 8
ret void
}
%x.i = tail call i32 @llvm.r600.read.tgid.x() #1
%y.i = tail call i32 @llvm.r600.read.tidig.y() #1
%val = load float addrspace(1)* %in
- %arrayidx44 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
+ %arrayidx44 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %x.i
store float %val, float addrspace(3)* %arrayidx44, align 4
%add47 = add nsw i32 %x.i, 1
- %arrayidx48 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47
+ %arrayidx48 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add47
store float %val, float addrspace(3)* %arrayidx48, align 4
%add51 = add nsw i32 %x.i, 16
- %arrayidx52 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51
+ %arrayidx52 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add51
store float %val, float addrspace(3)* %arrayidx52, align 4
%add55 = add nsw i32 %x.i, 17
- %arrayidx56 = getelementptr inbounds [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55
+ %arrayidx56 = getelementptr inbounds [264 x float], [264 x float] addrspace(3)* @sgemm.lA, i32 0, i32 %add55
store float %val, float addrspace(3)* %arrayidx56, align 4
- %arrayidx60 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i
+ %arrayidx60 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %y.i
store float %val, float addrspace(3)* %arrayidx60, align 4
%add63 = add nsw i32 %y.i, 1
- %arrayidx64 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63
+ %arrayidx64 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add63
store float %val, float addrspace(3)* %arrayidx64, align 4
%add67 = add nsw i32 %y.i, 32
- %arrayidx68 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67
+ %arrayidx68 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add67
store float %val, float addrspace(3)* %arrayidx68, align 4
%add71 = add nsw i32 %y.i, 33
- %arrayidx72 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71
+ %arrayidx72 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add71
store float %val, float addrspace(3)* %arrayidx72, align 4
%add75 = add nsw i32 %y.i, 64
- %arrayidx76 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75
+ %arrayidx76 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add75
store float %val, float addrspace(3)* %arrayidx76, align 4
%add79 = add nsw i32 %y.i, 65
- %arrayidx80 = getelementptr inbounds [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79
+ %arrayidx80 = getelementptr inbounds [776 x float], [776 x float] addrspace(3)* @sgemm.lB, i32 0, i32 %add79
store float %val, float addrspace(3)* %arrayidx80, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2st64_one_val_f32_0_1(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr float addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
%val = load float addrspace(1)* %in.gep, align 4
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %x.i
store float %val, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 64
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x
store float %val, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2st64_two_val_f32_2_5(float addrspace(1)* %C, float addrspace(1)* %in) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
%val0 = load float addrspace(1)* %in.gep.0, align 4
%val1 = load float addrspace(1)* %in.gep.1, align 4
%add.x.0 = add nsw i32 %x.i, 128
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.0
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x.1 = add nsw i32 %x.i, 320
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %add.x.1
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2st64_two_val_max_offset_f32(float addrspace(1)* %C, float addrspace(1)* %in, float addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
%val0 = load float addrspace(1)* %in.gep.0, align 4
%val1 = load float addrspace(1)* %in.gep.1, align 4
- %arrayidx0 = getelementptr inbounds float addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %x.i
store float %val0, float addrspace(3)* %arrayidx0, align 4
%add.x = add nsw i32 %x.i, 16320
- %arrayidx1 = getelementptr inbounds float addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds float, float addrspace(3)* %lds, i32 %add.x
store float %val1, float addrspace(3)* %arrayidx1, align 4
ret void
}
; SI: s_endpgm
define void @simple_write2st64_two_val_max_offset_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep.0 = getelementptr double addrspace(1)* %in, i32 %x.i
- %in.gep.1 = getelementptr double addrspace(1)* %in.gep.0, i32 1
+ %in.gep.0 = getelementptr double, double addrspace(1)* %in, i32 %x.i
+ %in.gep.1 = getelementptr double, double addrspace(1)* %in.gep.0, i32 1
%val0 = load double addrspace(1)* %in.gep.0, align 8
%val1 = load double addrspace(1)* %in.gep.1, align 8
%add.x.0 = add nsw i32 %x.i, 256
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.0
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.0
store double %val0, double addrspace(3)* %arrayidx0, align 8
%add.x.1 = add nsw i32 %x.i, 8128
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x.1
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x.1
store double %val1, double addrspace(3)* %arrayidx1, align 8
ret void
}
; SI: s_endpgm
define void @byte_size_only_divisible_64_write2st64_f64(double addrspace(1)* %C, double addrspace(1)* %in, double addrspace(3)* %lds) #0 {
%x.i = tail call i32 @llvm.r600.read.tidig.x() #1
- %in.gep = getelementptr double addrspace(1)* %in, i32 %x.i
+ %in.gep = getelementptr double, double addrspace(1)* %in, i32 %x.i
%val = load double addrspace(1)* %in.gep, align 8
- %arrayidx0 = getelementptr inbounds double addrspace(3)* %lds, i32 %x.i
+ %arrayidx0 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %x.i
store double %val, double addrspace(3)* %arrayidx0, align 8
%add.x = add nsw i32 %x.i, 8
- %arrayidx1 = getelementptr inbounds double addrspace(3)* %lds, i32 %add.x
+ %arrayidx1 = getelementptr inbounds double, double addrspace(3)* %lds, i32 %add.x
store double %val, double addrspace(3)* %arrayidx1, align 8
ret void
}
br i1 %tmp2, label %done, label %loop
done:
- %tmp3 = getelementptr i32 addrspace(1)* %out, i64 1
+ %tmp3 = getelementptr i32, i32 addrspace(1)* %out, i64 1
store i32 %inc, i32 addrspace(1)* %tmp3
ret void
}
define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) nounwind {
%p0 = extractelement <2 x i16> %foo, i32 0
%p1 = extractelement <2 x i16> %foo, i32 1
- %out1 = getelementptr i16 addrspace(1)* %out, i32 1
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 %p1, i16 addrspace(1)* %out, align 2
store i16 %p0, i16 addrspace(1)* %out1, align 2
ret void
define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) nounwind {
%p0 = extractelement <4 x i16> %foo, i32 0
%p1 = extractelement <4 x i16> %foo, i32 2
- %out1 = getelementptr i16 addrspace(1)* %out, i32 1
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
store i16 %p1, i16 addrspace(1)* %out, align 2
store i16 %p0, i16 addrspace(1)* %out1, align 2
ret void
define void @v_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%tidext = sext i32 %tid to i64
- %gep = getelementptr double addrspace(1)* %in, i64 %tidext
+ %gep = getelementptr double, double addrspace(1)* %in, i64 %tidext
%val = load double addrspace(1)* %gep, align 8
%fabs = call double @llvm.fabs.f64(double %val)
store double %fabs, double addrspace(1)* %out
; SI: v_add_f32
; SI: v_add_f32
define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float> addrspace(1)* %in, align 16
%b = load <4 x float> addrspace(1)* %b_ptr, align 16
%result = fadd <4 x float> %a, %b
define void @fcmp_sext(i32 addrspace(1)* %out, float addrspace(1)* %in) {
entry:
%0 = load float addrspace(1)* %in
- %arrayidx1 = getelementptr inbounds float addrspace(1)* %in, i32 1
+ %arrayidx1 = getelementptr inbounds float, float addrspace(1)* %in, i32 1
%1 = load float addrspace(1)* %arrayidx1
%cmp = fcmp oeq float %0, %1
%sext = sext i1 %cmp to i32
br i1 %0, label %IF, label %ENDIF
IF:
- %1 = getelementptr i32 addrspace(1)* %out, i32 1
+ %1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 0, i32 addrspace(1)* %1
br label %ENDIF
; COMMON: buffer_store_dwordx2 [[RESULT]]
; COMMON: s_endpgm
define void @fdiv_f64(double addrspace(1)* %out, double addrspace(1)* %in) nounwind {
- %gep.1 = getelementptr double addrspace(1)* %in, i32 1
+ %gep.1 = getelementptr double, double addrspace(1)* %in, i32 1
%num = load double addrspace(1)* %in
%den = load double addrspace(1)* %gep.1
%result = fdiv double %num, %den
; COMMON-LABEL: {{^}}v_fdiv_v2f64:
define void @v_fdiv_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in) nounwind {
- %gep.1 = getelementptr <2 x double> addrspace(1)* %in, i32 1
+ %gep.1 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in, i32 1
%num = load <2 x double> addrspace(1)* %in
%den = load <2 x double> addrspace(1)* %gep.1
%result = fdiv <2 x double> %num, %den
; COMMON-LABEL: {{^}}v_fdiv_v4f64:
define void @v_fdiv_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) nounwind {
- %gep.1 = getelementptr <4 x double> addrspace(1)* %in, i32 1
+ %gep.1 = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
%num = load <4 x double> addrspace(1)* %in
%den = load <4 x double> addrspace(1)* %gep.1
%result = fdiv <4 x double> %num, %den
; SI-DAG: v_rcp_f32
; SI-DAG: v_mul_f32
define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float> addrspace(1) * %in
%b = load <4 x float> addrspace(1) * %b_ptr
%result = fdiv <4 x float> %a, %b
define void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
%alloca = alloca i32, i32 9, align 4
%x = call i32 @llvm.r600.read.tidig.x() #3
- %pptr = getelementptr i32* %alloca, i32 %x
+ %pptr = getelementptr i32, i32* %alloca, i32 %x
%fptr = addrspacecast i32* %pptr to i32 addrspace(4)*
store i32 %x, i32 addrspace(4)* %fptr
; Dummy call
; SI: buffer_store_dwordx2 [[RESULT]]
define void @combine_to_fma_f64_0(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_fma_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @combine_to_fma_f64_1(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_fma_fsub_f64_0_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_fma_fsub_1_f64_2use(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @combine_to_fma_fsub_2_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_fma_fsub_2_f64_2uses_neg(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_fma_fsub_2_f64_2uses_mul(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr double addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr double addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr double, double addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr double, double addrspace(1)* %gep.out.0, i32 1
%a = load double addrspace(1)* %gep.0
%b = load double addrspace(1)* %gep.1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @aggressive_combine_to_fma_fsub_0_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.4 = getelementptr double addrspace(1)* %gep.0, i32 4
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr double, double addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%x = load double addrspace(1)* %gep.0
%y = load double addrspace(1)* %gep.1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @aggressive_combine_to_fma_fsub_1_f64(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr double addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr double addrspace(1)* %gep.0, i32 3
- %gep.4 = getelementptr double addrspace(1)* %gep.0, i32 4
- %gep.out = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr double, double addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr double, double addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
%x = load double addrspace(1)* %gep.0
%y = load double addrspace(1)* %gep.1
; SI: v_fma_f32 {{v[0-9]+}}, 2.0, {{v[0-9]+}}, {{v[0-9]+}}
define void @fma_commute_mul_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
%b = load float addrspace(1)* %in.b.gep, align 4
; FUNC-LABEL: @fma_commute_mul_s_f32
define void @fma_commute_mul_s_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b, float %b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
%c = load float addrspace(1)* %in.b.gep, align 4
; FUNC-LABEL: @test_fmax_legacy_uge_f64
define void @test_fmax_legacy_uge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; FUNC-LABEL: @test_fmax_legacy_oge_f64
define void @test_fmax_legacy_oge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; FUNC-LABEL: @test_fmax_legacy_ugt_f64
define void @test_fmax_legacy_ugt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; FUNC-LABEL: @test_fmax_legacy_ogt_f64
define void @test_fmax_legacy_ogt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; EG: MAX
define void @test_fmax_legacy_uge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; EG: MAX
define void @test_fmax_legacy_oge_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; EG: MAX
define void @test_fmax_legacy_ugt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; EG: MAX
define void @test_fmax_legacy_ogt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; EG: MAX
define void @test_fmax_legacy_ogt_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; FUNC-LABEL: @test_fmin_legacy_ule_f64
define void @test_fmin_legacy_ule_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; FUNC-LABEL: @test_fmin_legacy_ole_f64
define void @test_fmin_legacy_ole_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; FUNC-LABEL: @test_fmin_legacy_olt_f64
define void @test_fmin_legacy_olt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; FUNC-LABEL: @test_fmin_legacy_ult_f64
define void @test_fmin_legacy_ult_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ule_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ole_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_olt_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
define void @test_fmin_legacy_ult_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI: s_endpgm
define void @test_fmin_legacy_ole_f32_multi_use(float addrspace(1)* %out0, i1 addrspace(1)* %out1, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI: v_mul_f32
; SI: v_mul_f32
define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float> addrspace(1) * %in
%b = load <4 x float> addrspace(1) * %b_ptr
%result = fmul <4 x float> %a, %b
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_a_2.0_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r0 = load float addrspace(1)* %gep.0
%r1 = load float addrspace(1)* %gep.1
float addrspace(1)* %in1,
float addrspace(1)* %in2) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r0 = load float addrspace(1)* %gep.0
%r1 = load float addrspace(1)* %gep.1
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_neg_2.0_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_neg_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_2.0_neg_a_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; CHECK: buffer_store_dword [[RESULT]]
define void @fmuladd_2.0_a_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; CI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @fp_to_sint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%val = load double addrspace(1)* %gep, align 8
%cast = fptosi double %val to i64
store i64 %cast, i64 addrspace(1)* %out, align 8
; CI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @fp_to_uint_i64_f64(i64 addrspace(1)* %out, double addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%val = load double addrspace(1)* %gep, align 8
%cast = fptoui double %val to i64
store i64 %cast, i64 addrspace(1)* %out, align 4
; GCN: s_endpgm
define void @frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2) #0 {
- %gep2 = getelementptr float addrspace(1)* %in2, i32 4
+ %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
%r0 = load float addrspace(1)* %in1, align 4
%r1 = load float addrspace(1)* %gep2, align 4
%r2 = frem float %r0, %r1
; GCN: s_endpgm
define void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
float addrspace(1)* %in2) #1 {
- %gep2 = getelementptr float addrspace(1)* %in2, i32 4
+ %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
%r0 = load float addrspace(1)* %in1, align 4
%r1 = load float addrspace(1)* %gep2, align 4
%r2 = frem float %r0, %r1
define void @frem_v2f32(<2 x float> addrspace(1)* %out, <2 x float> addrspace(1)* %in1,
<2 x float> addrspace(1)* %in2) #0 {
- %gep2 = getelementptr <2 x float> addrspace(1)* %in2, i32 4
+ %gep2 = getelementptr <2 x float>, <2 x float> addrspace(1)* %in2, i32 4
%r0 = load <2 x float> addrspace(1)* %in1, align 8
%r1 = load <2 x float> addrspace(1)* %gep2, align 8
%r2 = frem <2 x float> %r0, %r1
define void @frem_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in1,
<4 x float> addrspace(1)* %in2) #0 {
- %gep2 = getelementptr <4 x float> addrspace(1)* %in2, i32 4
+ %gep2 = getelementptr <4 x float>, <4 x float> addrspace(1)* %in2, i32 4
%r0 = load <4 x float> addrspace(1)* %in1, align 16
%r1 = load <4 x float> addrspace(1)* %gep2, align 16
%r2 = frem <4 x float> %r0, %r1
define void @frem_v2f64(<2 x double> addrspace(1)* %out, <2 x double> addrspace(1)* %in1,
<2 x double> addrspace(1)* %in2) #0 {
- %gep2 = getelementptr <2 x double> addrspace(1)* %in2, i32 4
+ %gep2 = getelementptr <2 x double>, <2 x double> addrspace(1)* %in2, i32 4
%r0 = load <2 x double> addrspace(1)* %in1, align 16
%r1 = load <2 x double> addrspace(1)* %gep2, align 16
%r2 = frem <2 x double> %r0, %r1
; FUNC-LABEL: {{^}}v_fsub_f32:
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @v_fsub_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
- %b_ptr = getelementptr float addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr float, float addrspace(1)* %in, i32 1
%a = load float addrspace(1)* %in, align 4
%b = load float addrspace(1)* %b_ptr, align 4
%result = fsub float %a, %b
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; SI: v_subrev_f32_e32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define void @v_fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x float>, <4 x float> addrspace(1)* %in, i32 1
%a = load <4 x float> addrspace(1)* %in, align 16
%b = load <4 x float> addrspace(1)* %b_ptr, align 16
%result = fsub <4 x float> %a, %b
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -v\[[0-9]+:[0-9]+\]}}
define void @fsub_v4f64(<4 x double> addrspace(1)* %out, <4 x double> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x double> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x double>, <4 x double> addrspace(1)* %in, i32 1
%a = load <4 x double> addrspace(1)* %in
%b = load <4 x double> addrspace(1)* %b_ptr
%result = fsub <4 x double> %a, %b
; CHECK-LABEL: {{^}}use_gep_address_space:
; CHECK: v_mov_b32_e32 [[PTR:v[0-9]+]], s{{[0-9]+}}
; CHECK: ds_write_b32 [[PTR]], v{{[0-9]+}} offset:64
- %p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16
+ %p = getelementptr [1024 x i32], [1024 x i32] addrspace(3)* %array, i16 0, i16 16
store i32 99, i32 addrspace(3)* %p
ret void
}
; SI: s_or_b32
; CI: s_add_i32
; CHECK: ds_write_b32
- %p = getelementptr [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
+ %p = getelementptr [1024 x i32], [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
store i32 99, i32 addrspace(3)* %p
ret void
}
; CHECK: s_add_i32
; CHECK: s_add_i32
; CHECK: s_add_i32
- %p = getelementptr <4 x [1024 x i32] addrspace(3)*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
+ %p = getelementptr [1024 x i32], <4 x [1024 x i32] addrspace(3)*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
%p0 = extractelement <4 x i32 addrspace(3)*> %p, i32 0
%p1 = extractelement <4 x i32 addrspace(3)*> %p, i32 1
%p2 = extractelement <4 x i32 addrspace(3)*> %p, i32 2
; CHECK-LABEL: {{^}}gep_as_vector_v2:
; CHECK: s_add_i32
; CHECK: s_add_i32
- %p = getelementptr <2 x [1024 x i32] addrspace(3)*> %array, <2 x i16> zeroinitializer, <2 x i16> <i16 16, i16 16>
+ %p = getelementptr [1024 x i32], <2 x [1024 x i32] addrspace(3)*> %array, <2 x i16> zeroinitializer, <2 x i16> <i16 16, i16 16>
%p0 = extractelement <2 x i32 addrspace(3)*> %p, i32 0
%p1 = extractelement <2 x i32 addrspace(3)*> %p, i32 1
store i32 99, i32 addrspace(3)* %p0
; SI: .globl foo
; SI: {{^}}foo:
define void @foo(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%result = add i32 %a, %b
@lds = addrspace(1) global [256 x i32] zeroinitializer
define void @load_init_global_global(i32 addrspace(1)* %out, i1 %p) {
- %gep = getelementptr [256 x i32] addrspace(1)* @lds, i32 0, i32 10
+ %gep = getelementptr [256 x i32], [256 x i32] addrspace(1)* @lds, i32 0, i32 10
%ld = load i32 addrspace(1)* %gep
store i32 %ld, i32 addrspace(1)* %out
ret void
; SI: buffer_atomic_add v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_add_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_add_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile add i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_add v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_add_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_add_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile add i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_and v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_and_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_and_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile and i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_and v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_and_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_and_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile and i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_sub v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_sub_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_sub_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile sub i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_sub v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_sub_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_sub_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile sub i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_smax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_smax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_umax v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_umax v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_smin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_smin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_umin v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_umin v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_or v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_or_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_or_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile or i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_or v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_or_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_or_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile or i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_swap v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xchg_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_xchg_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile xchg i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_swap v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_xchg_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_xchg_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile xchg i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_xor v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 0 offset:16{{$}}
define void @atomic_xor_i32_offset(i32 addrspace(1)* %out, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
entry:
- %gep = getelementptr i32 addrspace(1)* %out, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 4
%0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64 offset:16{{$}}
define void @atomic_xor_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 4
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
%0 = atomicrmw volatile xor i32 addrspace(1)* %gep, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_atomic_xor v{{[0-9]+}}, v[{{[0-9]+}}:{{[0-9]+}}], s[{{[0-9]+}}:{{[0-9]+}}], 0 addr64{{$}}
define void @atomic_xor_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
ret void
}
; SI: buffer_store_dword [[RET]]
define void @atomic_xor_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
entry:
- %ptr = getelementptr i32 addrspace(1)* %out, i64 %index
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
%0 = atomicrmw volatile xor i32 addrspace(1)* %ptr, i32 %in seq_cst
store i32 %0, i32 addrspace(1)* %out2
ret void
; SI: buffer_store_byte
; SI: s_endpgm
define void @test_i8( i32 %s, i8 addrspace(1)* %out) #3 {
- %arrayidx = getelementptr inbounds [1 x i8] addrspace(2)* @a, i32 0, i32 %s
+ %arrayidx = getelementptr inbounds [1 x i8], [1 x i8] addrspace(2)* @a, i32 0, i32 %s
%1 = load i8 addrspace(2)* %arrayidx, align 1
store i8 %1, i8 addrspace(1)* %out
ret void
; SI: buffer_store_short
; SI: s_endpgm
define void @test_i16( i32 %s, i16 addrspace(1)* %out) #3 {
- %arrayidx = getelementptr inbounds [1 x i16] addrspace(2)* @b, i32 0, i32 %s
+ %arrayidx = getelementptr inbounds [1 x i16], [1 x i16] addrspace(2)* @b, i32 0, i32 %s
%1 = load i16 addrspace(2)* %arrayidx, align 2
store i16 %1, i16 addrspace(1)* %out
ret void
; FUNC-LABEL: {{^}}struct_bar_gv_load:
define void @struct_bar_gv_load(i8 addrspace(1)* %out, i32 %index) {
- %gep = getelementptr inbounds [1 x %struct.bar] addrspace(2)* @struct_bar_gv, i32 0, i32 0, i32 1, i32 %index
+ %gep = getelementptr inbounds [1 x %struct.bar], [1 x %struct.bar] addrspace(2)* @struct_bar_gv, i32 0, i32 0, i32 1, i32 %index
%load = load i8 addrspace(2)* %gep, align 1
store i8 %load, i8 addrspace(1)* %out, align 1
ret void
; FUNC-LABEL: {{^}}array_vector_gv_load:
define void @array_vector_gv_load(<4 x i32> addrspace(1)* %out, i32 %index) {
- %gep = getelementptr inbounds [4 x <4 x i32>] addrspace(2)* @array_vector_gv, i32 0, i32 %index
+ %gep = getelementptr inbounds [4 x <4 x i32>], [4 x <4 x i32>] addrspace(2)* @array_vector_gv, i32 0, i32 %index
%load = load <4 x i32> addrspace(2)* %gep, align 16
store <4 x i32> %load, <4 x i32> addrspace(1)* %out, align 16
ret void
define void @float(float addrspace(1)* %out, i32 %index) {
entry:
- %0 = getelementptr inbounds [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
+ %0 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%1 = load float addrspace(2)* %0
store float %1, float addrspace(1)* %out
ret void
define void @i32(i32 addrspace(1)* %out, i32 %index) {
entry:
- %0 = getelementptr inbounds [5 x i32] addrspace(2)* @i32_gv, i32 0, i32 %index
+ %0 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(2)* @i32_gv, i32 0, i32 %index
%1 = load i32 addrspace(2)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; GCN: s_load_dword
define void @struct_foo_gv_load(i32 addrspace(1)* %out, i32 %index) {
- %gep = getelementptr inbounds [1 x %struct.foo] addrspace(2)* @struct_foo_gv, i32 0, i32 0, i32 1, i32 %index
+ %gep = getelementptr inbounds [1 x %struct.foo], [1 x %struct.foo] addrspace(2)* @struct_foo_gv, i32 0, i32 0, i32 1, i32 %index
%load = load i32 addrspace(2)* %gep, align 4
store i32 %load, i32 addrspace(1)* %out, align 4
ret void
; SI: buffer_load_dword
; VI: s_load_dword
define void @array_v1_gv_load(<1 x i32> addrspace(1)* %out, i32 %index) {
- %gep = getelementptr inbounds [4 x <1 x i32>] addrspace(2)* @array_v1_gv, i32 0, i32 %index
+ %gep = getelementptr inbounds [4 x <1 x i32>], [4 x <1 x i32>] addrspace(2)* @array_v1_gv, i32 0, i32 %index
%load = load <1 x i32> addrspace(2)* %gep, align 4
store <1 x i32> %load, <1 x i32> addrspace(1)* %out, align 4
ret void
br i1 %0, label %if, label %else
if:
- %1 = getelementptr inbounds [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
+ %1 = getelementptr inbounds [5 x float], [5 x float] addrspace(2)* @float_gv, i32 0, i32 %index
%2 = load float addrspace(2)* %1
store float %2, float addrspace(1)* %out
br label %endif
define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
%0 = load i32 addrspace(1)* %in
- %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %in, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
%1 = load i32 addrspace(1)* %arrayidx1
%cmp = icmp eq i32 %0, %1
%value = select i1 %cmp, i32 0, i32 -1
define void @private_access_f64_alloca(double addrspace(1)* noalias %out, double addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load double addrspace(1)* %in, align 8
%array = alloca double, i32 16, align 8
- %ptr = getelementptr double* %array, i32 %b
+ %ptr = getelementptr double, double* %array, i32 %b
store double %val, double* %ptr, align 8
call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
%result = load double* %ptr, align 8
define void @private_access_v2f64_alloca(<2 x double> addrspace(1)* noalias %out, <2 x double> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x double> addrspace(1)* %in, align 16
%array = alloca <2 x double>, i32 16, align 16
- %ptr = getelementptr <2 x double>* %array, i32 %b
+ %ptr = getelementptr <2 x double>, <2 x double>* %array, i32 %b
store <2 x double> %val, <2 x double>* %ptr, align 16
call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
%result = load <2 x double>* %ptr, align 16
define void @private_access_i64_alloca(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load i64 addrspace(1)* %in, align 8
%array = alloca i64, i32 16, align 8
- %ptr = getelementptr i64* %array, i32 %b
+ %ptr = getelementptr i64, i64* %array, i32 %b
store i64 %val, i64* %ptr, align 8
call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
%result = load i64* %ptr, align 8
define void @private_access_v2i64_alloca(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %in, i32 %b) nounwind {
%val = load <2 x i64> addrspace(1)* %in, align 16
%array = alloca <2 x i64>, i32 16, align 16
- %ptr = getelementptr <2 x i64>* %array, i32 %b
+ %ptr = getelementptr <2 x i64>, <2 x i64>* %array, i32 %b
store <2 x i64> %val, <2 x i64>* %ptr, align 16
call void @llvm.AMDGPU.barrier.local() noduplicate nounwind
%result = load <2 x i64>* %ptr, align 16
br label %endif
else:
- %4 = getelementptr i32 addrspace(1)* %in, i32 1
+ %4 = getelementptr i32, i32 addrspace(1)* %in, i32 1
%5 = load i32 addrspace(1)* %4
%6 = insertelement <2 x i32> %0, i32 %5, i32 1
br label %endif
define void @large_alloca(i32 addrspace(1)* %out, i32 %x, i32 %y) nounwind {
%large = alloca [8192 x i32], align 4
- %gep = getelementptr [8192 x i32]* %large, i32 0, i32 8191
+ %gep = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 8191
store i32 %x, i32* %gep
- %gep1 = getelementptr [8192 x i32]* %large, i32 0, i32 %y
+ %gep1 = getelementptr [8192 x i32], [8192 x i32]* %large, i32 0, i32 %y
%0 = load i32* %gep1
store i32 %0, i32 addrspace(1)* %out
ret void
@lds = addrspace(3) global [8 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8]
define void @load_init_lds_global(i32 addrspace(1)* %out, i1 %p) {
- %gep = getelementptr [8 x i32] addrspace(3)* @lds, i32 0, i32 10
+ %gep = getelementptr [8 x i32], [8 x i32] addrspace(3)* @lds, i32 0, i32 10
%ld = load i32 addrspace(3)* %gep
store i32 %ld, i32 addrspace(1)* %out
ret void
define void @lds_input_queue(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %index) {
entry:
- %0 = getelementptr inbounds [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
+ %0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
%1 = load i32 addrspace(3)* %0
call void @llvm.AMDGPU.barrier.local()
; load from global memory which immediately follows a load from a global value that
; has been declared in the local memory space:
;
-; %0 = getelementptr inbounds [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
+; %0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 %index
; %1 = load i32 addrspace(3)* %0
; %2 = load i32 addrspace(1)* %in
;
; CHECK: MOV * T{{[0-9]\.[XYZW]}}, OQAP
define void @local_global_alias(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
- %0 = getelementptr inbounds [2 x i32] addrspace(3)* @local_mem, i32 0, i32 0
+ %0 = getelementptr inbounds [2 x i32], [2 x i32] addrspace(3)* @local_mem, i32 0, i32 0
%1 = load i32 addrspace(3)* %0
%2 = load i32 addrspace(1)* %in
%3 = add i32 %2, %1
@lds = addrspace(3) global [256 x i32] zeroinitializer
define void @load_zeroinit_lds_global(i32 addrspace(1)* %out, i1 %p) {
- %gep = getelementptr [256 x i32] addrspace(3)* @lds, i32 0, i32 10
+ %gep = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds, i32 0, i32 10
%ld = load i32 addrspace(3)* %gep
store i32 %ld, i32 addrspace(1)* %out
ret void
%0 = icmp eq i32 %in, 5
br i1 %0, label %IF, label %ENDIF
IF:
- %1 = getelementptr i32 addrspace(1)* %out, i32 1
+ %1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 0, i32 addrspace(1)* %1
br label %ENDIF
define void @test_barrier_global(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.x()
- %1 = getelementptr i32 addrspace(1)* %out, i32 %0
+ %1 = getelementptr i32, i32 addrspace(1)* %out, i32 %0
store i32 %0, i32 addrspace(1)* %1
call void @llvm.AMDGPU.barrier.global()
%2 = call i32 @llvm.r600.read.local.size.x()
%3 = sub i32 %2, 1
%4 = sub i32 %3, %0
- %5 = getelementptr i32 addrspace(1)* %out, i32 %4
+ %5 = getelementptr i32, i32 addrspace(1)* %out, i32 %4
%6 = load i32 addrspace(1)* %5
store i32 %6, i32 addrspace(1)* %1
ret void
define void @test_barrier_local(i32 addrspace(1)* %out) {
entry:
%0 = call i32 @llvm.r600.read.tidig.x()
- %1 = getelementptr i32 addrspace(1)* %out, i32 %0
+ %1 = getelementptr i32, i32 addrspace(1)* %out, i32 %0
store i32 %0, i32 addrspace(1)* %1
call void @llvm.AMDGPU.barrier.local()
%2 = call i32 @llvm.r600.read.local.size.x()
%3 = sub i32 %2, 1
%4 = sub i32 %3, %0
- %5 = getelementptr i32 addrspace(1)* %out, i32 %4
+ %5 = getelementptr i32, i32 addrspace(1)* %out, i32 %4
%6 = load i32 addrspace(1)* %5
store i32 %6, i32 addrspace(1)* %1
ret void
; SI: s_endpgm
define void @v_test_class_full_mask_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f32(float %a, i32 511) #1
; SI: s_endpgm
define void @test_class_inline_imm_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f32(float 1.0, i32 %b) #1
; SI: s_endpgm
define void @test_class_lit_constant_dynamic_mask_f32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f32(float 1024.0, i32 %b) #1
; SI: s_endpgm
define void @v_test_class_full_mask_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load double addrspace(1)* %in
%result = call i1 @llvm.AMDGPU.class.f64(double %a, i32 511) #1
; SI: s_endpgm
define void @test_class_inline_imm_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f64(double 1.0, i32 %b) #1
; SI: s_endpgm
define void @test_class_lit_constant_dynamic_mask_f64(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%b = load i32 addrspace(1)* %gep.in
%result = call i1 @llvm.AMDGPU.class.f64(double 1024.0, i32 %b) #1
; SI: s_endpgm
define void @test_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
; SI: s_endpgm
define void @test_fold_or3_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
; SI: s_endpgm
define void @test_fold_or_all_tests_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 1) #1
; SI: s_endpgm
define void @test_fold_or_class_f32_1(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
; SI: s_endpgm
define void @test_fold_or_class_f32_2(i32 addrspace(1)* %out, float addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 7) #1
; SI: s_endpgm
define void @test_no_fold_or_class_f32_0(i32 addrspace(1)* %out, float addrspace(1)* %in, float %b) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep.in = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.in
%class0 = call i1 @llvm.AMDGPU.class.f32(float %a, i32 4) #1
; SI: s_endpgm
define void @test_div_fmas_f32_logical_cond_to_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 %d) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.a = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.b = getelementptr float addrspace(1)* %gep.a, i32 1
- %gep.c = getelementptr float addrspace(1)* %gep.a, i32 2
- %gep.out = getelementptr float addrspace(1)* %out, i32 2
+ %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
+ %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
%a = load float addrspace(1)* %gep.a
%b = load float addrspace(1)* %gep.b
define void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) nounwind {
entry:
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.out = getelementptr float addrspace(1)* %out, i32 2
- %gep.a = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.b = getelementptr float addrspace(1)* %gep.a, i32 1
- %gep.c = getelementptr float addrspace(1)* %gep.a, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
+ %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
+ %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2
%a = load float addrspace(1)* %gep.a
%b = load float addrspace(1)* %gep.b
; SI: s_endpgm
define void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI: s_endpgm
define void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI: s_endpgm
define void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; SI: s_endpgm
define void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr double addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr double addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
%a = load double addrspace(1)* %gep.0, align 8
%b = load double addrspace(1)* %gep.1, align 8
; SI: s_endpgm
define void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
%b = load float addrspace(1)* %gep, align 4
; SI: s_endpgm
define void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float addrspace(1)* %in, float %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
%b = load float addrspace(1)* %gep, align 4
; SI: s_endpgm
define void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
%a = load float addrspace(1)* %gep, align 4
; SI: s_endpgm
define void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float addrspace(1)* %in, float %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep = getelementptr float, float addrspace(1)* %in, i32 %tid
%a = load float addrspace(1)* %gep, align 4
; SI: s_endpgm
define void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%b = load double addrspace(1)* %gep, align 8
; SI: s_endpgm
define void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double addrspace(1)* %in, double %a) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%b = load double addrspace(1)* %gep, align 8
; SI: s_endpgm
define void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%a = load double addrspace(1)* %gep, align 8
; SI: s_endpgm
define void @test_div_scale_f64_scalar_den_2(double addrspace(1)* %out, double addrspace(1)* %in, double %b) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
%a = load double addrspace(1)* %gep, align 8
; SI: s_endpgm
define void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float 1.0, float %a, i1 false) nounwind readnone
; SI: s_endpgm
define void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%result = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %a, float 2.0, i1 false) nounwind readnone
; SI: s_endpgm
define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI: s_endpgm
define void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; SI: buffer_store_dword [[RESULT]]
define void @commute_umad24(i32 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %out.gep = getelementptr i32 addrspace(1)* %out, i32 %tid
- %src0.gep = getelementptr i32 addrspace(1)* %out, i32 %tid
- %src2.gep = getelementptr i32 addrspace(1)* %src0.gep, i32 1
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ %src0.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ %src2.gep = getelementptr i32, i32 addrspace(1)* %src0.gep, i32 1
%src0 = load i32 addrspace(1)* %src0.gep, align 4
%src2 = load i32 addrspace(1)* %src2.gep, align 4
; CHECK: image_load_mip {{v\[[0-9]+:[0-9]+\]}}, 15, 0, 0, 0, 0, 0, 0, 0, {{v\[[0-9]+:[0-9]+\]}}
define void @vgpr_coords(float addrspace(2)* addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr float addrspace(2)* addrspace(2)* %0, i32 0
+ %20 = getelementptr float addrspace(2)*, float addrspace(2)* addrspace(2)* %0, i32 0
%21 = load float addrspace(2)* addrspace(2)* %20, !tbaa !2
- %22 = getelementptr float addrspace(2)* %21, i32 0
+ %22 = getelementptr float, float addrspace(2)* %21, i32 0
%23 = load float addrspace(2)* %22, !tbaa !2, !invariant.load !1
- %24 = getelementptr float addrspace(2)* %21, i32 1
+ %24 = getelementptr float, float addrspace(2)* %21, i32 1
%25 = load float addrspace(2)* %24, !tbaa !2, !invariant.load !1
- %26 = getelementptr float addrspace(2)* %21, i32 4
+ %26 = getelementptr float, float addrspace(2)* %21, i32 4
%27 = load float addrspace(2)* %26, !tbaa !2, !invariant.load !1
- %28 = getelementptr <32 x i8> addrspace(2)* %2, i32 0
+ %28 = getelementptr <32 x i8>, <32 x i8> addrspace(2)* %2, i32 0
%29 = load <32 x i8> addrspace(2)* %28, !tbaa !2
%30 = bitcast float %27 to i32
%31 = bitcast float %23 to i32
define void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <16 x i8>] addrspace(2)* byval %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) #0 {
main_body:
- %tmp = getelementptr [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1
+ %tmp = getelementptr [2 x <16 x i8>], [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1
%tmp10 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
%tmp11 = shl i32 %arg6, 2
%tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1, i32 1, i32 0)
; SI: s_endpgm
define void @v_round_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() #1
- %gep = getelementptr double addrspace(1)* %in, i32 %tid
- %out.gep = getelementptr double addrspace(1)* %out, i32 %tid
+ %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr double, double addrspace(1)* %out, i32 %tid
%x = load double addrspace(1)* %gep
%result = call double @llvm.round.f64(double %x) #1
store double %result, double addrspace(1)* %out.gep
; SI: buffer_load_ubyte v{{[0-9]+}},
define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
- %0 = getelementptr i8 addrspace(2)* %in, i32 1
+ %0 = getelementptr i8, i8 addrspace(2)* %in, i32 1
%1 = load i8 addrspace(2)* %0
%2 = zext i8 %1 to i32
store i32 %2, i32 addrspace(1)* %out
; SI: buffer_load_ushort
define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
entry:
- %0 = getelementptr i16 addrspace(2)* %in, i32 1
+ %0 = getelementptr i16, i16 addrspace(2)* %in, i32 1
%1 = load i16 addrspace(2)* %0
%2 = zext i16 %1 to i32
store i32 %2, i32 addrspace(1)* %out
define void @load_i32_v2i32_local(<2 x i32> addrspace(1)* %out, i32 addrspace(3)* %in) {
%scalar = load i32 addrspace(3)* %in
%tmp0 = bitcast i32 addrspace(3)* %in to <2 x i32> addrspace(3)*
- %vec_ptr = getelementptr <2 x i32> addrspace(3)* %tmp0, i32 2
+ %vec_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(3)* %tmp0, i32 2
%vec0 = load <2 x i32> addrspace(3)* %vec_ptr, align 4
%vec1 = insertelement <2 x i32> <i32 0, i32 0>, i32 %scalar, i32 0
%vec = add <2 x i32> %vec0, %vec1
; R600: LDS_READ_RET
define void @load_i32_local_const_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %in) {
entry:
- %tmp0 = getelementptr [512 x i32] addrspace(3)* @lds, i32 0, i32 1
+ %tmp0 = getelementptr [512 x i32], [512 x i32] addrspace(3)* @lds, i32 0, i32 1
%tmp1 = load i32 addrspace(3)* %tmp0
- %tmp2 = getelementptr i32 addrspace(1)* %out, i32 1
+ %tmp2 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 %tmp1, i32 addrspace(1)* %tmp2
ret void
}
; BOTH: ds_read_b32 [[REG:v[0-9]+]], v{{[0-9]+}} offset:28
; BOTH: buffer_store_dword [[REG]],
define void @local_i32_load(i32 addrspace(1)* %out, i32 addrspace(3)* %in) nounwind {
- %gep = getelementptr i32 addrspace(3)* %in, i32 7
+ %gep = getelementptr i32, i32 addrspace(3)* %in, i32 7
%val = load i32 addrspace(3)* %gep, align 4
store i32 %val, i32 addrspace(1)* %out, align 4
ret void
; BOTH: ds_read_u8 [[REG:v[0-9]+]], {{v[0-9]+}} offset:65535
; BOTH: buffer_store_byte [[REG]],
define void @local_i8_load_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
- %gep = getelementptr i8 addrspace(3)* %in, i32 65535
+ %gep = getelementptr i8, i8 addrspace(3)* %in, i32 65535
%val = load i8 addrspace(3)* %gep, align 4
store i8 %val, i8 addrspace(1)* %out, align 4
ret void
; BOTH: ds_read_u8 [[REG:v[0-9]+]], [[VREGADDR]]
; BOTH: buffer_store_byte [[REG]],
define void @local_i8_load_over_i16_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %in) nounwind {
- %gep = getelementptr i8 addrspace(3)* %in, i32 65536
+ %gep = getelementptr i8, i8 addrspace(3)* %in, i32 65536
%val = load i8 addrspace(3)* %gep, align 4
store i8 %val, i8 addrspace(1)* %out, align 4
ret void
; BOTH: ds_read_b64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}} offset:56
; BOTH: buffer_store_dwordx2 [[REG]],
define void @local_i64_load(i64 addrspace(1)* %out, i64 addrspace(3)* %in) nounwind {
- %gep = getelementptr i64 addrspace(3)* %in, i32 7
+ %gep = getelementptr i64, i64 addrspace(3)* %in, i32 7
%val = load i64 addrspace(3)* %gep, align 8
store i64 %val, i64 addrspace(1)* %out, align 8
ret void
; BOTH: ds_read_b64 [[REG:v[[0-9]+:[0-9]+]]], v{{[0-9]+}} offset:56
; BOTH: buffer_store_dwordx2 [[REG]],
define void @local_f64_load(double addrspace(1)* %out, double addrspace(3)* %in) nounwind {
- %gep = getelementptr double addrspace(3)* %in, i32 7
+ %gep = getelementptr double, double addrspace(3)* %in, i32 7
%val = load double addrspace(3)* %gep, align 8
store double %val, double addrspace(1)* %out, align 8
ret void
; BOTH-NOT: ADD
; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56
define void @local_i64_store(i64 addrspace(3)* %out) nounwind {
- %gep = getelementptr i64 addrspace(3)* %out, i32 7
+ %gep = getelementptr i64, i64 addrspace(3)* %out, i32 7
store i64 5678, i64 addrspace(3)* %gep, align 8
ret void
}
; BOTH-NOT: ADD
; BOTH: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:56
define void @local_f64_store(double addrspace(3)* %out) nounwind {
- %gep = getelementptr double addrspace(3)* %out, i32 7
+ %gep = getelementptr double, double addrspace(3)* %out, i32 7
store double 16.0, double addrspace(3)* %gep, align 8
ret void
}
; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:120
; BOTH: s_endpgm
define void @local_v2i64_store(<2 x i64> addrspace(3)* %out) nounwind {
- %gep = getelementptr <2 x i64> addrspace(3)* %out, i32 7
+ %gep = getelementptr <2 x i64>, <2 x i64> addrspace(3)* %out, i32 7
store <2 x i64> <i64 5678, i64 5678>, <2 x i64> addrspace(3)* %gep, align 16
ret void
}
; BOTH-DAG: ds_write_b64 v{{[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}} offset:248
; BOTH: s_endpgm
define void @local_v4i64_store(<4 x i64> addrspace(3)* %out) nounwind {
- %gep = getelementptr <4 x i64> addrspace(3)* %out, i32 7
+ %gep = getelementptr <4 x i64>, <4 x i64> addrspace(3)* %out, i32 7
store <4 x i64> <i64 5678, i64 5678, i64 5678, i64 5678>, <4 x i64> addrspace(3)* %gep, align 16
ret void
}
; GCN: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_add_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_add_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
define void @lds_atomic_add_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
%result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] offset:16
; GCN: s_endpgm
define void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
define void @lds_atomic_inc_ret_i32_bad_si_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
%sub = sub i32 %a, %b
%add = add i32 %sub, 4
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 %add
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
%result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_sub_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_sub_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_dec_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[NEGONE]] offset:16
; GCN: s_endpgm
define void @lds_atomic_dec_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_and_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_and_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_or_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_or_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_xor_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_xor_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_min_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_min_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_max_rtn_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_max_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_min_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_umin_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_max_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
define void @lds_atomic_umax_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i32 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
store i32 %result, i32 addrspace(1)* %out, align 4
ret void
; GCN: ds_wrxchg_rtn_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic xchg at constant i32 index 4 (byte offset 16); result unused (noret).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_xchg_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_add_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic add at constant i32 index 4 (byte offset 16, ds_add_u32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_add_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; LDS atomic add with a run-time-computed index (%a - %b + 4); the name suggests
; SI cannot fold this into the instruction's immediate offset — confirm via CHECKs.
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_add_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
  %sub = sub i32 %a, %b
  %add = add i32 %sub, 4
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
  %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_inc_u32 v{{[0-9]+}}, [[NEGONE]] offset:16
; GCN: s_endpgm
; LDS atomic increment expressed as 'atomicrmw add 1'; selected as ds_inc_u32 (CHECK above).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
  ret void
}
; LDS atomic increment (add 1) with a run-time-computed index (%a - %b + 4).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_inc_noret_i32_bad_si_offset(i32 addrspace(3)* %ptr, i32 %a, i32 %b) nounwind {
  %sub = sub i32 %a, %b
  %add = add i32 %sub, 4
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
  ret void
}
; GCN: ds_sub_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic sub at constant i32 index 4 (byte offset 16, ds_sub_u32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_sub_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_dec_u32 v{{[0-9]+}}, [[NEGONE]] offset:16
; GCN: s_endpgm
; LDS atomic decrement expressed as 'atomicrmw sub 1'; selected as ds_dec_u32 (CHECK above).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_dec_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i32 addrspace(3)* %gep, i32 1 seq_cst
  ret void
}
; GCN: ds_and_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic and at constant i32 index 4 (byte offset 16, ds_and_b32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_and_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw and i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_or_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic or at constant i32 index 4 (byte offset 16, ds_or_b32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_or_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw or i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_xor_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic xor at constant i32 index 4 (byte offset 16, ds_xor_b32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_xor_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xor i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_min_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic signed min at constant i32 index 4 (ds_min_i32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_min_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw min i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_max_i32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic signed max at constant i32 index 4 (ds_max_i32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_max_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw max i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_min_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic unsigned min at constant i32 index 4 (ds_min_u32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_umin_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umin i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_max_u32 v{{[0-9]+}}, v{{[0-9]+}} offset:16
; GCN: s_endpgm
; LDS atomic unsigned max at constant i32 index 4 (ds_max_u32 offset:16).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_umax_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umax i32 addrspace(3)* %gep, i32 4 seq_cst
  ret void
}
; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: buffer_store_dwordx2 [[RESULT]],
; GCN: s_endpgm
define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i64 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_inc_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_sub_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_dec_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_and_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_or_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_xor_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_min_rtn_i64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_max_rtn_i64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_min_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_max_rtn_u64 {{.*}} offset:32
; GCN: s_endpgm
define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind {
- %gep = getelementptr i64 addrspace(3)* %ptr, i32 4
+ %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
%result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
store i64 %result, i64 addrspace(1)* %out, align 8
ret void
; GCN: ds_wrxchg_rtn_b64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic xchg at i32 index 4 of an i64 array (byte offset 32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_xchg_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_add_u64 [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic add; note the i64 (not i32) index type and non-trivial operand 9.
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_add_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i64 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst
  ret void
}
; GCN: ds_inc_u64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic increment expressed as 'atomicrmw add 1' (ds_inc_u64, CHECK above).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst
  ret void
}
; GCN: ds_sub_u64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic sub at i32 index 4 (byte offset 32, ds_sub_u64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_sub_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_dec_u64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic decrement expressed as 'atomicrmw sub 1' (ds_dec_u64, CHECK above).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_dec_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst
  ret void
}
; GCN: ds_and_b64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic and at i32 index 4 (byte offset 32, ds_and_b64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_and_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_or_b64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic or at i32 index 4 (byte offset 32, ds_or_b64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_or_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_xor_b64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic xor at i32 index 4 (byte offset 32, ds_xor_b64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_xor_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_min_i64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic signed min at i32 index 4 (ds_min_i64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_min_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_max_i64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic signed max at i32 index 4 (ds_max_i64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_max_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_min_u64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic unsigned min at i32 index 4 (ds_min_u64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_umin_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; GCN: ds_max_u64 {{.*}} offset:32
; GCN: s_endpgm
; LDS 64-bit atomic unsigned max at i32 index 4 (ds_max_u64 offset:32).
; Resolved leftover '-'/'+' patch markers: keep the explicit-pointee-type GEP form.
define void @lds_atomic_umax_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst
  ret void
}
; Each work-item writes its id (and 2*id) into two LDS arrays, synchronizes at a
; barrier, then reads the mirrored elements (index 3 - id) and stores them to
; global memory. Resolved leftover '-'/'+' patch markers: kept the
; explicit-pointee-type GEP form; old-style typed-pointer loads are unchanged,
; as in the rest of this file.
define void @local_memory_two_objects(i32 addrspace(1)* %out) {
entry:
  %x.i = call i32 @llvm.r600.read.tidig.x() #0
  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %x.i
  store i32 %x.i, i32 addrspace(3)* %arrayidx, align 4
  %mul = shl nsw i32 %x.i, 1
  %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %x.i
  store i32 %mul, i32 addrspace(3)* %arrayidx1, align 4
  %sub = sub nsw i32 3, %x.i
  call void @llvm.AMDGPU.barrier.local()
  %arrayidx2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem0, i32 0, i32 %sub
  %0 = load i32 addrspace(3)* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %x.i
  store i32 %0, i32 addrspace(1)* %arrayidx3, align 4
  %arrayidx4 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* @local_memory_two_objects.local_mem1, i32 0, i32 %sub
  %1 = load i32 addrspace(3)* %arrayidx4, align 4
  %add = add nsw i32 %x.i, 4
  %arrayidx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %add
  store i32 %1, i32 addrspace(1)* %arrayidx5, align 4
  ret void
}
; Each work-item stores its id into an LDS array, synchronizes at a barrier,
; then reads the neighboring element ((id + 1) mod 16 via select) and writes it
; to global memory. Resolved leftover '-'/'+' patch markers: kept the
; explicit-pointee-type GEP form; old-style typed-pointer loads are unchanged.
define void @local_memory(i32 addrspace(1)* %out) {
entry:
  %y.i = call i32 @llvm.r600.read.tidig.x() #0
  %arrayidx = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %y.i
  store i32 %y.i, i32 addrspace(3)* %arrayidx, align 4
  %add = add nsw i32 %y.i, 1
  %cmp = icmp eq i32 %add, 16
  %.add = select i1 %cmp, i32 0, i32 %add
  call void @llvm.AMDGPU.barrier.local()
  %arrayidx1 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(3)* @local_memory.local_mem, i32 0, i32 %.add
  %0 = load i32 addrspace(3)* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %y.i
  store i32 %0, i32 addrspace(1)* %arrayidx2, align 4
  ret void
}
%i.07.in = phi i32 [ %i.07, %for.body ], [ %iterations, %entry ]
%ai.06 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%i.07 = add nsw i32 %i.07.in, -1
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %ai.06
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %ai.06
store i32 %i.07, i32 addrspace(1)* %arrayidx, align 4
%add = add nsw i32 %ai.06, 1
%exitcond = icmp eq i32 %add, %iterations
for.body:
%0 = phi i32 [0, %entry], [%4, %for.body]
- %1 = getelementptr i8 addrspace(3)* %in, i32 %0
- %2 = getelementptr i8* %dest, i32 %0
+ %1 = getelementptr i8, i8 addrspace(3)* %in, i32 %0
+ %2 = getelementptr i8, i8* %dest, i32 %0
%3 = load i8 addrspace(3)* %1
store i8 %3, i8* %2
%4 = add i32 %0, 1
for.body:
%0 = phi i32 [0, %entry], [%2, %for.body]
- %1 = getelementptr i8* %dest, i32 %0
+ %1 = getelementptr i8, i8* %dest, i32 %0
store i8 0, i8* %1
%2 = add i32 %0, 1
%3 = icmp eq i32 %2, %size
br i1 %cmp, label %if, label %else
if:
- %lds_ptr = getelementptr [64 x float] addrspace(3)* @lds, i32 0, i32 0
+ %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
%lds_data = load float addrspace(3)* %lds_ptr
br label %endif
; SI: buffer_store_dword [[RESULT]]
define void @combine_to_mad_f32_0(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_mad_f32_0_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: buffer_store_dword [[RESULT]]
define void @combine_to_mad_f32_1(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: buffer_store_dword [[RESULT]]
define void @combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_mad_fsub_0_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: buffer_store_dword [[RESULT]]
define void @combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_mad_fsub_1_f32_2use(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: buffer_store_dword [[RESULT]]
define void @combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_mad_fsub_2_f32_2uses_neg(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @combine_to_mad_fsub_2_f32_2uses_mul(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.out.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.out.1 = getelementptr float addrspace(1)* %gep.out.0, i32 1
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.out.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.out.1 = getelementptr float, float addrspace(1)* %gep.out.0, i32 1
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
; SI: buffer_store_dword [[RESULT]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}}
define void @aggressive_combine_to_mad_fsub_0_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @aggressive_combine_to_mad_fsub_1_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @aggressive_combine_to_mad_fsub_2_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
; SI: s_endpgm
define void @aggressive_combine_to_mad_fsub_3_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
- %gep.3 = getelementptr float addrspace(1)* %gep.0, i32 3
- %gep.4 = getelementptr float addrspace(1)* %gep.0, i32 4
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
+ %gep.3 = getelementptr float, float addrspace(1)* %gep.0, i32 3
+ %gep.4 = getelementptr float, float addrspace(1)* %gep.0, i32 4
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%x = load float addrspace(1)* %gep.0
%y = load float addrspace(1)* %gep.1
define void @mad_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr float addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr float addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr float addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr float addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
%a = load float addrspace(1)* %gep0, align 4
%b = load float addrspace(1)* %gep1, align 4
%c = load float addrspace(1)* %gep2, align 4
define void @mad_sub_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr float addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr float addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr float addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr float addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
%a = load float addrspace(1)* %gep0, align 4
%b = load float addrspace(1)* %gep1, align 4
%c = load float addrspace(1)* %gep2, align 4
define void @mad_sub_f64(double addrspace(1)* noalias nocapture %out, double addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr double addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr double, double addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr double addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr double, double addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr double addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr double addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr double, double addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr double, double addrspace(1)* %out, i64 %tid.ext
%a = load double addrspace(1)* %gep0, align 8
%b = load double addrspace(1)* %gep1, align 8
%c = load double addrspace(1)* %gep2, align 8
define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr float addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr float addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr float addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr float addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
%a = load float addrspace(1)* %gep0, align 4
%b = load float addrspace(1)* %gep1, align 4
%c = load float addrspace(1)* %gep2, align 4
define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr float addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr float addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr float addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr float addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
%a = load float addrspace(1)* %gep0, align 4
%b = load float addrspace(1)* %gep1, align 4
%c = load float addrspace(1)* %gep2, align 4
define void @neg_neg_mad_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr float addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr float addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr float addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr float addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
%a = load float addrspace(1)* %gep0, align 4
%b = load float addrspace(1)* %gep1, align 4
%c = load float addrspace(1)* %gep2, align 4
define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 {
%tid = tail call i32 @llvm.r600.read.tidig.x() #0
%tid.ext = sext i32 %tid to i64
- %gep0 = getelementptr float addrspace(1)* %ptr, i64 %tid.ext
+ %gep0 = getelementptr float, float addrspace(1)* %ptr, i64 %tid.ext
%add1 = add i64 %tid.ext, 1
- %gep1 = getelementptr float addrspace(1)* %ptr, i64 %add1
+ %gep1 = getelementptr float, float addrspace(1)* %ptr, i64 %add1
%add2 = add i64 %tid.ext, 2
- %gep2 = getelementptr float addrspace(1)* %ptr, i64 %add2
- %outgep = getelementptr float addrspace(1)* %out, i64 %tid.ext
+ %gep2 = getelementptr float, float addrspace(1)* %ptr, i64 %add2
+ %outgep = getelementptr float, float addrspace(1)* %out, i64 %tid.ext
%a = load float addrspace(1)* %gep0, align 4
%b = load float addrspace(1)* %gep1, align 4
%c = load float addrspace(1)* %gep2, align 4
; SI: buffer_store_dword [[RESULT]]
define void @fsub_c_fadd_a_a(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; SI: buffer_store_dword [[RESULT]]
define void @fsub_fadd_a_a_c(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.out = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
%r1 = load float addrspace(1)* %gep.0
%r2 = load float addrspace(1)* %gep.1
; GCN: v_madak_f32 {{v[0-9]+}}, [[VB]], [[VA]], 0x41200000
define void @madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
%b = load float addrspace(1)* %in.b.gep, align 4
define void @madak_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
- %in.gep.2 = getelementptr float addrspace(1)* %in.gep.0, i32 2
+ %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.2 = getelementptr float, float addrspace(1)* %in.gep.0, i32 2
- %out.gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %out.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %out.gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %out.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
%a = load float addrspace(1)* %in.gep.0, align 4
%b = load float addrspace(1)* %in.gep.1, align 4
; GCN: v_madak_f32 {{v[0-9]+}}, 4.0, [[VA]], 0x41200000
define void @madak_m_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
; GCN: v_mad_f32 {{v[0-9]+}}, [[VA]], [[VB]], 4.0
define void @madak_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
%b = load float addrspace(1)* %in.b.gep, align 4
; GCN: v_mad_f32 {{v[0-9]+}}, [[SB]], [[VA]], [[VK]]
define void @s_v_madak_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float %b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
; GCN: v_mad_f32 {{v[0-9]+}}, [[VA]], [[SB]], [[VK]]
define void @v_s_madak_f32(float addrspace(1)* noalias %out, float %a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%b = load float addrspace(1)* %in.b.gep, align 4
; GCN: s_endpgm
define void @no_madak_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
%b = load float addrspace(1)* %in.b.gep, align 4
; GCN: s_endpgm
define void @no_madak_src1_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.a.gep = getelementptr float addrspace(1)* %in.a, i32 %tid
- %in.b.gep = getelementptr float addrspace(1)* %in.b, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
+ %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %in.a.gep, align 4
%b = load float addrspace(1)* %in.b.gep, align 4
; GCN: v_madmk_f32 {{v[0-9]+}}, [[VA]], [[VB]], 0x41200000
define void @madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
define void @madmk_2_use_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %in.gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %in.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
- %in.gep.2 = getelementptr float addrspace(1)* %in.gep.0, i32 2
+ %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
+ %in.gep.2 = getelementptr float, float addrspace(1)* %in.gep.0, i32 2
- %out.gep.0 = getelementptr float addrspace(1)* %out, i32 %tid
- %out.gep.1 = getelementptr float addrspace(1)* %in.gep.0, i32 1
+ %out.gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %out.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
%a = load float addrspace(1)* %in.gep.0, align 4
%b = load float addrspace(1)* %in.gep.1, align 4
; GCN: v_mad_f32 {{v[0-9]+}}, 4.0, [[VA]], [[VB]]
define void @madmk_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; GCN: s_endpgm
define void @s_s_madmk_f32(float addrspace(1)* noalias %out, float %a, float %b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%mul = fmul float %a, 10.0
%madmk = fadd float %mul, %b
; GCN: s_endpgm
define void @v_s_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %b) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%mul = fmul float %a, 10.0
; GCN: s_endpgm
define void @scalar_vector_madmk_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in, float %a) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%b = load float addrspace(1)* %gep.0, align 4
%mul = fmul float %a, 10.0
; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}}
define void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, |{{[sv][0-9]+}}|
define void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
%b = load float addrspace(1)* %gep.1, align 4
; GCN: v_mad_f32 {{v[0-9]+}}, [[VK]], [[A]], 2.0
define void @madmk_add_inline_imm_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind {
%tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
%a = load float addrspace(1)* %gep.0, align 4
; SI: v_max_i32_e32
define void @v_test_imax_sge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp sge i32 %a, %b
; SI: v_max_i32_e32
define void @v_test_imax_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp sgt i32 %a, %b
; SI: v_max_u32_e32
define void @v_test_umax_uge_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp uge i32 %a, %b
; SI: v_max_u32_e32
define void @v_test_umax_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp ugt i32 %a, %b
; SI: v_max3_i32
define void @v_test_imax3_sgt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %gep2 = getelementptr i32 addrspace(1)* %cptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%c = load i32 addrspace(1)* %gep2, align 4
; SI: v_max3_u32
define void @v_test_umax3_ugt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %gep2 = getelementptr i32 addrspace(1)* %cptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%c = load i32 addrspace(1)* %gep2, align 4
; SI: v_min_i32_e32
define void @v_test_imin_sle_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp sle i32 %a, %b
; SI: v_min_i32_e32
define void @v_test_imin_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp slt i32 %a, %b
; SI: v_min_u32_e32
define void @v_test_umin_ule_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp ule i32 %a, %b
; SI: v_min_u32_e32
define void @v_test_umin_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp ult i32 %a, %b
; SI: s_endpgm
define void @v_test_umin_ult_i32_multi_use(i32 addrspace(1)* %out0, i1 addrspace(1)* %out1, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %outgep0 = getelementptr i32 addrspace(1)* %out0, i32 %tid
- %outgep1 = getelementptr i1 addrspace(1)* %out1, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %outgep0 = getelementptr i32, i32 addrspace(1)* %out0, i32 %tid
+ %outgep1 = getelementptr i1, i1 addrspace(1)* %out1, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%cmp = icmp ult i32 %a, %b
; SI: v_min3_i32
define void @v_test_imin3_slt_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %gep2 = getelementptr i32 addrspace(1)* %cptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%c = load i32 addrspace(1)* %gep2, align 4
; SI: v_min3_u32
define void @v_test_umin3_ult_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %gep2 = getelementptr i32 addrspace(1)* %cptr, i32 %tid
- %outgep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
+ %outgep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
%c = load i32 addrspace(1)* %gep2, align 4
define void @v_test_umin_umin_umin(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%tid2 = mul i32 %tid, 2
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %gep2 = getelementptr i32 addrspace(1)* %cptr, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
- %gep3 = getelementptr i32 addrspace(1)* %aptr, i32 %tid2
- %gep4 = getelementptr i32 addrspace(1)* %bptr, i32 %tid2
- %gep5 = getelementptr i32 addrspace(1)* %cptr, i32 %tid2
+ %gep3 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid2
+ %gep4 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid2
+ %gep5 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid2
- %outgep0 = getelementptr i32 addrspace(1)* %out, i32 %tid
- %outgep1 = getelementptr i32 addrspace(1)* %out, i32 %tid2
+ %outgep0 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ %outgep1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
define void @v_test_umin3_2_uses(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr, i32 addrspace(1)* %cptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%tid2 = mul i32 %tid, 2
- %gep0 = getelementptr i32 addrspace(1)* %aptr, i32 %tid
- %gep1 = getelementptr i32 addrspace(1)* %bptr, i32 %tid
- %gep2 = getelementptr i32 addrspace(1)* %cptr, i32 %tid
+ %gep0 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+ %gep2 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid
- %gep3 = getelementptr i32 addrspace(1)* %aptr, i32 %tid2
- %gep4 = getelementptr i32 addrspace(1)* %bptr, i32 %tid2
- %gep5 = getelementptr i32 addrspace(1)* %cptr, i32 %tid2
+ %gep3 = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid2
+ %gep4 = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid2
+ %gep5 = getelementptr i32, i32 addrspace(1)* %cptr, i32 %tid2
- %outgep0 = getelementptr i32 addrspace(1)* %out, i32 %tid
- %outgep1 = getelementptr i32 addrspace(1)* %out, i32 %tid2
+ %outgep0 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ %outgep1 = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
%a = load i32 addrspace(1)* %gep0, align 4
%b = load i32 addrspace(1)* %gep1, align 4
; SI: s_endpgm
define void @missing_store_reduced(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(2)* addrspace(3)* @ptr_load, align 8
- %ptr2 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 2
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
store i32 99, i32 addrspace(1)* %gptr, align 4
%tmp2 = load i32 addrspace(2)* %ptr2, align 4
; CHECK: buffer_load_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x30,0xe0
define void @mubuf_load0(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
- %0 = getelementptr i32 addrspace(1)* %in, i64 1
+ %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1
%1 = load i32 addrspace(1)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; CHECK: buffer_load_ubyte v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4095 ; encoding: [0xff,0x0f,0x20,0xe0
define void @mubuf_load1(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
entry:
- %0 = getelementptr i8 addrspace(1)* %in, i64 4095
+ %0 = getelementptr i8, i8 addrspace(1)* %in, i64 4095
%1 = load i8 addrspace(1)* %0
store i8 %1, i8 addrspace(1)* %out
ret void
; CHECK: buffer_load_dword v{{[0-9]}}, s[{{[0-9]+:[0-9]+}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x30,0xe0
define void @mubuf_load2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
entry:
- %0 = getelementptr i32 addrspace(1)* %in, i64 1024
+ %0 = getelementptr i32, i32 addrspace(1)* %in, i64 1024
%1 = load i32 addrspace(1)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; CHECK: buffer_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x30,0xe0
define void @mubuf_load3(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i64 %offset) {
entry:
- %0 = getelementptr i32 addrspace(1)* %in, i64 %offset
- %1 = getelementptr i32 addrspace(1)* %0, i64 1
+ %0 = getelementptr i32, i32 addrspace(1)* %in, i64 %offset
+ %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
%2 = load i32 addrspace(1)* %1
store i32 %2, i32 addrspace(1)* %out
ret void
; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc
define void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 {
main_body:
- %tmp0 = getelementptr [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
+ %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
%tmp1 = load <16 x i8> addrspace(2)* %tmp0
%tmp2 = shl i32 %6, 2
%tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 64, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
; CHECK: buffer_load_dword v{{[0-9+]}}, v{{[0-9+]}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc
define void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 {
main_body:
- %tmp0 = getelementptr [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
+ %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0
%tmp1 = load <16 x i8> addrspace(2)* %tmp0
%tmp2 = shl i32 %6, 2
%tmp3 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp1, i32 %tmp2, i32 65, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0)
; CHECK: buffer_store_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0 offset:4 ; encoding: [0x04,0x00,0x70,0xe0
define void @mubuf_store0(i32 addrspace(1)* %out) {
entry:
- %0 = getelementptr i32 addrspace(1)* %out, i64 1
+ %0 = getelementptr i32, i32 addrspace(1)* %out, i64 1
store i32 0, i32 addrspace(1)* %0
ret void
}
define void @mubuf_store1(i8 addrspace(1)* %out) {
entry:
- %0 = getelementptr i8 addrspace(1)* %out, i64 4095
+ %0 = getelementptr i8, i8 addrspace(1)* %out, i64 4095
store i8 0, i8 addrspace(1)* %0
ret void
}
; CHECK: buffer_store_dword v{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[SOFFSET]] ; encoding: [0x00,0x00,0x70,0xe0
define void @mubuf_store2(i32 addrspace(1)* %out) {
entry:
- %0 = getelementptr i32 addrspace(1)* %out, i64 1024
+ %0 = getelementptr i32, i32 addrspace(1)* %out, i64 1024
store i32 0, i32 addrspace(1)* %0
ret void
}
; CHECK: buffer_store_dword v{{[0-9]}}, v[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0 addr64 offset:4 ; encoding: [0x04,0x80,0x70,0xe0
define void @mubuf_store3(i32 addrspace(1)* %out, i64 %offset) {
entry:
- %0 = getelementptr i32 addrspace(1)* %out, i64 %offset
- %1 = getelementptr i32 addrspace(1)* %0, i64 1
+ %0 = getelementptr i32, i32 addrspace(1)* %out, i64 %offset
+ %1 = getelementptr i32, i32 addrspace(1)* %0, i64 1
store i32 0, i32 addrspace(1)* %1
ret void
}
; CHECK-LABEL: {{^}}store_sgpr_ptr_offset:
; CHECK: buffer_store_dword v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:40
define void @store_sgpr_ptr_offset(i32 addrspace(1)* %out) #0 {
- %out.gep = getelementptr i32 addrspace(1)* %out, i32 10
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 10
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
}
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_store_dword v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define void @store_sgpr_ptr_large_offset(i32 addrspace(1)* %out) #0 {
- %out.gep = getelementptr i32 addrspace(1)* %out, i32 32768
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
}
; CHECK: s_mov_b32 [[SOFFSET:s[0-9]+]], 0x20000
; CHECK: buffer_atomic_add v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, [[SOFFSET]]
define void @store_sgpr_ptr_large_offset_atomic(i32 addrspace(1)* %out) #0 {
- %gep = getelementptr i32 addrspace(1)* %out, i32 32768
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i32 32768
%val = atomicrmw volatile add i32 addrspace(1)* %gep, i32 5 seq_cst
ret void
}
; CHECK: buffer_store_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64
define void @store_vgpr_ptr(i32 addrspace(1)* %out) #0 {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %out.gep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
store i32 99, i32 addrspace(1)* %out.gep, align 4
ret void
}
; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = mul <2 x i32> %a, %b
; SI: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = mul <4 x i32> %a, %b
; FUNC-LABEL: {{^}}v_mul_i32:
; SI: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%result = mul i32 %a, %b
; SI: buffer_store_short v
define void @truncate_buffer_load_i32_to_i16(i16 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i16 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
%load = load i32 addrspace(1)* %gep.in
%trunc = trunc i32 %load to i16
store i16 %trunc, i16 addrspace(1)* %gep.out
; SI: buffer_store_byte v
define void @truncate_buffer_load_i32_to_i8(i8 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
%load = load i32 addrspace(1)* %gep.in
%trunc = trunc i32 %load to i8
store i8 %trunc, i8 addrspace(1)* %gep.out
; SI: buffer_store_byte v
define void @truncate_buffer_load_i32_to_i1(i1 addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i1 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i1, i1 addrspace(1)* %out, i32 %tid
%load = load i32 addrspace(1)* %gep.in
%trunc = trunc i32 %load to i1
store i1 %trunc, i1 addrspace(1)* %gep.out
; SI: buffer_store_dword v
define void @truncate_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%load = load i64 addrspace(1)* %gep.in
%trunc = trunc i64 %load to i32
store i32 %trunc, i32 addrspace(1)* %gep.out
; SI: buffer_store_dword v
define void @srl_buffer_load_i64_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%load = load i64 addrspace(1)* %gep.in
%srl = lshr i64 %load, 32
%trunc = trunc i64 %srl to i32
; SI: buffer_store_byte v
define void @truncate_buffer_load_i16_to_i8(i8 addrspace(1)* %out, i16 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i16 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i16, i16 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
%load = load i16 addrspace(1)* %gep.in
%trunc = trunc i16 %load to i8
store i8 %trunc, i8 addrspace(1)* %gep.out
; SI: buffer_store_byte v
define void @srl_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
%load = load i64 addrspace(1)* %gep.in
%srl = lshr i64 %load, 32
%trunc = trunc i64 %srl to i8
; SI: buffer_store_byte v
define void @truncate_buffer_load_i64_to_i8(i8 addrspace(1)* %out, i64 addrspace(1)* %in) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid
- %gep.out = getelementptr i8 addrspace(1)* %out, i32 %tid
+ %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
%load = load i64 addrspace(1)* %gep.in
%trunc = trunc i64 %load to i8
store i8 %trunc, i8 addrspace(1)* %gep.out
if:
%id = call i32 @llvm.r600.read.tidig.x()
%offset = add i32 %fold, %id
- %tmp1 = getelementptr i32 addrspace(1)* %out, i32 %offset
+ %tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
store i32 0, i32 addrspace(1)* %tmp1
br label %endif
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @or_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = or <2 x i32> %a, %b
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_or_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @or_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = or <4 x i32> %a, %b
define void @atomicrmw_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
- %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
- %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 1
store i32 0, i32* %tmp1
store i32 1, i32* %tmp2
- %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp3 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 %in
%tmp4 = atomicrmw add i32* %tmp3, i32 7 acq_rel
store i32 %tmp4, i32 addrspace(1)* %out
ret void
define void @cmpxchg_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
- %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
- %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 1
store i32 0, i32* %tmp1
store i32 1, i32* %tmp2
- %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp3 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 %in
%tmp4 = cmpxchg i32* %tmp3, i32 0, i32 1 acq_rel monotonic
%val = extractvalue { i32, i1 } %tmp4, 0
store i32 %val, i32 addrspace(1)* %out
define void @call_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
- %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
- %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 1
store i32 0, i32* %tmp1
store i32 1, i32* %tmp2
- %tmp3 = getelementptr [2 x i32]* %tmp, i32 0, i32 %in
+ %tmp3 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 %in
%val = call i32 @foo(i32* %tmp3) nounwind
store i32 %val, i32 addrspace(1)* %out
ret void
entry:
%stack = alloca [5 x i32], align 4
%0 = load i32 addrspace(1)* %in, align 4
- %arrayidx1 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 %0
+ %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
store i32 4, i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %in, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
%1 = load i32 addrspace(1)* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 %1
+ %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
store i32 5, i32* %arrayidx3, align 4
- %arrayidx10 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 0
+ %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
%2 = load i32* %arrayidx10, align 4
store i32 %2, i32 addrspace(1)* %out, align 4
- %arrayidx12 = getelementptr inbounds [5 x i32]* %stack, i32 0, i32 1
+ %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
%3 = load i32* %arrayidx12
- %arrayidx13 = getelementptr inbounds i32 addrspace(1)* %out, i32 1
+ %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
store i32 %3, i32 addrspace(1)* %arrayidx13
ret void
}
entry:
%a = alloca %struct.point
%b = alloca %struct.point
- %a.x.ptr = getelementptr %struct.point* %a, i32 0, i32 0
- %a.y.ptr = getelementptr %struct.point* %a, i32 0, i32 1
- %b.x.ptr = getelementptr %struct.point* %b, i32 0, i32 0
- %b.y.ptr = getelementptr %struct.point* %b, i32 0, i32 1
+ %a.x.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
+ %a.y.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 1
+ %b.x.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
+ %b.y.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 1
store i32 0, i32* %a.x.ptr
store i32 1, i32* %a.y.ptr
store i32 2, i32* %b.x.ptr
store i32 3, i32* %b.y.ptr
- %a.indirect.ptr = getelementptr %struct.point* %a, i32 0, i32 0
- %b.indirect.ptr = getelementptr %struct.point* %b, i32 0, i32 0
+ %a.indirect.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
+ %b.indirect.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
%a.indirect = load i32* %a.indirect.ptr
%b.indirect = load i32* %b.indirect.ptr
%0 = add i32 %a.indirect, %b.indirect
%prv_array_const = alloca [2 x i32]
%prv_array = alloca [2 x i32]
%a = load i32 addrspace(1)* %in
- %b_src_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_src_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%b = load i32 addrspace(1)* %b_src_ptr
- %a_dst_ptr = getelementptr [2 x i32]* %prv_array_const, i32 0, i32 0
+ %a_dst_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
store i32 %a, i32* %a_dst_ptr
- %b_dst_ptr = getelementptr [2 x i32]* %prv_array_const, i32 0, i32 1
+ %b_dst_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 1
store i32 %b, i32* %b_dst_ptr
br label %for.body
for.body:
%inc = phi i32 [0, %entry], [%count, %for.body]
- %x_ptr = getelementptr [2 x i32]* %prv_array_const, i32 0, i32 0
+ %x_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
%x = load i32* %x_ptr
- %y_ptr = getelementptr [2 x i32]* %prv_array, i32 0, i32 0
+ %y_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
%y = load i32* %y_ptr
%xy = add i32 %x, %y
store i32 %xy, i32* %y_ptr
br i1 %done, label %for.end, label %for.body
for.end:
- %value_ptr = getelementptr [2 x i32]* %prv_array, i32 0, i32 0
+ %value_ptr = getelementptr [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
%value = load i32* %value_ptr
store i32 %value, i32 addrspace(1)* %out
ret void
define void @short_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%0 = alloca [2 x i16]
- %1 = getelementptr [2 x i16]* %0, i32 0, i32 0
- %2 = getelementptr [2 x i16]* %0, i32 0, i32 1
+ %1 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 1
store i16 0, i16* %1
store i16 1, i16* %2
- %3 = getelementptr [2 x i16]* %0, i32 0, i32 %index
+ %3 = getelementptr [2 x i16], [2 x i16]* %0, i32 0, i32 %index
%4 = load i16* %3
%5 = sext i16 %4 to i32
store i32 %5, i32 addrspace(1)* %out
define void @char_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%0 = alloca [2 x i8]
- %1 = getelementptr [2 x i8]* %0, i32 0, i32 0
- %2 = getelementptr [2 x i8]* %0, i32 0, i32 1
+ %1 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 1
store i8 0, i8* %1
store i8 1, i8* %2
- %3 = getelementptr [2 x i8]* %0, i32 0, i32 %index
+ %3 = getelementptr [2 x i8], [2 x i8]* %0, i32 0, i32 %index
%4 = load i8* %3
%5 = sext i8 %4 to i32
store i32 %5, i32 addrspace(1)* %out
define void @work_item_info(i32 addrspace(1)* %out, i32 %in) {
entry:
%0 = alloca [2 x i32]
- %1 = getelementptr [2 x i32]* %0, i32 0, i32 0
- %2 = getelementptr [2 x i32]* %0, i32 0, i32 1
+ %1 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 0
+ %2 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 1
store i32 0, i32* %1
store i32 1, i32* %2
- %3 = getelementptr [2 x i32]* %0, i32 0, i32 %in
+ %3 = getelementptr [2 x i32], [2 x i32]* %0, i32 0, i32 %in
%4 = load i32* %3
%5 = call i32 @llvm.r600.read.tidig.x()
%6 = add i32 %4, %5
entry:
%0 = alloca [3 x i8], align 1
%1 = alloca [2 x i8], align 1
- %2 = getelementptr [3 x i8]* %0, i32 0, i32 0
- %3 = getelementptr [3 x i8]* %0, i32 0, i32 1
- %4 = getelementptr [3 x i8]* %0, i32 0, i32 2
- %5 = getelementptr [2 x i8]* %1, i32 0, i32 0
- %6 = getelementptr [2 x i8]* %1, i32 0, i32 1
+ %2 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 0
+ %3 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 1
+ %4 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 2
+ %5 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 0
+ %6 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 1
store i8 0, i8* %2
store i8 1, i8* %3
store i8 2, i8* %4
store i8 1, i8* %5
store i8 0, i8* %6
- %7 = getelementptr [3 x i8]* %0, i32 0, i32 %in
- %8 = getelementptr [2 x i8]* %1, i32 0, i32 %in
+ %7 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 %in
+ %8 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 %in
%9 = load i8* %7
%10 = load i8* %8
%11 = add i8 %9, %10
define void @char_array_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%alloca = alloca [2 x [2 x i8]]
- %gep0 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
- %gep1 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 1
+ %gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 1
store i8 0, i8* %gep0
store i8 1, i8* %gep1
- %gep2 = getelementptr [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
+ %gep2 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
%load = load i8* %gep2
%sext = sext i8 %load to i32
store i32 %sext, i32 addrspace(1)* %out
define void @i32_array_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%alloca = alloca [2 x [2 x i32]]
- %gep0 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
- %gep1 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 1
+ %gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 1
store i32 0, i32* %gep0
store i32 1, i32* %gep1
- %gep2 = getelementptr [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
+ %gep2 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
%load = load i32* %gep2
store i32 %load, i32 addrspace(1)* %out
ret void
define void @i64_array_array(i64 addrspace(1)* %out, i32 %index) {
entry:
%alloca = alloca [2 x [2 x i64]]
- %gep0 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
- %gep1 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 1
+ %gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
+ %gep1 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 1
store i64 0, i64* %gep0
store i64 1, i64* %gep1
- %gep2 = getelementptr [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
+ %gep2 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
%load = load i64* %gep2
store i64 %load, i64 addrspace(1)* %out
ret void
define void @struct_array_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%alloca = alloca [2 x [2 x %struct.pair32]]
- %gep0 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
- %gep1 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 1, i32 1
+ %gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
+ %gep1 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 1, i32 1
store i32 0, i32* %gep0
store i32 1, i32* %gep1
- %gep2 = getelementptr [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
+ %gep2 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
%load = load i32* %gep2
store i32 %load, i32 addrspace(1)* %out
ret void
define void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) {
entry:
%alloca = alloca [2 x %struct.pair32]
- %gep0 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
- %gep1 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 1, i32 0
+ %gep0 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
+ %gep1 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 1, i32 0
store i32 0, i32* %gep0
store i32 1, i32* %gep1
- %gep2 = getelementptr [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
+ %gep2 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
%load = load i32* %gep2
store i32 %load, i32 addrspace(1)* %out
ret void
define void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
entry:
%tmp = alloca [2 x i32]
- %tmp1 = getelementptr [2 x i32]* %tmp, i32 0, i32 0
- %tmp2 = getelementptr [2 x i32]* %tmp, i32 0, i32 1
+ %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 1
store i32 0, i32* %tmp1
store i32 1, i32* %tmp2
%cmp = icmp eq i32 %in, 0
; SI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:5
define void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) {
%alloca = alloca [16 x i32]
- %tmp0 = getelementptr [16 x i32]* %alloca, i32 0, i32 %a
+ %tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
store i32 5, i32* %tmp0
%tmp1 = ptrtoint [16 x i32]* %alloca to i32
%tmp2 = add i32 %tmp1, 5
%tmp3 = inttoptr i32 %tmp2 to i32*
- %tmp4 = getelementptr i32* %tmp3, i32 %b
+ %tmp4 = getelementptr i32, i32* %tmp3, i32 %b
%tmp5 = load i32* %tmp4
store i32 %tmp5, i32 addrspace(1)* %out
ret void
; SI: ; NumVgprs: {{[0-9]+}}
define void @foo(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %abase, i32 addrspace(1)* %bbase) nounwind {
%tid = call i32 @llvm.SI.tid() nounwind readnone
- %aptr = getelementptr i32 addrspace(1)* %abase, i32 %tid
- %bptr = getelementptr i32 addrspace(1)* %bbase, i32 %tid
- %outptr = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %aptr = getelementptr i32, i32 addrspace(1)* %abase, i32 %tid
+ %bptr = getelementptr i32, i32 addrspace(1)* %bbase, i32 %tid
+ %outptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%a = load i32 addrspace(1)* %aptr, align 4
%b = load i32 addrspace(1)* %bptr, align 4
%result = add i32 %a, %b
; SI: s_endpgm
define void @rsqrt_fmul(float addrspace(1)* %out, float addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %out.gep = getelementptr float addrspace(1)* %out, i32 %tid
- %gep.0 = getelementptr float addrspace(1)* %in, i32 %tid
- %gep.1 = getelementptr float addrspace(1)* %gep.0, i32 1
- %gep.2 = getelementptr float addrspace(1)* %gep.0, i32 2
+ %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+ %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
+ %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
+ %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
%a = load float addrspace(1)* %gep.0
%b = load float addrspace(1)* %gep.1
loop:
%4 = phi i64 [0, %entry], [%5, %loop]
%5 = add i64 %2, %4
- %6 = getelementptr i8 addrspace(1)* %in, i64 %5
+ %6 = getelementptr i8, i8 addrspace(1)* %in, i64 %5
%7 = load i8 addrspace(1)* %6, align 1
%8 = or i64 %5, 1
- %9 = getelementptr i8 addrspace(1)* %in, i64 %8
+ %9 = getelementptr i8, i8 addrspace(1)* %in, i64 %8
%10 = load i8 addrspace(1)* %9, align 1
%11 = add i8 %7, %10
%12 = sext i8 %11 to i32
br label %endif
else:
- %2 = getelementptr i32 addrspace(2)* addrspace(1)* %in
+ %2 = getelementptr i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
%3 = load i32 addrspace(2)* addrspace(1)* %2
br label %endif
endif:
%4 = phi i32 addrspace(2)* [%1, %if], [%3, %else]
- %5 = getelementptr i32 addrspace(2)* %4, i32 3000
+ %5 = getelementptr i32, i32 addrspace(2)* %4, i32 3000
%6 = load i32 addrspace(2)* %5
store i32 %6, i32 addrspace(1)* %out
ret void
entry:
%0 = call i32 @llvm.r600.read.tidig.x() nounwind readnone
%1 = add i32 %0, 4
- %2 = getelementptr [8 x i32] addrspace(2)* %in, i32 %0, i32 4
+ %2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %0, i32 4
%3 = load i32 addrspace(2)* %2
store i32 %3, i32 addrspace(1)* %out
ret void
define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) {
entry:
%tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
- %tmp1 = getelementptr inbounds i32 addrspace(2)* %in, i32 %tmp0
+ %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
%tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
%tmp3 = load <8 x i32> addrspace(2)* %tmp2, align 4
store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
define void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) {
entry:
%tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
- %tmp1 = getelementptr inbounds i32 addrspace(2)* %in, i32 %tmp0
+ %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
%tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
%tmp3 = load <16 x i32> addrspace(2)* %tmp2, align 4
store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
; SI: buffer_store_dword [[REG1]]
define void @cluster_global_arg_loads(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %ptr) #0 {
%load0 = load i32 addrspace(1)* %ptr, align 4
- %gep = getelementptr i32 addrspace(1)* %ptr, i32 1
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 1
%load1 = load i32 addrspace(1)* %gep, align 4
store i32 %load0, i32 addrspace(1)* %out0, align 4
store i32 %load1, i32 addrspace(1)* %out1, align 4
; SI: buffer_load_dword
define void @same_base_ptr_crash(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %offset) {
entry:
- %out1 = getelementptr i32 addrspace(1)* %out, i32 %offset
+ %out1 = getelementptr i32, i32 addrspace(1)* %out, i32 %offset
%tmp0 = load i32 addrspace(1)* %out
%tmp1 = load i32 addrspace(1)* %out1
%tmp2 = add i32 %tmp0, %tmp1
%scratch0 = alloca [8192 x i32]
%scratch1 = alloca [8192 x i32]
- %scratchptr0 = getelementptr [8192 x i32]* %scratch0, i32 0, i32 0
+ %scratchptr0 = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 0
store i32 1, i32* %scratchptr0
- %scratchptr1 = getelementptr [8192 x i32]* %scratch1, i32 0, i32 0
+ %scratchptr1 = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 0
store i32 2, i32* %scratchptr1
%cmp = icmp eq i32 %cond, 0
br i1 %cmp, label %if, label %else
if:
- %if_ptr = getelementptr [8192 x i32]* %scratch0, i32 0, i32 %if_offset
+ %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset
%if_value = load i32* %if_ptr
br label %done
else:
- %else_ptr = getelementptr [8192 x i32]* %scratch1, i32 0, i32 %else_offset
+ %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset
%else_value = load i32* %else_ptr
br label %done
%scratch1 = alloca [8192 x i32]
%offset0 = load i32 addrspace(1)* %offsets
- %scratchptr0 = getelementptr [8192 x i32]* %scratch0, i32 0, i32 %offset0
+ %scratchptr0 = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %offset0
store i32 %offset0, i32* %scratchptr0
- %offsetptr1 = getelementptr i32 addrspace(1)* %offsets, i32 1
+ %offsetptr1 = getelementptr i32, i32 addrspace(1)* %offsets, i32 1
%offset1 = load i32 addrspace(1)* %offsetptr1
- %scratchptr1 = getelementptr [8192 x i32]* %scratch1, i32 0, i32 %offset1
+ %scratchptr1 = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %offset1
store i32 %offset1, i32* %scratchptr1
%cmp = icmp eq i32 %cond, 0
br i1 %cmp, label %if, label %else
if:
- %if_ptr = getelementptr [8192 x i32]* %scratch0, i32 0, i32 %if_offset
+ %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset
%if_value = load i32* %if_ptr
br label %done
else:
- %else_ptr = getelementptr [8192 x i32]* %scratch1, i32 0, i32 %else_offset
+ %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset
%else_value = load i32* %else_ptr
br label %done
; FUNC-LABEL: {{^}}sdiv_i32:
; EG: CF_END
define void @sdiv_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in
%den = load i32 addrspace(1) * %den_ptr
%result = sdiv i32 %num, %den
}
define void @sdiv_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%num = load <2 x i32> addrspace(1) * %in
%den = load <2 x i32> addrspace(1) * %den_ptr
%result = sdiv <2 x i32> %num, %den
}
define void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %den_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%num = load <4 x i32> addrspace(1) * %in
%den = load <4 x i32> addrspace(1) * %den_ptr
%result = sdiv <4 x i32> %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
define void @sdiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
- %den_ptr = getelementptr i8 addrspace(1)* %in, i8 1
+ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8 addrspace(1) * %in
%den = load i8 addrspace(1) * %den_ptr
%result = sdiv i8 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
define void @sdiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %den_ptr = getelementptr i16 addrspace(1)* %in, i16 1
+ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16 addrspace(1) * %in, align 2
%den = load i16 addrspace(1) * %den_ptr, align 2
%result = sdiv i16 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
define void @sdiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @sdiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_sdiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_sdiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
define void @srem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
- %den_ptr = getelementptr i8 addrspace(1)* %in, i8 1
+ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8 addrspace(1) * %in
%den = load i8 addrspace(1) * %den_ptr
%result = srem i8 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
define void @srem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %den_ptr = getelementptr i16 addrspace(1)* %in, i16 1
+ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16 addrspace(1) * %in, align 2
%den = load i16 addrspace(1) * %den_ptr, align 2
%result = srem i16 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_INT
define void @srem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @srem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_srem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: INT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_srem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
br i1 %6, label %IF, label %ENDIF
IF:
- %7 = getelementptr i32 addrspace(1)* %out, i32 1
+ %7 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 0, i32 addrspace(1)* %7
br label %ENDIF
br i1 %6, label %ENDIF, label %IF
IF:
- %7 = getelementptr i32 addrspace(1)* %out, i32 1
+ %7 = getelementptr i32, i32 addrspace(1)* %out, i32 1
store i32 0, i32 addrspace(1)* %7
br label %ENDIF
; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = icmp eq <4 x i32> %a, %b
; SI: s_endpgm
define void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptra, <3 x i32> addrspace(1)* %ptrb) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.a = getelementptr <3 x i32> addrspace(1)* %ptra, i32 %tid
- %gep.b = getelementptr <3 x i32> addrspace(1)* %ptrb, i32 %tid
- %gep.out = getelementptr <3 x i32> addrspace(1)* %out, i32 %tid
+ %gep.a = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptra, i32 %tid
+ %gep.b = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptrb, i32 %tid
+ %gep.out = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %out, i32 %tid
%a = load <3 x i32> addrspace(1)* %gep.a
%b = load <3 x i32> addrspace(1)* %gep.b
%cmp = icmp eq <3 x i32> %a, %b
; SI: s_endpgm
define void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra, <3 x i8> addrspace(1)* %ptrb) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep.a = getelementptr <3 x i8> addrspace(1)* %ptra, i32 %tid
- %gep.b = getelementptr <3 x i8> addrspace(1)* %ptrb, i32 %tid
- %gep.out = getelementptr <3 x i8> addrspace(1)* %out, i32 %tid
+ %gep.a = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptra, i32 %tid
+ %gep.b = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptrb, i32 %tid
+ %gep.out = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %out, i32 %tid
%a = load <3 x i8> addrspace(1)* %gep.a
%b = load <3 x i8> addrspace(1)* %gep.b
%cmp = icmp eq <3 x i8> %a, %b
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @v_sext_in_reg_i1_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
- %a.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %b.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %out.gep = getelementptr i64 addrspace(1)* %out, i32 %tid
+ %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64 addrspace(1)* %a.gep, align 8
%b = load i64 addrspace(1)* %b.gep, align 8
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @v_sext_in_reg_i8_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
- %a.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %b.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %out.gep = getelementptr i64 addrspace(1)* %out, i32 %tid
+ %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64 addrspace(1)* %a.gep, align 8
%b = load i64 addrspace(1)* %b.gep, align 8
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define void @v_sext_in_reg_i16_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
- %a.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %b.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %out.gep = getelementptr i64 addrspace(1)* %out, i32 %tid
+ %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64 addrspace(1)* %a.gep, align 8
%b = load i64 addrspace(1)* %b.gep, align 8
; SI: buffer_store_dwordx2 v{{\[}}[[LO]]:[[SHR]]{{\]}}
define void @v_sext_in_reg_i32_to_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x()
- %a.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %b.gep = getelementptr i64 addrspace(1)* %aptr, i32 %tid
- %out.gep = getelementptr i64 addrspace(1)* %out, i32 %tid
+ %a.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %b.gep = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+ %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
%a = load i64 addrspace(1)* %a.gep, align 8
%b = load i64 addrspace(1)* %b.gep, align 8
br i1 %tmp1, label %if, label %else
if:
- %gep.if = getelementptr i32 addrspace(1)* %a, i32 %tid
+ %gep.if = getelementptr i32, i32 addrspace(1)* %a, i32 %tid
%a.val = load i32 addrspace(1)* %gep.if
%cmp.if = icmp eq i32 %a.val, 0
br label %endif
else:
- %gep.else = getelementptr i32 addrspace(1)* %b, i32 %tid
+ %gep.else = getelementptr i32, i32 addrspace(1)* %b, i32 %tid
%b.val = load i32 addrspace(1)* %gep.else
%cmp.else = icmp slt i32 %b.val, 0
br label %endif
define void @phi1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
%23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
; CHECK-LABEL: {{^}}phi2:
define void @phi2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
%23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 32)
%34 = call float @llvm.SI.load.const(<16 x i8> %21, i32 84)
%35 = call float @llvm.SI.load.const(<16 x i8> %21, i32 88)
%36 = call float @llvm.SI.load.const(<16 x i8> %21, i32 92)
- %37 = getelementptr <32 x i8> addrspace(2)* %2, i32 0
+ %37 = getelementptr <32 x i8>, <32 x i8> addrspace(2)* %2, i32 0
%38 = load <32 x i8> addrspace(2)* %37, !tbaa !1
- %39 = getelementptr <16 x i8> addrspace(2)* %1, i32 0
+ %39 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %1, i32 0
%40 = load <16 x i8> addrspace(2)* %39, !tbaa !1
%41 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %5)
%42 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %5)
define void @loop(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
%23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 4)
define void @sample_v3([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
entry:
- %21 = getelementptr [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
+ %21 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
%22 = load <16 x i8> addrspace(2)* %21, !tbaa !2
%23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 16)
- %24 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
+ %24 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
%25 = load <32 x i8> addrspace(2)* %24, !tbaa !2
- %26 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
+ %26 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
%27 = load <16 x i8> addrspace(2)* %26, !tbaa !2
%28 = fcmp oeq float %23, 0.0
br i1 %28, label %if, label %else
; CHECK: s_endpgm
define void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
bb:
- %tmp = getelementptr [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0
+ %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0
%tmp22 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
%tmp23 = call float @llvm.SI.load.const(<16 x i8> %tmp22, i32 16)
- %tmp25 = getelementptr [32 x <8 x i32>] addrspace(2)* %arg3, i32 0, i32 0
+ %tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(2)* %arg3, i32 0, i32 0
%tmp26 = load <8 x i32> addrspace(2)* %tmp25, !tbaa !0
- %tmp27 = getelementptr [16 x <4 x i32>] addrspace(2)* %arg2, i32 0, i32 0
+ %tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(2)* %arg2, i32 0, i32 0
%tmp28 = load <4 x i32> addrspace(2)* %tmp27, !tbaa !0
%tmp29 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %arg5, <2 x i32> %arg7)
%tmp30 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %arg5, <2 x i32> %arg7)
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = shl <2 x i32> %a, %b
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = shl <4 x i32> %a, %b
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
- %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64 addrspace(1) * %in
%b = load i64 addrspace(1) * %b_ptr
%result = shl i64 %a, %b
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64> addrspace(1) * %in
%b = load <2 x i64> addrspace(1) * %b_ptr
%result = shl <2 x i64> %a, %b
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64> addrspace(1) * %in
%b = load <4 x i64> addrspace(1) * %b_ptr
%result = shl <4 x i64> %a, %b
; SI: s_endpgm
define void @shl_2_add_9_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
- %ptr = getelementptr i32 addrspace(1)* %in, i32 %tid.x
+ %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
%val = load i32 addrspace(1)* %ptr, align 4
%add = add i32 %val, 9
%result = shl i32 %add, 2
; SI: s_endpgm
define void @shl_2_add_9_i32_2_add_uses(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
- %ptr = getelementptr i32 addrspace(1)* %in, i32 %tid.x
+ %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
%val = load i32 addrspace(1)* %ptr, align 4
%add = add i32 %val, 9
%result = shl i32 %add, 2
; SI: s_endpgm
define void @shl_2_add_999_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
- %ptr = getelementptr i32 addrspace(1)* %in, i32 %tid.x
+ %ptr = getelementptr i32, i32 addrspace(1)* %in, i32 %tid.x
%val = load i32 addrspace(1)* %ptr, align 4
%shl = add i32 %val, 999
%result = shl i32 %shl, 2
define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
store float %val0, float addrspace(1)* %out
define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
%shl_add_use = shl i32 %idx.0, 2
store i32 %shl_add_use, i32 addrspace(1)* %add_use, align 4
define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 65535
- %arrayidx0 = getelementptr inbounds [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [65536 x i8], [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
%val0 = load i8 addrspace(3)* %arrayidx0
store i32 %idx.0, i32 addrspace(1)* %add_use
store i8 %val0, i8 addrspace(1)* %out
define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 64
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
%val0 = load float addrspace(3)* %arrayidx0, align 4
- %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
+ %arrayidx1 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
%val1 = load float addrspace(3)* %arrayidx1, align 4
%sum = fadd float %val0, %val1
store float %sum, float addrspace(1)* %out, align 4
define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
store float 1.0, float addrspace(3)* %arrayidx0, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
ret void
; define void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
; %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
; %idx.0 = add nsw i32 %tid.x, 2
-; %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+; %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
; %val = load atomic i32 addrspace(3)* %arrayidx0 seq_cst, align 4
; store i32 %val, i32 addrspace(1)* %out, align 4
; store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%pair = cmpxchg i32 addrspace(3)* %arrayidx0, i32 7, i32 %swap seq_cst monotonic
%result = extractvalue { i32, i1 } %pair, 0
store i32 %result, i32 addrspace(1)* %out, align 4
define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw xchg i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw add i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw sub i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw and i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw or i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw xor i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
; define void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
; %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
; %idx.0 = add nsw i32 %tid.x, 2
-; %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+; %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
; %val = atomicrmw nand i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
; store i32 %val, i32 addrspace(1)* %out, align 4
; store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw min i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw max i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw umin i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
%tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
%idx.0 = add nsw i32 %tid.x, 2
- %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
%val = atomicrmw umax i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
store i32 %val, i32 addrspace(1)* %out, align 4
store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20, !tbaa !1
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
- %23 = getelementptr <32 x i8> addrspace(2)* %2, i32 0
+ %23 = getelementptr <32 x i8>, <32 x i8> addrspace(2)* %2, i32 0
%24 = load <32 x i8> addrspace(2)* %23, !tbaa !1
- %25 = getelementptr <16 x i8> addrspace(2)* %1, i32 0
+ %25 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %1, i32 0
%26 = load <16 x i8> addrspace(2)* %25, !tbaa !1
%27 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %3, <2 x i32> %5)
%28 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %3, <2 x i32> %5)
define void @main([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %21 = getelementptr [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
+ %21 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
%22 = load <16 x i8> addrspace(2)* %21, !tbaa !0
%23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 96)
%24 = call float @llvm.SI.load.const(<16 x i8> %22, i32 100)
%58 = call float @llvm.SI.load.const(<16 x i8> %22, i32 372)
%59 = call float @llvm.SI.load.const(<16 x i8> %22, i32 376)
%60 = call float @llvm.SI.load.const(<16 x i8> %22, i32 384)
- %61 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
+ %61 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
%62 = load <32 x i8> addrspace(2)* %61, !tbaa !0
- %63 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
+ %63 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
%64 = load <16 x i8> addrspace(2)* %63, !tbaa !0
- %65 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
+ %65 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
%66 = load <32 x i8> addrspace(2)* %65, !tbaa !0
- %67 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
+ %67 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
%68 = load <16 x i8> addrspace(2)* %67, !tbaa !0
- %69 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
+ %69 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
%70 = load <32 x i8> addrspace(2)* %69, !tbaa !0
- %71 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
+ %71 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
%72 = load <16 x i8> addrspace(2)* %71, !tbaa !0
- %73 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
+ %73 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
%74 = load <32 x i8> addrspace(2)* %73, !tbaa !0
- %75 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
+ %75 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
%76 = load <16 x i8> addrspace(2)* %75, !tbaa !0
- %77 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
+ %77 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
%78 = load <32 x i8> addrspace(2)* %77, !tbaa !0
- %79 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
+ %79 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
%80 = load <16 x i8> addrspace(2)* %79, !tbaa !0
- %81 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
+ %81 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
%82 = load <32 x i8> addrspace(2)* %81, !tbaa !0
- %83 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
+ %83 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
%84 = load <16 x i8> addrspace(2)* %83, !tbaa !0
- %85 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
+ %85 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
%86 = load <32 x i8> addrspace(2)* %85, !tbaa !0
- %87 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
+ %87 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
%88 = load <16 x i8> addrspace(2)* %87, !tbaa !0
- %89 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
+ %89 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
%90 = load <32 x i8> addrspace(2)* %89, !tbaa !0
- %91 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
+ %91 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
%92 = load <16 x i8> addrspace(2)* %91, !tbaa !0
%93 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %4, <2 x i32> %6)
%94 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %4, <2 x i32> %6)
%108 = call float @llvm.SI.fs.interp(i32 1, i32 5, i32 %4, <2 x i32> %6)
%109 = call float @llvm.SI.fs.interp(i32 2, i32 5, i32 %4, <2 x i32> %6)
%110 = call i32 @llvm.SI.tid()
- %111 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %110
+ %111 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %110
%112 = bitcast float %93 to i32
store i32 %112, i32 addrspace(3)* %111
%113 = bitcast float %94 to i32
store i32 %113, i32 addrspace(3)* %111
%114 = call i32 @llvm.SI.tid()
- %115 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %114
+ %115 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %114
%116 = and i32 %114, -4
- %117 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %116
+ %117 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %116
%118 = add i32 %116, 1
- %119 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %118
+ %119 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %118
%120 = bitcast float %93 to i32
store i32 %120, i32 addrspace(3)* %115
%121 = load i32 addrspace(3)* %117
%140 = fmul float %60, %94
%141 = fmul float %60, %94
%142 = call i32 @llvm.SI.tid()
- %143 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %142
+ %143 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %142
%144 = bitcast float %138 to i32
store i32 %144, i32 addrspace(3)* %143
%145 = bitcast float %139 to i32
%147 = bitcast float %141 to i32
store i32 %147, i32 addrspace(3)* %143
%148 = call i32 @llvm.SI.tid()
- %149 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %148
+ %149 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %148
%150 = and i32 %148, -4
- %151 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %150
+ %151 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %150
%152 = add i32 %150, 2
- %153 = getelementptr [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %152
+ %153 = getelementptr [64 x i32], [64 x i32] addrspace(3)* @ddxy_lds, i32 0, i32 %152
%154 = bitcast float %138 to i32
store i32 %154, i32 addrspace(3)* %149
%155 = load i32 addrspace(3)* %151
; CHECK: s_endpgm
define void @main1([17 x <16 x i8>] addrspace(2)* byval, [32 x <16 x i8>] addrspace(2)* byval, [16 x <32 x i8>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %21 = getelementptr [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
+ %21 = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %0, i64 0, i32 0
%22 = load <16 x i8> addrspace(2)* %21, !tbaa !0
%23 = call float @llvm.SI.load.const(<16 x i8> %22, i32 0)
%24 = call float @llvm.SI.load.const(<16 x i8> %22, i32 4)
%123 = call float @llvm.SI.load.const(<16 x i8> %22, i32 716)
%124 = call float @llvm.SI.load.const(<16 x i8> %22, i32 864)
%125 = call float @llvm.SI.load.const(<16 x i8> %22, i32 868)
- %126 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
+ %126 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 0
%127 = load <32 x i8> addrspace(2)* %126, !tbaa !0
- %128 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
+ %128 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 0
%129 = load <16 x i8> addrspace(2)* %128, !tbaa !0
- %130 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
+ %130 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 1
%131 = load <32 x i8> addrspace(2)* %130, !tbaa !0
- %132 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
+ %132 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 1
%133 = load <16 x i8> addrspace(2)* %132, !tbaa !0
- %134 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
+ %134 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 2
%135 = load <32 x i8> addrspace(2)* %134, !tbaa !0
- %136 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
+ %136 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 2
%137 = load <16 x i8> addrspace(2)* %136, !tbaa !0
- %138 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
+ %138 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 3
%139 = load <32 x i8> addrspace(2)* %138, !tbaa !0
- %140 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
+ %140 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 3
%141 = load <16 x i8> addrspace(2)* %140, !tbaa !0
- %142 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
+ %142 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 4
%143 = load <32 x i8> addrspace(2)* %142, !tbaa !0
- %144 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
+ %144 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 4
%145 = load <16 x i8> addrspace(2)* %144, !tbaa !0
- %146 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
+ %146 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 5
%147 = load <32 x i8> addrspace(2)* %146, !tbaa !0
- %148 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
+ %148 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 5
%149 = load <16 x i8> addrspace(2)* %148, !tbaa !0
- %150 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
+ %150 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 6
%151 = load <32 x i8> addrspace(2)* %150, !tbaa !0
- %152 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
+ %152 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 6
%153 = load <16 x i8> addrspace(2)* %152, !tbaa !0
- %154 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
+ %154 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 7
%155 = load <32 x i8> addrspace(2)* %154, !tbaa !0
- %156 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
+ %156 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 7
%157 = load <16 x i8> addrspace(2)* %156, !tbaa !0
- %158 = getelementptr [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 8
+ %158 = getelementptr [16 x <32 x i8>], [16 x <32 x i8>] addrspace(2)* %2, i64 0, i32 8
%159 = load <32 x i8> addrspace(2)* %158, !tbaa !0
- %160 = getelementptr [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 8
+ %160 = getelementptr [32 x <16 x i8>], [32 x <16 x i8>] addrspace(2)* %1, i64 0, i32 8
%161 = load <16 x i8> addrspace(2)* %160, !tbaa !0
%162 = fcmp ugt float %17, 0.000000e+00
%163 = select i1 %162, float 1.000000e+00, float 0.000000e+00
define void @reorder_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
- %ptr1 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 1
- %ptr2 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
%tmp1 = load i32 addrspace(3)* %ptr1, align 4
store i32 99, i32 addrspace(1)* %gptr, align 4
define void @no_reorder_local_load_volatile_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
- %ptr1 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 1
- %ptr2 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
%tmp1 = load i32 addrspace(3)* %ptr1, align 4
store volatile i32 99, i32 addrspace(1)* %gptr, align 4
define void @no_reorder_barrier_local_load_global_store_local_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
- %ptr1 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 1
- %ptr2 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
%tmp1 = load i32 addrspace(3)* %ptr1, align 4
store i32 99, i32 addrspace(1)* %gptr, align 4
define void @no_reorder_constant_load_global_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(1)* %gptr) #0 {
%ptr0 = load i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
- %ptr1 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 1
- %ptr2 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
%tmp1 = load i32 addrspace(2)* %ptr1, align 4
store i32 99, i32 addrspace(1)* %gptr, align 4
define void @reorder_constant_load_local_store_constant_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr) #0 {
%ptr0 = load i32 addrspace(2)* addrspace(3)* @stored_constant_ptr, align 8
- %ptr1 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 1
- %ptr2 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
%tmp1 = load i32 addrspace(2)* %ptr1, align 4
store i32 99, i32 addrspace(3)* %lptr, align 4
; CI: ds_write_b32
; CI: buffer_store_dword
define void @reorder_smrd_load_local_store_smrd_load(i32 addrspace(1)* %out, i32 addrspace(3)* noalias %lptr, i32 addrspace(2)* %ptr0) #0 {
- %ptr1 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 1
- %ptr2 = getelementptr inbounds i32 addrspace(2)* %ptr0, i64 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(2)* %ptr0, i64 2
%tmp1 = load i32 addrspace(2)* %ptr1, align 4
store i32 99, i32 addrspace(3)* %lptr, align 4
; CI: ds_write_b32
; CI: buffer_store_dword
define void @reorder_global_load_local_store_global_load(i32 addrspace(1)* %out, i32 addrspace(3)* %lptr, i32 addrspace(1)* %ptr0) #0 {
- %ptr1 = getelementptr inbounds i32 addrspace(1)* %ptr0, i64 1
- %ptr2 = getelementptr inbounds i32 addrspace(1)* %ptr0, i64 2
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 1
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i64 2
%tmp1 = load i32 addrspace(1)* %ptr1, align 4
store i32 99, i32 addrspace(3)* %lptr, align 4
; CI: buffer_store_dword
; CI: s_endpgm
define void @reorder_local_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(3)* noalias nocapture %ptr0) #0 {
- %ptr1 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 3
- %ptr2 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 100
- %ptr3 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 101
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 3
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 100
+ %ptr3 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 101
store i32 123, i32 addrspace(3)* %ptr1, align 4
%tmp1 = load i32 addrspace(3)* %ptr2, align 4
; CI: buffer_store_dword
; CI: s_endpgm
define void @reorder_global_offsets(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* noalias nocapture readnone %gptr, i32 addrspace(1)* noalias nocapture %ptr0) #0 {
- %ptr1 = getelementptr inbounds i32 addrspace(1)* %ptr0, i32 3
- %ptr2 = getelementptr inbounds i32 addrspace(1)* %ptr0, i32 100
- %ptr3 = getelementptr inbounds i32 addrspace(1)* %ptr0, i32 101
+ %ptr1 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 3
+ %ptr2 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 100
+ %ptr3 = getelementptr inbounds i32, i32 addrspace(1)* %ptr0, i32 101
store i32 123, i32 addrspace(1)* %ptr1, align 4
%tmp1 = load i32 addrspace(1)* %ptr2, align 4
; define void @reorder_local_load_tbuffer_store_local_load(i32 addrspace(1)* %out, i32 %a1, i32 %vaddr) #1 {
; %ptr0 = load i32 addrspace(3)* addrspace(3)* @stored_lds_ptr, align 4
-; %ptr1 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 1
-; %ptr2 = getelementptr inbounds i32 addrspace(3)* %ptr0, i32 2
+; %ptr1 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 1
+; %ptr2 = getelementptr inbounds i32, i32 addrspace(3)* %ptr0, i32 2
; %tmp1 = load i32 addrspace(3)* %ptr1, align 4
entry:
%0 = load i8 addrspace(1)* %in0, align 1
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
- %arrayidx2.i.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 1
+ %arrayidx2.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 1
%2 = load i8 addrspace(1)* %arrayidx2.i.i, align 1
%3 = insertelement <8 x i8> %1, i8 %2, i32 1
- %arrayidx6.i.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 2
+ %arrayidx6.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 2
%4 = load i8 addrspace(1)* %arrayidx6.i.i, align 1
%5 = insertelement <8 x i8> %3, i8 %4, i32 2
- %arrayidx10.i.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 3
+ %arrayidx10.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 3
%6 = load i8 addrspace(1)* %arrayidx10.i.i, align 1
%7 = insertelement <8 x i8> %5, i8 %6, i32 3
- %arrayidx.i.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 4
+ %arrayidx.i.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 4
%8 = load i8 addrspace(1)* %arrayidx.i.i, align 1
%9 = insertelement <8 x i8> undef, i8 %8, i32 0
- %arrayidx2.i9.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 5
+ %arrayidx2.i9.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 5
%10 = load i8 addrspace(1)* %arrayidx2.i9.i, align 1
%11 = insertelement <8 x i8> %9, i8 %10, i32 1
- %arrayidx6.i11.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 6
+ %arrayidx6.i11.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 6
%12 = load i8 addrspace(1)* %arrayidx6.i11.i, align 1
%13 = insertelement <8 x i8> %11, i8 %12, i32 2
- %arrayidx10.i13.i = getelementptr inbounds i8 addrspace(1)* %in0, i64 7
+ %arrayidx10.i13.i = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 7
%14 = load i8 addrspace(1)* %arrayidx10.i13.i, align 1
%15 = insertelement <8 x i8> %13, i8 %14, i32 3
%vecinit5.i = shufflevector <8 x i8> %7, <8 x i8> %15, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
%16 = load i8 addrspace(1)* %in1, align 1
%17 = insertelement <8 x i8> undef, i8 %16, i32 0
- %arrayidx2.i.i4 = getelementptr inbounds i8 addrspace(1)* %in1, i64 1
+ %arrayidx2.i.i4 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 1
%18 = load i8 addrspace(1)* %arrayidx2.i.i4, align 1
%19 = insertelement <8 x i8> %17, i8 %18, i32 1
- %arrayidx6.i.i5 = getelementptr inbounds i8 addrspace(1)* %in1, i64 2
+ %arrayidx6.i.i5 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 2
%20 = load i8 addrspace(1)* %arrayidx6.i.i5, align 1
%21 = insertelement <8 x i8> %19, i8 %20, i32 2
- %arrayidx10.i.i6 = getelementptr inbounds i8 addrspace(1)* %in1, i64 3
+ %arrayidx10.i.i6 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 3
%22 = load i8 addrspace(1)* %arrayidx10.i.i6, align 1
%23 = insertelement <8 x i8> %21, i8 %22, i32 3
- %arrayidx.i.i7 = getelementptr inbounds i8 addrspace(1)* %in1, i64 4
+ %arrayidx.i.i7 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 4
%24 = load i8 addrspace(1)* %arrayidx.i.i7, align 1
%25 = insertelement <8 x i8> undef, i8 %24, i32 0
- %arrayidx2.i9.i8 = getelementptr inbounds i8 addrspace(1)* %in1, i64 5
+ %arrayidx2.i9.i8 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 5
%26 = load i8 addrspace(1)* %arrayidx2.i9.i8, align 1
%27 = insertelement <8 x i8> %25, i8 %26, i32 1
- %arrayidx6.i11.i9 = getelementptr inbounds i8 addrspace(1)* %in1, i64 6
+ %arrayidx6.i11.i9 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 6
%28 = load i8 addrspace(1)* %arrayidx6.i11.i9, align 1
%29 = insertelement <8 x i8> %27, i8 %28, i32 2
- %arrayidx10.i13.i10 = getelementptr inbounds i8 addrspace(1)* %in1, i64 7
+ %arrayidx10.i13.i10 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 7
%30 = load i8 addrspace(1)* %arrayidx10.i13.i10, align 1
%31 = insertelement <8 x i8> %29, i8 %30, i32 3
%vecinit5.i11 = shufflevector <8 x i8> %23, <8 x i8> %31, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
%32 = extractelement <8 x i8> %cond.i, i32 0
store i8 %32, i8 addrspace(1)* %out, align 1
%33 = extractelement <8 x i8> %cond.i, i32 1
- %arrayidx2.i.i.i = getelementptr inbounds i8 addrspace(1)* %out, i64 1
+ %arrayidx2.i.i.i = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 1
store i8 %33, i8 addrspace(1)* %arrayidx2.i.i.i, align 1
%34 = extractelement <8 x i8> %cond.i, i32 2
- %arrayidx.i.i.i = getelementptr inbounds i8 addrspace(1)* %out, i64 2
+ %arrayidx.i.i.i = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 2
store i8 %34, i8 addrspace(1)* %arrayidx.i.i.i, align 1
%35 = extractelement <8 x i8> %cond.i, i32 3
- %arrayidx2.i6.i.i = getelementptr inbounds i8 addrspace(1)* %out, i64 3
+ %arrayidx2.i6.i.i = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 3
store i8 %35, i8 addrspace(1)* %arrayidx2.i6.i.i, align 1
- %arrayidx.i.i3 = getelementptr inbounds i8 addrspace(1)* %out, i64 4
+ %arrayidx.i.i3 = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 4
%36 = extractelement <8 x i8> %cond.i, i32 4
store i8 %36, i8 addrspace(1)* %arrayidx.i.i3, align 1
%37 = extractelement <8 x i8> %cond.i, i32 5
- %arrayidx2.i.i6.i = getelementptr inbounds i8 addrspace(1)* %out, i64 5
+ %arrayidx2.i.i6.i = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 5
store i8 %37, i8 addrspace(1)* %arrayidx2.i.i6.i, align 1
%38 = extractelement <8 x i8> %cond.i, i32 6
- %arrayidx.i.i7.i = getelementptr inbounds i8 addrspace(1)* %out, i64 6
+ %arrayidx.i.i7.i = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 6
store i8 %38, i8 addrspace(1)* %arrayidx.i.i7.i, align 1
%39 = extractelement <8 x i8> %cond.i, i32 7
- %arrayidx2.i6.i8.i = getelementptr inbounds i8 addrspace(1)* %out, i64 7
+ %arrayidx2.i6.i8.i = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 7
store i8 %39, i8 addrspace(1)* %arrayidx2.i6.i8.i, align 1
ret void
}
define void @trunc_load_alloca_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) {
%idx = add i32 %a, %b
%alloca = alloca i64, i32 4
- %gep0 = getelementptr i64* %alloca, i64 0
- %gep1 = getelementptr i64* %alloca, i64 1
- %gep2 = getelementptr i64* %alloca, i64 2
- %gep3 = getelementptr i64* %alloca, i64 3
+ %gep0 = getelementptr i64, i64* %alloca, i64 0
+ %gep1 = getelementptr i64, i64* %alloca, i64 1
+ %gep2 = getelementptr i64, i64* %alloca, i64 2
+ %gep3 = getelementptr i64, i64* %alloca, i64 3
store i64 24, i64* %gep0, align 8
store i64 9334, i64* %gep1, align 8
store i64 3935, i64* %gep2, align 8
store i64 9342, i64* %gep3, align 8
- %gep = getelementptr i64* %alloca, i32 %idx
+ %gep = getelementptr i64, i64* %alloca, i32 %idx
%load = load i64* %gep, align 8
%mask = and i64 %load, 4294967296
%add = add i64 %mask, -1
; SI: buffer_store_dwordx2 [[RESULT]]
define void @v_sint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%val = load i64 addrspace(1)* %gep, align 8
%result = sitofp i64 %val to double
store double %result, double addrspace(1)* %out
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
define void @smrd0(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
- %0 = getelementptr i32 addrspace(2)* %ptr, i64 1
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 1
%1 = load i32 addrspace(2)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define void @smrd1(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
- %0 = getelementptr i32 addrspace(2)* %ptr, i64 255
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 255
%1 = load i32 addrspace(2)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; GCN: s_endpgm
define void @smrd2(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
- %0 = getelementptr i32 addrspace(2)* %ptr, i64 256
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 256
%1 = load i32 addrspace(2)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; GCN: s_endpgm
define void @smrd3(i32 addrspace(1)* %out, i32 addrspace(2)* %ptr) {
entry:
- %0 = getelementptr i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
+ %0 = getelementptr i32, i32 addrspace(2)* %ptr, i64 4294967296 ; 2 ^ 32
%1 = load i32 addrspace(2)* %0
store i32 %1, i32 addrspace(1)* %out
ret void
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
define void @smrd_load_const0(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define void @smrd_load_const1(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1020)
call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
; VI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
define void @smrd_load_const2(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
main_body:
- %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %20 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %0, i32 0
%21 = load <16 x i8> addrspace(2)* %20
%22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 1024)
call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %22, float %22, float %22, float %22)
; SI: v_addc_u32
define void @imp_def_vcc_split_i64_add_2(i64 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %val0, i64 %val1) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %gep = getelementptr i32 addrspace(1)* %in, i32 %tid
+ %gep = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
%load = load i32 addrspace(1)* %gep
%vec.0 = insertelement <2 x i32> undef, i32 %val0, i32 0
%vec.1 = insertelement <2 x i32> %vec.0, i32 %load, i32 1
;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @ashr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = ashr <2 x i32> %a, %b
;VI: v_ashrrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @ashr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = ashr <4 x i32> %a, %b
define void @ashr_i64_2(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
entry:
- %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64 addrspace(1) * %in
%b = load i64 addrspace(1) * %b_ptr
%result = ashr i64 %a, %b
;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @ashr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64> addrspace(1) * %in
%b = load <2 x i64> addrspace(1) * %b_ptr
%result = ashr <2 x i64> %a, %b
;VI: v_ashrrev_i64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define void @ashr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64> addrspace(1) * %in
%b = load <4 x i64> addrspace(1) * %b_ptr
%result = ashr <4 x i64> %a, %b
; RUN: llc -march=r600 -mcpu=redwood < %s
define void @srem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in
%den = load i32 addrspace(1) * %den_ptr
%result = srem i32 %num, %den
}
define void @srem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %den_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%num = load <2 x i32> addrspace(1) * %in
%den = load <2 x i32> addrspace(1) * %den_ptr
%result = srem <2 x i32> %num, %den
}
define void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %den_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%num = load <4 x i32> addrspace(1) * %in
%den = load <4 x i32> addrspace(1) * %den_ptr
%result = srem <4 x i32> %num, %den
}
define void @srem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
- %den_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %den_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%num = load i64 addrspace(1) * %in
%den = load i64 addrspace(1) * %den_ptr
%result = srem i64 %num, %den
}
define void @srem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
- %den_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %den_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%num = load <2 x i64> addrspace(1) * %in
%den = load <2 x i64> addrspace(1) * %den_ptr
%result = srem <2 x i64> %num, %den
}
define void @srem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
- %den_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %den_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%num = load <4 x i64> addrspace(1) * %in
%den = load <4 x i64> addrspace(1) * %den_ptr
%result = srem <4 x i64> %num, %den
; VI: v_lshrrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%result = lshr i32 %a, %b
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @lshr_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1)* %in
%b = load <2 x i32> addrspace(1)* %b_ptr
%result = lshr <2 x i32> %a, %b
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: LSHR {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
define void @lshr_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1)* %in
%b = load <4 x i32> addrspace(1)* %b_ptr
%result = lshr <4 x i32> %a, %b
; EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
; EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0
define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
- %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64 addrspace(1)* %in
%b = load i64 addrspace(1)* %b_ptr
%result = lshr i64 %a, %b
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
define void @lshr_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64> addrspace(1)* %in
%b = load <2 x i64> addrspace(1)* %b_ptr
%result = lshr <2 x i64> %a, %b
; EG-DAG: CNDE_INT
; EG-DAG: CNDE_INT
define void @lshr_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64> addrspace(1)* %in
%b = load <4 x i64> addrspace(1)* %b_ptr
%result = lshr <4 x i64> %a, %b
; Function Attrs: nounwind
define void @test(<2 x i8> addrspace(3)* nocapture %arg, <2 x i8> addrspace(1)* nocapture readonly %arg1, i32 addrspace(1)* nocapture readonly %arg2, <2 x i8> addrspace(1)* nocapture %arg3, i32 %arg4, i64 %tmp9) {
bb:
- %tmp10 = getelementptr inbounds i32 addrspace(1)* %arg2, i64 %tmp9
+ %tmp10 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp9
%tmp13 = load i32 addrspace(1)* %tmp10, align 2
- %tmp14 = getelementptr inbounds <2 x i8> addrspace(3)* %arg, i32 %tmp13
+ %tmp14 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp13
%tmp15 = load <2 x i8> addrspace(3)* %tmp14, align 2
%tmp16 = add i32 %tmp13, 1
- %tmp17 = getelementptr inbounds <2 x i8> addrspace(3)* %arg, i32 %tmp16
+ %tmp17 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp16
store <2 x i8> %tmp15, <2 x i8> addrspace(3)* %tmp17, align 2
tail call void @llvm.AMDGPU.barrier.local() #2
%tmp25 = load i32 addrspace(1)* %tmp10, align 4
%tmp26 = sext i32 %tmp25 to i64
%tmp27 = sext i32 %arg4 to i64
- %tmp28 = getelementptr inbounds <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 %arg4
+ %tmp28 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 %arg4
%tmp29 = load i8 addrspace(3)* %tmp28, align 1
- %tmp30 = getelementptr inbounds <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 %tmp27
+ %tmp30 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 %tmp27
store i8 %tmp29, i8 addrspace(1)* %tmp30, align 1
- %tmp32 = getelementptr inbounds <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 0
+ %tmp32 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(3)* %arg, i32 %tmp25, i32 0
%tmp33 = load i8 addrspace(3)* %tmp32, align 1
- %tmp35 = getelementptr inbounds <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 0
+ %tmp35 = getelementptr inbounds <2 x i8>, <2 x i8> addrspace(1)* %arg3, i64 %tmp26, i64 0
store i8 %tmp33, i8 addrspace(1)* %tmp35, align 1
ret void
}
; scratch loads and stores.
; CHECK-LABEL: {{^}}store_vector_ptrs:
define void @store_vector_ptrs(<4 x i32*>* %out, <4 x [1024 x i32]*> %array) nounwind {
- %p = getelementptr <4 x [1024 x i32]*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
+ %p = getelementptr [1024 x i32], <4 x [1024 x i32]*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
store <4 x i32*> %p, <4 x i32*>* %out
ret void
}
define void @vecload2(i32 addrspace(1)* nocapture %out, i32 addrspace(2)* nocapture %mem) #0 {
entry:
%0 = load i32 addrspace(2)* %mem, align 4
- %arrayidx1.i = getelementptr inbounds i32 addrspace(2)* %mem, i64 1
+ %arrayidx1.i = getelementptr inbounds i32, i32 addrspace(2)* %mem, i64 1
%1 = load i32 addrspace(2)* %arrayidx1.i, align 4
store i32 %0, i32 addrspace(1)* %out, align 4
- %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %out, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 %1, i32 addrspace(1)* %arrayidx1, align 4
ret void
}
define void @i128-const-store(i32 addrspace(1)* %out) {
entry:
store i32 1, i32 addrspace(1)* %out, align 4
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %out, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 1
store i32 1, i32 addrspace(1)* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds i32 addrspace(1)* %out, i64 2
+ %arrayidx4 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 2
store i32 2, i32 addrspace(1)* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds i32 addrspace(1)* %out, i64 3
+ %arrayidx6 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 3
store i32 2, i32 addrspace(1)* %arrayidx6, align 4
ret void
}
; SI: v_subrev_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test_sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%result = sub i32 %a, %b
; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test_sub_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = sub <2 x i32> %a, %b
; SI: v_sub_i32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
define void @test_sub_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = sub <4 x i32> %a, %b
; EG-DAG: SUB_INT
define void @v_sub_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %inA, i64 addrspace(1)* noalias %inB) nounwind {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %a_ptr = getelementptr i64 addrspace(1)* %inA, i32 %tid
- %b_ptr = getelementptr i64 addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr i64, i64 addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %inB, i32 %tid
%a = load i64 addrspace(1)* %a_ptr
%b = load i64 addrspace(1)* %b_ptr
%result = sub i64 %a, %b
; SI: v_subb_u32_e32
define void @v_test_sub_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* noalias %inA, <2 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %a_ptr = getelementptr <2 x i64> addrspace(1)* %inA, i32 %tid
- %b_ptr = getelementptr <2 x i64> addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %inB, i32 %tid
%a = load <2 x i64> addrspace(1)* %a_ptr
%b = load <2 x i64> addrspace(1)* %b_ptr
%result = sub <2 x i64> %a, %b
; SI: v_subb_u32_e32
define void @v_test_sub_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* noalias %inA, <4 x i64> addrspace(1)* noalias %inB) {
%tid = call i32 @llvm.r600.read.tidig.x() readnone
- %a_ptr = getelementptr <4 x i64> addrspace(1)* %inA, i32 %tid
- %b_ptr = getelementptr <4 x i64> addrspace(1)* %inB, i32 %tid
+ %a_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inA, i32 %tid
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %inB, i32 %tid
%a = load <4 x i64> addrspace(1)* %a_ptr
%b = load <4 x i64> addrspace(1)* %b_ptr
%result = sub <4 x i64> %a, %b
; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]]
define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr i64 addrspace(1)* %in, i32 %tid
- %out.gep = getelementptr i32 addrspace(1)* %out, i32 %tid
+ %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
%x = load i64 addrspace(1)* %gep
%trunc = trunc i64 %x to i1
%add2 = add nsw i32 %b.addr.014, 1
%1 = sext i32 %b.addr.014 to i64
%add.ptr.sum = add nsw i64 %1, %0
- %add.ptr5 = getelementptr inbounds i8 addrspace(1)* %dst, i64 %add.ptr.sum
+ %add.ptr5 = getelementptr inbounds i8, i8 addrspace(1)* %dst, i64 %add.ptr.sum
store i8 0, i8 addrspace(1)* %add.ptr5, align 1
%inc = add nsw i32 %i.015, 1
%cmp1 = icmp slt i32 %inc, 4
;EG: CF_END
define void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1) * %in
%b = load i32 addrspace(1) * %b_ptr
%result = udiv i32 %a, %b
;SI: s_endpgm
define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1) * %in
%b = load <2 x i32> addrspace(1) * %b_ptr
%result = udiv <2 x i32> %a, %b
;SI: s_endpgm
define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1) * %in
%b = load <4 x i32> addrspace(1) * %b_ptr
%result = udiv <4 x i32> %a, %b
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
define void @udiv24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
- %den_ptr = getelementptr i8 addrspace(1)* %in, i8 1
+ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8 addrspace(1) * %in
%den = load i8 addrspace(1) * %den_ptr
%result = udiv i8 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
define void @udiv24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %den_ptr = getelementptr i16 addrspace(1)* %in, i16 1
+ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16 addrspace(1) * %in, align 2
%den = load i16 addrspace(1) * %den_ptr, align 2
%result = udiv i16 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
define void @udiv24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @udiv25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_udiv24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_udiv24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
define void @urem24_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %in) {
- %den_ptr = getelementptr i8 addrspace(1)* %in, i8 1
+ %den_ptr = getelementptr i8, i8 addrspace(1)* %in, i8 1
%num = load i8 addrspace(1) * %in
%den = load i8 addrspace(1) * %den_ptr
%result = urem i8 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
define void @urem24_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
- %den_ptr = getelementptr i16 addrspace(1)* %in, i16 1
+ %den_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
%num = load i16 addrspace(1) * %in, align 2
%den = load i16 addrspace(1) * %den_ptr, align 2
%result = urem i16 %num, %den
; EG-DAG: RECIP_IEEE
; EG: FLT_TO_UINT
define void @urem24_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @urem25_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_urem24_i32_1(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 8
; EG-NOT: UINT_TO_FLT
; EG-NOT: RECIP_IEEE
define void @test_no_urem24_i32_2(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %den_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %den_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%num = load i32 addrspace(1) * %in, align 4
%den = load i32 addrspace(1) * %den_ptr, align 4
%num.i24.0 = shl i32 %num, 7
; SI: buffer_store_dwordx2 [[RESULT]]
define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
%tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
- %gep = getelementptr i64 addrspace(1)* %in, i32 %tid
+ %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
%val = load i64 addrspace(1)* %gep, align 8
%result = uitofp i64 %val to double
store double %result, double addrspace(1)* %out
; SI: ds_read2_b32 v[{{[0-9]+}}:{{[0-9]+}}], v{{[0-9]}} offset0:8 offset1:9
; SI: s_endpgm
define void @load_lds_i64_align_4_with_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
- %ptr = getelementptr i64 addrspace(3)* %in, i32 4
+ %ptr = getelementptr i64, i64 addrspace(3)* %in, i32 4
%val = load i64 addrspace(3)* %ptr, align 4
store i64 %val, i64 addrspace(1)* %out, align 8
ret void
; SI: s_endpgm
define void @load_lds_i64_align_4_with_split_offset(i64 addrspace(1)* nocapture %out, i64 addrspace(3)* %in) #0 {
%ptr = bitcast i64 addrspace(3)* %in to i32 addrspace(3)*
- %ptr255 = getelementptr i32 addrspace(3)* %ptr, i32 255
+ %ptr255 = getelementptr i32, i32 addrspace(3)* %ptr, i32 255
%ptri64 = bitcast i32 addrspace(3)* %ptr255 to i64 addrspace(3)*
%val = load i64 addrspace(3)* %ptri64, align 4
store i64 %val, i64 addrspace(1)* %out, align 8
; SI: ds_write2_b32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} offset0:8 offset1:9
; SI: s_endpgm
define void @store_lds_i64_align_4_with_offset(i64 addrspace(3)* %out) #0 {
- %ptr = getelementptr i64 addrspace(3)* %out, i32 4
+ %ptr = getelementptr i64, i64 addrspace(3)* %out, i32 4
store i64 0, i64 addrspace(3)* %ptr, align 4
ret void
}
; SI: s_endpgm
define void @store_lds_i64_align_4_with_split_offset(i64 addrspace(3)* %out) #0 {
%ptr = bitcast i64 addrspace(3)* %out to i32 addrspace(3)*
- %ptr255 = getelementptr i32 addrspace(3)* %ptr, i32 255
+ %ptr255 = getelementptr i32, i32 addrspace(3)* %ptr, i32 255
%ptri64 = bitcast i32 addrspace(3)* %ptr255 to i64 addrspace(3)*
store i64 0, i64 addrspace(3)* %out, align 4
ret void
%main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
%0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
%1 = load i32 addrspace(1)* %0, align 4
- %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %main_stride
%2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
%3 = load i32 addrspace(1)* %2, align 4
- %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
%5 = load i32 addrspace(1)* %4, align 4
- %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
%7 = load i32 addrspace(1)* %6, align 4
- %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
%9 = load i32 addrspace(1)* %8, align 4
- %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ %add.ptr6 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 undef
br i1 undef, label %for.end, label %for.body
for.end: ; preds = %for.body, %entry
%main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
%0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
%1 = load i32 addrspace(1)* %0, align 4
- %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %main_stride
%2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
%3 = load i32 addrspace(1)* %2, align 4
- %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
%5 = load i32 addrspace(1)* %4, align 4
- %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
%7 = load i32 addrspace(1)* %6, align 4
- %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
%9 = load i32 addrspace(1)* %8, align 4
- %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ %add.ptr6 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 undef
br i1 undef, label %for.end, label %for.body
for.end: ; preds = %for.body, %entry
%main.addr.011 = phi i8 addrspace(1)* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
%0 = bitcast i8 addrspace(1)* %main.addr.011 to i32 addrspace(1)*
%1 = load i32 addrspace(1)* %0, align 4
- %add.ptr = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %main_stride
+ %add.ptr = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %main_stride
%2 = bitcast i8 addrspace(1)* %add.ptr to i32 addrspace(1)*
%3 = load i32 addrspace(1)* %2, align 4
- %add.ptr1 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8 addrspace(1)* %add.ptr1 to i32 addrspace(1)*
%5 = load i32 addrspace(1)* %4, align 4
- %add.ptr2 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
+ %add.ptr2 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8 addrspace(1)* %add.ptr2 to i32 addrspace(1)*
%7 = load i32 addrspace(1)* %6, align 4
- %add.ptr3 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
+ %add.ptr3 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8 addrspace(1)* %add.ptr3 to i32 addrspace(1)*
%9 = load i32 addrspace(1)* %8, align 4
- %add.ptr6 = getelementptr inbounds i8 addrspace(1)* %main.addr.011, i32 undef
+ %add.ptr6 = getelementptr inbounds i8, i8 addrspace(1)* %main.addr.011, i32 undef
br i1 undef, label %for.end, label %for.body
for.end: ; preds = %for.body, %entry
br label %loop.body
loop.body:
- %ptr = getelementptr [32 x i32]* %0, i32 0, i32 %counter
+ %ptr = getelementptr [32 x i32], [32 x i32]* %0, i32 0, i32 %counter
store i32 %counter, i32* %ptr
br label %loop.inc
br i1 %1, label %exit, label %loop.header
exit:
- %2 = getelementptr [32 x i32]* %0, i32 0, i32 5
+ %2 = getelementptr [32 x i32], [32 x i32]* %0, i32 0, i32 5
%3 = load i32* %2
store i32 %3, i32 addrspace(1)* %out
ret void
; SI: s_endpgm
; EG: CF_END
define void @test_urem_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
- %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
%a = load i32 addrspace(1)* %in
%b = load i32 addrspace(1)* %b_ptr
%result = urem i32 %a, %b
; SI: s_endpgm
; EG: CF_END
define void @test_urem_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
%a = load <2 x i32> addrspace(1)* %in
%b = load <2 x i32> addrspace(1)* %b_ptr
%result = urem <2 x i32> %a, %b
; SI: s_endpgm
; EG: CF_END
define void @test_urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
+ %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
%a = load <4 x i32> addrspace(1)* %in
%b = load <4 x i32> addrspace(1)* %b_ptr
%result = urem <4 x i32> %a, %b
; SI: s_endpgm
; EG: CF_END
define void @test_urem_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
- %b_ptr = getelementptr i64 addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
%a = load i64 addrspace(1)* %in
%b = load i64 addrspace(1)* %b_ptr
%result = urem i64 %a, %b
; SI: s_endpgm
; EG: CF_END
define void @test_urem_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <2 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
%a = load <2 x i64> addrspace(1)* %in
%b = load <2 x i64> addrspace(1)* %b_ptr
%result = urem <2 x i64> %a, %b
; SI: s_endpgm
; EG: CF_END
define void @test_urem_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
- %b_ptr = getelementptr <4 x i64> addrspace(1)* %in, i64 1
+ %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
%a = load <4 x i64> addrspace(1)* %in
%b = load <4 x i64> addrspace(1)* %b_ptr
%result = urem <4 x i64> %a, %b
; SI: s_endpgm
define void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, float addrspace(1)* %fptr) #0 {
%idx = call i32 @llvm.r600.read.tidig.x() #1
- %f.gep = getelementptr float addrspace(1)* %fptr, i32 %idx
+ %f.gep = getelementptr float, float addrspace(1)* %fptr, i32 %idx
%f = load float addrspace(1)* %fptr
%setcc = icmp ne i32 %c, 0
%select = select i1 %setcc, float 0xFFFFFFFFE0000000, float %f
]
case0:
- %arrayidx1 = getelementptr i32 addrspace(1)* %dst, i32 %b
+ %arrayidx1 = getelementptr i32, i32 addrspace(1)* %dst, i32 %b
store i32 0, i32 addrspace(1)* %arrayidx1, align 4
br label %end
case1:
- %arrayidx5 = getelementptr i32 addrspace(1)* %dst, i32 %b
+ %arrayidx5 = getelementptr i32, i32 addrspace(1)* %dst, i32 %b
store i32 1, i32 addrspace(1)* %arrayidx5, align 4
br label %end
default:
%cmp8 = icmp eq i32 %a, 2
- %arrayidx10 = getelementptr i32 addrspace(1)* %dst, i32 %b
+ %arrayidx10 = getelementptr i32, i32 addrspace(1)* %dst, i32 %b
br i1 %cmp8, label %if, label %else
if:
br i1 %is.0, label %store, label %exit
store:
- %gep = getelementptr i32 addrspace(1)* %dst, i32 %tid
+ %gep = getelementptr i32, i32 addrspace(1)* %dst, i32 %tid
store i32 999, i32 addrspace(1)* %gep
ret void
loop:
%i = phi i32 [%tid, %entry], [%i.inc, %loop]
- %gep.src = getelementptr i32 addrspace(1)* %src, i32 %i
- %gep.dst = getelementptr i32 addrspace(1)* %dst, i32 %i
+ %gep.src = getelementptr i32, i32 addrspace(1)* %src, i32 %i
+ %gep.dst = getelementptr i32, i32 addrspace(1)* %dst, i32 %i
%load = load i32 addrspace(1)* %src
store i32 %load, i32 addrspace(1)* %gep.dst
%i.inc = add nsw i32 %i, 1
bb:
%tmp = tail call i32 @llvm.r600.read.tidig.x() #0
%tmp4 = sext i32 %tmp to i64
- %tmp5 = getelementptr inbounds i32 addrspace(1)* %arg3, i64 %tmp4
+ %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg3, i64 %tmp4
%tmp6 = load i32 addrspace(1)* %tmp5, align 4
%tmp7 = icmp sgt i32 %tmp6, 0
%tmp8 = sext i32 %tmp6 to i64
bb10: ; preds = %bb, %bb20
%tmp11 = phi i64 [ %tmp23, %bb20 ], [ 0, %bb ]
%tmp12 = add nsw i64 %tmp11, %tmp4
- %tmp13 = getelementptr inbounds i32 addrspace(1)* %arg1, i64 %tmp12
+ %tmp13 = getelementptr inbounds i32, i32 addrspace(1)* %arg1, i64 %tmp12
%tmp14 = load i32 addrspace(1)* %tmp13, align 4
- %tmp15 = getelementptr inbounds i32 addrspace(1)* %arg2, i64 %tmp12
+ %tmp15 = getelementptr inbounds i32, i32 addrspace(1)* %arg2, i64 %tmp12
%tmp16 = load i32 addrspace(1)* %tmp15, align 4
%tmp17 = icmp ne i32 %tmp14, -1
%tmp18 = icmp ne i32 %tmp16, -1
bb20: ; preds = %bb10
%tmp21 = add nsw i32 %tmp16, %tmp14
- %tmp22 = getelementptr inbounds i32 addrspace(1)* %arg, i64 %tmp12
+ %tmp22 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp12
store i32 %tmp21, i32 addrspace(1)* %tmp22, align 4
%tmp23 = add nuw nsw i64 %tmp11, 1
%tmp24 = icmp slt i64 %tmp23, %tmp8
define void @vector_read(i32 addrspace(1)* %out, i32 %index) {
entry:
%0 = alloca [4 x i32]
- %x = getelementptr [4 x i32]* %0, i32 0, i32 0
- %y = getelementptr [4 x i32]* %0, i32 0, i32 1
- %z = getelementptr [4 x i32]* %0, i32 0, i32 2
- %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ %x = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 3
store i32 0, i32* %x
store i32 1, i32* %y
store i32 2, i32* %z
store i32 3, i32* %w
- %1 = getelementptr [4 x i32]* %0, i32 0, i32 %index
+ %1 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 %index
%2 = load i32* %1
store i32 %2, i32 addrspace(1)* %out
ret void
define void @vector_write(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
%0 = alloca [4 x i32]
- %x = getelementptr [4 x i32]* %0, i32 0, i32 0
- %y = getelementptr [4 x i32]* %0, i32 0, i32 1
- %z = getelementptr [4 x i32]* %0, i32 0, i32 2
- %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ %x = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 3
store i32 0, i32* %x
store i32 0, i32* %y
store i32 0, i32* %z
store i32 0, i32* %w
- %1 = getelementptr [4 x i32]* %0, i32 0, i32 %w_index
+ %1 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 %w_index
store i32 1, i32* %1
- %2 = getelementptr [4 x i32]* %0, i32 0, i32 %r_index
+ %2 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 %r_index
%3 = load i32* %2
store i32 %3, i32 addrspace(1)* %out
ret void
define void @bitcast_gep(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
%0 = alloca [4 x i32]
- %x = getelementptr [4 x i32]* %0, i32 0, i32 0
- %y = getelementptr [4 x i32]* %0, i32 0, i32 1
- %z = getelementptr [4 x i32]* %0, i32 0, i32 2
- %w = getelementptr [4 x i32]* %0, i32 0, i32 3
+ %x = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 0
+ %y = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 1
+ %z = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 2
+ %w = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 3
store i32 0, i32* %x
store i32 0, i32* %y
store i32 0, i32* %z
store i32 0, i32* %w
- %1 = getelementptr [4 x i32]* %0, i32 0, i32 1
+ %1 = getelementptr [4 x i32], [4 x i32]* %0, i32 0, i32 1
%2 = bitcast i32* %1 to [4 x i32]*
- %3 = getelementptr [4 x i32]* %2, i32 0, i32 0
+ %3 = getelementptr [4 x i32], [4 x i32]* %2, i32 0, i32 0
%4 = load i32* %3
store i32 %4, i32 addrspace(1)* %out
ret void
br i1 %tmp, label %if, label %else
if: ; preds = %entry
- %tmp1 = getelementptr i32 addrspace(1)* %out, i32 1
+ %tmp1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
%tmp2 = extractelement <4 x i32> %sgpr, i32 1
store i32 %tmp2, i32 addrspace(1)* %out
br label %endif
; CHECK: s_endpgm
define void @main(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, <16 x i8> addrspace(2)* inreg %arg3, <16 x i8> addrspace(2)* inreg %arg4, i32 inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(2)* inreg %constptr) #0 {
main_body:
- %tmp = getelementptr <16 x i8> addrspace(2)* %arg3, i32 0
+ %tmp = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 0
%tmp10 = load <16 x i8> addrspace(2)* %tmp, !tbaa !0
%tmp11 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp10, i32 0, i32 %arg6)
%tmp12 = extractelement <4 x float> %tmp11, i32 0
%tmp14 = extractelement <4 x float> %tmp11, i32 2
; %tmp15 = extractelement <4 x float> %tmp11, i32 3
%tmp15 = load float addrspace(2)* %constptr, align 4 ; Force waiting for expcnt and lgkmcnt
- %tmp16 = getelementptr <16 x i8> addrspace(2)* %arg3, i32 1
+ %tmp16 = getelementptr <16 x i8>, <16 x i8> addrspace(2)* %arg3, i32 1
%tmp17 = load <16 x i8> addrspace(2)* %tmp16, !tbaa !0
%tmp18 = call <4 x float> @llvm.SI.vs.load.input(<16 x i8> %tmp17, i32 0, i32 %arg6)
%tmp19 = extractelement <4 x float> %tmp18, i32 0
%z.i8.i = tail call i32 @llvm.r600.read.tidig.z() #1
%add.i = add i32 %z.i8.i, %mul33.i
%add13 = add i32 %add.i, %add
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %add13
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %add13
store i32 %mul3, i32 addrspace(1)* %arrayidx, align 4
ret void
}
;CHECK-LABEL: make_foo:
;CHECK: ld [%sp+64], {{.+}}
;CHECK: jmp %o7+12
- %0 = getelementptr inbounds %struct.foo_t* %agg.result, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %agg.result, i32 0, i32 0
store i32 %a, i32* %0, align 4
- %1 = getelementptr inbounds %struct.foo_t* %agg.result, i32 0, i32 1
+ %1 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %agg.result, i32 0, i32 1
store i32 %b, i32* %1, align 4
- %2 = getelementptr inbounds %struct.foo_t* %agg.result, i32 0, i32 2
+ %2 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %agg.result, i32 0, i32 2
store i32 %c, i32* %2, align 4
ret void
}
;CHECK: unimp 12
%f = alloca %struct.foo_t, align 8
call void @make_foo(%struct.foo_t* noalias sret %f, i32 10, i32 20, i32 30) nounwind
- %0 = getelementptr inbounds %struct.foo_t* %f, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 0
%1 = load i32* %0, align 8
- %2 = getelementptr inbounds %struct.foo_t* %f, i32 0, i32 1
+ %2 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 1
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds %struct.foo_t* %f, i32 0, i32 2
+ %4 = getelementptr inbounds %struct.foo_t, %struct.foo_t* %f, i32 0, i32 2
%5 = load i32* %4, align 8
%6 = add nsw i32 %3, %1
%7 = add nsw i32 %6, %5
define i32 @test_large_stack() {
entry:
%buffer1 = alloca [16384 x i8], align 8
- %buffer1.sub = getelementptr inbounds [16384 x i8]* %buffer1, i32 0, i32 0
+ %buffer1.sub = getelementptr inbounds [16384 x i8], [16384 x i8]* %buffer1, i32 0, i32 0
%0 = call i32 @use_buf(i32 16384, i8* %buffer1.sub)
ret i32 %0
}
; CHECK: sth [[R]], [%i2+40]
; CHECK: stb [[R]], [%i3+-20]
define void @stores(i64* %p, i32* %q, i16* %r, i8* %s) {
- %p1 = getelementptr i64* %p, i64 1
- %p2 = getelementptr i64* %p, i64 2
+ %p1 = getelementptr i64, i64* %p, i64 1
+ %p2 = getelementptr i64, i64* %p, i64 2
%pv = load i64* %p1
store i64 %pv, i64* %p2
- %q2 = getelementptr i32* %q, i32 -2
+ %q2 = getelementptr i32, i32* %q, i32 -2
%qv = trunc i64 %pv to i32
store i32 %qv, i32* %q2
- %r2 = getelementptr i16* %r, i16 20
+ %r2 = getelementptr i16, i16* %r, i16 20
%rv = trunc i64 %pv to i16
store i16 %rv, i16* %r2
- %s2 = getelementptr i8* %s, i8 -20
+ %s2 = getelementptr i8, i8* %s, i8 -20
%sv = trunc i64 %pv to i8
store i8 %sv, i8* %s2
define void @access_fi() {
entry:
%b = alloca [32 x i8], align 1
- %arraydecay = getelementptr inbounds [32 x i8]* %b, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %b, i64 0, i64 0
call void @g(i8* %arraydecay) #2
ret void
}
define i64 @store_zero(i64* nocapture %a, i64* nocapture %b) {
entry:
store i64 0, i64* %a, align 8
- %0 = getelementptr inbounds i64* %b, i32 1
+ %0 = getelementptr inbounds i64, i64* %b, i32 1
store i64 0, i64* %0, align 8
ret i64 0
}
define i32 @store_zero(i32* %a, i32* %b) {
entry:
store i32 0, i32* %a, align 4
- %0 = getelementptr inbounds i32* %b, i32 1
+ %0 = getelementptr inbounds i32, i32* %b, i32 1
store i32 0, i32* %0, align 4
ret i32 0
}
entry:
%array = alloca [2 x i32], align 4
%0 = sub nsw i32 %b, %c
- %1 = getelementptr inbounds [2 x i32]* %array, i32 0, i32 0
+ %1 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 0
store i32 1, i32* %1, align 4
- %2 = getelementptr inbounds [2 x i32]* %array, i32 0, i32 1
+ %2 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 1
store i32 2, i32* %2, align 4
- %3 = getelementptr inbounds [2 x i32]* %array, i32 0, i32 %a
+ %3 = getelementptr inbounds [2 x i32], [2 x i32]* %array, i32 0, i32 %a
%4 = load i32* %3, align 4
ret i32 %4
}
; Function Attrs: nounwind
define i32 @foo(%struct.jmpbuf_env* byval %inbuf) #0 {
entry:
- %0 = getelementptr inbounds %struct.jmpbuf_env* %inbuf, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 0
store i32 0, i32* %0, align 4, !tbaa !4
- %1 = getelementptr inbounds %struct.jmpbuf_env* %inbuf, i32 0, i32 1
+ %1 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 1
store i32 1, i32* %1, align 4, !tbaa !4
- %2 = getelementptr inbounds %struct.jmpbuf_env* %inbuf, i32 0, i32 2, i32 0
+ %2 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 2, i32 0
%3 = call i32 @_setjmp(%struct.__jmp_buf_tag* %2) #2
- %4 = getelementptr inbounds %struct.jmpbuf_env* %inbuf, i32 0, i32 3
+ %4 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 3
store i32 %3, i32* %4, align 4, !tbaa !4
store %struct.jmpbuf_env* %inbuf, %struct.jmpbuf_env** @jenv, align 4, !tbaa !3
%5 = load i32* %1, align 4, !tbaa !4
%cm80 = zext i1 %cmp0 to i64
store i64 %cm80, i64* %p, align 8
tail call void asm sideeffect "", "~{i0},~{i1},~{i2},~{i3},~{i4},~{i5},~{g2},~{g3},~{g4},~{g5},~{l0},~{l1},~{l2},~{l3},~{l4},~{l5},~{l6},~{l7},~{o0},~{o1},~{o2},~{o3},~{o4},~{o5},~{o7}"()
- %arrayidx1 = getelementptr inbounds i64* %p, i64 1
+ %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1
%val = load i64* %arrayidx1
%cmp = icmp ult i64 %val, 385672958347594845
%cm8 = select i1 %cmp, i64 10, i64 20
for.cond:
%fmt.addr.0 = phi i8* [ %fmt, %entry ], [ %incdec.ptr, %for.cond.backedge ]
%sum.addr.0 = phi double [ %sum, %entry ], [ %sum.addr.0.be, %for.cond.backedge ]
- %incdec.ptr = getelementptr inbounds i8* %fmt.addr.0, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %fmt.addr.0, i64 1
%0 = load i8* %fmt.addr.0, align 1
%conv = sext i8 %0 to i32
switch i32 %conv, label %sw.default [
; CHECK-FP: lgr %r11, %r15
; CHECK-FP: lmg %r6, %r15, 224(%r11)
%a = alloca i8, i64 %length
- %b = getelementptr i8 *%a, i64 1
+ %b = getelementptr i8, i8 *%a, i64 1
%cindex = add i64 %index, 3919
- %c = getelementptr i8 *%a, i64 %cindex
+ %c = getelementptr i8, i8 *%a, i64 %cindex
%dindex = add i64 %index, 3920
- %d = getelementptr i8 *%a, i64 %dindex
+ %d = getelementptr i8, i8 *%a, i64 %dindex
%eindex = add i64 %index, 4095
- %e = getelementptr i8 *%a, i64 %eindex
+ %e = getelementptr i8, i8 *%a, i64 %eindex
%count = call i64 @bar(i8 *%a, i8 *%b, i8 *%c, i8 *%d, i8 *%e, i64 0, i64 0)
%res = add i64 %count, 1
ret i64 %res
; CHECK-E: stcy [[TMP]], 4096({{%r3,%r2|%r2,%r3}})
%a = alloca i8, i64 %length
store volatile i8 0, i8 *%a
- %b = getelementptr i8 *%a, i64 4095
+ %b = getelementptr i8, i8 *%a, i64 4095
store volatile i8 1, i8 *%b
- %c = getelementptr i8 *%a, i64 %index
+ %c = getelementptr i8, i8 *%a, i64 %index
store volatile i8 2, i8 *%c
- %d = getelementptr i8 *%c, i64 4095
+ %d = getelementptr i8, i8 *%c, i64 4095
store volatile i8 3, i8 *%d
- %e = getelementptr i8 *%d, i64 1
+ %e = getelementptr i8, i8 *%d, i64 1
store volatile i8 4, i8 *%e
%count = call i64 @bar(i8 *%a)
%res = add i64 %count, 1
; CHECK-LABEL: f3:
; CHECK: n %r2, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK-LABEL: f4:
; CHECK: ny %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK-LABEL: f5:
; CHECK: ny %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK: agfi %r3, 524288
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK-LABEL: f7:
; CHECK: ny %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK-LABEL: f8:
; CHECK: ny %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK: agfi %r3, -524292
; CHECK: n %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%and = and i32 %a, %b
ret i32 %and
; CHECK: brasl %r14, foo@PLT
; CHECK: n %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: ng %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%and = and i64 %a, %b
ret i64 %and
; CHECK: agfi %r3, 524288
; CHECK: ng %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%and = and i64 %a, %b
ret i64 %and
; CHECK-LABEL: f5:
; CHECK: ng %r2, -8(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%and = and i64 %a, %b
ret i64 %and
; CHECK-LABEL: f6:
; CHECK: ng %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%and = and i64 %a, %b
ret i64 %and
; CHECK: agfi %r3, -524296
; CHECK: ng %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%and = and i64 %a, %b
ret i64 %and
; CHECK: brasl %r14, foo@PLT
; CHECK: ng %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f5:
; CHECK: ni 4095(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK-LABEL: f6:
; CHECK: niy 4096(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK-LABEL: f7:
; CHECK: niy 524287(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK: agfi %r2, 524288
; CHECK: ni 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK-LABEL: f9:
; CHECK: niy -1(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK-LABEL: f10:
; CHECK: niy -524288(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK: agfi %r2, -524289
; CHECK: ni 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%val = load i8 *%ptr
%and = and i8 %val, 127
store i8 %and, i8 *%ptr
; CHECK-LABEL: f1:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%old = load i8 *%ptr2
%and = and i8 %val, %old
; CHECK-LABEL: f2:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%old = load i8 *%ptr2
%and = and i8 %old, %val
; CHECK-LABEL: f3:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%extval = zext i8 %val to i32
%old = load i8 *%ptr2
; CHECK-LABEL: f4:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%extval = sext i8 %val to i32
%old = load i8 *%ptr2
; CHECK-LABEL: f5:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%extval = sext i8 %val to i32
%old = load i8 *%ptr2
; CHECK-LABEL: f6:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%extval = zext i8 %val to i32
%old = load i8 *%ptr2
; CHECK-LABEL: f7:
; CHECK: nc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%extval = sext i8 %val to i64
%old = load i8 *%ptr2
; CHECK-LABEL: f8:
; CHECK: nc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%old = load i16 *%ptr2
%and = and i16 %val, %old
; CHECK-LABEL: f9:
; CHECK: nc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%extval = zext i16 %val to i32
%old = load i16 *%ptr2
; CHECK-LABEL: f10:
; CHECK: nc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%extval = sext i16 %val to i64
%old = load i16 *%ptr2
; CHECK-LABEL: f11:
; CHECK: nc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
%old = load i32 *%ptr2
%and = and i32 %old, %val
; CHECK-LABEL: f12:
; CHECK: nc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
%extval = sext i32 %val to i64
%old = load i32 *%ptr2
; CHECK-LABEL: f13:
; CHECK: nc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
%old = load i64 *%ptr2
%and = and i64 %old, %val
; CHECK-LABEL: f14:
; CHECK-NOT: nc
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load volatile i64 *%ptr1
%old = load i64 *%ptr2
%and = and i64 %old, %val
; CHECK-LABEL: f15:
; CHECK-NOT: nc
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
%old = load volatile i64 *%ptr2
%and = and i64 %old, %val
; CHECK-LABEL: f16:
; CHECK-NOT: nc
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
%old = load i64 *%ptr2
%and = and i64 %old, %val
; CHECK-LABEL: f24:
; CHECK: nc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1, align 1
%old = load i64 *%ptr2, align 1
%and = and i64 %old, %val
; CHECK-DAG: stfh [[REG3]], 4096(%r2)
; CHECK-DAG: sty [[REG4]], 524284(%r3)
; CHECK: br %r14
- %ptr3 = getelementptr i32 *%ptr1, i64 1024
- %ptr4 = getelementptr i32 *%ptr2, i64 131071
+ %ptr3 = getelementptr i32, i32 *%ptr1, i64 1024
+ %ptr4 = getelementptr i32, i32 *%ptr2, i64 131071
%old1 = load i32 *%ptr1
%old2 = load i32 *%ptr2
%old3 = load i32 *%ptr3
; CHECK-DAG: lb [[REG4:%r[0-5]]], 524287(%r3)
; CHECK: blah [[REG1]], [[REG2]]
; CHECK: br %r14
- %ptr3 = getelementptr i8 *%ptr1, i64 4096
- %ptr4 = getelementptr i8 *%ptr2, i64 524287
+ %ptr3 = getelementptr i8, i8 *%ptr1, i64 4096
+ %ptr4 = getelementptr i8, i8 *%ptr2, i64 524287
%val1 = load i8 *%ptr1
%val2 = load i8 *%ptr2
%val3 = load i8 *%ptr3
; CHECK-DAG: lhy [[REG4:%r[0-5]]], 524286(%r3)
; CHECK: blah [[REG1]], [[REG2]]
; CHECK: br %r14
- %ptr3 = getelementptr i16 *%ptr1, i64 2048
- %ptr4 = getelementptr i16 *%ptr2, i64 262143
+ %ptr3 = getelementptr i16, i16 *%ptr1, i64 2048
+ %ptr4 = getelementptr i16, i16 *%ptr2, i64 262143
%val1 = load i16 *%ptr1
%val2 = load i16 *%ptr2
%val3 = load i16 *%ptr3
; CHECK-DAG: llc [[REG4:%r[0-5]]], 524287(%r3)
; CHECK: blah [[REG1]], [[REG2]]
; CHECK: br %r14
- %ptr3 = getelementptr i8 *%ptr1, i64 4096
- %ptr4 = getelementptr i8 *%ptr2, i64 524287
+ %ptr3 = getelementptr i8, i8 *%ptr1, i64 4096
+ %ptr4 = getelementptr i8, i8 *%ptr2, i64 524287
%val1 = load i8 *%ptr1
%val2 = load i8 *%ptr2
%val3 = load i8 *%ptr3
; CHECK-DAG: llh [[REG4:%r[0-5]]], 524286(%r3)
; CHECK: blah [[REG1]], [[REG2]]
; CHECK: br %r14
- %ptr3 = getelementptr i16 *%ptr1, i64 2048
- %ptr4 = getelementptr i16 *%ptr2, i64 262143
+ %ptr3 = getelementptr i16, i16 *%ptr1, i64 2048
+ %ptr4 = getelementptr i16, i16 *%ptr2, i64 262143
%val1 = load i16 *%ptr1
%val2 = load i16 *%ptr2
%val3 = load i16 *%ptr3
%res2 = extractvalue { i32, i32 } %res, 1
%trunc1 = trunc i32 %res1 to i8
%trunc2 = trunc i32 %res2 to i8
- %ptr3 = getelementptr i8 *%ptr1, i64 4096
- %ptr4 = getelementptr i8 *%ptr2, i64 524287
+ %ptr3 = getelementptr i8, i8 *%ptr1, i64 4096
+ %ptr4 = getelementptr i8, i8 *%ptr2, i64 524287
store i8 %trunc1, i8 *%ptr1
store i8 %trunc2, i8 *%ptr2
store i8 %trunc1, i8 *%ptr3
%res2 = extractvalue { i32, i32 } %res, 1
%trunc1 = trunc i32 %res1 to i16
%trunc2 = trunc i32 %res2 to i16
- %ptr3 = getelementptr i16 *%ptr1, i64 2048
- %ptr4 = getelementptr i16 *%ptr2, i64 262143
+ %ptr3 = getelementptr i16, i16 *%ptr1, i64 2048
+ %ptr4 = getelementptr i16, i16 *%ptr2, i64 262143
store i16 %trunc1, i16 *%ptr1
store i16 %trunc2, i16 *%ptr2
store i16 %trunc1, i16 *%ptr3
; CHECK-LABEL: f3:
; CHECK: laa %r2, %r4, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131071
+ %ptr = getelementptr i32, i32 *%src, i32 131071
%res = atomicrmw add i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, 524288
; CHECK: laa %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131072
+ %ptr = getelementptr i32, i32 *%src, i32 131072
%res = atomicrmw add i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f5:
; CHECK: laa %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131072
+ %ptr = getelementptr i32, i32 *%src, i32 -131072
%res = atomicrmw add i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, -524292
; CHECK: laa %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131073
+ %ptr = getelementptr i32, i32 *%src, i32 -131073
%res = atomicrmw add i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f3:
; CHECK: laag %r2, %r4, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw add i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, 524288
; CHECK: laag %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw add i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f5:
; CHECK: laag %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw add i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, -524296
; CHECK: laag %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw add i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f3:
; CHECK: lan %r2, %r4, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131071
+ %ptr = getelementptr i32, i32 *%src, i32 131071
%res = atomicrmw and i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, 524288
; CHECK: lan %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131072
+ %ptr = getelementptr i32, i32 *%src, i32 131072
%res = atomicrmw and i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f5:
; CHECK: lan %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131072
+ %ptr = getelementptr i32, i32 *%src, i32 -131072
%res = atomicrmw and i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, -524292
; CHECK: lan %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131073
+ %ptr = getelementptr i32, i32 *%src, i32 -131073
%res = atomicrmw and i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f3:
; CHECK: lang %r2, %r4, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw and i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, 524288
; CHECK: lang %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw and i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f5:
; CHECK: lang %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw and i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, -524296
; CHECK: lang %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw and i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: l %r2, 4092(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, 4096(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, 524284(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, -4(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, -524288(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%res = atomicrmw min i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: lg %r2, 524280(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw min i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lg %r2, 0(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw min i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lg %r2, -524288(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw min i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lg %r2, 0(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw min i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f3:
; CHECK: lao %r2, %r4, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131071
+ %ptr = getelementptr i32, i32 *%src, i32 131071
%res = atomicrmw or i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, 524288
; CHECK: lao %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131072
+ %ptr = getelementptr i32, i32 *%src, i32 131072
%res = atomicrmw or i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f5:
; CHECK: lao %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131072
+ %ptr = getelementptr i32, i32 *%src, i32 -131072
%res = atomicrmw or i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, -524292
; CHECK: lao %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131073
+ %ptr = getelementptr i32, i32 *%src, i32 -131073
%res = atomicrmw or i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f3:
; CHECK: laog %r2, %r4, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw or i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, 524288
; CHECK: laog %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw or i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f5:
; CHECK: laog %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw or i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, -524296
; CHECK: laog %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw or i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lcr [[NEG:%r[0-5]]], %r4
; CHECK: laa %r2, [[NEG]], 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131071
+ %ptr = getelementptr i32, i32 *%src, i32 131071
%res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-DAG: agfi %r3, 524288
; CHECK: laa %r2, [[NEG]], 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131072
+ %ptr = getelementptr i32, i32 *%src, i32 131072
%res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: lcr [[NEG:%r[0-5]]], %r4
; CHECK: laa %r2, [[NEG]], -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131072
+ %ptr = getelementptr i32, i32 *%src, i32 -131072
%res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-DAG: agfi %r3, -524292
; CHECK: laa %r2, [[NEG]], 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131073
+ %ptr = getelementptr i32, i32 *%src, i32 -131073
%res = atomicrmw sub i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: lcgr [[NEG:%r[0-5]]], %r4
; CHECK: laag %r2, [[NEG]], 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-DAG: agfi %r3, 524288
; CHECK: laag %r2, [[NEG]], 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lcgr [[NEG:%r[0-5]]], %r4
; CHECK: laag %r2, [[NEG]], -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-DAG: agfi %r3, -524296
; CHECK: laag %r2, [[NEG]], 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw sub i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: l %r2, 4092(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, 4096(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, 524284(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, -4(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: ly %r2, -524288(%r3)
; CHECK: csy %r2, {{%r[0-9]+}}, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: l %r2, 0(%r3)
; CHECK: cs %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%res = atomicrmw xchg i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: lg %r2, 524280(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lg %r2, 0(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lg %r2, -524288(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: lg %r2, 0(%r3)
; CHECK: csg %r2, {{%r[0-9]+}}, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw xchg i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f3:
; CHECK: lax %r2, %r4, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131071
+ %ptr = getelementptr i32, i32 *%src, i32 131071
%res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, 524288
; CHECK: lax %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 131072
+ %ptr = getelementptr i32, i32 *%src, i32 131072
%res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f5:
; CHECK: lax %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131072
+ %ptr = getelementptr i32, i32 *%src, i32 -131072
%res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK: agfi %r3, -524292
; CHECK: lax %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i32 -131073
+ %ptr = getelementptr i32, i32 *%src, i32 -131073
%res = atomicrmw xor i32 *%ptr, i32 %b seq_cst
ret i32 %res
}
; CHECK-LABEL: f3:
; CHECK: laxg %r2, %r4, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, 524288
; CHECK: laxg %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK-LABEL: f5:
; CHECK: laxg %r2, %r4, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
; CHECK: agfi %r3, -524296
; CHECK: laxg %r2, %r4, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%res = atomicrmw xor i64 *%ptr, i64 %b seq_cst
ret i64 %res
}
br label %loop
loop:
%val = call i32 @foo()
- %targetptr2 = getelementptr i8 *%targetptr1, i64 1
+ %targetptr2 = getelementptr i8, i8 *%targetptr1, i64 1
%byte1 = load i8 *%targetptr1
%byte2 = load i8 *%targetptr2
%ext1 = zext i8 %byte1 to i32
br label %loop
loop:
%val = call i32 @foo()
- %targetptr2 = getelementptr i16 *%targetptr1, i64 1
+ %targetptr2 = getelementptr i16, i16 *%targetptr1, i64 1
%half1 = load i16 *%targetptr1
%half2 = load i16 *%targetptr2
%ext1 = zext i16 %half1 to i32
; CHECK-LABEL: f2:
; CHECK: lrv %r2, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%a = load i32 *%ptr
%swapped = call i32 @llvm.bswap.i32(i32 %a)
ret i32 %swapped
; CHECK: agfi %r2, 524288
; CHECK: lrv %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%a = load i32 *%ptr
%swapped = call i32 @llvm.bswap.i32(i32 %a)
ret i32 %swapped
; CHECK-LABEL: f4:
; CHECK: lrv %r2, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%a = load i32 *%ptr
%swapped = call i32 @llvm.bswap.i32(i32 %a)
ret i32 %swapped
; CHECK-LABEL: f5:
; CHECK: lrv %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%a = load i32 *%ptr
%swapped = call i32 @llvm.bswap.i32(i32 %a)
ret i32 %swapped
; CHECK: agfi %r2, -524292
; CHECK: lrv %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%a = load i32 *%ptr
%swapped = call i32 @llvm.bswap.i32(i32 %a)
ret i32 %swapped
; CHECK-LABEL: f2:
; CHECK: lrvg %r2, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%a = load i64 *%ptr
%swapped = call i64 @llvm.bswap.i64(i64 %a)
ret i64 %swapped
; CHECK: agfi %r2, 524288
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%a = load i64 *%ptr
%swapped = call i64 @llvm.bswap.i64(i64 %a)
ret i64 %swapped
; CHECK-LABEL: f4:
; CHECK: lrvg %r2, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%a = load i64 *%ptr
%swapped = call i64 @llvm.bswap.i64(i64 %a)
ret i64 %swapped
; CHECK-LABEL: f5:
; CHECK: lrvg %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%a = load i64 *%ptr
%swapped = call i64 @llvm.bswap.i64(i64 %a)
ret i64 %swapped
; CHECK: agfi %r2, -524296
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%a = load i64 *%ptr
%swapped = call i64 @llvm.bswap.i64(i64 %a)
ret i64 %swapped
; CHECK-LABEL: f2:
; CHECK: strv %r3, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 131071
+ %ptr = getelementptr i32, i32 *%dst, i64 131071
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
; CHECK: agfi %r2, 524288
; CHECK: strv %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 131072
+ %ptr = getelementptr i32, i32 *%dst, i64 131072
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
; CHECK-LABEL: f4:
; CHECK: strv %r3, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 -1
+ %ptr = getelementptr i32, i32 *%dst, i64 -1
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
; CHECK-LABEL: f5:
; CHECK: strv %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 -131072
+ %ptr = getelementptr i32, i32 *%dst, i64 -131072
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
; CHECK: agfi %r2, -524292
; CHECK: strv %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 -131073
+ %ptr = getelementptr i32, i32 *%dst, i64 -131073
%swapped = call i32 @llvm.bswap.i32(i32 %a)
store i32 %swapped, i32 *%ptr
ret void
; CHECK-LABEL: f2:
; CHECK: strvg %r3, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 65535
+ %ptr = getelementptr i64, i64 *%dst, i64 65535
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
; CHECK: agfi %r2, 524288
; CHECK: strvg %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 65536
+ %ptr = getelementptr i64, i64 *%dst, i64 65536
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
; CHECK-LABEL: f4:
; CHECK: strvg %r3, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 -1
+ %ptr = getelementptr i64, i64 *%dst, i64 -1
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
; CHECK-LABEL: f5:
; CHECK: strvg %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 -65536
+ %ptr = getelementptr i64, i64 *%dst, i64 -65536
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
; CHECK: agfi %r2, -524296
; CHECK: strvg %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 -65537
+ %ptr = getelementptr i64, i64 *%dst, i64 -65537
%swapped = call i64 @llvm.bswap.i64(i64 %a)
store i64 %swapped, i64 *%ptr
ret void
; CHECK-LABEL: f2:
; CHECK: cs %r2, %r3, 4092(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK-LABEL: f3:
; CHECK: csy %r2, %r3, 4096(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK-LABEL: f4:
; CHECK: csy %r2, %r3, 524284(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK: agfi %r4, 524288
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK-LABEL: f6:
; CHECK: csy %r2, %r3, -4(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK-LABEL: f7:
; CHECK: csy %r2, %r3, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK: agfi %r4, -524292
; CHECK: cs %r2, %r3, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%pair = cmpxchg i32 *%ptr, i32 %cmp, i32 %swap seq_cst seq_cst
%val = extractvalue { i32, i1 } %pair, 0
ret i32 %val
; CHECK-LABEL: f2:
; CHECK: csg %r2, %r3, 524280(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
%val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
; CHECK: agfi %r4, 524288
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
%val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
; CHECK-LABEL: f4:
; CHECK: csg %r2, %r3, -8(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
%val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
; CHECK-LABEL: f5:
; CHECK: csg %r2, %r3, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
%val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
; CHECK: agfi %r4, -524296
; CHECK: csg %r2, %r3, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%pairval = cmpxchg i64 *%ptr, i64 %cmp, i64 %swap seq_cst seq_cst
%val = extractvalue { i64, i1 } %pairval, 0
ret i64 %val
; CHECK: clfi %r4, 42
; CHECK: loche %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%cond = icmp ult i32 %limit, 42
%other = load i32 *%ptr
%res = select i1 %cond, i32 %easy, i32 %other
; CHECK: clfi %r4, 42
; CHECK: loche %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%cond = icmp ult i32 %limit, 42
%other = load i32 *%ptr
%res = select i1 %cond, i32 %easy, i32 %other
; CHECK: clfi %r4, 42
; CHECK: loche %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%cond = icmp ult i32 %limit, 42
%other = load i32 *%ptr
%res = select i1 %cond, i32 %easy, i32 %other
; CHECK: clfi %r4, 42
; CHECK: loche %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%cond = icmp ult i32 %limit, 42
%other = load i32 *%ptr
%res = select i1 %cond, i32 %easy, i32 %other
; CHECK: clgfi %r4, 42
; CHECK: locghe %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65535
+ %ptr = getelementptr i64, i64 *%base, i64 65535
%cond = icmp ult i64 %limit, 42
%other = load i64 *%ptr
%res = select i1 %cond, i64 %easy, i64 %other
; CHECK: clgfi %r4, 42
; CHECK: locghe %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65536
+ %ptr = getelementptr i64, i64 *%base, i64 65536
%cond = icmp ult i64 %limit, 42
%other = load i64 *%ptr
%res = select i1 %cond, i64 %easy, i64 %other
; CHECK: clgfi %r4, 42
; CHECK: locghe %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65536
+ %ptr = getelementptr i64, i64 *%base, i64 -65536
%cond = icmp ult i64 %limit, 42
%other = load i64 *%ptr
%res = select i1 %cond, i64 %easy, i64 %other
; CHECK: clgfi %r4, 42
; CHECK: locghe %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65537
+ %ptr = getelementptr i64, i64 *%base, i64 -65537
%cond = icmp ult i64 %limit, 42
%other = load i64 *%ptr
%res = select i1 %cond, i64 %easy, i64 %other
; CHECK: stc %r3, 4095(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i8 *%base, i64 4095
+ %ptr = getelementptr i8, i8 *%base, i64 4095
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
%res = select i1 %cond, i8 %orig, i8 %alt
; CHECK: stcy %r3, 4096(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i8 *%base, i64 4096
+ %ptr = getelementptr i8, i8 *%base, i64 4096
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
%res = select i1 %cond, i8 %orig, i8 %alt
; CHECK: stcy %r3, 524287(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i8 *%base, i64 524287
+ %ptr = getelementptr i8, i8 *%base, i64 524287
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
%res = select i1 %cond, i8 %orig, i8 %alt
; CHECK: stc %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i8 *%base, i64 524288
+ %ptr = getelementptr i8, i8 *%base, i64 524288
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
%res = select i1 %cond, i8 %orig, i8 %alt
; CHECK: stcy %r3, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i8 *%base, i64 -524288
+ %ptr = getelementptr i8, i8 *%base, i64 -524288
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
%res = select i1 %cond, i8 %orig, i8 %alt
; CHECK: stc %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i8 *%base, i64 -524289
+ %ptr = getelementptr i8, i8 *%base, i64 -524289
%cond = icmp ult i32 %limit, 420
%orig = load i8 *%ptr
%res = select i1 %cond, i8 %orig, i8 %alt
; CHECK: sth %r3, 4094(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 2047
+ %ptr = getelementptr i16, i16 *%base, i64 2047
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
%res = select i1 %cond, i16 %orig, i16 %alt
; CHECK: sthy %r3, 4096(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 2048
+ %ptr = getelementptr i16, i16 *%base, i64 2048
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
%res = select i1 %cond, i16 %orig, i16 %alt
; CHECK: sthy %r3, 524286(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 262143
+ %ptr = getelementptr i16, i16 *%base, i64 262143
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
%res = select i1 %cond, i16 %orig, i16 %alt
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 262144
+ %ptr = getelementptr i16, i16 *%base, i64 262144
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
%res = select i1 %cond, i16 %orig, i16 %alt
; CHECK: sthy %r3, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 -262144
+ %ptr = getelementptr i16, i16 *%base, i64 -262144
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
%res = select i1 %cond, i16 %orig, i16 %alt
; CHECK: sth %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 -262145
+ %ptr = getelementptr i16, i16 *%base, i64 -262145
%cond = icmp ult i32 %limit, 420
%orig = load i16 *%ptr
%res = select i1 %cond, i16 %orig, i16 %alt
; CHECK: st %r3, 4092(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1023
+ %ptr = getelementptr i32, i32 *%base, i64 1023
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: sty %r3, 4096(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1024
+ %ptr = getelementptr i32, i32 *%base, i64 1024
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: sty %r3, 524284(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: st %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: sty %r3, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: st %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%cond = icmp ult i32 %limit, 420
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: stg %r3, 524280(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65535
+ %ptr = getelementptr i64, i64 *%base, i64 65535
%cond = icmp ult i32 %limit, 420
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: stg %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65536
+ %ptr = getelementptr i64, i64 *%base, i64 65536
%cond = icmp ult i32 %limit, 420
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: stg %r3, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65536
+ %ptr = getelementptr i64, i64 *%base, i64 -65536
%cond = icmp ult i32 %limit, 420
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: stg %r3, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65537
+ %ptr = getelementptr i64, i64 *%base, i64 -65537
%cond = icmp ult i32 %limit, 420
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: ste %f0, 4092(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%cond = icmp ult i32 %limit, 420
%orig = load float *%ptr
%res = select i1 %cond, float %orig, float %alt
; CHECK: stey %f0, 4096(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%cond = icmp ult i32 %limit, 420
%orig = load float *%ptr
%res = select i1 %cond, float %orig, float %alt
; CHECK: stey %f0, 524284(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 131071
+ %ptr = getelementptr float, float *%base, i64 131071
%cond = icmp ult i32 %limit, 420
%orig = load float *%ptr
%res = select i1 %cond, float %orig, float %alt
; CHECK: ste %f0, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 131072
+ %ptr = getelementptr float, float *%base, i64 131072
%cond = icmp ult i32 %limit, 420
%orig = load float *%ptr
%res = select i1 %cond, float %orig, float %alt
; CHECK: stey %f0, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -131072
+ %ptr = getelementptr float, float *%base, i64 -131072
%cond = icmp ult i32 %limit, 420
%orig = load float *%ptr
%res = select i1 %cond, float %orig, float %alt
; CHECK: ste %f0, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -131073
+ %ptr = getelementptr float, float *%base, i64 -131073
%cond = icmp ult i32 %limit, 420
%orig = load float *%ptr
%res = select i1 %cond, float %orig, float %alt
; CHECK: std %f0, 4088(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%cond = icmp ult i32 %limit, 420
%orig = load double *%ptr
%res = select i1 %cond, double %orig, double %alt
; CHECK: stdy %f0, 4096(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%cond = icmp ult i32 %limit, 420
%orig = load double *%ptr
%res = select i1 %cond, double %orig, double %alt
; CHECK: stdy %f0, 524280(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 65535
+ %ptr = getelementptr double, double *%base, i64 65535
%cond = icmp ult i32 %limit, 420
%orig = load double *%ptr
%res = select i1 %cond, double %orig, double %alt
; CHECK: std %f0, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 65536
+ %ptr = getelementptr double, double *%base, i64 65536
%cond = icmp ult i32 %limit, 420
%orig = load double *%ptr
%res = select i1 %cond, double %orig, double %alt
; CHECK: stdy %f0, -524288(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -65536
+ %ptr = getelementptr double, double *%base, i64 -65536
%cond = icmp ult i32 %limit, 420
%orig = load double *%ptr
%res = select i1 %cond, double %orig, double %alt
; CHECK: std %f0, 0(%r2)
; CHECK: [[LABEL]]:
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -65537
+ %ptr = getelementptr double, double *%base, i64 -65537
%cond = icmp ult i32 %limit, 420
%orig = load double *%ptr
%res = select i1 %cond, double %orig, double %alt
; CHECK: clfi %r4, 42
; CHECK: stoche %r3, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%cond = icmp ult i32 %limit, 42
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: clfi %r4, 42
; CHECK: stoche %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%cond = icmp ult i32 %limit, 42
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: clfi %r4, 42
; CHECK: stoche %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%cond = icmp ult i32 %limit, 42
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: clfi %r4, 42
; CHECK: stoche %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%cond = icmp ult i32 %limit, 42
%orig = load i32 *%ptr
%res = select i1 %cond, i32 %orig, i32 %alt
; CHECK: clfi %r4, 42
; CHECK: stocghe %r3, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65535
+ %ptr = getelementptr i64, i64 *%base, i64 65535
%cond = icmp ult i32 %limit, 42
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: clfi %r4, 42
; CHECK: stocghe %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65536
+ %ptr = getelementptr i64, i64 *%base, i64 65536
%cond = icmp ult i32 %limit, 42
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: clfi %r4, 42
; CHECK: stocghe %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65536
+ %ptr = getelementptr i64, i64 *%base, i64 -65536
%cond = icmp ult i32 %limit, 42
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK: clfi %r4, 42
; CHECK: stocghe %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65537
+ %ptr = getelementptr i64, i64 *%base, i64 -65537
%cond = icmp ult i32 %limit, 42
%orig = load i64 *%ptr
%res = select i1 %cond, i64 %orig, i64 %alt
; CHECK-LABEL: f3:
; CHECK: aeb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%res = fadd float %f1, %f2
ret float %res
; CHECK: aghi %r2, 4096
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%res = fadd float %f1, %f2
ret float %res
; CHECK: aghi %r2, -4
; CHECK: aeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%res = fadd float %f1, %f2
ret float %res
; CHECK: sllg %r1, %r3, 2
; CHECK: aeb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float *%ptr2
%res = fadd float %f1, %f2
ret float %res
; CHECK: brasl %r14, foo@PLT
; CHECK: aeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
- %ptr10 = getelementptr float *%ptr0, i64 20
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
+ %ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK-LABEL: f3:
; CHECK: adb %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%res = fadd double %f1, %f2
ret double %res
; CHECK: aghi %r2, 4096
; CHECK: adb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%res = fadd double %f1, %f2
ret double %res
; CHECK: aghi %r2, -8
; CHECK: adb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%res = fadd double %f1, %f2
ret double %res
; CHECK: sllg %r1, %r3, 3
; CHECK: adb %f0, 800(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%f2 = load double *%ptr2
%res = fadd double %f1, %f2
ret double %res
; CHECK: brasl %r14, foo@PLT
; CHECK: adb %f0, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
- %ptr10 = getelementptr double *%ptr0, i64 20
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
+ %ptr10 = getelementptr double, double *%ptr0, i64 20
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%cond = fcmp oeq float %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%cond = fcmp oeq float %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%cond = fcmp oeq float %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float *%ptr2
%cond = fcmp oeq float %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK: brasl %r14, foo@PLT
; CHECK: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
- %ptr10 = getelementptr float *%ptr0, i64 20
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
+ %ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%cond = fcmp oeq double %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%cond = fcmp oeq double %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%cond = fcmp oeq double %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK-NEXT: je
; CHECK: lgr %r2, %r3
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%f2 = load double *%ptr2
%cond = fcmp oeq double %f1, %f2
%res = select i1 %cond, i64 %a, i64 %b
; CHECK: brasl %r14, foo@PLT
; CHECK: cdb {{%f[0-9]+}}, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
- %ptr10 = getelementptr double *%ptr0, i64 20
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
+ %ptr10 = getelementptr double, double *%ptr0, i64 20
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK-LABEL: f3:
; CHECK: ldeb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%val = load float *%ptr
%res = fpext float %val to double
ret double %res
; CHECK: aghi %r2, 4096
; CHECK: ldeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%val = load float *%ptr
%res = fpext float %val to double
ret double %res
; CHECK: aghi %r2, -4
; CHECK: ldeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%val = load float *%ptr
%res = fpext float %val to double
ret double %res
; CHECK: sllg %r1, %r3, 2
; CHECK: ldeb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float *%ptr2
%res = fpext float %val to double
ret double %res
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%val = load float *%ptr
%res = fpext float %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%val = load float *%ptr
%res = fpext float %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%val = load float *%ptr
%res = fpext float %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float *%ptr2
%res = fpext float %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%val = load double *%ptr
%res = fpext double %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%val = load double *%ptr
%res = fpext double %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%val = load double *%ptr
%res = fpext double %val to fp128
store fp128 %res, fp128 *%dst
; CHECK: std %f0, 0(%r2)
; CHECK: std %f2, 8(%r2)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%val = load double *%ptr2
%res = fpext double %val to fp128
store fp128 %res, fp128 *%dst
; CHECK-LABEL: f3:
; CHECK: deb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%res = fdiv float %f1, %f2
ret float %res
; CHECK: aghi %r2, 4096
; CHECK: deb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%res = fdiv float %f1, %f2
ret float %res
; CHECK: aghi %r2, -4
; CHECK: deb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%res = fdiv float %f1, %f2
ret float %res
; CHECK: sllg %r1, %r3, 2
; CHECK: deb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float *%ptr2
%res = fdiv float %f1, %f2
ret float %res
; CHECK: brasl %r14, foo@PLT
; CHECK: deb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
- %ptr10 = getelementptr float *%ptr0, i64 20
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
+ %ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK-LABEL: f3:
; CHECK: ddb %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%res = fdiv double %f1, %f2
ret double %res
; CHECK: aghi %r2, 4096
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%res = fdiv double %f1, %f2
ret double %res
; CHECK: aghi %r2, -8
; CHECK: ddb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%res = fdiv double %f1, %f2
ret double %res
; CHECK: sllg %r1, %r3, 3
; CHECK: ddb %f0, 800(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%f2 = load double *%ptr2
%res = fdiv double %f1, %f2
ret double %res
; CHECK: brasl %r14, foo@PLT
; CHECK: ddb %f0, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
- %ptr10 = getelementptr double *%ptr0, i64 20
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
+ %ptr10 = getelementptr double, double *%ptr0, i64 20
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK-LABEL: f2:
; CHECK: le %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 1023
+ %ptr = getelementptr float, float *%src, i64 1023
%val = load float *%ptr
ret float %val
}
; CHECK-LABEL: f3:
; CHECK: ley %f0, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 1024
+ %ptr = getelementptr float, float *%src, i64 1024
%val = load float *%ptr
ret float %val
}
; CHECK-LABEL: f4:
; CHECK: ley %f0, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 131071
+ %ptr = getelementptr float, float *%src, i64 131071
%val = load float *%ptr
ret float %val
}
; CHECK: agfi %r2, 524288
; CHECK: le %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 131072
+ %ptr = getelementptr float, float *%src, i64 131072
%val = load float *%ptr
ret float %val
}
; CHECK-LABEL: f6:
; CHECK: ley %f0, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 -1
+ %ptr = getelementptr float, float *%src, i64 -1
%val = load float *%ptr
ret float %val
}
; CHECK-LABEL: f7:
; CHECK: ley %f0, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 -131072
+ %ptr = getelementptr float, float *%src, i64 -131072
%val = load float *%ptr
ret float %val
}
; CHECK: agfi %r2, -524292
; CHECK: le %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 -131073
+ %ptr = getelementptr float, float *%src, i64 -131073
%val = load float *%ptr
ret float %val
}
; CHECK-LABEL: f2:
; CHECK: ld %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 511
+ %ptr = getelementptr double, double *%src, i64 511
%val = load double *%ptr
ret double %val
}
; CHECK-LABEL: f3:
; CHECK: ldy %f0, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 512
+ %ptr = getelementptr double, double *%src, i64 512
%val = load double *%ptr
ret double %val
}
; CHECK-LABEL: f4:
; CHECK: ldy %f0, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 65535
+ %ptr = getelementptr double, double *%src, i64 65535
%val = load double *%ptr
ret double %val
}
; CHECK: agfi %r2, 524288
; CHECK: ld %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 65536
+ %ptr = getelementptr double, double *%src, i64 65536
%val = load double *%ptr
ret double %val
}
; CHECK-LABEL: f6:
; CHECK: ldy %f0, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 -1
+ %ptr = getelementptr double, double *%src, i64 -1
%val = load double *%ptr
ret double %val
}
; CHECK-LABEL: f7:
; CHECK: ldy %f0, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 -65536
+ %ptr = getelementptr double, double *%src, i64 -65536
%val = load double *%ptr
ret double %val
}
; CHECK: agfi %r2, -524296
; CHECK: ld %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 -65537
+ %ptr = getelementptr double, double *%src, i64 -65537
%val = load double *%ptr
ret double %val
}
; CHECK-LABEL: f2:
; CHECK: ste %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 1023
+ %ptr = getelementptr float, float *%src, i64 1023
store float %val, float *%ptr
ret void
}
; CHECK-LABEL: f3:
; CHECK: stey %f0, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 1024
+ %ptr = getelementptr float, float *%src, i64 1024
store float %val, float *%ptr
ret void
}
; CHECK-LABEL: f4:
; CHECK: stey %f0, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 131071
+ %ptr = getelementptr float, float *%src, i64 131071
store float %val, float *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: ste %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 131072
+ %ptr = getelementptr float, float *%src, i64 131072
store float %val, float *%ptr
ret void
}
; CHECK-LABEL: f6:
; CHECK: stey %f0, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 -1
+ %ptr = getelementptr float, float *%src, i64 -1
store float %val, float *%ptr
ret void
}
; CHECK-LABEL: f7:
; CHECK: stey %f0, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 -131072
+ %ptr = getelementptr float, float *%src, i64 -131072
store float %val, float *%ptr
ret void
}
; CHECK: agfi %r2, -524292
; CHECK: ste %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%src, i64 -131073
+ %ptr = getelementptr float, float *%src, i64 -131073
store float %val, float *%ptr
ret void
}
; CHECK-LABEL: f2:
; CHECK: std %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 511
+ %ptr = getelementptr double, double *%src, i64 511
store double %val, double *%ptr
ret void
}
; CHECK-LABEL: f3:
; CHECK: stdy %f0, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 512
+ %ptr = getelementptr double, double *%src, i64 512
store double %val, double *%ptr
ret void
}
; CHECK-LABEL: f4:
; CHECK: stdy %f0, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 65535
+ %ptr = getelementptr double, double *%src, i64 65535
store double %val, double *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: std %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 65536
+ %ptr = getelementptr double, double *%src, i64 65536
store double %val, double *%ptr
ret void
}
; CHECK-LABEL: f6:
; CHECK: stdy %f0, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 -1
+ %ptr = getelementptr double, double *%src, i64 -1
store double %val, double *%ptr
ret void
}
; CHECK-LABEL: f7:
; CHECK: stdy %f0, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 -65536
+ %ptr = getelementptr double, double *%src, i64 -65536
store double %val, double *%ptr
ret void
}
; CHECK: agfi %r2, -524296
; CHECK: std %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%src, i64 -65537
+ %ptr = getelementptr double, double *%src, i64 -65537
store double %val, double *%ptr
ret void
}
; CHECK-LABEL: f3:
; CHECK: meeb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%res = fmul float %f1, %f2
ret float %res
; CHECK: aghi %r2, 4096
; CHECK: meeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%res = fmul float %f1, %f2
ret float %res
; CHECK: aghi %r2, -4
; CHECK: meeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%res = fmul float %f1, %f2
ret float %res
; CHECK: sllg %r1, %r3, 2
; CHECK: meeb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float *%ptr2
%res = fmul float %f1, %f2
ret float %res
; CHECK: brasl %r14, foo@PLT
; CHECK: meeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
- %ptr10 = getelementptr float *%ptr0, i64 20
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
+ %ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK-LABEL: f3:
; CHECK: mdeb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%f1x = fpext float %f1 to double
%f2x = fpext float %f2 to double
; CHECK: aghi %r2, 4096
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%f1x = fpext float %f1 to double
%f2x = fpext float %f2 to double
; CHECK: aghi %r2, -4
; CHECK: mdeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%f1x = fpext float %f1 to double
%f2x = fpext float %f2 to double
; CHECK: sllg %r1, %r3, 2
; CHECK: mdeb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float *%ptr2
%f1x = fpext float %f1 to double
%f2x = fpext float %f2 to double
; CHECK: brasl %r14, foo@PLT
; CHECK: mdeb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
- %ptr10 = getelementptr float *%ptr0, i64 20
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
+ %ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK-LABEL: f3:
; CHECK: mdb %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%res = fmul double %f1, %f2
ret double %res
; CHECK: aghi %r2, 4096
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%res = fmul double %f1, %f2
ret double %res
; CHECK: aghi %r2, -8
; CHECK: mdb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%res = fmul double %f1, %f2
ret double %res
; CHECK: sllg %r1, %r3, 3
; CHECK: mdb %f0, 800(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%f2 = load double *%ptr2
%res = fmul double %f1, %f2
ret double %res
; CHECK: brasl %r14, foo@PLT
; CHECK: mdb %f0, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
- %ptr10 = getelementptr double *%ptr0, i64 20
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
+ %ptr10 = getelementptr double, double *%ptr0, i64 20
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%f1x = fpext double %f1 to fp128
%f2x = fpext double %f2 to fp128
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%f1x = fpext double %f1 to fp128
%f2x = fpext double %f2 to fp128
; CHECK: std %f0, 0(%r3)
; CHECK: std %f2, 8(%r3)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%f1x = fpext double %f1 to fp128
%f2x = fpext double %f2 to fp128
; CHECK: std %f0, 0(%r4)
; CHECK: std %f2, 8(%r4)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%f2 = load double *%ptr2
%f1x = fpext double %f1 to fp128
%f2x = fpext double %f2 to fp128
; CHECK: brasl %r14, foo@PLT
; CHECK: mxdb %f0, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
- %ptr10 = getelementptr double *%ptr0, i64 20
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
+ %ptr10 = getelementptr double, double *%ptr0, i64 20
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK: maeb %f2, %f0, 4092(%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
; CHECK: maeb %f2, %f0, 0(%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
; CHECK: maeb %f2, %f0, 0(%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
; CHECK: maeb %f2, %f0, 0(%r1,%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 %index
+ %ptr = getelementptr float, float *%base, i64 %index
%f2 = load float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
; CHECK: ler %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1023
- %ptr = getelementptr float *%base, i64 %index2
+ %ptr = getelementptr float, float *%base, i64 %index2
%f2 = load float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
; CHECK: ler %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1024
- %ptr = getelementptr float *%base, i64 %index2
+ %ptr = getelementptr float, float *%base, i64 %index2
%f2 = load float *%ptr
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %acc)
ret float %res
; CHECK: madb %f2, %f0, 4088(%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
ret double %res
; CHECK: madb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
ret double %res
; CHECK: madb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
ret double %res
; CHECK: madb %f2, %f0, 0(%r1,%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 %index
+ %ptr = getelementptr double, double *%base, i64 %index
%f2 = load double *%ptr
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
ret double %res
; CHECK: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 511
- %ptr = getelementptr double *%base, i64 %index2
+ %ptr = getelementptr double, double *%base, i64 %index2
%f2 = load double *%ptr
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
ret double %res
; CHECK: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 512
- %ptr = getelementptr double *%base, i64 %index2
+ %ptr = getelementptr double, double *%base, i64 %index2
%f2 = load double *%ptr
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %acc)
ret double %res
; CHECK: mseb %f2, %f0, 4092(%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
; CHECK: mseb %f2, %f0, 0(%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
; CHECK: mseb %f2, %f0, 0(%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
; CHECK: mseb %f2, %f0, 0(%r1,%r2)
; CHECK: ler %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 %index
+ %ptr = getelementptr float, float *%base, i64 %index
%f2 = load float *%ptr
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
; CHECK: ler %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1023
- %ptr = getelementptr float *%base, i64 %index2
+ %ptr = getelementptr float, float *%base, i64 %index2
%f2 = load float *%ptr
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
; CHECK: ler %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 1024
- %ptr = getelementptr float *%base, i64 %index2
+ %ptr = getelementptr float, float *%base, i64 %index2
%f2 = load float *%ptr
%negacc = fsub float -0.0, %acc
%res = call float @llvm.fma.f32 (float %f1, float %f2, float %negacc)
; CHECK: msdb %f2, %f0, 4088(%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%negacc = fsub double -0.0, %acc
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
; CHECK: msdb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%negacc = fsub double -0.0, %acc
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
; CHECK: msdb %f2, %f0, 0(%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%negacc = fsub double -0.0, %acc
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
; CHECK: msdb %f2, %f0, 0(%r1,%r2)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 %index
+ %ptr = getelementptr double, double *%base, i64 %index
%f2 = load double *%ptr
%negacc = fsub double -0.0, %acc
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 511
- %ptr = getelementptr double *%base, i64 %index2
+ %ptr = getelementptr double, double *%base, i64 %index2
%f2 = load double *%ptr
%negacc = fsub double -0.0, %acc
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
; CHECK: ldr %f0, %f2
; CHECK: br %r14
%index2 = add i64 %index, 512
- %ptr = getelementptr double *%base, i64 %index2
+ %ptr = getelementptr double, double *%base, i64 %index2
%f2 = load double *%ptr
%negacc = fsub double -0.0, %acc
%res = call double @llvm.fma.f64 (double %f1, double %f2, double %negacc)
; CHECK-LABEL: f3:
; CHECK: sqeb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%val = load float *%ptr
%res = call float @llvm.sqrt.f32(float %val)
ret float %res
; CHECK: aghi %r2, 4096
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%val = load float *%ptr
%res = call float @llvm.sqrt.f32(float %val)
ret float %res
; CHECK: aghi %r2, -4
; CHECK: sqeb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%val = load float *%ptr
%res = call float @llvm.sqrt.f32(float %val)
ret float %res
; CHECK: sllg %r1, %r3, 2
; CHECK: sqeb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%val = load float *%ptr2
%res = call float @llvm.sqrt.f32(float %val)
ret float %res
; CHECK-LABEL: f3:
; CHECK: sqdb %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%val = load double *%ptr
%res = call double @llvm.sqrt.f64(double %val)
ret double %res
; CHECK: aghi %r2, 4096
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%val = load double *%ptr
%res = call double @llvm.sqrt.f64(double %val)
ret double %res
; CHECK: aghi %r2, -8
; CHECK: sqdb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%val = load double *%ptr
%res = call double @llvm.sqrt.f64(double %val)
ret double %res
; CHECK: sllg %r1, %r3, 3
; CHECK: sqdb %f0, 800(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%val = load double *%ptr2
%res = call double @llvm.sqrt.f64(double %val)
ret double %res
; CHECK-LABEL: f3:
; CHECK: seb %f0, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1023
+ %ptr = getelementptr float, float *%base, i64 1023
%f2 = load float *%ptr
%res = fsub float %f1, %f2
ret float %res
; CHECK: aghi %r2, 4096
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 1024
+ %ptr = getelementptr float, float *%base, i64 1024
%f2 = load float *%ptr
%res = fsub float %f1, %f2
ret float %res
; CHECK: aghi %r2, -4
; CHECK: seb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr float *%base, i64 -1
+ %ptr = getelementptr float, float *%base, i64 -1
%f2 = load float *%ptr
%res = fsub float %f1, %f2
ret float %res
; CHECK: sllg %r1, %r3, 2
; CHECK: seb %f0, 400(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr float *%base, i64 %index
- %ptr2 = getelementptr float *%ptr1, i64 100
+ %ptr1 = getelementptr float, float *%base, i64 %index
+ %ptr2 = getelementptr float, float *%ptr1, i64 100
%f2 = load float *%ptr2
%res = fsub float %f1, %f2
ret float %res
; CHECK: brasl %r14, foo@PLT
; CHECK: seb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
- %ptr10 = getelementptr float *%ptr0, i64 20
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
+ %ptr10 = getelementptr float, float *%ptr0, i64 20
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK-LABEL: f3:
; CHECK: sdb %f0, 4088(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 511
+ %ptr = getelementptr double, double *%base, i64 511
%f2 = load double *%ptr
%res = fsub double %f1, %f2
ret double %res
; CHECK: aghi %r2, 4096
; CHECK: sdb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 512
+ %ptr = getelementptr double, double *%base, i64 512
%f2 = load double *%ptr
%res = fsub double %f1, %f2
ret double %res
; CHECK: aghi %r2, -8
; CHECK: sdb %f0, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr double *%base, i64 -1
+ %ptr = getelementptr double, double *%base, i64 -1
%f2 = load double *%ptr
%res = fsub double %f1, %f2
ret double %res
; CHECK: sllg %r1, %r3, 3
; CHECK: sdb %f0, 800(%r1,%r2)
; CHECK: br %r14
- %ptr1 = getelementptr double *%base, i64 %index
- %ptr2 = getelementptr double *%ptr1, i64 100
+ %ptr1 = getelementptr double, double *%base, i64 %index
+ %ptr2 = getelementptr double, double *%ptr1, i64 100
%f2 = load double *%ptr2
%res = fsub double %f1, %f2
ret double %res
; CHECK: brasl %r14, foo@PLT
; CHECK: sdb %f0, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
- %ptr10 = getelementptr double *%ptr0, i64 20
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
+ %ptr10 = getelementptr double, double *%ptr0, i64 20
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK: aghi %r15, 32760
; CHECK: br %r14
%y = alloca [4073 x i64], align 8
- %ptr = getelementptr inbounds [4073 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [4073 x i64], [4073 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK: agfi %r15, 32768
; CHECK: br %r14
%y = alloca [4074 x i64], align 8
- %ptr = getelementptr inbounds [4074 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [4074 x i64], [4074 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK: agfi %r15, 32776
; CHECK: br %r14
%y = alloca [4075 x i64], align 8
- %ptr = getelementptr inbounds [4075 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [4075 x i64], [4075 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK: agfi %r15, 2147483640
; CHECK: br %r14
%y = alloca [268435433 x i64], align 8
- %ptr = getelementptr inbounds [268435433 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [268435433 x i64], [268435433 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK: aghi %r15, 8
; CHECK: br %r14
%y = alloca [268435434 x i64], align 8
- %ptr = getelementptr inbounds [268435434 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [268435434 x i64], [268435434 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK: aghi %r15, 16
; CHECK: br %r14
%y = alloca [268435435 x i64], align 8
- %ptr = getelementptr inbounds [268435435 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [268435435 x i64], [268435435 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
store volatile i32 %add11, i32 *%ptr
store volatile i32 %add12, i32 *%ptr
store volatile i32 %add13, i32 *%ptr
- %final = getelementptr i32 *%ptr, i32 1
+ %final = getelementptr i32, i32 *%ptr, i32 1
store volatile i32 %add14, i32 *%final
ret void
}
store volatile i32 %add11, i32 *%ptr
store volatile i32 %add12, i32 *%ptr
store volatile i32 %add13, i32 *%ptr
- %final = getelementptr i32 *%ptr, i32 1
+ %final = getelementptr i32, i32 *%ptr, i32 1
store volatile i32 %add14, i32 *%final
ret void
}
store volatile i32 %add3, i32 *%ptr
store volatile i32 %add4, i32 *%ptr
store volatile i32 %add5, i32 *%ptr
- %final = getelementptr i32 *%ptr, i32 1
+ %final = getelementptr i32, i32 *%ptr, i32 1
store volatile i32 %add14, i32 *%final
ret void
}
store volatile i32 %add1, i32 *%ptr
store volatile i32 %add3, i32 *%ptr
store volatile i32 %add4, i32 *%ptr
- %final = getelementptr i32 *%ptr, i32 1
+ %final = getelementptr i32, i32 *%ptr, i32 1
store volatile i32 %add5, i32 *%final
ret void
}
store volatile i64 %add11, i64 *%ptr
store volatile i64 %add12, i64 *%ptr
store volatile i64 %add13, i64 *%ptr
- %final = getelementptr i64 *%ptr, i64 1
+ %final = getelementptr i64, i64 *%ptr, i64 1
store volatile i64 %add14, i64 *%final
ret void
}
store volatile i64 %add11, i64 *%ptr
store volatile i64 %add12, i64 *%ptr
store volatile i64 %add13, i64 *%ptr
- %final = getelementptr i64 *%ptr, i64 1
+ %final = getelementptr i64, i64 *%ptr, i64 1
store volatile i64 %add14, i64 *%final
ret void
}
store volatile i64 %add3, i64 *%ptr
store volatile i64 %add4, i64 *%ptr
store volatile i64 %add5, i64 *%ptr
- %final = getelementptr i64 *%ptr, i64 1
+ %final = getelementptr i64, i64 *%ptr, i64 1
store volatile i64 %add14, i64 *%final
ret void
}
store volatile i64 %add1, i64 *%ptr
store volatile i64 %add3, i64 *%ptr
store volatile i64 %add4, i64 *%ptr
- %final = getelementptr i64 *%ptr, i64 1
+ %final = getelementptr i64, i64 *%ptr, i64 1
store volatile i64 %add5, i64 *%final
ret void
}
; CHECK-FP: lmg %r11, %r15, 4216(%r11)
; CHECK-FP: br %r14
%y = alloca [486 x i64], align 8
- %elem = getelementptr inbounds [486 x i64]* %y, i64 0, i64 0
+ %elem = getelementptr inbounds [486 x i64], [486 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %elem
%l0 = load volatile double *%ptr
%l1 = load volatile double *%ptr
; CHECK-FP: lmg %r11, %r15, 524280(%r11)
; CHECK-FP: br %r14
%y = alloca [65510 x i64], align 8
- %elem = getelementptr inbounds [65510 x i64]* %y, i64 0, i64 0
+ %elem = getelementptr inbounds [65510 x i64], [65510 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %elem
%l0 = load volatile double *%ptr
%l1 = load volatile double *%ptr
store volatile i32 %add13, i32 *%ptr
store volatile i32 %add14, i32 *%ptr
%y = alloca [65507 x i64], align 8
- %entry = getelementptr inbounds [65507 x i64]* %y, i64 0, i64 0
+ %entry = getelementptr inbounds [65507 x i64], [65507 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %entry
ret void
}
store volatile i32 %add5, i32 *%ptr
store volatile i32 %add14, i32 *%ptr
%y = alloca [65499 x i64], align 8
- %entry = getelementptr inbounds [65499 x i64]* %y, i64 0, i64 0
+ %entry = getelementptr inbounds [65499 x i64], [65499 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %entry
ret void
}
store volatile i32 %add13, i32 *%ptr
store volatile i32 %add14, i32 *%ptr
%y = alloca [65508 x i64], align 8
- %entry = getelementptr inbounds [65508 x i64]* %y, i64 0, i64 0
+ %entry = getelementptr inbounds [65508 x i64], [65508 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %entry
ret void
}
store volatile i32 %add5, i32 *%ptr
store volatile i32 %add14, i32 *%ptr
%y = alloca [65500 x i64], align 8
- %entry = getelementptr inbounds [65500 x i64]* %y, i64 0, i64 0
+ %entry = getelementptr inbounds [65500 x i64], [65500 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %entry
ret void
}
store volatile i32 %add5, i32 *%ptr
store volatile i32 %add14, i32 *%ptr
%y = alloca [69594 x i64], align 8
- %entry = getelementptr inbounds [69594 x i64]* %y, i64 0, i64 0
+ %entry = getelementptr inbounds [69594 x i64], [69594 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %entry
ret void
}
store volatile i32 %add5, i32 *%ptr
store volatile i32 %add14, i32 *%ptr
%y = alloca [69595 x i64], align 8
- %entry = getelementptr inbounds [69595 x i64]* %y, i64 0, i64 0
+ %entry = getelementptr inbounds [69595 x i64], [69595 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %entry
ret void
}
store volatile i32 %add10, i32 *%ptr
store volatile i32 %add12, i32 *%ptr
store volatile i32 %add13, i32 *%ptr
- %final = getelementptr i32 *%ptr, i32 1
+ %final = getelementptr i32, i32 *%ptr, i32 1
store volatile i32 %add14, i32 *%final
ret void
}
; CHECK: lmg %r11, %r15, 524280(%r11)
; CHECK: br %r14
%y = alloca [65502 x i64], align 8
- %ptr = getelementptr inbounds [65502 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [65502 x i64], [65502 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK: lmg %r11, %r15, 524280(%r11)
; CHECK: br %r14
%y = alloca [65503 x i64], align 8
- %ptr = getelementptr inbounds [65503 x i64]* %y, i64 0, i64 0
+ %ptr = getelementptr inbounds [65503 x i64], [65503 x i64]* %y, i64 0, i64 0
store volatile i64 %x, i64* %ptr
ret void
}
; CHECK-FP: br %r14
%region1 = alloca [978 x i32], align 8
%region2 = alloca [978 x i32], align 8
- %ptr1 = getelementptr inbounds [978 x i32]* %region1, i64 0, i64 1
- %ptr2 = getelementptr inbounds [978 x i32]* %region2, i64 0, i64 1
+ %ptr1 = getelementptr inbounds [978 x i32], [978 x i32]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [978 x i32], [978 x i32]* %region2, i64 0, i64 1
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [978 x i32], align 8
%region2 = alloca [978 x i32], align 8
- %ptr1 = getelementptr inbounds [978 x i32]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [978 x i32]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [978 x i32], [978 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [978 x i32], [978 x i32]* %region2, i64 0, i64 2
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [978 x i32], align 8
%region2 = alloca [978 x i32], align 8
- %ptr1 = getelementptr inbounds [978 x i32]* %region1, i64 0, i64 3
- %ptr2 = getelementptr inbounds [978 x i32]* %region2, i64 0, i64 3
+ %ptr1 = getelementptr inbounds [978 x i32], [978 x i32]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [978 x i32], [978 x i32]* %region2, i64 0, i64 3
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [2002 x i32], align 8
%region2 = alloca [2002 x i32], align 8
- %ptr1 = getelementptr inbounds [2002 x i32]* %region1, i64 0, i64 1
- %ptr2 = getelementptr inbounds [2002 x i32]* %region2, i64 0, i64 1
+ %ptr1 = getelementptr inbounds [2002 x i32], [2002 x i32]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [2002 x i32], [2002 x i32]* %region2, i64 0, i64 1
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [2002 x i32], align 8
%region2 = alloca [2002 x i32], align 8
- %ptr1 = getelementptr inbounds [2002 x i32]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [2002 x i32]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [2002 x i32], [2002 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [2002 x i32], [2002 x i32]* %region2, i64 0, i64 2
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [2002 x i32], align 8
%region2 = alloca [2002 x i32], align 8
- %ptr1 = getelementptr inbounds [2002 x i32]* %region1, i64 0, i64 3
- %ptr2 = getelementptr inbounds [2002 x i32]* %region2, i64 0, i64 3
+ %ptr1 = getelementptr inbounds [2002 x i32], [2002 x i32]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [2002 x i32], [2002 x i32]* %region2, i64 0, i64 3
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [2004 x i32], align 8
%region2 = alloca [2004 x i32], align 8
- %ptr1 = getelementptr inbounds [2004 x i32]* %region1, i64 0, i64 1023
- %ptr2 = getelementptr inbounds [2004 x i32]* %region2, i64 0, i64 1023
+ %ptr1 = getelementptr inbounds [2004 x i32], [2004 x i32]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2004 x i32], [2004 x i32]* %region2, i64 0, i64 1023
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [2006 x i32], align 8
%region2 = alloca [2006 x i32], align 8
- %ptr1 = getelementptr inbounds [2006 x i32]* %region1, i64 0, i64 1023
- %ptr2 = getelementptr inbounds [2006 x i32]* %region2, i64 0, i64 1023
+ %ptr1 = getelementptr inbounds [2006 x i32], [2006 x i32]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2006 x i32], [2006 x i32]* %region2, i64 0, i64 1023
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [2006 x i32], align 8
%region2 = alloca [2006 x i32], align 8
- %ptr1 = getelementptr inbounds [2006 x i32]* %region1, i64 0, i64 1024
- %ptr2 = getelementptr inbounds [2006 x i32]* %region2, i64 0, i64 1024
+ %ptr1 = getelementptr inbounds [2006 x i32], [2006 x i32]* %region1, i64 0, i64 1024
+ %ptr2 = getelementptr inbounds [2006 x i32], [2006 x i32]* %region2, i64 0, i64 1024
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
ret void
%i5 = load volatile i32 *%vptr
%region1 = alloca [978 x i32], align 8
%region2 = alloca [978 x i32], align 8
- %ptr1 = getelementptr inbounds [978 x i32]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [978 x i32]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [978 x i32], [978 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [978 x i32], [978 x i32]* %region2, i64 0, i64 2
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
store volatile i32 %i0, i32 *%vptr
%i14 = load volatile i32 *%vptr
%region1 = alloca [978 x i32], align 8
%region2 = alloca [978 x i32], align 8
- %ptr1 = getelementptr inbounds [978 x i32]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [978 x i32]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [978 x i32], [978 x i32]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [978 x i32], [978 x i32]* %region2, i64 0, i64 2
store volatile i32 42, i32 *%ptr1
store volatile i32 42, i32 *%ptr2
store volatile i32 %i0, i32 *%vptr
; CHECK-FP: br %r14
%region1 = alloca [3912 x i8], align 8
%region2 = alloca [3912 x i8], align 8
- %ptr1 = getelementptr inbounds [3912 x i8]* %region1, i64 0, i64 7
- %ptr2 = getelementptr inbounds [3912 x i8]* %region2, i64 0, i64 7
+ %ptr1 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region2, i64 0, i64 7
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [3912 x i8], align 8
%region2 = alloca [3912 x i8], align 8
- %ptr1 = getelementptr inbounds [3912 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [3912 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region2, i64 0, i64 8
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 7
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 7
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 7
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 8
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 4103
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 4103
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 4103
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 4103
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 4104
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 4104
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 4104
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 4104
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [1048400 x i8], align 8
%region2 = alloca [1048400 x i8], align 8
- %ptr1 = getelementptr inbounds [1048400 x i8]* %region1, i64 0, i64 524287
- %ptr2 = getelementptr inbounds [1048400 x i8]* %region2, i64 0, i64 524287
+ %ptr1 = getelementptr inbounds [1048400 x i8], [1048400 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048400 x i8], [1048400 x i8]* %region2, i64 0, i64 524287
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [1048408 x i8], align 8
%region2 = alloca [1048408 x i8], align 8
- %ptr1 = getelementptr inbounds [1048408 x i8]* %region1, i64 0, i64 524287
- %ptr2 = getelementptr inbounds [1048408 x i8]* %region2, i64 0, i64 524287
+ %ptr1 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region2, i64 0, i64 524287
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [1048408 x i8], align 8
%region2 = alloca [1048408 x i8], align 8
- %ptr1 = getelementptr inbounds [1048408 x i8]* %region1, i64 0, i64 524288
- %ptr2 = getelementptr inbounds [1048408 x i8]* %region2, i64 0, i64 524288
+ %ptr1 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region1, i64 0, i64 524288
+ %ptr2 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region2, i64 0, i64 524288
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
ret void
%i5 = load volatile i32 *%vptr
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 8
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
store volatile i32 %i0, i32 *%vptr
%i14 = load volatile i32 *%vptr
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 8
store volatile i8 42, i8 *%ptr1
store volatile i8 42, i8 *%ptr2
store volatile i32 %i0, i32 *%vptr
; CHECK-FP: br %r14
%region1 = alloca [978 x float], align 8
%region2 = alloca [978 x float], align 8
- %start1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 1
- %ptr2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 1
+ %ptr1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 1
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [978 x float], align 8
%region2 = alloca [978 x float], align 8
- %start1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 2
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [978 x float], align 8
%region2 = alloca [978 x float], align 8
- %start1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 3
- %ptr2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 3
+ %ptr1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 3
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [2002 x float], align 8
%region2 = alloca [2002 x float], align 8
- %start1 = getelementptr inbounds [2002 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [2002 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [2002 x float], [2002 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2002 x float], [2002 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [2002 x float]* %region1, i64 0, i64 1
- %ptr2 = getelementptr inbounds [2002 x float]* %region2, i64 0, i64 1
+ %ptr1 = getelementptr inbounds [2002 x float], [2002 x float]* %region1, i64 0, i64 1
+ %ptr2 = getelementptr inbounds [2002 x float], [2002 x float]* %region2, i64 0, i64 1
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [2002 x float], align 8
%region2 = alloca [2002 x float], align 8
- %start1 = getelementptr inbounds [2002 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [2002 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [2002 x float], [2002 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2002 x float], [2002 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [2002 x float]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [2002 x float]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [2002 x float], [2002 x float]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [2002 x float], [2002 x float]* %region2, i64 0, i64 2
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [2002 x float], align 8
%region2 = alloca [2002 x float], align 8
- %start1 = getelementptr inbounds [2002 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [2002 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [2002 x float], [2002 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2002 x float], [2002 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [2002 x float]* %region1, i64 0, i64 3
- %ptr2 = getelementptr inbounds [2002 x float]* %region2, i64 0, i64 3
+ %ptr1 = getelementptr inbounds [2002 x float], [2002 x float]* %region1, i64 0, i64 3
+ %ptr2 = getelementptr inbounds [2002 x float], [2002 x float]* %region2, i64 0, i64 3
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [2004 x float], align 8
%region2 = alloca [2004 x float], align 8
- %start1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [2004 x float], [2004 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2004 x float], [2004 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [2004 x float]* %region1, i64 0, i64 1023
- %ptr2 = getelementptr inbounds [2004 x float]* %region2, i64 0, i64 1023
+ %ptr1 = getelementptr inbounds [2004 x float], [2004 x float]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2004 x float], [2004 x float]* %region2, i64 0, i64 1023
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [2006 x float], align 8
%region2 = alloca [2006 x float], align 8
- %start1 = getelementptr inbounds [2006 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [2006 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [2006 x float], [2006 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2006 x float], [2006 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [2006 x float]* %region1, i64 0, i64 1023
- %ptr2 = getelementptr inbounds [2006 x float]* %region2, i64 0, i64 1023
+ %ptr1 = getelementptr inbounds [2006 x float], [2006 x float]* %region1, i64 0, i64 1023
+ %ptr2 = getelementptr inbounds [2006 x float], [2006 x float]* %region2, i64 0, i64 1023
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [2006 x float], align 8
%region2 = alloca [2006 x float], align 8
- %start1 = getelementptr inbounds [2006 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [2006 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [2006 x float], [2006 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [2006 x float], [2006 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [2006 x float]* %region1, i64 0, i64 1024
- %ptr2 = getelementptr inbounds [2006 x float]* %region2, i64 0, i64 1024
+ %ptr1 = getelementptr inbounds [2006 x float], [2006 x float]* %region1, i64 0, i64 1024
+ %ptr2 = getelementptr inbounds [2006 x float], [2006 x float]* %region2, i64 0, i64 1024
%float1 = load float *%ptr1
%float2 = load float *%ptr2
%double1 = fpext float %float1 to double
; CHECK-FP: br %r14
%region1 = alloca [978 x float], align 8
%region2 = alloca [978 x float], align 8
- %start1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %ptr1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 2
- %ptr2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 2
+ %ptr1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 2
+ %ptr2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 2
%i0 = load volatile i32 *%vptr
%i1 = load volatile i32 *%vptr
%i2 = load volatile i32 *%vptr
; CHECK-FP: br %r14
%region1 = alloca [978 x float], align 8
%region2 = alloca [978 x float], align 8
- %start1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 0
- %start2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 0
+ %start1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 0
+ %start2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 0
call void @foo(float *%start1, float *%start2)
- %elem1 = getelementptr inbounds [978 x float]* %region1, i64 0, i64 2
- %elem2 = getelementptr inbounds [978 x float]* %region2, i64 0, i64 2
+ %elem1 = getelementptr inbounds [978 x float], [978 x float]* %region1, i64 0, i64 2
+ %elem2 = getelementptr inbounds [978 x float], [978 x float]* %region2, i64 0, i64 2
%base1 = ptrtoint float *%elem1 to i64
%base2 = ptrtoint float *%elem2 to i64
%addr1 = add i64 %base1, %index
; CHECK-FP: br %r14
%region1 = alloca [3912 x i8], align 8
%region2 = alloca [3912 x i8], align 8
- %ptr1 = getelementptr inbounds [3912 x i8]* %region1, i64 0, i64 7
- %ptr2 = getelementptr inbounds [3912 x i8]* %region2, i64 0, i64 7
+ %ptr1 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region2, i64 0, i64 7
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [3912 x i8], align 8
%region2 = alloca [3912 x i8], align 8
- %ptr1 = getelementptr inbounds [3912 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [3912 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [3912 x i8], [3912 x i8]* %region2, i64 0, i64 8
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 7
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 7
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 7
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 7
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 8
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 4103
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 4103
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 4103
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 4103
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 4104
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 4104
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 4104
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 4104
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [1048400 x i8], align 8
%region2 = alloca [1048400 x i8], align 8
- %ptr1 = getelementptr inbounds [1048400 x i8]* %region1, i64 0, i64 524287
- %ptr2 = getelementptr inbounds [1048400 x i8]* %region2, i64 0, i64 524287
+ %ptr1 = getelementptr inbounds [1048400 x i8], [1048400 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048400 x i8], [1048400 x i8]* %region2, i64 0, i64 524287
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [1048408 x i8], align 8
%region2 = alloca [1048408 x i8], align 8
- %ptr1 = getelementptr inbounds [1048408 x i8]* %region1, i64 0, i64 524287
- %ptr2 = getelementptr inbounds [1048408 x i8]* %region2, i64 0, i64 524287
+ %ptr1 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region1, i64 0, i64 524287
+ %ptr2 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region2, i64 0, i64 524287
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-FP: br %r14
%region1 = alloca [1048408 x i8], align 8
%region2 = alloca [1048408 x i8], align 8
- %ptr1 = getelementptr inbounds [1048408 x i8]* %region1, i64 0, i64 524288
- %ptr2 = getelementptr inbounds [1048408 x i8]* %region2, i64 0, i64 524288
+ %ptr1 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region1, i64 0, i64 524288
+ %ptr2 = getelementptr inbounds [1048408 x i8], [1048408 x i8]* %region2, i64 0, i64 524288
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
%i5 = load volatile i32 *%vptr
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 8
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
store volatile i32 %i0, i32 *%vptr
%i14 = load volatile i32 *%vptr
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 8
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 8
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 8
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 8
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
store volatile i32 %i0, i32 *%vptr
%region1 = alloca [524104 x i8], align 8
%region2 = alloca [524104 x i8], align 8
%index1 = add i64 %index, 8
- %ptr1 = getelementptr inbounds [524104 x i8]* %region1, i64 0, i64 %index1
- %ptr2 = getelementptr inbounds [524104 x i8]* %region2, i64 0, i64 %index1
+ %ptr1 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region1, i64 0, i64 %index1
+ %ptr2 = getelementptr inbounds [524104 x i8], [524104 x i8]* %region2, i64 0, i64 %index1
store volatile i8 %byte, i8 *%ptr1
store volatile i8 %byte, i8 *%ptr2
ret void
; CHECK-LABEL: f9:
; CHECK: ic %r2, 4095(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f10:
; CHECK: icy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f11:
; CHECK: icy %r2, 524287(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK: agfi %r3, 524288
; CHECK: ic %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f13:
; CHECK: icy %r2, -1(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f14:
; CHECK: icy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK: agfi %r3, -524289
; CHECK: ic %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%val = load i8 *%ptr
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f16:
; CHECK: ic %r2, 4095({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
- %ptr1 = getelementptr i8 *%src, i64 %index
- %ptr2 = getelementptr i8 *%ptr1, i64 4095
+ %ptr1 = getelementptr i8, i8 *%src, i64 %index
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 4095
%val = load i8 *%ptr2
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f17:
; CHECK: icy %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
- %ptr1 = getelementptr i8 *%src, i64 %index
- %ptr2 = getelementptr i8 *%ptr1, i64 4096
+ %ptr1 = getelementptr i8, i8 *%src, i64 %index
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 4096
%val = load i8 *%ptr2
%src2 = zext i8 %val to i32
%src1 = and i32 %orig, -256
; CHECK-LABEL: f9:
; CHECK: ic %r2, 4095(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f10:
; CHECK: icy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f11:
; CHECK: icy %r2, 524287(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK: agfi %r3, 524288
; CHECK: ic %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f13:
; CHECK: icy %r2, -1(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f14:
; CHECK: icy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK: agfi %r3, -524289
; CHECK: ic %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%val = load i8 *%ptr
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f16:
; CHECK: ic %r2, 4095({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
- %ptr1 = getelementptr i8 *%src, i64 %index
- %ptr2 = getelementptr i8 *%ptr1, i64 4095
+ %ptr1 = getelementptr i8, i8 *%src, i64 %index
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 4095
%val = load i8 *%ptr2
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f17:
; CHECK: icy %r2, 4096({{%r4,%r3|%r3,%r4}})
; CHECK: br %r14
- %ptr1 = getelementptr i8 *%src, i64 %index
- %ptr2 = getelementptr i8 *%ptr1, i64 4096
+ %ptr1 = getelementptr i8, i8 *%src, i64 %index
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 4096
%val = load i8 *%ptr2
%src2 = zext i8 %val to i64
%src1 = and i64 %orig, -256
; CHECK-LABEL: f2:
; CHECK: ah %r2, 4094(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2047
+ %ptr = getelementptr i16, i16 *%src, i64 2047
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK-LABEL: f3:
; CHECK: ahy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2048
+ %ptr = getelementptr i16, i16 *%src, i64 2048
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK-LABEL: f4:
; CHECK: ahy %r2, 524286(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK: agfi %r3, 524288
; CHECK: ah %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK-LABEL: f6:
; CHECK: ahy %r2, -2(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK-LABEL: f7:
; CHECK: ahy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK: agfi %r3, -524290
; CHECK: ah %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = add i32 %lhs, %rhs
; CHECK-LABEL: f3:
; CHECK: a %r2, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK-LABEL: f4:
; CHECK: ay %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK-LABEL: f5:
; CHECK: ay %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK: agfi %r3, 524288
; CHECK: a %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK-LABEL: f7:
; CHECK: ay %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK-LABEL: f8:
; CHECK: ay %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK: agfi %r3, -524292
; CHECK: a %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%add = add i32 %a, %b
ret i32 %add
; CHECK: brasl %r14, foo@PLT
; CHECK: a %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: agf %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%add = add i64 %a, %bext
; CHECK: agfi %r3, 524288
; CHECK: agf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%add = add i64 %a, %bext
; CHECK-LABEL: f5:
; CHECK: agf %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%add = add i64 %a, %bext
; CHECK-LABEL: f6:
; CHECK: agf %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%add = add i64 %a, %bext
; CHECK: agfi %r3, -524292
; CHECK: agf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%add = add i64 %a, %bext
; CHECK: brasl %r14, foo@PLT
; CHECK: agf %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: algf %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%add = add i64 %a, %bext
; CHECK: agfi %r3, 524288
; CHECK: algf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%add = add i64 %a, %bext
; CHECK-LABEL: f5:
; CHECK: algf %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%add = add i64 %a, %bext
; CHECK-LABEL: f6:
; CHECK: algf %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%add = add i64 %a, %bext
; CHECK: agfi %r3, -524292
; CHECK: algf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%add = add i64 %a, %bext
; CHECK: brasl %r14, foo@PLT
; CHECK: algf %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: ag %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%add = add i64 %a, %b
ret i64 %add
; CHECK: agfi %r3, 524288
; CHECK: ag %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%add = add i64 %a, %b
ret i64 %add
; CHECK-LABEL: f5:
; CHECK: ag %r2, -8(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%add = add i64 %a, %b
ret i64 %add
; CHECK-LABEL: f6:
; CHECK: ag %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%add = add i64 %a, %b
ret i64 %add
; CHECK: agfi %r3, -524296
; CHECK: ag %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%add = add i64 %a, %b
ret i64 %add
; CHECK: brasl %r14, foo@PLT
; CHECK: ag %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK: alg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
; CHECK: alcg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i128 *%ptr0, i128 2
- %ptr2 = getelementptr i128 *%ptr0, i128 4
- %ptr3 = getelementptr i128 *%ptr0, i128 6
- %ptr4 = getelementptr i128 *%ptr0, i128 8
+ %ptr1 = getelementptr i128, i128 *%ptr0, i128 2
+ %ptr2 = getelementptr i128, i128 *%ptr0, i128 4
+ %ptr3 = getelementptr i128, i128 *%ptr0, i128 6
+ %ptr4 = getelementptr i128, i128 *%ptr0, i128 8
%val0 = load i128 *%ptr0
%val1 = load i128 *%ptr1
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i64 131071
+ %ptr = getelementptr i32, i32 *%bsrc, i64 131071
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%add = add i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i64 131072
+ %ptr = getelementptr i32, i32 *%bsrc, i64 131072
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%add = add i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i128 -1
+ %ptr = getelementptr i32, i32 *%bsrc, i128 -1
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%add = add i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i128 -131072
+ %ptr = getelementptr i32, i32 *%bsrc, i128 -131072
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%add = add i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i128 -131073
+ %ptr = getelementptr i32, i32 *%bsrc, i128 -131073
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%add = add i128 %xor, %bext
; CHECK-LABEL: f6:
; CHECK: asi 524284(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%val = load i32 *%ptr
%add = add i32 %val, 1
store i32 %add, i32 *%ptr
; CHECK: agfi %r2, 524288
; CHECK: asi 0(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%val = load i32 *%ptr
%add = add i32 %val, 1
store i32 %add, i32 *%ptr
; CHECK-LABEL: f8:
; CHECK: asi -524288(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%val = load i32 *%ptr
%add = add i32 %val, 1
store i32 %add, i32 *%ptr
; CHECK: agfi %r2, -524292
; CHECK: asi 0(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%val = load i32 *%ptr
%add = add i32 %val, 1
store i32 %add, i32 *%ptr
; CHECK-LABEL: f6:
; CHECK: agsi 524280(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65535
+ %ptr = getelementptr i64, i64 *%base, i64 65535
%val = load i64 *%ptr
%add = add i64 %val, 1
store i64 %add, i64 *%ptr
; CHECK: agfi %r2, 524288
; CHECK: agsi 0(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65536
+ %ptr = getelementptr i64, i64 *%base, i64 65536
%val = load i64 *%ptr
%add = add i64 %val, 1
store i64 %add, i64 *%ptr
; CHECK-LABEL: f8:
; CHECK: agsi -524288(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65536
+ %ptr = getelementptr i64, i64 *%base, i64 -65536
%val = load i64 *%ptr
%add = add i64 %val, 1
store i64 %add, i64 *%ptr
; CHECK: agfi %r2, -524296
; CHECK: agsi 0(%r2), 1
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65537
+ %ptr = getelementptr i64, i64 *%base, i64 -65537
%val = load i64 *%ptr
%add = add i64 %val, 1
store i64 %add, i64 *%ptr
; CHECK-LABEL: f2:
; CHECK: ch %r2, 4094(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2047
+ %ptr = getelementptr i16, i16 *%src, i64 2047
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK-LABEL: f3:
; CHECK: chy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2048
+ %ptr = getelementptr i16, i16 *%src, i64 2048
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK-LABEL: f4:
; CHECK: chy %r2, 524286(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK: agfi %r3, 524288
; CHECK: ch %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK-LABEL: f6:
; CHECK: chy %r2, -2(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK-LABEL: f7:
; CHECK: chy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK: agfi %r3, -524290
; CHECK: ch %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%cond = icmp slt i32 %lhs, %rhs
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1023
+ %ptr = getelementptr i32, i32 *%base, i64 1023
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1024
+ %ptr = getelementptr i32, i32 *%base, i64 1024
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -1
+ %ptr = getelementptr i32, i32 *%base, i64 -1
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%i2 = load i32 *%ptr
%cond = icmp slt i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1023
+ %ptr = getelementptr i32, i32 *%base, i64 1023
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1024
+ %ptr = getelementptr i32, i32 *%base, i64 1024
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -1
+ %ptr = getelementptr i32, i32 *%base, i64 -1
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%i2 = load i32 *%ptr
%cond = icmp ult i32 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-LABEL: f2:
; CHECK: cgh %r2, 524286(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%rhs = sext i16 %half to i64
%cond = icmp slt i64 %lhs, %rhs
; CHECK: agfi %r3, 524288
; CHECK: cgh %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i64
%cond = icmp slt i64 %lhs, %rhs
; CHECK-LABEL: f4:
; CHECK: cgh %r2, -2(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%rhs = sext i16 %half to i64
%cond = icmp slt i64 %lhs, %rhs
; CHECK-LABEL: f5:
; CHECK: cgh %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i64
%cond = icmp slt i64 %lhs, %rhs
; CHECK: agfi %r3, -524290
; CHECK: cgh %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%rhs = sext i16 %half to i64
%cond = icmp slt i64 %lhs, %rhs
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%unext = load i32 *%ptr
%i2 = sext i32 %unext to i64
%cond = icmp slt i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%unext = load i32 *%ptr
%i2 = sext i32 %unext to i64
%cond = icmp slt i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -1
+ %ptr = getelementptr i32, i32 *%base, i64 -1
%unext = load i32 *%ptr
%i2 = sext i32 %unext to i64
%cond = icmp slt i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%unext = load i32 *%ptr
%i2 = sext i32 %unext to i64
%cond = icmp slt i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%unext = load i32 *%ptr
%i2 = sext i32 %unext to i64
%cond = icmp slt i64 %i1, %i2
; CHECK: brasl %r14, foo@PLT
; CHECK: cgf {{%r[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131071
+ %ptr = getelementptr i32, i32 *%base, i64 131071
%unext = load i32 *%ptr
%i2 = zext i32 %unext to i64
%cond = icmp ult i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 131072
+ %ptr = getelementptr i32, i32 *%base, i64 131072
%unext = load i32 *%ptr
%i2 = zext i32 %unext to i64
%cond = icmp ult i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -1
+ %ptr = getelementptr i32, i32 *%base, i64 -1
%unext = load i32 *%ptr
%i2 = zext i32 %unext to i64
%cond = icmp ult i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131072
+ %ptr = getelementptr i32, i32 *%base, i64 -131072
%unext = load i32 *%ptr
%i2 = zext i32 %unext to i64
%cond = icmp ult i64 %i1, %i2
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -131073
+ %ptr = getelementptr i32, i32 *%base, i64 -131073
%unext = load i32 *%ptr
%i2 = zext i32 %unext to i64
%cond = icmp ult i64 %i1, %i2
; CHECK: brasl %r14, foo@PLT
; CHECK: clgf {{%r[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65535
+ %ptr = getelementptr i64, i64 *%base, i64 65535
%i2 = load i64 *%ptr
%cond = icmp slt i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65536
+ %ptr = getelementptr i64, i64 *%base, i64 65536
%i2 = load i64 *%ptr
%cond = icmp slt i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -1
+ %ptr = getelementptr i64, i64 *%base, i64 -1
%i2 = load i64 *%ptr
%cond = icmp slt i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65536
+ %ptr = getelementptr i64, i64 *%base, i64 -65536
%i2 = load i64 *%ptr
%cond = icmp slt i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65537
+ %ptr = getelementptr i64, i64 *%base, i64 -65537
%i2 = load i64 *%ptr
%cond = icmp slt i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65535
+ %ptr = getelementptr i64, i64 *%base, i64 65535
%i2 = load i64 *%ptr
%cond = icmp ult i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 65536
+ %ptr = getelementptr i64, i64 *%base, i64 65536
%i2 = load i64 *%ptr
%cond = icmp ult i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -1
+ %ptr = getelementptr i64, i64 *%base, i64 -1
%i2 = load i64 *%ptr
%cond = icmp ult i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65536
+ %ptr = getelementptr i64, i64 *%base, i64 -65536
%i2 = load i64 *%ptr
%cond = icmp ult i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -65537
+ %ptr = getelementptr i64, i64 *%base, i64 -65537
%i2 = load i64 *%ptr
%cond = icmp ult i64 %i1, %i2
%res = select i1 %cond, double %a, double %b
; CHECK-LABEL: f11:
; CHECK: cli 4095(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK-LABEL: f12:
; CHECK: cliy 4096(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK-LABEL: f13:
; CHECK: cliy 524287(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK: agfi %r2, 524288
; CHECK: cli 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK-LABEL: f15:
; CHECK: cliy -1(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK-LABEL: f16:
; CHECK: cliy -524288(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK: agfi %r2, -524289
; CHECK: cli 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%val = load i8 *%ptr
%cond = icmp ult i8 %val, 127
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 2047
+ %ptr = getelementptr i16, i16 *%base, i64 2047
%val = load i16 *%ptr
%cond = icmp slt i16 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 2048
+ %ptr = getelementptr i16, i16 *%base, i64 2048
%val = load i16 *%ptr
%cond = icmp slt i16 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 -1
+ %ptr = getelementptr i16, i16 *%base, i64 -1
%val = load i16 *%ptr
%cond = icmp slt i16 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jh
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 2047
+ %ptr = getelementptr i16, i16 *%base, i64 2047
%val = load i16 *%ptr
%cond = icmp ugt i16 %val, 1
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jh
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 2048
+ %ptr = getelementptr i16, i16 *%base, i64 2048
%val = load i16 *%ptr
%cond = icmp ugt i16 %val, 1
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jh
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i16 *%base, i64 -1
+ %ptr = getelementptr i16, i16 *%base, i64 -1
%val = load i16 *%ptr
%cond = icmp ugt i16 %val, 1
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1023
+ %ptr = getelementptr i32, i32 *%base, i64 1023
%val = load i32 *%ptr
%cond = icmp slt i32 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1024
+ %ptr = getelementptr i32, i32 *%base, i64 1024
%val = load i32 *%ptr
%cond = icmp slt i32 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -1
+ %ptr = getelementptr i32, i32 *%base, i64 -1
%val = load i32 *%ptr
%cond = icmp slt i32 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jh
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1023
+ %ptr = getelementptr i32, i32 *%base, i64 1023
%val = load i32 *%ptr
%cond = icmp ugt i32 %val, 1
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jh
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 1024
+ %ptr = getelementptr i32, i32 *%base, i64 1024
%val = load i32 *%ptr
%cond = icmp ugt i32 %val, 1
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jh
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i32 *%base, i64 -1
+ %ptr = getelementptr i32, i32 *%base, i64 -1
%val = load i32 *%ptr
%cond = icmp ugt i32 %val, 1
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 511
+ %ptr = getelementptr i64, i64 *%base, i64 511
%val = load i64 *%ptr
%cond = icmp slt i64 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 512
+ %ptr = getelementptr i64, i64 *%base, i64 512
%val = load i64 *%ptr
%cond = icmp slt i64 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -1
+ %ptr = getelementptr i64, i64 *%base, i64 -1
%val = load i64 *%ptr
%cond = icmp slt i64 %val, 0
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 511
+ %ptr = getelementptr i64, i64 *%base, i64 511
%val = load i64 *%ptr
%cond = icmp ult i64 %val, 2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 512
+ %ptr = getelementptr i64, i64 *%base, i64 512
%val = load i64 *%ptr
%cond = icmp ult i64 %val, 2
%res = select i1 %cond, double %a, double %b
; CHECK-NEXT: jl
; CHECK: ldr %f0, %f2
; CHECK: br %r14
- %ptr = getelementptr i64 *%base, i64 -1
+ %ptr = getelementptr i64, i64 *%base, i64 -1
%val = load i64 *%ptr
%cond = icmp ult i64 %val, 2
%res = select i1 %cond, double %a, double %b
; CHECK: tm 4095(%r2), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK: tmy 4096(%r2), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK: tmy 524287(%r2), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK: tm 0(%r2), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK: tmy -524288(%r2), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK: tm 0(%r2), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK: tm 0({{%r[1-5]}}), 1
; CHECK: je {{\.L.*}}
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 %index
+ %ptr = getelementptr i8, i8 *%src, i64 %index
%byte = load i8 *%ptr
%and = and i8 %byte, 1
%cmp = icmp eq i8 %and, 0
; CHECK-LABEL: f8:
; CHECK: mvi 4095(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
store i8 42, i8 *%ptr
ret void
}
; CHECK-LABEL: f9:
; CHECK: mviy 4096(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
store i8 42, i8 *%ptr
ret void
}
; CHECK-LABEL: f10:
; CHECK: mviy 524287(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
store i8 42, i8 *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: mvi 0(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
store i8 42, i8 *%ptr
ret void
}
; CHECK-LABEL: f12:
; CHECK: mviy -1(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
store i8 42, i8 *%ptr
ret void
}
; CHECK-LABEL: f13:
; CHECK: mviy -524288(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
store i8 42, i8 *%ptr
ret void
}
; CHECK: agfi %r2, -524289
; CHECK: mvi 0(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
store i8 42, i8 *%ptr
ret void
}
; CHECK-LABEL: f8:
; CHECK: mvhhi 4094(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i16 *%a, i64 2047
+ %ptr = getelementptr i16, i16 *%a, i64 2047
store i16 42, i16 *%ptr
ret void
}
; CHECK: lhi [[TMP:%r[0-5]]], 42
; CHECK: sthy [[TMP]], 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%a, i64 2048
+ %ptr = getelementptr i16, i16 *%a, i64 2048
store i16 42, i16 *%ptr
ret void
}
; CHECK: lhi [[TMP:%r[0-5]]], 42
; CHECK: sthy [[TMP]], -2(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%a, i64 -1
+ %ptr = getelementptr i16, i16 *%a, i64 -1
store i16 42, i16 *%ptr
ret void
}
; CHECK-LABEL: f7:
; CHECK: mvhi 4092(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i32 *%a, i64 1023
+ %ptr = getelementptr i32, i32 *%a, i64 1023
store i32 42, i32 *%ptr
ret void
}
; CHECK: lhi [[TMP:%r[0-5]]], 42
; CHECK: sty [[TMP]], 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%a, i64 1024
+ %ptr = getelementptr i32, i32 *%a, i64 1024
store i32 42, i32 *%ptr
ret void
}
; CHECK: lhi [[TMP:%r[0-5]]], 42
; CHECK: sty [[TMP]], -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%a, i64 -1
+ %ptr = getelementptr i32, i32 *%a, i64 -1
store i32 42, i32 *%ptr
ret void
}
; CHECK-LABEL: f7:
; CHECK: mvghi 4088(%r2), 42
; CHECK: br %r14
- %ptr = getelementptr i64 *%a, i64 511
+ %ptr = getelementptr i64, i64 *%a, i64 511
store i64 42, i64 *%ptr
ret void
}
; CHECK: lghi [[TMP:%r[0-5]]], 42
; CHECK: stg [[TMP]], 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%a, i64 512
+ %ptr = getelementptr i64, i64 *%a, i64 512
store i64 42, i64 *%ptr
ret void
}
; CHECK: lghi [[TMP:%r[0-5]]], 42
; CHECK: stg [[TMP]], -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%a, i64 -1
+ %ptr = getelementptr i64, i64 *%a, i64 -1
store i64 42, i64 *%ptr
ret void
}
; CHECK-LABEL: f4:
; CHECK: lb %r2, 524287(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%byte = load i8 *%ptr
%ext = sext i8 %byte to i32
ret i32 %ext
; CHECK: agfi %r2, 524288
; CHECK: lb %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%byte = load i8 *%ptr
%ext = sext i8 %byte to i32
ret i32 %ext
; CHECK-LABEL: f6:
; CHECK: lb %r2, -1(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%byte = load i8 *%ptr
%ext = sext i8 %byte to i32
ret i32 %ext
; CHECK-LABEL: f7:
; CHECK: lb %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%byte = load i8 *%ptr
%ext = sext i8 %byte to i32
ret i32 %ext
; CHECK: agfi %r2, -524289
; CHECK: lb %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%byte = load i8 *%ptr
%ext = sext i8 %byte to i32
ret i32 %ext
; CHECK-LABEL: f5:
; CHECK: llc %r2, 524287(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%byte = load i8 *%ptr
%ext = zext i8 %byte to i32
ret i32 %ext
; CHECK: agfi %r2, 524288
; CHECK: llc %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%byte = load i8 *%ptr
%ext = zext i8 %byte to i32
ret i32 %ext
; CHECK-LABEL: f7:
; CHECK: llc %r2, -1(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%byte = load i8 *%ptr
%ext = zext i8 %byte to i32
ret i32 %ext
; CHECK-LABEL: f8:
; CHECK: llc %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%byte = load i8 *%ptr
%ext = zext i8 %byte to i32
ret i32 %ext
; CHECK: agfi %r2, -524289
; CHECK: llc %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%byte = load i8 *%ptr
%ext = zext i8 %byte to i32
ret i32 %ext
; CHECK-LABEL: f4:
; CHECK: lgb %r2, 524287(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%byte = load i8 *%ptr
%ext = sext i8 %byte to i64
ret i64 %ext
; CHECK: agfi %r2, 524288
; CHECK: lgb %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%byte = load i8 *%ptr
%ext = sext i8 %byte to i64
ret i64 %ext
; CHECK-LABEL: f6:
; CHECK: lgb %r2, -1(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%byte = load i8 *%ptr
%ext = sext i8 %byte to i64
ret i64 %ext
; CHECK-LABEL: f7:
; CHECK: lgb %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%byte = load i8 *%ptr
%ext = sext i8 %byte to i64
ret i64 %ext
; CHECK: agfi %r2, -524289
; CHECK: lgb %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%byte = load i8 *%ptr
%ext = sext i8 %byte to i64
ret i64 %ext
; CHECK-LABEL: f5:
; CHECK: llgc %r2, 524287(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%byte = load i8 *%ptr
%ext = zext i8 %byte to i64
ret i64 %ext
; CHECK: agfi %r2, 524288
; CHECK: llgc %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%byte = load i8 *%ptr
%ext = zext i8 %byte to i64
ret i64 %ext
; CHECK-LABEL: f7:
; CHECK: llgc %r2, -1(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%byte = load i8 *%ptr
%ext = zext i8 %byte to i64
ret i64 %ext
; CHECK-LABEL: f8:
; CHECK: llgc %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%byte = load i8 *%ptr
%ext = zext i8 %byte to i64
ret i64 %ext
; CHECK: agfi %r2, -524289
; CHECK: llgc %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%byte = load i8 *%ptr
%ext = zext i8 %byte to i64
ret i64 %ext
; CHECK-LABEL: f4:
; CHECK: lh %r2, 4094(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2047
+ %ptr = getelementptr i16, i16 *%src, i64 2047
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f5:
; CHECK: lhy %r2, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2048
+ %ptr = getelementptr i16, i16 *%src, i64 2048
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f6:
; CHECK: lhy %r2, 524286(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK: agfi %r2, 524288
; CHECK: lh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f8:
; CHECK: lhy %r2, -2(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f9:
; CHECK: lhy %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK: agfi %r2, -524290
; CHECK: lh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%ext = sext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f5:
; CHECK: llh %r2, 524286(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%ext = zext i16 %half to i32
ret i32 %ext
; CHECK: agfi %r2, 524288
; CHECK: llh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%ext = zext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f7:
; CHECK: llh %r2, -2(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%ext = zext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f8:
; CHECK: llh %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%ext = zext i16 %half to i32
ret i32 %ext
; CHECK: agfi %r2, -524290
; CHECK: llh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%ext = zext i16 %half to i32
ret i32 %ext
; CHECK-LABEL: f4:
; CHECK: lgh %r2, 524286(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%ext = sext i16 %half to i64
ret i64 %ext
; CHECK: agfi %r2, 524288
; CHECK: lgh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%ext = sext i16 %half to i64
ret i64 %ext
; CHECK-LABEL: f6:
; CHECK: lgh %r2, -2(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%ext = sext i16 %half to i64
ret i64 %ext
; CHECK-LABEL: f7:
; CHECK: lgh %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%ext = sext i16 %half to i64
ret i64 %ext
; CHECK: agfi %r2, -524290
; CHECK: lgh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%ext = sext i16 %half to i64
ret i64 %ext
; CHECK-LABEL: f5:
; CHECK: llgh %r2, 524286(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%ext = zext i16 %half to i64
ret i64 %ext
; CHECK: agfi %r2, 524288
; CHECK: llgh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%ext = zext i16 %half to i64
ret i64 %ext
; CHECK-LABEL: f7:
; CHECK: llgh %r2, -2(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%ext = zext i16 %half to i64
ret i64 %ext
; CHECK-LABEL: f8:
; CHECK: llgh %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%ext = zext i16 %half to i64
ret i64 %ext
; CHECK: agfi %r2, -524290
; CHECK: llgh %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%ext = zext i16 %half to i64
ret i64 %ext
; CHECK-LABEL: f4:
; CHECK: lgf %r2, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%word = load i32 *%ptr
%ext = sext i32 %word to i64
ret i64 %ext
; CHECK: agfi %r2, 524288
; CHECK: lgf %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%word = load i32 *%ptr
%ext = sext i32 %word to i64
ret i64 %ext
; CHECK-LABEL: f6:
; CHECK: lgf %r2, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%word = load i32 *%ptr
%ext = sext i32 %word to i64
ret i64 %ext
; CHECK-LABEL: f7:
; CHECK: lgf %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%word = load i32 *%ptr
%ext = sext i32 %word to i64
ret i64 %ext
; CHECK: agfi %r2, -524292
; CHECK: lgf %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%word = load i32 *%ptr
%ext = sext i32 %word to i64
ret i64 %ext
; CHECK-LABEL: f5:
; CHECK: llgf %r2, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%word = load i32 *%ptr
%ext = zext i32 %word to i64
ret i64 %ext
; CHECK: agfi %r2, 524288
; CHECK: llgf %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%word = load i32 *%ptr
%ext = zext i32 %word to i64
ret i64 %ext
; CHECK-LABEL: f7:
; CHECK: llgf %r2, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%word = load i32 *%ptr
%ext = zext i32 %word to i64
ret i64 %ext
; CHECK-LABEL: f8:
; CHECK: llgf %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%word = load i32 *%ptr
%ext = zext i32 %word to i64
ret i64 %ext
; CHECK: agfi %r2, -524292
; CHECK: llgf %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%word = load i32 *%ptr
%ext = zext i32 %word to i64
ret i64 %ext
; CHECK-LABEL: f9:
; CHECK: dsgf %r2, 524284(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%rem = srem i32 %a, %b
ret i32 %rem
; CHECK: agfi %r4, 524288
; CHECK: dsgf %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%rem = srem i32 %a, %b
ret i32 %rem
; CHECK-LABEL: f11:
; CHECK: dsgf %r2, -4(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%rem = srem i32 %a, %b
ret i32 %rem
; CHECK-LABEL: f12:
; CHECK: dsgf %r2, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%rem = srem i32 %a, %b
ret i32 %rem
; CHECK: agfi %r4, -524292
; CHECK: dsgf %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%rem = srem i32 %a, %b
ret i32 %rem
; CHECK: brasl %r14, foo@PLT
; CHECK: dsgf {{%r[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f7:
; CHECK: dl %r2, 524284(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%rem = urem i32 %a, %b
ret i32 %rem
; CHECK: agfi %r4, 524288
; CHECK: dl %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%rem = urem i32 %a, %b
ret i32 %rem
; CHECK-LABEL: f9:
; CHECK: dl %r2, -4(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%rem = urem i32 %a, %b
ret i32 %rem
; CHECK-LABEL: f10:
; CHECK: dl %r2, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%rem = urem i32 %a, %b
ret i32 %rem
; CHECK: agfi %r4, -524292
; CHECK: dl %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%rem = urem i32 %a, %b
ret i32 %rem
; CHECK: brasl %r14, foo@PLT
; CHECK: dl {{%r[0-9]+}}, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f9:
; CHECK: dsgf %r2, 524284(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%rem = srem i64 %a, %bext
; CHECK: agfi %r4, 524288
; CHECK: dsgf %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%rem = srem i64 %a, %bext
; CHECK-LABEL: f11:
; CHECK: dsgf %r2, -4(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%rem = srem i64 %a, %bext
; CHECK-LABEL: f12:
; CHECK: dsgf %r2, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%rem = srem i64 %a, %bext
; CHECK: agfi %r4, -524292
; CHECK: dsgf %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%rem = srem i64 %a, %bext
; CHECK-LABEL: f7:
; CHECK: dsg %r2, 524280(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%rem = srem i64 %a, %b
ret i64 %rem
; CHECK: agfi %r4, 524288
; CHECK: dsg %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%rem = srem i64 %a, %b
ret i64 %rem
; CHECK-LABEL: f9:
; CHECK: dsg %r2, -8(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%rem = srem i64 %a, %b
ret i64 %rem
; CHECK-LABEL: f10:
; CHECK: dsg %r2, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%rem = srem i64 %a, %b
ret i64 %rem
; CHECK: agfi %r4, -524296
; CHECK: dsg %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%rem = srem i64 %a, %b
ret i64 %rem
; CHECK: brasl %r14, foo@PLT
; CHECK: dsg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
- %ptr10 = getelementptr i64 *%ptr0, i64 20
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
+ %ptr10 = getelementptr i64, i64 *%ptr0, i64 20
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f7:
; CHECK: dlg %r2, 524280(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%rem = urem i64 %a, %b
ret i64 %rem
; CHECK: agfi %r4, 524288
; CHECK: dlg %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%rem = urem i64 %a, %b
ret i64 %rem
; CHECK-LABEL: f9:
; CHECK: dlg %r2, -8(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%rem = urem i64 %a, %b
ret i64 %rem
; CHECK-LABEL: f10:
; CHECK: dlg %r2, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%rem = urem i64 %a, %b
ret i64 %rem
; CHECK: agfi %r4, -524296
; CHECK: dlg %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%rem = urem i64 %a, %b
ret i64 %rem
; CHECK: brasl %r14, foo@PLT
; CHECK: dlg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
- %ptr10 = getelementptr i64 *%ptr0, i64 20
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
+ %ptr10 = getelementptr i64, i64 *%ptr0, i64 20
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f2:
; CHECK: l %r2, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%val = load i32 *%ptr
ret i32 %val
}
; CHECK-LABEL: f3:
; CHECK: ly %r2, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%val = load i32 *%ptr
ret i32 %val
}
; CHECK-LABEL: f4:
; CHECK: ly %r2, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%val = load i32 *%ptr
ret i32 %val
}
; CHECK: agfi %r2, 524288
; CHECK: l %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%val = load i32 *%ptr
ret i32 %val
}
; CHECK-LABEL: f6:
; CHECK: ly %r2, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%val = load i32 *%ptr
ret i32 %val
}
; CHECK-LABEL: f7:
; CHECK: ly %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%val = load i32 *%ptr
ret i32 %val
}
; CHECK: agfi %r2, -524292
; CHECK: l %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%val = load i32 *%ptr
ret i32 %val
}
; CHECK-LABEL: f2:
; CHECK: lg %r2, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%val = load i64 *%ptr
ret i64 %val
}
; CHECK: agfi %r2, 524288
; CHECK: lg %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%val = load i64 *%ptr
ret i64 %val
}
; CHECK-LABEL: f4:
; CHECK: lg %r2, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%val = load i64 *%ptr
ret i64 %val
}
; CHECK-LABEL: f5:
; CHECK: lg %r2, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%val = load i64 *%ptr
ret i64 %val
}
; CHECK: agfi %r2, -524296
; CHECK: lg %r2, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%val = load i64 *%ptr
ret i64 %val
}
; CHECK-LABEL: f4:
; CHECK: stc %r3, 4095(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 4095
+ %ptr = getelementptr i8, i8 *%dst, i64 4095
store i8 %val, i8 *%ptr
ret void
}
; CHECK-LABEL: f5:
; CHECK: stcy %r3, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 4096
+ %ptr = getelementptr i8, i8 *%dst, i64 4096
store i8 %val, i8 *%ptr
ret void
}
; CHECK-LABEL: f6:
; CHECK: stcy %r3, 524287(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 524287
+ %ptr = getelementptr i8, i8 *%dst, i64 524287
store i8 %val, i8 *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: stc %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 524288
+ %ptr = getelementptr i8, i8 *%dst, i64 524288
store i8 %val, i8 *%ptr
ret void
}
; CHECK-LABEL: f8:
; CHECK: stcy %r3, -1(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 -1
+ %ptr = getelementptr i8, i8 *%dst, i64 -1
store i8 %val, i8 *%ptr
ret void
}
; CHECK-LABEL: f9:
; CHECK: stcy %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 -524288
+ %ptr = getelementptr i8, i8 *%dst, i64 -524288
store i8 %val, i8 *%ptr
ret void
}
; CHECK: agfi %r2, -524289
; CHECK: stc %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i8 *%dst, i64 -524289
+ %ptr = getelementptr i8, i8 *%dst, i64 -524289
store i8 %val, i8 *%ptr
ret void
}
; CHECK-LABEL: f4:
; CHECK: sth %r3, 4094(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 2047
+ %ptr = getelementptr i16, i16 *%dst, i64 2047
store i16 %val, i16 *%ptr
ret void
}
; CHECK-LABEL: f5:
; CHECK: sthy %r3, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 2048
+ %ptr = getelementptr i16, i16 *%dst, i64 2048
store i16 %val, i16 *%ptr
ret void
}
; CHECK-LABEL: f6:
; CHECK: sthy %r3, 524286(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 262143
+ %ptr = getelementptr i16, i16 *%dst, i64 262143
store i16 %val, i16 *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: sth %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 262144
+ %ptr = getelementptr i16, i16 *%dst, i64 262144
store i16 %val, i16 *%ptr
ret void
}
; CHECK-LABEL: f8:
; CHECK: sthy %r3, -2(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 -1
+ %ptr = getelementptr i16, i16 *%dst, i64 -1
store i16 %val, i16 *%ptr
ret void
}
; CHECK-LABEL: f9:
; CHECK: sthy %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 -262144
+ %ptr = getelementptr i16, i16 *%dst, i64 -262144
store i16 %val, i16 *%ptr
ret void
}
; CHECK: agfi %r2, -524290
; CHECK: sth %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i16 *%dst, i64 -262145
+ %ptr = getelementptr i16, i16 *%dst, i64 -262145
store i16 %val, i16 *%ptr
ret void
}
; CHECK-LABEL: f3:
; CHECK: st %r3, 4092(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 1023
+ %ptr = getelementptr i32, i32 *%dst, i64 1023
store i32 %val, i32 *%ptr
ret void
}
; CHECK-LABEL: f4:
; CHECK: sty %r3, 4096(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 1024
+ %ptr = getelementptr i32, i32 *%dst, i64 1024
store i32 %val, i32 *%ptr
ret void
}
; CHECK-LABEL: f5:
; CHECK: sty %r3, 524284(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 131071
+ %ptr = getelementptr i32, i32 *%dst, i64 131071
store i32 %val, i32 *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: st %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 131072
+ %ptr = getelementptr i32, i32 *%dst, i64 131072
store i32 %val, i32 *%ptr
ret void
}
; CHECK-LABEL: f7:
; CHECK: sty %r3, -4(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 -1
+ %ptr = getelementptr i32, i32 *%dst, i64 -1
store i32 %val, i32 *%ptr
ret void
}
; CHECK-LABEL: f8:
; CHECK: sty %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 -131072
+ %ptr = getelementptr i32, i32 *%dst, i64 -131072
store i32 %val, i32 *%ptr
ret void
}
; CHECK: agfi %r2, -524292
; CHECK: st %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i32 *%dst, i64 -131073
+ %ptr = getelementptr i32, i32 *%dst, i64 -131073
store i32 %val, i32 *%ptr
ret void
}
; CHECK-LABEL: f2:
; CHECK: stg %r3, 524280(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 65535
+ %ptr = getelementptr i64, i64 *%dst, i64 65535
store i64 %val, i64 *%ptr
ret void
}
; CHECK: agfi %r2, 524288
; CHECK: stg %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 65536
+ %ptr = getelementptr i64, i64 *%dst, i64 65536
store i64 %val, i64 *%ptr
ret void
}
; CHECK-LABEL: f4:
; CHECK: stg %r3, -8(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 -1
+ %ptr = getelementptr i64, i64 *%dst, i64 -1
store i64 %val, i64 *%ptr
ret void
}
; CHECK-LABEL: f5:
; CHECK: stg %r3, -524288(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 -65536
+ %ptr = getelementptr i64, i64 *%dst, i64 -65536
store i64 %val, i64 *%ptr
ret void
}
; CHECK: agfi %r2, -524296
; CHECK: stg %r3, 0(%r2)
; CHECK: br %r14
- %ptr = getelementptr i64 *%dst, i64 -65537
+ %ptr = getelementptr i64, i64 *%dst, i64 -65537
store i64 %val, i64 *%ptr
ret void
}
; CHECK: srl [[VAL]], 1
; CHECK: stc [[VAL]], 1([[REG]])
; CHECK: br %r14
- %ptr1 = getelementptr [2 x i8] *@garray8, i64 0, i64 0
- %ptr2 = getelementptr [2 x i8] *@garray8, i64 0, i64 1
+ %ptr1 = getelementptr [2 x i8], [2 x i8] *@garray8, i64 0, i64 0
+ %ptr2 = getelementptr [2 x i8], [2 x i8] *@garray8, i64 0, i64 1
%val = load i8 *%ptr1
%shr = lshr i8 %val, 1
store i8 %shr, i8 *%ptr2
; CHECK: srl [[VAL]], 1
; CHECK: sthrl [[VAL]], garray16+2
; CHECK: br %r14
- %ptr1 = getelementptr [2 x i16] *@garray16, i64 0, i64 0
- %ptr2 = getelementptr [2 x i16] *@garray16, i64 0, i64 1
+ %ptr1 = getelementptr [2 x i16], [2 x i16] *@garray16, i64 0, i64 0
+ %ptr2 = getelementptr [2 x i16], [2 x i16] *@garray16, i64 0, i64 1
%val = load i16 *%ptr1
%shr = lshr i16 %val, 1
store i16 %shr, i16 *%ptr2
; CHECK-LABEL: f2:
; CHECK: mh %r2, 4094(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2047
+ %ptr = getelementptr i16, i16 *%src, i64 2047
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK-LABEL: f3:
; CHECK: mhy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2048
+ %ptr = getelementptr i16, i16 *%src, i64 2048
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK-LABEL: f4:
; CHECK: mhy %r2, 524286(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK: agfi %r3, 524288
; CHECK: mh %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK-LABEL: f6:
; CHECK: mhy %r2, -2(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK-LABEL: f7:
; CHECK: mhy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK: agfi %r3, -524290
; CHECK: mh %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = mul i32 %lhs, %rhs
; CHECK-LABEL: f3:
; CHECK: ms %r2, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK-LABEL: f4:
; CHECK: msy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK-LABEL: f5:
; CHECK: msy %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK: agfi %r3, 524288
; CHECK: ms %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK-LABEL: f7:
; CHECK: msy %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK-LABEL: f8:
; CHECK: msy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK: agfi %r3, -524292
; CHECK: ms %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%mul = mul i32 %a, %b
ret i32 %mul
; CHECK: brasl %r14, foo@PLT
; CHECK: ms %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: msgf %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%mul = mul i64 %a, %bext
; CHECK: agfi %r3, 524288
; CHECK: msgf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%mul = mul i64 %a, %bext
; CHECK-LABEL: f5:
; CHECK: msgf %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%mul = mul i64 %a, %bext
; CHECK-LABEL: f6:
; CHECK: msgf %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%mul = mul i64 %a, %bext
; CHECK: agfi %r3, -524292
; CHECK: msgf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%mul = mul i64 %a, %bext
; CHECK: brasl %r14, foo@PLT
; CHECK: msgf %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: msg %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%mul = mul i64 %a, %b
ret i64 %mul
; CHECK: agfi %r3, 524288
; CHECK: msg %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%mul = mul i64 %a, %b
ret i64 %mul
; CHECK-LABEL: f5:
; CHECK: msg %r2, -8(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%mul = mul i64 %a, %b
ret i64 %mul
; CHECK-LABEL: f6:
; CHECK: msg %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%mul = mul i64 %a, %b
ret i64 %mul
; CHECK: agfi %r3, -524296
; CHECK: msg %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%mul = mul i64 %a, %b
ret i64 %mul
; CHECK: brasl %r14, foo@PLT
; CHECK: msg %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f7:
; CHECK: mlg %r2, 524280(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%ax = zext i64 %a to i128
%bx = zext i64 %b to i128
; CHECK: agfi %r4, 524288
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%ax = zext i64 %a to i128
%bx = zext i64 %b to i128
; CHECK-LABEL: f9:
; CHECK: mlg %r2, -8(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%ax = zext i64 %a to i128
%bx = zext i64 %b to i128
; CHECK-LABEL: f10:
; CHECK: mlg %r2, -524288(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%ax = zext i64 %a to i128
%bx = zext i64 %b to i128
; CHECK: agfi %r4, -524296
; CHECK: mlg %r2, 0(%r4)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%ax = zext i64 %a to i128
%bx = zext i64 %b to i128
; CHECK: brasl %r14, foo@PLT
; CHECK: mlg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f3:
; CHECK: s %r2, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK-LABEL: f4:
; CHECK: sy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK-LABEL: f5:
; CHECK: sy %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK: agfi %r3, 524288
; CHECK: s %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK-LABEL: f7:
; CHECK: sy %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK-LABEL: f8:
; CHECK: sy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK: agfi %r3, -524292
; CHECK: s %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%sub = sub i32 %a, %b
ret i32 %sub
; CHECK: brasl %r14, foo@PLT
; CHECK: s %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: sgf %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK: agfi %r3, 524288
; CHECK: sgf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK-LABEL: f5:
; CHECK: sgf %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK-LABEL: f6:
; CHECK: sgf %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK: agfi %r3, -524292
; CHECK: sgf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%bext = sext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK: brasl %r14, foo@PLT
; CHECK: sgf %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: slgf %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK: agfi %r3, 524288
; CHECK: slgf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK-LABEL: f5:
; CHECK: slgf %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK-LABEL: f6:
; CHECK: slgf %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK: agfi %r3, -524292
; CHECK: slgf %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%bext = zext i32 %b to i64
%sub = sub i64 %a, %bext
; CHECK: brasl %r14, foo@PLT
; CHECK: slgf %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: sg %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%sub = sub i64 %a, %b
ret i64 %sub
; CHECK: agfi %r3, 524288
; CHECK: sg %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%sub = sub i64 %a, %b
ret i64 %sub
; CHECK-LABEL: f5:
; CHECK: sg %r2, -8(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%sub = sub i64 %a, %b
ret i64 %sub
; CHECK-LABEL: f6:
; CHECK: sg %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%sub = sub i64 %a, %b
ret i64 %sub
; CHECK: agfi %r3, -524296
; CHECK: sg %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%sub = sub i64 %a, %b
ret i64 %sub
; CHECK: brasl %r14, foo@PLT
; CHECK: sg %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK: slbg {{%r[0-5]}}, 0(%r2)
; CHECK: br %r14
%bptr = inttoptr i64 %addr to i128 *
- %aptr = getelementptr i128 *%bptr, i64 -8
+ %aptr = getelementptr i128, i128 *%bptr, i64 -8
%a = load i128 *%aptr
%b = load i128 *%bptr
%sub = sub i128 %a, %b
; CHECK: br %r14
%addr = add i64 %base, 524272
%bptr = inttoptr i64 %addr to i128 *
- %aptr = getelementptr i128 *%bptr, i64 -8
+ %aptr = getelementptr i128, i128 *%bptr, i64 -8
%a = load i128 *%aptr
%b = load i128 *%bptr
%sub = sub i128 %a, %b
; CHECK: br %r14
%addr = add i64 %base, 524280
%bptr = inttoptr i64 %addr to i128 *
- %aptr = getelementptr i128 *%bptr, i64 -8
+ %aptr = getelementptr i128, i128 *%bptr, i64 -8
%a = load i128 *%aptr
%b = load i128 *%bptr
%sub = sub i128 %a, %b
; CHECK: br %r14
%addr = add i64 %base, 524288
%bptr = inttoptr i64 %addr to i128 *
- %aptr = getelementptr i128 *%bptr, i64 -8
+ %aptr = getelementptr i128, i128 *%bptr, i64 -8
%a = load i128 *%aptr
%b = load i128 *%bptr
%sub = sub i128 %a, %b
; CHECK: br %r14
%addr = add i64 %base, -524288
%bptr = inttoptr i64 %addr to i128 *
- %aptr = getelementptr i128 *%bptr, i64 -8
+ %aptr = getelementptr i128, i128 *%bptr, i64 -8
%a = load i128 *%aptr
%b = load i128 *%bptr
%sub = sub i128 %a, %b
; CHECK: br %r14
%addr = add i64 %base, -524296
%bptr = inttoptr i64 %addr to i128 *
- %aptr = getelementptr i128 *%bptr, i64 -8
+ %aptr = getelementptr i128, i128 *%bptr, i64 -8
%a = load i128 *%aptr
%b = load i128 *%bptr
%sub = sub i128 %a, %b
; CHECK: slg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
; CHECK: slbg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i128 *%ptr0, i128 2
- %ptr2 = getelementptr i128 *%ptr0, i128 4
- %ptr3 = getelementptr i128 *%ptr0, i128 6
- %ptr4 = getelementptr i128 *%ptr0, i128 8
+ %ptr1 = getelementptr i128, i128 *%ptr0, i128 2
+ %ptr2 = getelementptr i128, i128 *%ptr0, i128 4
+ %ptr3 = getelementptr i128, i128 *%ptr0, i128 6
+ %ptr4 = getelementptr i128, i128 *%ptr0, i128 8
%val0 = load i128 *%ptr0
%val1 = load i128 *%ptr1
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i64 131071
+ %ptr = getelementptr i32, i32 *%bsrc, i64 131071
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%sub = sub i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i64 131072
+ %ptr = getelementptr i32, i32 *%bsrc, i64 131072
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%sub = sub i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i128 -1
+ %ptr = getelementptr i32, i32 *%bsrc, i128 -1
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%sub = sub i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i128 -131072
+ %ptr = getelementptr i32, i32 *%bsrc, i128 -131072
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%sub = sub i128 %xor, %bext
; CHECK: br %r14
%a = load i128 *%aptr
%xor = xor i128 %a, 127
- %ptr = getelementptr i32 *%bsrc, i128 -131073
+ %ptr = getelementptr i32, i32 *%bsrc, i128 -131073
%b = load i32 *%ptr
%bext = zext i32 %b to i128
%sub = sub i128 %xor, %bext
; CHECK-LABEL: f2:
; CHECK: sh %r2, 4094(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2047
+ %ptr = getelementptr i16, i16 *%src, i64 2047
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
; CHECK-LABEL: f3:
; CHECK: shy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 2048
+ %ptr = getelementptr i16, i16 *%src, i64 2048
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
; CHECK-LABEL: f4:
; CHECK: shy %r2, 524286(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262143
+ %ptr = getelementptr i16, i16 *%src, i64 262143
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
; CHECK: agfi %r3, 524288
; CHECK: sh %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 262144
+ %ptr = getelementptr i16, i16 *%src, i64 262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
; CHECK-LABEL: f6:
; CHECK: shy %r2, -2(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -1
+ %ptr = getelementptr i16, i16 *%src, i64 -1
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
; CHECK-LABEL: f7:
; CHECK: shy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262144
+ %ptr = getelementptr i16, i16 *%src, i64 -262144
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
; CHECK: agfi %r3, -524290
; CHECK: sh %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i16 *%src, i64 -262145
+ %ptr = getelementptr i16, i16 *%src, i64 -262145
%half = load i16 *%ptr
%rhs = sext i16 %half to i32
%res = sub i32 %lhs, %rhs
loop:
%index = phi i64 [ 0, %entry ], [ %next, %loop ]
- %ptr = getelementptr i32 *%dest, i64 %index
+ %ptr = getelementptr i32, i32 *%dest, i64 %index
store i32 %a, i32 *%ptr
%next = add i64 %index, 1
%cmp = icmp ne i64 %next, 100
; CHECK: mvc 512(256,[[NEWDEST]]), 0([[NEWSRC]])
; CHECK: mvc 768(255,[[NEWDEST]]), 256([[NEWSRC]])
; CHECK: br %r14
- %dest = getelementptr i8 *%srcbase, i64 4000
- %src = getelementptr i8* %destbase, i64 3500
+ %dest = getelementptr i8, i8 *%srcbase, i64 4000
+ %src = getelementptr i8, i8* %destbase, i64 3500
call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1,
i1 false)
ret void
; CHECK: brasl %r14, foo@PLT
; CHECK: br %r14
%arr = alloca [6000 x i8]
- %dest = getelementptr [6000 x i8] *%arr, i64 0, i64 3900
- %src = getelementptr [6000 x i8] *%arr, i64 0, i64 1924
+ %dest = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 3900
+ %src = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 1924
call void @foo(i8 *%dest, i8 *%src)
call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1,
i1 false)
; CHECK: brasl %r14, foo@PLT
; CHECK: br %r14
%arr = alloca [6000 x i8]
- %dest = getelementptr [6000 x i8] *%arr, i64 0, i64 24
- %src = getelementptr [6000 x i8] *%arr, i64 0, i64 3650
+ %dest = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 24
+ %src = getelementptr [6000 x i8], [6000 x i8] *%arr, i64 0, i64 3650
call void @foo(i8 *%dest, i8 *%src)
call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1279, i32 1,
i1 false)
; CHECK: brasl %r14, foo@PLT
; CHECK: br %r14
%arr = alloca [3200 x i8]
- %dest = getelementptr [3200 x i8] *%arr, i64 0, i64 1600
- %src = getelementptr [3200 x i8] *%arr, i64 0, i64 0
+ %dest = getelementptr [3200 x i8], [3200 x i8] *%arr, i64 0, i64 1600
+ %src = getelementptr [3200 x i8], [3200 x i8] *%arr, i64 0, i64 0
call void @foo(i8 *%dest, i8 *%src)
call void @llvm.memcpy.p0i8.p0i8.i64(i8 *%dest, i8 *%src, i64 1537, i32 1,
i1 false)
; CHECK-LABEL: f1:
; CHECK: mvc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
store i8 %val, i8 *%ptr2
ret void
; CHECK-LABEL: f2:
; CHECK: mvc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%ext = zext i8 %val to i32
%trunc = trunc i32 %ext to i8
; CHECK-LABEL: f3:
; CHECK: mvc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%ext = zext i8 %val to i64
%trunc = trunc i64 %ext to i8
; CHECK-LABEL: f4:
; CHECK: mvc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%ext = sext i8 %val to i32
%trunc = trunc i32 %ext to i8
; CHECK-LABEL: f5:
; CHECK: mvc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%ext = sext i8 %val to i64
%trunc = trunc i64 %ext to i8
; CHECK-LABEL: f6:
; CHECK: mvc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
store i16 %val, i16 *%ptr2
ret void
; CHECK-LABEL: f7:
; CHECK: mvc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%ext = zext i16 %val to i32
%trunc = trunc i32 %ext to i16
; CHECK-LABEL: f8:
; CHECK: mvc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%ext = zext i16 %val to i64
%trunc = trunc i64 %ext to i16
; CHECK-LABEL: f9:
; CHECK: mvc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%ext = sext i16 %val to i32
%trunc = trunc i32 %ext to i16
; CHECK-LABEL: f10:
; CHECK: mvc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%ext = sext i16 %val to i64
%trunc = trunc i64 %ext to i16
; CHECK-LABEL: f11:
; CHECK: mvc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
store i32 %val, i32 *%ptr2
ret void
; CHECK-LABEL: f12:
; CHECK: mvc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
%ext = zext i32 %val to i64
%trunc = trunc i64 %ext to i32
; CHECK-LABEL: f13:
; CHECK: mvc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
%ext = sext i32 %val to i64
%trunc = trunc i64 %ext to i32
; CHECK-LABEL: f14:
; CHECK: mvc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
store i64 %val, i64 *%ptr2
ret void
; CHECK-LABEL: f15:
; CHECK: mvc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr float *%ptr1, i64 1
+ %ptr2 = getelementptr float, float *%ptr1, i64 1
%val = load float *%ptr1
store float %val, float *%ptr2
ret void
; CHECK-LABEL: f16:
; CHECK: mvc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr double *%ptr1, i64 1
+ %ptr2 = getelementptr double, double *%ptr1, i64 1
%val = load double *%ptr1
store double %val, double *%ptr2
ret void
; CHECK-LABEL: f17:
; CHECK: mvc 16(16,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr fp128 *%ptr1, i64 1
+ %ptr2 = getelementptr fp128, fp128 *%ptr1, i64 1
%val = load fp128 *%ptr1
store fp128 %val, fp128 *%ptr2
ret void
; CHECK-LABEL: f18:
; CHECK-NOT: mvc
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load volatile i64 *%ptr1
store i64 %val, i64 *%ptr2
ret void
; CHECK-LABEL: f19:
; CHECK-NOT: mvc
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
store volatile i64 %val, i64 *%ptr2
ret void
; CHECK-LABEL: f30:
; CHECK: mvc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1, align 1
store i64 %val, i64 *%ptr2, align 1
ret void
; CHECK-LABEL: f3:
; CHECK: o %r2, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK-LABEL: f4:
; CHECK: oy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK-LABEL: f5:
; CHECK: oy %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK: agfi %r3, 524288
; CHECK: o %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK-LABEL: f7:
; CHECK: oy %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK-LABEL: f8:
; CHECK: oy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK: agfi %r3, -524292
; CHECK: o %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%or = or i32 %a, %b
ret i32 %or
; CHECK: brasl %r14, foo@PLT
; CHECK: o %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: og %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%or = or i64 %a, %b
ret i64 %or
; CHECK: agfi %r3, 524288
; CHECK: og %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%or = or i64 %a, %b
ret i64 %or
; CHECK-LABEL: f5:
; CHECK: og %r2, -8(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%or = or i64 %a, %b
ret i64 %or
; CHECK-LABEL: f6:
; CHECK: og %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%or = or i64 %a, %b
ret i64 %or
; CHECK: agfi %r3, -524296
; CHECK: og %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%or = or i64 %a, %b
ret i64 %or
; CHECK: brasl %r14, foo@PLT
; CHECK: og %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f5:
; CHECK: oi 4095(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK-LABEL: f6:
; CHECK: oiy 4096(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK-LABEL: f7:
; CHECK: oiy 524287(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK: agfi %r2, 524288
; CHECK: oi 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK-LABEL: f9:
; CHECK: oiy -1(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK-LABEL: f10:
; CHECK: oiy -524288(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK: agfi %r2, -524289
; CHECK: oi 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%val = load i8 *%ptr
%or = or i8 %val, 127
store i8 %or, i8 *%ptr
; CHECK-LABEL: f1:
; CHECK: oc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%old = load i8 *%ptr2
%or = or i8 %val, %old
; CHECK-LABEL: f2:
; CHECK: oc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%old = load i16 *%ptr2
%or = or i16 %val, %old
; CHECK-LABEL: f3:
; CHECK: oc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
%old = load i32 *%ptr2
%or = or i32 %old, %val
; CHECK-LABEL: f4:
; CHECK: oc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
%old = load i64 *%ptr2
%or = or i64 %old, %val
; CHECK: pfd 2, -524288({{%r2,%r3|%r3,%r2}})
; CHECK: br %r14
%add = add i64 %index, -524288
- %ptr = getelementptr i8 *%base, i64 %add
+ %ptr = getelementptr i8, i8 *%base, i64 %add
call void @llvm.prefetch(i8 *%ptr, i32 1, i32 0, i32 1)
ret void
}
; CHECK: pfd 2, 524287({{%r2,%r3|%r3,%r2}})
; CHECK: br %r14
%add = add i64 %index, 524287
- %ptr = getelementptr i8 *%base, i64 %add
+ %ptr = getelementptr i8, i8 *%base, i64 %add
call void @llvm.prefetch(i8 *%ptr, i32 1, i32 0, i32 1)
ret void
}
; CHECK: pfd 2,
; CHECK: br %r14
%add = add i64 %index, 524288
- %ptr = getelementptr i8 *%base, i64 %add
+ %ptr = getelementptr i8, i8 *%base, i64 %add
call void @llvm.prefetch(i8 *%ptr, i32 1, i32 0, i32 1)
ret void
}
; CHECK-LABEL: f8:
; CHECK: pfdrl 2, g
; CHECK: br %r14
- %ptr = getelementptr [4096 x i8] *@g, i64 0, i64 0
+ %ptr = getelementptr [4096 x i8], [4096 x i8] *@g, i64 0, i64 0
call void @llvm.prefetch(i8 *%ptr, i32 1, i32 0, i32 1)
ret void
}
; CHECK-NOT: %r15
; CHECK: lmg
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i32 2
- %ptr2 = getelementptr i32 *%ptr0, i32 4
- %ptr3 = getelementptr i32 *%ptr0, i32 6
- %ptr4 = getelementptr i32 *%ptr0, i32 8
- %ptr5 = getelementptr i32 *%ptr0, i32 10
- %ptr6 = getelementptr i32 *%ptr0, i32 12
+ %ptr1 = getelementptr i32, i32 *%ptr0, i32 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i32 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i32 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i32 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i32 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i32 12
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK: brasl %r14, foo@PLT
; CHECK: mvc [[OFFSET2]](4,{{%r[0-9]+}}), [[OFFSET1]](%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK: brasl %r14, foo@PLT
; CHECK: mvc [[OFFSET]](8,{{%r[0-9]+}}), 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK: brasl %r14, foo@PLT
; CHECK: mvc [[OFFSET2]](4,{{%r[0-9]+}}), [[OFFSET1]](%r15)
; CHECK: br %r14
- %ptr1 = getelementptr float *%ptr0, i64 2
- %ptr2 = getelementptr float *%ptr0, i64 4
- %ptr3 = getelementptr float *%ptr0, i64 6
- %ptr4 = getelementptr float *%ptr0, i64 8
- %ptr5 = getelementptr float *%ptr0, i64 10
- %ptr6 = getelementptr float *%ptr0, i64 12
- %ptr7 = getelementptr float *%ptr0, i64 14
- %ptr8 = getelementptr float *%ptr0, i64 16
- %ptr9 = getelementptr float *%ptr0, i64 18
+ %ptr1 = getelementptr float, float *%ptr0, i64 2
+ %ptr2 = getelementptr float, float *%ptr0, i64 4
+ %ptr3 = getelementptr float, float *%ptr0, i64 6
+ %ptr4 = getelementptr float, float *%ptr0, i64 8
+ %ptr5 = getelementptr float, float *%ptr0, i64 10
+ %ptr6 = getelementptr float, float *%ptr0, i64 12
+ %ptr7 = getelementptr float, float *%ptr0, i64 14
+ %ptr8 = getelementptr float, float *%ptr0, i64 16
+ %ptr9 = getelementptr float, float *%ptr0, i64 18
%val0 = load float *%ptr0
%val1 = load float *%ptr1
; CHECK: brasl %r14, foo@PLT
; CHECK: mvc [[OFFSET]](8,{{%r[0-9]+}}), 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr double *%ptr0, i64 2
- %ptr2 = getelementptr double *%ptr0, i64 4
- %ptr3 = getelementptr double *%ptr0, i64 6
- %ptr4 = getelementptr double *%ptr0, i64 8
- %ptr5 = getelementptr double *%ptr0, i64 10
- %ptr6 = getelementptr double *%ptr0, i64 12
- %ptr7 = getelementptr double *%ptr0, i64 14
- %ptr8 = getelementptr double *%ptr0, i64 16
- %ptr9 = getelementptr double *%ptr0, i64 18
+ %ptr1 = getelementptr double, double *%ptr0, i64 2
+ %ptr2 = getelementptr double, double *%ptr0, i64 4
+ %ptr3 = getelementptr double, double *%ptr0, i64 6
+ %ptr4 = getelementptr double, double *%ptr0, i64 8
+ %ptr5 = getelementptr double, double *%ptr0, i64 10
+ %ptr6 = getelementptr double, double *%ptr0, i64 12
+ %ptr7 = getelementptr double, double *%ptr0, i64 14
+ %ptr8 = getelementptr double, double *%ptr0, i64 16
+ %ptr9 = getelementptr double, double *%ptr0, i64 18
%val0 = load double *%ptr0
%val1 = load double *%ptr1
; CHECK-LABEL: f6:
; CHECK-NOT: mvc
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
%val0 = load atomic i32 *%ptr0 unordered, align 4
%val1 = load atomic i32 *%ptr1 unordered, align 4
; CHECK-LABEL: f7:
; CHECK-NOT: mvc
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
%val0 = load volatile i32 *%ptr0
%val1 = load volatile i32 *%ptr1
; CHECK: iilf [[REG:%r[0-5]]], 66051
; CHECK: st [[REG]], 0(%r2)
; CHECK: br %r14
- %off1 = getelementptr i8 *%ptr, i64 1
- %off2 = getelementptr i8 *%ptr, i64 2
- %off3 = getelementptr i8 *%ptr, i64 3
+ %off1 = getelementptr i8, i8 *%ptr, i64 1
+ %off2 = getelementptr i8, i8 *%ptr, i64 2
+ %off3 = getelementptr i8, i8 *%ptr, i64 3
store i8 0, i8 *%ptr
store i8 1, i8 *%off1
store i8 2, i8 *%off2
; CHECK-LABEL: f3:
; CHECK: x %r2, 4092(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1023
+ %ptr = getelementptr i32, i32 *%src, i64 1023
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK-LABEL: f4:
; CHECK: xy %r2, 4096(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 1024
+ %ptr = getelementptr i32, i32 *%src, i64 1024
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK-LABEL: f5:
; CHECK: xy %r2, 524284(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131071
+ %ptr = getelementptr i32, i32 *%src, i64 131071
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK: agfi %r3, 524288
; CHECK: x %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 131072
+ %ptr = getelementptr i32, i32 *%src, i64 131072
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK-LABEL: f7:
; CHECK: xy %r2, -4(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -1
+ %ptr = getelementptr i32, i32 *%src, i64 -1
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK-LABEL: f8:
; CHECK: xy %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131072
+ %ptr = getelementptr i32, i32 *%src, i64 -131072
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK: agfi %r3, -524292
; CHECK: x %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i32 *%src, i64 -131073
+ %ptr = getelementptr i32, i32 *%src, i64 -131073
%b = load i32 *%ptr
%xor = xor i32 %a, %b
ret i32 %xor
; CHECK: brasl %r14, foo@PLT
; CHECK: x %r2, 16{{[04]}}(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i32 *%ptr0, i64 2
- %ptr2 = getelementptr i32 *%ptr0, i64 4
- %ptr3 = getelementptr i32 *%ptr0, i64 6
- %ptr4 = getelementptr i32 *%ptr0, i64 8
- %ptr5 = getelementptr i32 *%ptr0, i64 10
- %ptr6 = getelementptr i32 *%ptr0, i64 12
- %ptr7 = getelementptr i32 *%ptr0, i64 14
- %ptr8 = getelementptr i32 *%ptr0, i64 16
- %ptr9 = getelementptr i32 *%ptr0, i64 18
+ %ptr1 = getelementptr i32, i32 *%ptr0, i64 2
+ %ptr2 = getelementptr i32, i32 *%ptr0, i64 4
+ %ptr3 = getelementptr i32, i32 *%ptr0, i64 6
+ %ptr4 = getelementptr i32, i32 *%ptr0, i64 8
+ %ptr5 = getelementptr i32, i32 *%ptr0, i64 10
+ %ptr6 = getelementptr i32, i32 *%ptr0, i64 12
+ %ptr7 = getelementptr i32, i32 *%ptr0, i64 14
+ %ptr8 = getelementptr i32, i32 *%ptr0, i64 16
+ %ptr9 = getelementptr i32, i32 *%ptr0, i64 18
%val0 = load i32 *%ptr0
%val1 = load i32 *%ptr1
; CHECK-LABEL: f3:
; CHECK: xg %r2, 524280(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65535
+ %ptr = getelementptr i64, i64 *%src, i64 65535
%b = load i64 *%ptr
%xor = xor i64 %a, %b
ret i64 %xor
; CHECK: agfi %r3, 524288
; CHECK: xg %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 65536
+ %ptr = getelementptr i64, i64 *%src, i64 65536
%b = load i64 *%ptr
%xor = xor i64 %a, %b
ret i64 %xor
; CHECK-LABEL: f5:
; CHECK: xg %r2, -8(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -1
+ %ptr = getelementptr i64, i64 *%src, i64 -1
%b = load i64 *%ptr
%xor = xor i64 %a, %b
ret i64 %xor
; CHECK-LABEL: f6:
; CHECK: xg %r2, -524288(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65536
+ %ptr = getelementptr i64, i64 *%src, i64 -65536
%b = load i64 *%ptr
%xor = xor i64 %a, %b
ret i64 %xor
; CHECK: agfi %r3, -524296
; CHECK: xg %r2, 0(%r3)
; CHECK: br %r14
- %ptr = getelementptr i64 *%src, i64 -65537
+ %ptr = getelementptr i64, i64 *%src, i64 -65537
%b = load i64 *%ptr
%xor = xor i64 %a, %b
ret i64 %xor
; CHECK: brasl %r14, foo@PLT
; CHECK: xg %r2, 160(%r15)
; CHECK: br %r14
- %ptr1 = getelementptr i64 *%ptr0, i64 2
- %ptr2 = getelementptr i64 *%ptr0, i64 4
- %ptr3 = getelementptr i64 *%ptr0, i64 6
- %ptr4 = getelementptr i64 *%ptr0, i64 8
- %ptr5 = getelementptr i64 *%ptr0, i64 10
- %ptr6 = getelementptr i64 *%ptr0, i64 12
- %ptr7 = getelementptr i64 *%ptr0, i64 14
- %ptr8 = getelementptr i64 *%ptr0, i64 16
- %ptr9 = getelementptr i64 *%ptr0, i64 18
+ %ptr1 = getelementptr i64, i64 *%ptr0, i64 2
+ %ptr2 = getelementptr i64, i64 *%ptr0, i64 4
+ %ptr3 = getelementptr i64, i64 *%ptr0, i64 6
+ %ptr4 = getelementptr i64, i64 *%ptr0, i64 8
+ %ptr5 = getelementptr i64, i64 *%ptr0, i64 10
+ %ptr6 = getelementptr i64, i64 *%ptr0, i64 12
+ %ptr7 = getelementptr i64, i64 *%ptr0, i64 14
+ %ptr8 = getelementptr i64, i64 *%ptr0, i64 16
+ %ptr9 = getelementptr i64, i64 *%ptr0, i64 18
%val0 = load i64 *%ptr0
%val1 = load i64 *%ptr1
; CHECK-LABEL: f5:
; CHECK: xi 4095(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4095
+ %ptr = getelementptr i8, i8 *%src, i64 4095
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK-LABEL: f6:
; CHECK: xiy 4096(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 4096
+ %ptr = getelementptr i8, i8 *%src, i64 4096
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK-LABEL: f7:
; CHECK: xiy 524287(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524287
+ %ptr = getelementptr i8, i8 *%src, i64 524287
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK: agfi %r2, 524288
; CHECK: xi 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 524288
+ %ptr = getelementptr i8, i8 *%src, i64 524288
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK-LABEL: f9:
; CHECK: xiy -1(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -1
+ %ptr = getelementptr i8, i8 *%src, i64 -1
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK-LABEL: f10:
; CHECK: xiy -524288(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524288
+ %ptr = getelementptr i8, i8 *%src, i64 -524288
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK: agfi %r2, -524289
; CHECK: xi 0(%r2), 127
; CHECK: br %r14
- %ptr = getelementptr i8 *%src, i64 -524289
+ %ptr = getelementptr i8, i8 *%src, i64 -524289
%val = load i8 *%ptr
%xor = xor i8 %val, 127
store i8 %xor, i8 *%ptr
; CHECK-LABEL: f1:
; CHECK: xc 1(1,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i8 *%ptr1, i64 1
+ %ptr2 = getelementptr i8, i8 *%ptr1, i64 1
%val = load i8 *%ptr1
%old = load i8 *%ptr2
%xor = xor i8 %val, %old
; CHECK-LABEL: f2:
; CHECK: xc 2(2,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i16 *%ptr1, i64 1
+ %ptr2 = getelementptr i16, i16 *%ptr1, i64 1
%val = load i16 *%ptr1
%old = load i16 *%ptr2
%xor = xor i16 %val, %old
; CHECK-LABEL: f3:
; CHECK: xc 4(4,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i32 *%ptr1, i64 1
+ %ptr2 = getelementptr i32, i32 *%ptr1, i64 1
%val = load i32 *%ptr1
%old = load i32 *%ptr2
%xor = xor i32 %old, %val
; CHECK-LABEL: f4:
; CHECK: xc 8(8,%r2), 0(%r2)
; CHECK: br %r14
- %ptr2 = getelementptr i64 *%ptr1, i64 1
+ %ptr2 = getelementptr i64, i64 *%ptr1, i64 1
%val = load i64 *%ptr1
%old = load i64 *%ptr2
%xor = xor i64 %old, %val
define void @BF_encrypt(i32* nocapture %data, %struct.BF_KEY* nocapture %key, i32 %encrypt) nounwind {
entry:
- %0 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 0; <i32*> [#uses=2]
+ %0 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 0; <i32*> [#uses=2]
%1 = load i32* %data, align 4 ; <i32> [#uses=2]
%2 = load i32* undef, align 4 ; <i32> [#uses=2]
br i1 undef, label %bb1, label %bb
%4 = xor i32 %3, %1 ; <i32> [#uses=4]
%5 = load i32* null, align 4 ; <i32> [#uses=1]
%6 = lshr i32 %4, 24 ; <i32> [#uses=1]
- %7 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %6; <i32*> [#uses=1]
+ %7 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %6; <i32*> [#uses=1]
%8 = load i32* %7, align 4 ; <i32> [#uses=1]
%9 = lshr i32 %4, 16 ; <i32> [#uses=1]
%10 = or i32 %9, 256 ; <i32> [#uses=1]
%11 = and i32 %10, 511 ; <i32> [#uses=1]
- %12 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %11; <i32*> [#uses=1]
+ %12 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %11; <i32*> [#uses=1]
%13 = load i32* %12, align 4 ; <i32> [#uses=1]
%14 = add i32 %13, %8 ; <i32> [#uses=1]
- %15 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 undef; <i32*> [#uses=1]
+ %15 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 undef; <i32*> [#uses=1]
%16 = load i32* %15, align 4 ; <i32> [#uses=1]
%17 = xor i32 %14, %16 ; <i32> [#uses=1]
%18 = or i32 %4, 768 ; <i32> [#uses=1]
%19 = and i32 %18, 1023 ; <i32> [#uses=1]
- %20 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %19; <i32*> [#uses=1]
+ %20 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %19; <i32*> [#uses=1]
%21 = load i32* %20, align 4 ; <i32> [#uses=1]
%22 = add i32 %17, %21 ; <i32> [#uses=1]
%23 = xor i32 %5, %2 ; <i32> [#uses=1]
%24 = xor i32 %23, %22 ; <i32> [#uses=5]
- %25 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 2; <i32*> [#uses=1]
+ %25 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 2; <i32*> [#uses=1]
%26 = load i32* %25, align 4 ; <i32> [#uses=1]
%27 = lshr i32 %24, 24 ; <i32> [#uses=1]
- %28 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %27; <i32*> [#uses=1]
+ %28 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %27; <i32*> [#uses=1]
%29 = load i32* %28, align 4 ; <i32> [#uses=1]
%30 = lshr i32 %24, 16 ; <i32> [#uses=1]
%31 = or i32 %30, 256 ; <i32> [#uses=1]
%32 = and i32 %31, 511 ; <i32> [#uses=1]
- %33 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %32; <i32*> [#uses=1]
+ %33 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %32; <i32*> [#uses=1]
%34 = load i32* %33, align 4 ; <i32> [#uses=1]
%35 = add i32 %34, %29 ; <i32> [#uses=1]
%36 = lshr i32 %24, 8 ; <i32> [#uses=1]
%37 = or i32 %36, 512 ; <i32> [#uses=1]
%38 = and i32 %37, 767 ; <i32> [#uses=1]
- %39 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %38; <i32*> [#uses=1]
+ %39 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %38; <i32*> [#uses=1]
%40 = load i32* %39, align 4 ; <i32> [#uses=1]
%41 = xor i32 %35, %40 ; <i32> [#uses=1]
%42 = or i32 %24, 768 ; <i32> [#uses=1]
%43 = and i32 %42, 1023 ; <i32> [#uses=1]
- %44 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %43; <i32*> [#uses=1]
+ %44 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %43; <i32*> [#uses=1]
%45 = load i32* %44, align 4 ; <i32> [#uses=1]
%46 = add i32 %41, %45 ; <i32> [#uses=1]
%47 = xor i32 %26, %4 ; <i32> [#uses=1]
%48 = xor i32 %47, %46 ; <i32> [#uses=5]
- %49 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 3; <i32*> [#uses=1]
+ %49 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 3; <i32*> [#uses=1]
%50 = load i32* %49, align 4 ; <i32> [#uses=1]
%51 = lshr i32 %48, 24 ; <i32> [#uses=1]
- %52 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %51; <i32*> [#uses=1]
+ %52 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %51; <i32*> [#uses=1]
%53 = load i32* %52, align 4 ; <i32> [#uses=1]
%54 = lshr i32 %48, 16 ; <i32> [#uses=1]
%55 = or i32 %54, 256 ; <i32> [#uses=1]
%56 = and i32 %55, 511 ; <i32> [#uses=1]
- %57 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %56; <i32*> [#uses=1]
+ %57 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %56; <i32*> [#uses=1]
%58 = load i32* %57, align 4 ; <i32> [#uses=1]
%59 = add i32 %58, %53 ; <i32> [#uses=1]
%60 = lshr i32 %48, 8 ; <i32> [#uses=1]
%61 = or i32 %60, 512 ; <i32> [#uses=1]
%62 = and i32 %61, 767 ; <i32> [#uses=1]
- %63 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %62; <i32*> [#uses=1]
+ %63 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %62; <i32*> [#uses=1]
%64 = load i32* %63, align 4 ; <i32> [#uses=1]
%65 = xor i32 %59, %64 ; <i32> [#uses=1]
%66 = or i32 %48, 768 ; <i32> [#uses=1]
%67 = and i32 %66, 1023 ; <i32> [#uses=1]
- %68 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %67; <i32*> [#uses=1]
+ %68 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %67; <i32*> [#uses=1]
%69 = load i32* %68, align 4 ; <i32> [#uses=1]
%70 = add i32 %65, %69 ; <i32> [#uses=1]
%71 = xor i32 %50, %24 ; <i32> [#uses=1]
%72 = xor i32 %71, %70 ; <i32> [#uses=5]
%73 = load i32* null, align 4 ; <i32> [#uses=1]
%74 = lshr i32 %72, 24 ; <i32> [#uses=1]
- %75 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %74; <i32*> [#uses=1]
+ %75 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %74; <i32*> [#uses=1]
%76 = load i32* %75, align 4 ; <i32> [#uses=1]
%77 = lshr i32 %72, 16 ; <i32> [#uses=1]
%78 = or i32 %77, 256 ; <i32> [#uses=1]
%79 = and i32 %78, 511 ; <i32> [#uses=1]
- %80 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %79; <i32*> [#uses=1]
+ %80 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %79; <i32*> [#uses=1]
%81 = load i32* %80, align 4 ; <i32> [#uses=1]
%82 = add i32 %81, %76 ; <i32> [#uses=1]
%83 = lshr i32 %72, 8 ; <i32> [#uses=1]
%84 = or i32 %83, 512 ; <i32> [#uses=1]
%85 = and i32 %84, 767 ; <i32> [#uses=1]
- %86 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %85; <i32*> [#uses=1]
+ %86 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %85; <i32*> [#uses=1]
%87 = load i32* %86, align 4 ; <i32> [#uses=1]
%88 = xor i32 %82, %87 ; <i32> [#uses=1]
%89 = or i32 %72, 768 ; <i32> [#uses=1]
%90 = and i32 %89, 1023 ; <i32> [#uses=1]
- %91 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %90; <i32*> [#uses=1]
+ %91 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %90; <i32*> [#uses=1]
%92 = load i32* %91, align 4 ; <i32> [#uses=1]
%93 = add i32 %88, %92 ; <i32> [#uses=1]
%94 = xor i32 %73, %48 ; <i32> [#uses=1]
%95 = xor i32 %94, %93 ; <i32> [#uses=5]
%96 = load i32* undef, align 4 ; <i32> [#uses=1]
%97 = lshr i32 %95, 24 ; <i32> [#uses=1]
- %98 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %97; <i32*> [#uses=1]
+ %98 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %97; <i32*> [#uses=1]
%99 = load i32* %98, align 4 ; <i32> [#uses=1]
%100 = lshr i32 %95, 16 ; <i32> [#uses=1]
%101 = or i32 %100, 256 ; <i32> [#uses=1]
%102 = and i32 %101, 511 ; <i32> [#uses=1]
- %103 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %102; <i32*> [#uses=1]
+ %103 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %102; <i32*> [#uses=1]
%104 = load i32* %103, align 4 ; <i32> [#uses=1]
%105 = add i32 %104, %99 ; <i32> [#uses=1]
%106 = lshr i32 %95, 8 ; <i32> [#uses=1]
%107 = or i32 %106, 512 ; <i32> [#uses=1]
%108 = and i32 %107, 767 ; <i32> [#uses=1]
- %109 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %108; <i32*> [#uses=1]
+ %109 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %108; <i32*> [#uses=1]
%110 = load i32* %109, align 4 ; <i32> [#uses=1]
%111 = xor i32 %105, %110 ; <i32> [#uses=1]
%112 = or i32 %95, 768 ; <i32> [#uses=1]
%113 = and i32 %112, 1023 ; <i32> [#uses=1]
- %114 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %113; <i32*> [#uses=1]
+ %114 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %113; <i32*> [#uses=1]
%115 = load i32* %114, align 4 ; <i32> [#uses=1]
%116 = add i32 %111, %115 ; <i32> [#uses=1]
%117 = xor i32 %96, %72 ; <i32> [#uses=1]
%118 = xor i32 %117, %116 ; <i32> [#uses=5]
- %119 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 6; <i32*> [#uses=1]
+ %119 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 6; <i32*> [#uses=1]
%120 = load i32* %119, align 4 ; <i32> [#uses=1]
%121 = lshr i32 %118, 24 ; <i32> [#uses=1]
- %122 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %121; <i32*> [#uses=1]
+ %122 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %121; <i32*> [#uses=1]
%123 = load i32* %122, align 4 ; <i32> [#uses=1]
%124 = lshr i32 %118, 16 ; <i32> [#uses=1]
%125 = or i32 %124, 256 ; <i32> [#uses=1]
%126 = and i32 %125, 511 ; <i32> [#uses=1]
- %127 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %126; <i32*> [#uses=1]
+ %127 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %126; <i32*> [#uses=1]
%128 = load i32* %127, align 4 ; <i32> [#uses=1]
%129 = add i32 %128, %123 ; <i32> [#uses=1]
%130 = lshr i32 %118, 8 ; <i32> [#uses=1]
%131 = or i32 %130, 512 ; <i32> [#uses=1]
%132 = and i32 %131, 767 ; <i32> [#uses=1]
- %133 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %132; <i32*> [#uses=1]
+ %133 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %132; <i32*> [#uses=1]
%134 = load i32* %133, align 4 ; <i32> [#uses=1]
%135 = xor i32 %129, %134 ; <i32> [#uses=1]
%136 = or i32 %118, 768 ; <i32> [#uses=1]
%137 = and i32 %136, 1023 ; <i32> [#uses=1]
- %138 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %137; <i32*> [#uses=1]
+ %138 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %137; <i32*> [#uses=1]
%139 = load i32* %138, align 4 ; <i32> [#uses=1]
%140 = add i32 %135, %139 ; <i32> [#uses=1]
%141 = xor i32 %120, %95 ; <i32> [#uses=1]
%142 = xor i32 %141, %140 ; <i32> [#uses=5]
- %143 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 7; <i32*> [#uses=1]
+ %143 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 7; <i32*> [#uses=1]
%144 = load i32* %143, align 4 ; <i32> [#uses=1]
%145 = lshr i32 %142, 24 ; <i32> [#uses=1]
- %146 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %145; <i32*> [#uses=1]
+ %146 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %145; <i32*> [#uses=1]
%147 = load i32* %146, align 4 ; <i32> [#uses=1]
%148 = lshr i32 %142, 16 ; <i32> [#uses=1]
%149 = or i32 %148, 256 ; <i32> [#uses=1]
%150 = and i32 %149, 511 ; <i32> [#uses=1]
- %151 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %150; <i32*> [#uses=1]
+ %151 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %150; <i32*> [#uses=1]
%152 = load i32* %151, align 4 ; <i32> [#uses=1]
%153 = add i32 %152, %147 ; <i32> [#uses=1]
%154 = lshr i32 %142, 8 ; <i32> [#uses=1]
%155 = or i32 %154, 512 ; <i32> [#uses=1]
%156 = and i32 %155, 767 ; <i32> [#uses=1]
- %157 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %156; <i32*> [#uses=1]
+ %157 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %156; <i32*> [#uses=1]
%158 = load i32* %157, align 4 ; <i32> [#uses=1]
%159 = xor i32 %153, %158 ; <i32> [#uses=1]
%160 = or i32 %142, 768 ; <i32> [#uses=1]
%161 = and i32 %160, 1023 ; <i32> [#uses=1]
- %162 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %161; <i32*> [#uses=1]
+ %162 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %161; <i32*> [#uses=1]
%163 = load i32* %162, align 4 ; <i32> [#uses=1]
%164 = add i32 %159, %163 ; <i32> [#uses=1]
%165 = xor i32 %144, %118 ; <i32> [#uses=1]
%166 = xor i32 %165, %164 ; <i32> [#uses=5]
%167 = load i32* undef, align 4 ; <i32> [#uses=1]
%168 = lshr i32 %166, 24 ; <i32> [#uses=1]
- %169 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %168; <i32*> [#uses=1]
+ %169 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %168; <i32*> [#uses=1]
%170 = load i32* %169, align 4 ; <i32> [#uses=1]
%171 = lshr i32 %166, 16 ; <i32> [#uses=1]
%172 = or i32 %171, 256 ; <i32> [#uses=1]
%173 = and i32 %172, 511 ; <i32> [#uses=1]
- %174 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %173; <i32*> [#uses=1]
+ %174 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %173; <i32*> [#uses=1]
%175 = load i32* %174, align 4 ; <i32> [#uses=1]
%176 = add i32 %175, %170 ; <i32> [#uses=1]
%177 = lshr i32 %166, 8 ; <i32> [#uses=1]
%178 = or i32 %177, 512 ; <i32> [#uses=1]
%179 = and i32 %178, 767 ; <i32> [#uses=1]
- %180 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %179; <i32*> [#uses=1]
+ %180 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %179; <i32*> [#uses=1]
%181 = load i32* %180, align 4 ; <i32> [#uses=1]
%182 = xor i32 %176, %181 ; <i32> [#uses=1]
%183 = or i32 %166, 768 ; <i32> [#uses=1]
%184 = and i32 %183, 1023 ; <i32> [#uses=1]
- %185 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %184; <i32*> [#uses=1]
+ %185 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %184; <i32*> [#uses=1]
%186 = load i32* %185, align 4 ; <i32> [#uses=1]
%187 = add i32 %182, %186 ; <i32> [#uses=1]
%188 = xor i32 %167, %142 ; <i32> [#uses=1]
%189 = xor i32 %188, %187 ; <i32> [#uses=5]
- %190 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 9; <i32*> [#uses=1]
+ %190 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 9; <i32*> [#uses=1]
%191 = load i32* %190, align 4 ; <i32> [#uses=1]
%192 = lshr i32 %189, 24 ; <i32> [#uses=1]
- %193 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %192; <i32*> [#uses=1]
+ %193 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %192; <i32*> [#uses=1]
%194 = load i32* %193, align 4 ; <i32> [#uses=1]
%195 = lshr i32 %189, 16 ; <i32> [#uses=1]
%196 = or i32 %195, 256 ; <i32> [#uses=1]
%197 = and i32 %196, 511 ; <i32> [#uses=1]
- %198 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %197; <i32*> [#uses=1]
+ %198 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %197; <i32*> [#uses=1]
%199 = load i32* %198, align 4 ; <i32> [#uses=1]
%200 = add i32 %199, %194 ; <i32> [#uses=1]
%201 = lshr i32 %189, 8 ; <i32> [#uses=1]
%202 = or i32 %201, 512 ; <i32> [#uses=1]
%203 = and i32 %202, 767 ; <i32> [#uses=1]
- %204 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %203; <i32*> [#uses=1]
+ %204 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %203; <i32*> [#uses=1]
%205 = load i32* %204, align 4 ; <i32> [#uses=1]
%206 = xor i32 %200, %205 ; <i32> [#uses=1]
%207 = or i32 %189, 768 ; <i32> [#uses=1]
%208 = and i32 %207, 1023 ; <i32> [#uses=1]
- %209 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %208; <i32*> [#uses=1]
+ %209 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %208; <i32*> [#uses=1]
%210 = load i32* %209, align 4 ; <i32> [#uses=1]
%211 = add i32 %206, %210 ; <i32> [#uses=1]
%212 = xor i32 %191, %166 ; <i32> [#uses=1]
%213 = xor i32 %212, %211 ; <i32> [#uses=5]
- %214 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 10; <i32*> [#uses=1]
+ %214 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 10; <i32*> [#uses=1]
%215 = load i32* %214, align 4 ; <i32> [#uses=1]
%216 = lshr i32 %213, 24 ; <i32> [#uses=1]
- %217 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %216; <i32*> [#uses=1]
+ %217 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %216; <i32*> [#uses=1]
%218 = load i32* %217, align 4 ; <i32> [#uses=1]
%219 = lshr i32 %213, 16 ; <i32> [#uses=1]
%220 = or i32 %219, 256 ; <i32> [#uses=1]
%221 = and i32 %220, 511 ; <i32> [#uses=1]
- %222 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %221; <i32*> [#uses=1]
+ %222 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %221; <i32*> [#uses=1]
%223 = load i32* %222, align 4 ; <i32> [#uses=1]
%224 = add i32 %223, %218 ; <i32> [#uses=1]
%225 = lshr i32 %213, 8 ; <i32> [#uses=1]
%226 = or i32 %225, 512 ; <i32> [#uses=1]
%227 = and i32 %226, 767 ; <i32> [#uses=1]
- %228 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %227; <i32*> [#uses=1]
+ %228 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %227; <i32*> [#uses=1]
%229 = load i32* %228, align 4 ; <i32> [#uses=1]
%230 = xor i32 %224, %229 ; <i32> [#uses=1]
%231 = or i32 %213, 768 ; <i32> [#uses=1]
%232 = and i32 %231, 1023 ; <i32> [#uses=1]
- %233 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %232; <i32*> [#uses=1]
+ %233 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %232; <i32*> [#uses=1]
%234 = load i32* %233, align 4 ; <i32> [#uses=1]
%235 = add i32 %230, %234 ; <i32> [#uses=1]
%236 = xor i32 %215, %189 ; <i32> [#uses=1]
%237 = xor i32 %236, %235 ; <i32> [#uses=5]
- %238 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 11; <i32*> [#uses=1]
+ %238 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 11; <i32*> [#uses=1]
%239 = load i32* %238, align 4 ; <i32> [#uses=1]
%240 = lshr i32 %237, 24 ; <i32> [#uses=1]
- %241 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %240; <i32*> [#uses=1]
+ %241 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %240; <i32*> [#uses=1]
%242 = load i32* %241, align 4 ; <i32> [#uses=1]
%243 = lshr i32 %237, 16 ; <i32> [#uses=1]
%244 = or i32 %243, 256 ; <i32> [#uses=1]
%245 = and i32 %244, 511 ; <i32> [#uses=1]
- %246 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %245; <i32*> [#uses=1]
+ %246 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %245; <i32*> [#uses=1]
%247 = load i32* %246, align 4 ; <i32> [#uses=1]
%248 = add i32 %247, %242 ; <i32> [#uses=1]
%249 = lshr i32 %237, 8 ; <i32> [#uses=1]
%250 = or i32 %249, 512 ; <i32> [#uses=1]
%251 = and i32 %250, 767 ; <i32> [#uses=1]
- %252 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %251; <i32*> [#uses=1]
+ %252 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %251; <i32*> [#uses=1]
%253 = load i32* %252, align 4 ; <i32> [#uses=1]
%254 = xor i32 %248, %253 ; <i32> [#uses=1]
%255 = or i32 %237, 768 ; <i32> [#uses=1]
%256 = and i32 %255, 1023 ; <i32> [#uses=1]
- %257 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %256; <i32*> [#uses=1]
+ %257 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %256; <i32*> [#uses=1]
%258 = load i32* %257, align 4 ; <i32> [#uses=1]
%259 = add i32 %254, %258 ; <i32> [#uses=1]
%260 = xor i32 %239, %213 ; <i32> [#uses=1]
%261 = xor i32 %260, %259 ; <i32> [#uses=5]
%262 = load i32* undef, align 4 ; <i32> [#uses=1]
%263 = lshr i32 %261, 24 ; <i32> [#uses=1]
- %264 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %263; <i32*> [#uses=1]
+ %264 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %263; <i32*> [#uses=1]
%265 = load i32* %264, align 4 ; <i32> [#uses=1]
%266 = lshr i32 %261, 16 ; <i32> [#uses=1]
%267 = or i32 %266, 256 ; <i32> [#uses=1]
%268 = and i32 %267, 511 ; <i32> [#uses=1]
- %269 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %268; <i32*> [#uses=1]
+ %269 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %268; <i32*> [#uses=1]
%270 = load i32* %269, align 4 ; <i32> [#uses=1]
%271 = add i32 %270, %265 ; <i32> [#uses=1]
%272 = lshr i32 %261, 8 ; <i32> [#uses=1]
%273 = or i32 %272, 512 ; <i32> [#uses=1]
%274 = and i32 %273, 767 ; <i32> [#uses=1]
- %275 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %274; <i32*> [#uses=1]
+ %275 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %274; <i32*> [#uses=1]
%276 = load i32* %275, align 4 ; <i32> [#uses=1]
%277 = xor i32 %271, %276 ; <i32> [#uses=1]
%278 = or i32 %261, 768 ; <i32> [#uses=1]
%279 = and i32 %278, 1023 ; <i32> [#uses=1]
- %280 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %279; <i32*> [#uses=1]
+ %280 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %279; <i32*> [#uses=1]
%281 = load i32* %280, align 4 ; <i32> [#uses=1]
%282 = add i32 %277, %281 ; <i32> [#uses=1]
%283 = xor i32 %262, %237 ; <i32> [#uses=1]
%284 = xor i32 %283, %282 ; <i32> [#uses=4]
%285 = load i32* null, align 4 ; <i32> [#uses=1]
%286 = lshr i32 %284, 24 ; <i32> [#uses=1]
- %287 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %286; <i32*> [#uses=1]
+ %287 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %286; <i32*> [#uses=1]
%288 = load i32* %287, align 4 ; <i32> [#uses=1]
%289 = lshr i32 %284, 16 ; <i32> [#uses=1]
%290 = or i32 %289, 256 ; <i32> [#uses=1]
%291 = and i32 %290, 511 ; <i32> [#uses=1]
- %292 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %291; <i32*> [#uses=1]
+ %292 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %291; <i32*> [#uses=1]
%293 = load i32* %292, align 4 ; <i32> [#uses=1]
%294 = add i32 %293, %288 ; <i32> [#uses=1]
%295 = lshr i32 %284, 8 ; <i32> [#uses=1]
%296 = or i32 %295, 512 ; <i32> [#uses=1]
%297 = and i32 %296, 767 ; <i32> [#uses=1]
- %298 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %297; <i32*> [#uses=1]
+ %298 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %297; <i32*> [#uses=1]
%299 = load i32* %298, align 4 ; <i32> [#uses=1]
%300 = xor i32 %294, %299 ; <i32> [#uses=1]
%301 = or i32 %284, 768 ; <i32> [#uses=1]
%302 = and i32 %301, 1023 ; <i32> [#uses=1]
- %303 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %302; <i32*> [#uses=1]
+ %303 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %302; <i32*> [#uses=1]
%304 = load i32* %303, align 4 ; <i32> [#uses=1]
%305 = add i32 %300, %304 ; <i32> [#uses=1]
%306 = xor i32 %285, %261 ; <i32> [#uses=1]
%307 = xor i32 %306, %305 ; <i32> [#uses=1]
- %308 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 15; <i32*> [#uses=1]
+ %308 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 15; <i32*> [#uses=1]
%309 = load i32* %308, align 4 ; <i32> [#uses=1]
- %310 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 0; <i32*> [#uses=1]
+ %310 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 0; <i32*> [#uses=1]
%311 = load i32* %310, align 4 ; <i32> [#uses=1]
%312 = or i32 0, 256 ; <i32> [#uses=1]
%313 = and i32 %312, 511 ; <i32> [#uses=1]
- %314 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %313; <i32*> [#uses=1]
+ %314 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %313; <i32*> [#uses=1]
%315 = load i32* %314, align 4 ; <i32> [#uses=1]
%316 = add i32 %315, %311 ; <i32> [#uses=1]
%317 = or i32 0, 512 ; <i32> [#uses=1]
%318 = and i32 %317, 767 ; <i32> [#uses=1]
- %319 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %318; <i32*> [#uses=1]
+ %319 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %318; <i32*> [#uses=1]
%320 = load i32* %319, align 4 ; <i32> [#uses=1]
%321 = xor i32 %316, %320 ; <i32> [#uses=1]
%322 = or i32 0, 768 ; <i32> [#uses=1]
%323 = and i32 %322, 1023 ; <i32> [#uses=1]
- %324 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %323; <i32*> [#uses=1]
+ %324 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %323; <i32*> [#uses=1]
%325 = load i32* %324, align 4 ; <i32> [#uses=1]
%326 = add i32 %321, %325 ; <i32> [#uses=1]
%327 = xor i32 %309, %307 ; <i32> [#uses=1]
%328 = xor i32 %327, %326 ; <i32> [#uses=5]
- %329 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 17; <i32*> [#uses=1]
+ %329 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 17; <i32*> [#uses=1]
br label %bb2
bb1: ; preds = %entry
%331 = xor i32 %330, %1 ; <i32> [#uses=4]
%332 = load i32* null, align 4 ; <i32> [#uses=1]
%333 = lshr i32 %331, 24 ; <i32> [#uses=1]
- %334 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %333; <i32*> [#uses=1]
+ %334 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %333; <i32*> [#uses=1]
%335 = load i32* %334, align 4 ; <i32> [#uses=1]
%336 = load i32* null, align 4 ; <i32> [#uses=1]
%337 = add i32 %336, %335 ; <i32> [#uses=1]
%338 = lshr i32 %331, 8 ; <i32> [#uses=1]
%339 = or i32 %338, 512 ; <i32> [#uses=1]
%340 = and i32 %339, 767 ; <i32> [#uses=1]
- %341 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %340; <i32*> [#uses=1]
+ %341 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %340; <i32*> [#uses=1]
%342 = load i32* %341, align 4 ; <i32> [#uses=1]
%343 = xor i32 %337, %342 ; <i32> [#uses=1]
%344 = or i32 %331, 768 ; <i32> [#uses=1]
%345 = and i32 %344, 1023 ; <i32> [#uses=1]
- %346 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %345; <i32*> [#uses=1]
+ %346 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %345; <i32*> [#uses=1]
%347 = load i32* %346, align 4 ; <i32> [#uses=1]
%348 = add i32 %343, %347 ; <i32> [#uses=1]
%349 = xor i32 %332, %2 ; <i32> [#uses=1]
%350 = xor i32 %349, %348 ; <i32> [#uses=5]
- %351 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 15; <i32*> [#uses=1]
+ %351 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 15; <i32*> [#uses=1]
%352 = load i32* %351, align 4 ; <i32> [#uses=1]
%353 = lshr i32 %350, 24 ; <i32> [#uses=1]
- %354 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %353; <i32*> [#uses=1]
+ %354 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %353; <i32*> [#uses=1]
%355 = load i32* %354, align 4 ; <i32> [#uses=1]
%356 = lshr i32 %350, 16 ; <i32> [#uses=1]
%357 = or i32 %356, 256 ; <i32> [#uses=1]
%358 = and i32 %357, 511 ; <i32> [#uses=1]
- %359 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %358; <i32*> [#uses=1]
+ %359 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %358; <i32*> [#uses=1]
%360 = load i32* %359, align 4 ; <i32> [#uses=1]
%361 = add i32 %360, %355 ; <i32> [#uses=1]
%362 = lshr i32 %350, 8 ; <i32> [#uses=1]
%363 = or i32 %362, 512 ; <i32> [#uses=1]
%364 = and i32 %363, 767 ; <i32> [#uses=1]
- %365 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %364; <i32*> [#uses=1]
+ %365 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %364; <i32*> [#uses=1]
%366 = load i32* %365, align 4 ; <i32> [#uses=1]
%367 = xor i32 %361, %366 ; <i32> [#uses=1]
%368 = or i32 %350, 768 ; <i32> [#uses=1]
%369 = and i32 %368, 1023 ; <i32> [#uses=1]
- %370 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %369; <i32*> [#uses=1]
+ %370 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %369; <i32*> [#uses=1]
%371 = load i32* %370, align 4 ; <i32> [#uses=1]
%372 = add i32 %367, %371 ; <i32> [#uses=1]
%373 = xor i32 %352, %331 ; <i32> [#uses=1]
%374 = xor i32 %373, %372 ; <i32> [#uses=5]
- %375 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 14; <i32*> [#uses=1]
+ %375 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 14; <i32*> [#uses=1]
%376 = load i32* %375, align 4 ; <i32> [#uses=1]
%377 = lshr i32 %374, 24 ; <i32> [#uses=1]
- %378 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %377; <i32*> [#uses=1]
+ %378 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %377; <i32*> [#uses=1]
%379 = load i32* %378, align 4 ; <i32> [#uses=1]
%380 = lshr i32 %374, 16 ; <i32> [#uses=1]
%381 = or i32 %380, 256 ; <i32> [#uses=1]
%382 = and i32 %381, 511 ; <i32> [#uses=1]
- %383 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %382; <i32*> [#uses=1]
+ %383 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %382; <i32*> [#uses=1]
%384 = load i32* %383, align 4 ; <i32> [#uses=1]
%385 = add i32 %384, %379 ; <i32> [#uses=1]
%386 = lshr i32 %374, 8 ; <i32> [#uses=1]
%387 = or i32 %386, 512 ; <i32> [#uses=1]
%388 = and i32 %387, 767 ; <i32> [#uses=1]
- %389 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %388; <i32*> [#uses=1]
+ %389 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %388; <i32*> [#uses=1]
%390 = load i32* %389, align 4 ; <i32> [#uses=1]
%391 = xor i32 %385, %390 ; <i32> [#uses=1]
%392 = or i32 %374, 768 ; <i32> [#uses=1]
%393 = and i32 %392, 1023 ; <i32> [#uses=1]
- %394 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %393; <i32*> [#uses=1]
+ %394 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %393; <i32*> [#uses=1]
%395 = load i32* %394, align 4 ; <i32> [#uses=1]
%396 = add i32 %391, %395 ; <i32> [#uses=1]
%397 = xor i32 %376, %350 ; <i32> [#uses=1]
%398 = xor i32 %397, %396 ; <i32> [#uses=5]
- %399 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 13; <i32*> [#uses=1]
+ %399 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 13; <i32*> [#uses=1]
%400 = load i32* %399, align 4 ; <i32> [#uses=1]
%401 = lshr i32 %398, 24 ; <i32> [#uses=1]
- %402 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %401; <i32*> [#uses=1]
+ %402 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %401; <i32*> [#uses=1]
%403 = load i32* %402, align 4 ; <i32> [#uses=1]
%404 = lshr i32 %398, 16 ; <i32> [#uses=1]
%405 = or i32 %404, 256 ; <i32> [#uses=1]
%406 = and i32 %405, 511 ; <i32> [#uses=1]
- %407 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %406; <i32*> [#uses=1]
+ %407 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %406; <i32*> [#uses=1]
%408 = load i32* %407, align 4 ; <i32> [#uses=1]
%409 = add i32 %408, %403 ; <i32> [#uses=1]
%410 = lshr i32 %398, 8 ; <i32> [#uses=1]
%411 = or i32 %410, 512 ; <i32> [#uses=1]
%412 = and i32 %411, 767 ; <i32> [#uses=1]
- %413 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %412; <i32*> [#uses=1]
+ %413 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %412; <i32*> [#uses=1]
%414 = load i32* %413, align 4 ; <i32> [#uses=1]
%415 = xor i32 %409, %414 ; <i32> [#uses=1]
%416 = or i32 %398, 768 ; <i32> [#uses=1]
%417 = and i32 %416, 1023 ; <i32> [#uses=1]
- %418 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %417; <i32*> [#uses=1]
+ %418 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %417; <i32*> [#uses=1]
%419 = load i32* %418, align 4 ; <i32> [#uses=1]
%420 = add i32 %415, %419 ; <i32> [#uses=1]
%421 = xor i32 %400, %374 ; <i32> [#uses=1]
%422 = xor i32 %421, %420 ; <i32> [#uses=5]
- %423 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 12; <i32*> [#uses=1]
+ %423 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 12; <i32*> [#uses=1]
%424 = load i32* %423, align 4 ; <i32> [#uses=1]
%425 = lshr i32 %422, 24 ; <i32> [#uses=1]
- %426 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %425; <i32*> [#uses=1]
+ %426 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %425; <i32*> [#uses=1]
%427 = load i32* %426, align 4 ; <i32> [#uses=1]
%428 = lshr i32 %422, 16 ; <i32> [#uses=1]
%429 = or i32 %428, 256 ; <i32> [#uses=1]
%430 = and i32 %429, 511 ; <i32> [#uses=1]
- %431 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %430; <i32*> [#uses=1]
+ %431 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %430; <i32*> [#uses=1]
%432 = load i32* %431, align 4 ; <i32> [#uses=1]
%433 = add i32 %432, %427 ; <i32> [#uses=1]
%434 = lshr i32 %422, 8 ; <i32> [#uses=1]
%435 = or i32 %434, 512 ; <i32> [#uses=1]
%436 = and i32 %435, 767 ; <i32> [#uses=1]
- %437 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %436; <i32*> [#uses=1]
+ %437 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %436; <i32*> [#uses=1]
%438 = load i32* %437, align 4 ; <i32> [#uses=1]
%439 = xor i32 %433, %438 ; <i32> [#uses=1]
%440 = or i32 %422, 768 ; <i32> [#uses=1]
%441 = and i32 %440, 1023 ; <i32> [#uses=1]
- %442 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %441; <i32*> [#uses=1]
+ %442 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %441; <i32*> [#uses=1]
%443 = load i32* %442, align 4 ; <i32> [#uses=1]
%444 = add i32 %439, %443 ; <i32> [#uses=1]
%445 = xor i32 %424, %398 ; <i32> [#uses=1]
%446 = xor i32 %445, %444 ; <i32> [#uses=5]
%447 = load i32* undef, align 4 ; <i32> [#uses=1]
%448 = lshr i32 %446, 24 ; <i32> [#uses=1]
- %449 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %448; <i32*> [#uses=1]
+ %449 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %448; <i32*> [#uses=1]
%450 = load i32* %449, align 4 ; <i32> [#uses=1]
%451 = lshr i32 %446, 16 ; <i32> [#uses=1]
%452 = or i32 %451, 256 ; <i32> [#uses=1]
%453 = and i32 %452, 511 ; <i32> [#uses=1]
- %454 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %453; <i32*> [#uses=1]
+ %454 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %453; <i32*> [#uses=1]
%455 = load i32* %454, align 4 ; <i32> [#uses=1]
%456 = add i32 %455, %450 ; <i32> [#uses=1]
%457 = lshr i32 %446, 8 ; <i32> [#uses=1]
%458 = or i32 %457, 512 ; <i32> [#uses=1]
%459 = and i32 %458, 767 ; <i32> [#uses=1]
- %460 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %459; <i32*> [#uses=1]
+ %460 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %459; <i32*> [#uses=1]
%461 = load i32* %460, align 4 ; <i32> [#uses=1]
%462 = xor i32 %456, %461 ; <i32> [#uses=1]
%463 = or i32 %446, 768 ; <i32> [#uses=1]
%464 = and i32 %463, 1023 ; <i32> [#uses=1]
- %465 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %464; <i32*> [#uses=1]
+ %465 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %464; <i32*> [#uses=1]
%466 = load i32* %465, align 4 ; <i32> [#uses=1]
%467 = add i32 %462, %466 ; <i32> [#uses=1]
%468 = xor i32 %447, %422 ; <i32> [#uses=1]
%469 = xor i32 %468, %467 ; <i32> [#uses=5]
- %470 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 10; <i32*> [#uses=1]
+ %470 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 10; <i32*> [#uses=1]
%471 = load i32* %470, align 4 ; <i32> [#uses=1]
%472 = lshr i32 %469, 24 ; <i32> [#uses=1]
- %473 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %472; <i32*> [#uses=1]
+ %473 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %472; <i32*> [#uses=1]
%474 = load i32* %473, align 4 ; <i32> [#uses=1]
%475 = lshr i32 %469, 16 ; <i32> [#uses=1]
%476 = or i32 %475, 256 ; <i32> [#uses=1]
%477 = and i32 %476, 511 ; <i32> [#uses=1]
- %478 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %477; <i32*> [#uses=1]
+ %478 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %477; <i32*> [#uses=1]
%479 = load i32* %478, align 4 ; <i32> [#uses=1]
%480 = add i32 %479, %474 ; <i32> [#uses=1]
%481 = lshr i32 %469, 8 ; <i32> [#uses=1]
%482 = or i32 %481, 512 ; <i32> [#uses=1]
%483 = and i32 %482, 767 ; <i32> [#uses=1]
- %484 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %483; <i32*> [#uses=1]
+ %484 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %483; <i32*> [#uses=1]
%485 = load i32* %484, align 4 ; <i32> [#uses=1]
%486 = xor i32 %480, %485 ; <i32> [#uses=1]
%487 = or i32 %469, 768 ; <i32> [#uses=1]
%488 = and i32 %487, 1023 ; <i32> [#uses=1]
- %489 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %488; <i32*> [#uses=1]
+ %489 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %488; <i32*> [#uses=1]
%490 = load i32* %489, align 4 ; <i32> [#uses=1]
%491 = add i32 %486, %490 ; <i32> [#uses=1]
%492 = xor i32 %471, %446 ; <i32> [#uses=1]
%493 = xor i32 %492, %491 ; <i32> [#uses=5]
- %494 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 9; <i32*> [#uses=1]
+ %494 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 9; <i32*> [#uses=1]
%495 = load i32* %494, align 4 ; <i32> [#uses=1]
%496 = lshr i32 %493, 24 ; <i32> [#uses=1]
- %497 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %496; <i32*> [#uses=1]
+ %497 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %496; <i32*> [#uses=1]
%498 = load i32* %497, align 4 ; <i32> [#uses=1]
%499 = lshr i32 %493, 16 ; <i32> [#uses=1]
%500 = or i32 %499, 256 ; <i32> [#uses=1]
%501 = and i32 %500, 511 ; <i32> [#uses=1]
- %502 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %501; <i32*> [#uses=1]
+ %502 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %501; <i32*> [#uses=1]
%503 = load i32* %502, align 4 ; <i32> [#uses=1]
%504 = add i32 %503, %498 ; <i32> [#uses=1]
%505 = lshr i32 %493, 8 ; <i32> [#uses=1]
%506 = or i32 %505, 512 ; <i32> [#uses=1]
%507 = and i32 %506, 767 ; <i32> [#uses=1]
- %508 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %507; <i32*> [#uses=1]
+ %508 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %507; <i32*> [#uses=1]
%509 = load i32* %508, align 4 ; <i32> [#uses=1]
%510 = xor i32 %504, %509 ; <i32> [#uses=1]
%511 = or i32 %493, 768 ; <i32> [#uses=1]
%512 = and i32 %511, 1023 ; <i32> [#uses=1]
- %513 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %512; <i32*> [#uses=1]
+ %513 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %512; <i32*> [#uses=1]
%514 = load i32* %513, align 4 ; <i32> [#uses=1]
%515 = add i32 %510, %514 ; <i32> [#uses=1]
%516 = xor i32 %495, %469 ; <i32> [#uses=1]
%517 = xor i32 %516, %515 ; <i32> [#uses=5]
- %518 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 8; <i32*> [#uses=1]
+ %518 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 8; <i32*> [#uses=1]
%519 = load i32* %518, align 4 ; <i32> [#uses=1]
%520 = lshr i32 %517, 24 ; <i32> [#uses=1]
- %521 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %520; <i32*> [#uses=1]
+ %521 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %520; <i32*> [#uses=1]
%522 = load i32* %521, align 4 ; <i32> [#uses=1]
%523 = lshr i32 %517, 16 ; <i32> [#uses=1]
%524 = or i32 %523, 256 ; <i32> [#uses=1]
%525 = and i32 %524, 511 ; <i32> [#uses=1]
- %526 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %525; <i32*> [#uses=1]
+ %526 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %525; <i32*> [#uses=1]
%527 = load i32* %526, align 4 ; <i32> [#uses=1]
%528 = add i32 %527, %522 ; <i32> [#uses=1]
%529 = lshr i32 %517, 8 ; <i32> [#uses=1]
%530 = or i32 %529, 512 ; <i32> [#uses=1]
%531 = and i32 %530, 767 ; <i32> [#uses=1]
- %532 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %531; <i32*> [#uses=1]
+ %532 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %531; <i32*> [#uses=1]
%533 = load i32* %532, align 4 ; <i32> [#uses=1]
%534 = xor i32 %528, %533 ; <i32> [#uses=1]
%535 = or i32 %517, 768 ; <i32> [#uses=1]
%536 = and i32 %535, 1023 ; <i32> [#uses=1]
- %537 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %536; <i32*> [#uses=1]
+ %537 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %536; <i32*> [#uses=1]
%538 = load i32* %537, align 4 ; <i32> [#uses=1]
%539 = add i32 %534, %538 ; <i32> [#uses=1]
%540 = xor i32 %519, %493 ; <i32> [#uses=1]
%541 = xor i32 %540, %539 ; <i32> [#uses=5]
%542 = load i32* undef, align 4 ; <i32> [#uses=1]
%543 = lshr i32 %541, 24 ; <i32> [#uses=1]
- %544 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %543; <i32*> [#uses=1]
+ %544 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %543; <i32*> [#uses=1]
%545 = load i32* %544, align 4 ; <i32> [#uses=1]
%546 = lshr i32 %541, 16 ; <i32> [#uses=1]
%547 = or i32 %546, 256 ; <i32> [#uses=1]
%548 = and i32 %547, 511 ; <i32> [#uses=1]
- %549 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %548; <i32*> [#uses=1]
+ %549 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %548; <i32*> [#uses=1]
%550 = load i32* %549, align 4 ; <i32> [#uses=1]
%551 = add i32 %550, %545 ; <i32> [#uses=1]
%552 = lshr i32 %541, 8 ; <i32> [#uses=1]
%553 = or i32 %552, 512 ; <i32> [#uses=1]
%554 = and i32 %553, 767 ; <i32> [#uses=1]
- %555 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %554; <i32*> [#uses=1]
+ %555 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %554; <i32*> [#uses=1]
%556 = load i32* %555, align 4 ; <i32> [#uses=1]
%557 = xor i32 %551, %556 ; <i32> [#uses=1]
%558 = or i32 %541, 768 ; <i32> [#uses=1]
%559 = and i32 %558, 1023 ; <i32> [#uses=1]
- %560 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %559; <i32*> [#uses=1]
+ %560 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %559; <i32*> [#uses=1]
%561 = load i32* %560, align 4 ; <i32> [#uses=1]
%562 = add i32 %557, %561 ; <i32> [#uses=1]
%563 = xor i32 %542, %517 ; <i32> [#uses=1]
%564 = xor i32 %563, %562 ; <i32> [#uses=5]
- %565 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 6; <i32*> [#uses=1]
+ %565 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 6; <i32*> [#uses=1]
%566 = load i32* %565, align 4 ; <i32> [#uses=1]
%567 = lshr i32 %564, 24 ; <i32> [#uses=1]
- %568 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %567; <i32*> [#uses=1]
+ %568 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %567; <i32*> [#uses=1]
%569 = load i32* %568, align 4 ; <i32> [#uses=1]
%570 = lshr i32 %564, 16 ; <i32> [#uses=1]
%571 = or i32 %570, 256 ; <i32> [#uses=1]
%572 = and i32 %571, 511 ; <i32> [#uses=1]
- %573 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %572; <i32*> [#uses=1]
+ %573 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %572; <i32*> [#uses=1]
%574 = load i32* %573, align 4 ; <i32> [#uses=1]
%575 = add i32 %574, %569 ; <i32> [#uses=1]
%576 = lshr i32 %564, 8 ; <i32> [#uses=1]
%577 = or i32 %576, 512 ; <i32> [#uses=1]
%578 = and i32 %577, 767 ; <i32> [#uses=1]
- %579 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %578; <i32*> [#uses=1]
+ %579 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %578; <i32*> [#uses=1]
%580 = load i32* %579, align 4 ; <i32> [#uses=1]
%581 = xor i32 %575, %580 ; <i32> [#uses=1]
%582 = or i32 %564, 768 ; <i32> [#uses=1]
%583 = and i32 %582, 1023 ; <i32> [#uses=1]
- %584 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %583; <i32*> [#uses=1]
+ %584 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %583; <i32*> [#uses=1]
%585 = load i32* %584, align 4 ; <i32> [#uses=1]
%586 = add i32 %581, %585 ; <i32> [#uses=1]
%587 = xor i32 %566, %541 ; <i32> [#uses=1]
%588 = xor i32 %587, %586 ; <i32> [#uses=5]
- %589 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 5; <i32*> [#uses=1]
+ %589 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 5; <i32*> [#uses=1]
%590 = load i32* %589, align 4 ; <i32> [#uses=1]
%591 = lshr i32 %588, 24 ; <i32> [#uses=1]
- %592 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %591; <i32*> [#uses=1]
+ %592 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %591; <i32*> [#uses=1]
%593 = load i32* %592, align 4 ; <i32> [#uses=1]
%594 = lshr i32 %588, 16 ; <i32> [#uses=1]
%595 = or i32 %594, 256 ; <i32> [#uses=1]
%596 = and i32 %595, 511 ; <i32> [#uses=1]
- %597 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %596; <i32*> [#uses=1]
+ %597 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %596; <i32*> [#uses=1]
%598 = load i32* %597, align 4 ; <i32> [#uses=1]
%599 = add i32 %598, %593 ; <i32> [#uses=1]
%600 = lshr i32 %588, 8 ; <i32> [#uses=1]
%601 = or i32 %600, 512 ; <i32> [#uses=1]
%602 = and i32 %601, 767 ; <i32> [#uses=1]
- %603 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %602; <i32*> [#uses=1]
+ %603 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %602; <i32*> [#uses=1]
%604 = load i32* %603, align 4 ; <i32> [#uses=1]
%605 = xor i32 %599, %604 ; <i32> [#uses=1]
%606 = or i32 %588, 768 ; <i32> [#uses=1]
%607 = and i32 %606, 1023 ; <i32> [#uses=1]
- %608 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %607; <i32*> [#uses=1]
+ %608 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %607; <i32*> [#uses=1]
%609 = load i32* %608, align 4 ; <i32> [#uses=1]
%610 = add i32 %605, %609 ; <i32> [#uses=1]
%611 = xor i32 %590, %564 ; <i32> [#uses=1]
%612 = xor i32 %611, %610 ; <i32> [#uses=5]
- %613 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 4; <i32*> [#uses=1]
+ %613 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 4; <i32*> [#uses=1]
%614 = load i32* %613, align 4 ; <i32> [#uses=1]
%615 = lshr i32 %612, 24 ; <i32> [#uses=1]
- %616 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %615; <i32*> [#uses=1]
+ %616 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %615; <i32*> [#uses=1]
%617 = load i32* %616, align 4 ; <i32> [#uses=1]
%618 = lshr i32 %612, 16 ; <i32> [#uses=1]
%619 = or i32 %618, 256 ; <i32> [#uses=1]
%620 = and i32 %619, 511 ; <i32> [#uses=1]
- %621 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %620; <i32*> [#uses=1]
+ %621 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %620; <i32*> [#uses=1]
%622 = load i32* %621, align 4 ; <i32> [#uses=1]
%623 = add i32 %622, %617 ; <i32> [#uses=1]
%624 = lshr i32 %612, 8 ; <i32> [#uses=1]
%625 = or i32 %624, 512 ; <i32> [#uses=1]
%626 = and i32 %625, 767 ; <i32> [#uses=1]
- %627 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %626; <i32*> [#uses=1]
+ %627 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %626; <i32*> [#uses=1]
%628 = load i32* %627, align 4 ; <i32> [#uses=1]
%629 = xor i32 %623, %628 ; <i32> [#uses=1]
%630 = or i32 %612, 768 ; <i32> [#uses=1]
%631 = and i32 %630, 1023 ; <i32> [#uses=1]
- %632 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %631; <i32*> [#uses=1]
+ %632 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %631; <i32*> [#uses=1]
%633 = load i32* %632, align 4 ; <i32> [#uses=1]
%634 = add i32 %629, %633 ; <i32> [#uses=1]
%635 = xor i32 %614, %588 ; <i32> [#uses=1]
%636 = xor i32 %635, %634 ; <i32> [#uses=5]
- %637 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 3; <i32*> [#uses=1]
+ %637 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 3; <i32*> [#uses=1]
%638 = load i32* %637, align 4 ; <i32> [#uses=1]
%639 = lshr i32 %636, 24 ; <i32> [#uses=1]
- %640 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %639; <i32*> [#uses=1]
+ %640 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %639; <i32*> [#uses=1]
%641 = load i32* %640, align 4 ; <i32> [#uses=1]
%642 = lshr i32 %636, 16 ; <i32> [#uses=1]
%643 = or i32 %642, 256 ; <i32> [#uses=1]
%644 = and i32 %643, 511 ; <i32> [#uses=1]
- %645 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %644; <i32*> [#uses=1]
+ %645 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %644; <i32*> [#uses=1]
%646 = load i32* %645, align 4 ; <i32> [#uses=1]
%647 = add i32 %646, %641 ; <i32> [#uses=1]
%648 = lshr i32 %636, 8 ; <i32> [#uses=1]
%649 = or i32 %648, 512 ; <i32> [#uses=1]
%650 = and i32 %649, 767 ; <i32> [#uses=1]
- %651 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %650; <i32*> [#uses=1]
+ %651 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %650; <i32*> [#uses=1]
%652 = load i32* %651, align 4 ; <i32> [#uses=1]
%653 = xor i32 %647, %652 ; <i32> [#uses=1]
%654 = or i32 %636, 768 ; <i32> [#uses=1]
%655 = and i32 %654, 1023 ; <i32> [#uses=1]
- %656 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %655; <i32*> [#uses=1]
+ %656 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %655; <i32*> [#uses=1]
%657 = load i32* %656, align 4 ; <i32> [#uses=1]
%658 = add i32 %653, %657 ; <i32> [#uses=1]
%659 = xor i32 %638, %612 ; <i32> [#uses=1]
%660 = xor i32 %659, %658 ; <i32> [#uses=5]
%661 = load i32* undef, align 4 ; <i32> [#uses=1]
%662 = lshr i32 %660, 24 ; <i32> [#uses=1]
- %663 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %662; <i32*> [#uses=1]
+ %663 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %662; <i32*> [#uses=1]
%664 = load i32* %663, align 4 ; <i32> [#uses=1]
%665 = lshr i32 %660, 16 ; <i32> [#uses=1]
%666 = or i32 %665, 256 ; <i32> [#uses=1]
%667 = and i32 %666, 511 ; <i32> [#uses=1]
- %668 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %667; <i32*> [#uses=1]
+ %668 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %667; <i32*> [#uses=1]
%669 = load i32* %668, align 4 ; <i32> [#uses=1]
%670 = add i32 %669, %664 ; <i32> [#uses=1]
%671 = lshr i32 %660, 8 ; <i32> [#uses=1]
%672 = or i32 %671, 512 ; <i32> [#uses=1]
%673 = and i32 %672, 767 ; <i32> [#uses=1]
- %674 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %673; <i32*> [#uses=1]
+ %674 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %673; <i32*> [#uses=1]
%675 = load i32* %674, align 4 ; <i32> [#uses=1]
%676 = xor i32 %670, %675 ; <i32> [#uses=1]
%677 = or i32 %660, 768 ; <i32> [#uses=1]
%678 = and i32 %677, 1023 ; <i32> [#uses=1]
- %679 = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %678; <i32*> [#uses=1]
+ %679 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %678; <i32*> [#uses=1]
%680 = load i32* %679, align 4 ; <i32> [#uses=1]
%681 = add i32 %676, %680 ; <i32> [#uses=1]
%682 = xor i32 %661, %636 ; <i32> [#uses=1]
%683 = xor i32 %682, %681 ; <i32> [#uses=5]
- %684 = getelementptr %struct.BF_KEY* %key, i32 0, i32 0, i32 1; <i32*> [#uses=1]
+ %684 = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 0, i32 1; <i32*> [#uses=1]
br label %bb2
bb2: ; preds = %bb1, %bb
%.pn15 = lshr i32 %.pn15.in, 24 ; <i32> [#uses=1]
%.pn14 = and i32 %.pn14.in, 511 ; <i32> [#uses=1]
%.pn13.in = or i32 %.pn13.in.in, 512 ; <i32> [#uses=1]
- %.pn11.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn15; <i32*> [#uses=1]
- %.pn12.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn14; <i32*> [#uses=1]
+ %.pn11.in = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn15; <i32*> [#uses=1]
+ %.pn12.in = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn14; <i32*> [#uses=1]
%.pn13 = and i32 %.pn13.in, 767 ; <i32> [#uses=1]
%.pn10.in = or i32 %.pn10.in.in, 768 ; <i32> [#uses=1]
%.pn11 = load i32* %.pn11.in ; <i32> [#uses=1]
%.pn12 = load i32* %.pn12.in ; <i32> [#uses=1]
- %.pn9.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn13; <i32*> [#uses=1]
+ %.pn9.in = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn13; <i32*> [#uses=1]
%.pn10 = and i32 %.pn10.in, 1023 ; <i32> [#uses=1]
%.pn8 = add i32 %.pn12, %.pn11 ; <i32> [#uses=1]
%.pn9 = load i32* %.pn9.in ; <i32> [#uses=1]
- %.pn7.in = getelementptr %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn10; <i32*> [#uses=1]
+ %.pn7.in = getelementptr %struct.BF_KEY, %struct.BF_KEY* %key, i32 0, i32 1, i32 %.pn10; <i32*> [#uses=1]
%.pn6 = xor i32 %.pn8, %.pn9 ; <i32> [#uses=1]
%.pn7 = load i32* %.pn7.in ; <i32> [#uses=1]
%.pn4 = load i32* %.pn4.in ; <i32> [#uses=1]
%0 = alloca i8, i32 undef, align 4 ; <i8*> [#uses=2]
%1 = call i8* @__strcpy_chk(i8* %0, i8* %tag, i32 -1) nounwind; <i8*> [#uses=0]
%2 = call i8* @__strcat_chk(i8* %0, i8* getelementptr ([2 x i8]* @.str16, i32 0, i32 0), i32 -1) nounwind; <i8*> [#uses=0]
- %3 = getelementptr %struct.vorbis_comment* %vc, i32 0, i32 0; <i8***> [#uses=1]
+ %3 = getelementptr %struct.vorbis_comment, %struct.vorbis_comment* %vc, i32 0, i32 0; <i8***> [#uses=1]
br label %bb11
bb6: ; preds = %bb11
%4 = load i8*** %3, align 4 ; <i8**> [#uses=1]
- %scevgep = getelementptr i8** %4, i32 %8 ; <i8**> [#uses=1]
+ %scevgep = getelementptr i8*, i8** %4, i32 %8 ; <i8**> [#uses=1]
%5 = load i8** %scevgep, align 4 ; <i8*> [#uses=1]
br label %bb3.i
bb3.i: ; preds = %bb3.i, %bb6
- %scevgep7.i = getelementptr i8* %5, i32 0 ; <i8*> [#uses=1]
+ %scevgep7.i = getelementptr i8, i8* %5, i32 0 ; <i8*> [#uses=1]
%6 = load i8* %scevgep7.i, align 1 ; <i8> [#uses=0]
br i1 undef, label %bb3.i, label %bb10
br i1 %0, label %bb13, label %bb1
bb1: ; preds = %entry
- %1 = getelementptr inbounds %struct.asl_file_t* %s, i32 0, i32 11 ; <%struct.FILE**> [#uses=2]
+ %1 = getelementptr inbounds %struct.asl_file_t, %struct.asl_file_t* %s, i32 0, i32 11 ; <%struct.FILE**> [#uses=2]
%2 = load %struct.FILE** %1, align 4 ; <%struct.FILE*> [#uses=2]
%3 = icmp eq %struct.FILE* %2, null ; <i1> [#uses=1]
br i1 %3, label %bb13, label %bb3
bb3: ; preds = %bb1
%4 = add nsw i64 %off, 8 ; <i64> [#uses=1]
- %5 = getelementptr inbounds %struct.asl_file_t* %s, i32 0, i32 10 ; <i32*> [#uses=1]
+ %5 = getelementptr inbounds %struct.asl_file_t, %struct.asl_file_t* %s, i32 0, i32 10 ; <i32*> [#uses=1]
%6 = load i32* %5, align 4 ; <i32> [#uses=1]
%7 = zext i32 %6 to i64 ; <i64> [#uses=1]
%8 = icmp sgt i64 %4, %7 ; <i1> [#uses=1]
entry:
%0 = load i8* %opcodes, align 1 ; <i8> [#uses=1]
%1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %1 ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @codetable.2928, i32 0, i32 %1 ; <i8**> [#uses=1]
br label %bb
bb: ; preds = %bb.backedge, %entry
%indvar = phi i32 [ %phitmp, %bb.backedge ], [ 1, %entry ] ; <i32> [#uses=2]
%gotovar.22.0.in = phi i8** [ %gotovar.22.0.in.be, %bb.backedge ], [ %2, %entry ] ; <i8**> [#uses=1]
%result.0 = phi i32 [ %result.0.be, %bb.backedge ], [ 0, %entry ] ; <i32> [#uses=6]
- %opcodes_addr.0 = getelementptr i8* %opcodes, i32 %indvar ; <i8*> [#uses=4]
+ %opcodes_addr.0 = getelementptr i8, i8* %opcodes, i32 %indvar ; <i8*> [#uses=4]
%gotovar.22.0 = load i8** %gotovar.22.0.in, align 4 ; <i8*> [#uses=1]
indirectbr i8* %gotovar.22.0, [label %RETURN, label %INCREMENT, label %DECREMENT, label %DOUBLE, label %SWAPWORD]
%3 = add nsw i32 %result.0, 1 ; <i32> [#uses=1]
%4 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
%5 = zext i8 %4 to i32 ; <i32> [#uses=1]
- %6 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %5 ; <i8**> [#uses=1]
+ %6 = getelementptr inbounds [5 x i8*], [5 x i8*]* @codetable.2928, i32 0, i32 %5 ; <i8**> [#uses=1]
br label %bb.backedge
bb.backedge: ; preds = %SWAPWORD, %DOUBLE, %DECREMENT, %INCREMENT
%7 = add i32 %result.0, -1 ; <i32> [#uses=1]
%8 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
%9 = zext i8 %8 to i32 ; <i32> [#uses=1]
- %10 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %9 ; <i8**> [#uses=1]
+ %10 = getelementptr inbounds [5 x i8*], [5 x i8*]* @codetable.2928, i32 0, i32 %9 ; <i8**> [#uses=1]
br label %bb.backedge
DOUBLE: ; preds = %bb
%11 = shl i32 %result.0, 1 ; <i32> [#uses=1]
%12 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
%13 = zext i8 %12 to i32 ; <i32> [#uses=1]
- %14 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %13 ; <i8**> [#uses=1]
+ %14 = getelementptr inbounds [5 x i8*], [5 x i8*]* @codetable.2928, i32 0, i32 %13 ; <i8**> [#uses=1]
br label %bb.backedge
SWAPWORD: ; preds = %bb
%17 = or i32 %15, %16 ; <i32> [#uses=1]
%18 = load i8* %opcodes_addr.0, align 1 ; <i8> [#uses=1]
%19 = zext i8 %18 to i32 ; <i32> [#uses=1]
- %20 = getelementptr inbounds [5 x i8*]* @codetable.2928, i32 0, i32 %19 ; <i8**> [#uses=1]
+ %20 = getelementptr inbounds [5 x i8*], [5 x i8*]* @codetable.2928, i32 0, i32 %19 ; <i8**> [#uses=1]
br label %bb.backedge
}
call void @llvm.dbg.value(metadata double %storemerge, i64 0, metadata !91, metadata !{!"0x102"}), !dbg !0
%v_7 = icmp eq i32 %2, 1, !dbg !92 ; <i1> [#uses=1]
%storemerge2 = select i1 %v_7, double 1.000000e+00, double -1.000000e+00 ; <double> [#uses=3]
- %v_8 = getelementptr inbounds %0* %0, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
- %v_10 = getelementptr inbounds %0* %0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %v_8 = getelementptr inbounds %0, %0* %0, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %v_10 = getelementptr inbounds %0, %0* %0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%v_11 = fmul double %storemerge1, %storemerge1, !dbg !93 ; <double> [#uses=1]
%v_15 = tail call double @sqrt(double %v_11) nounwind readonly, !dbg !93 ; <double> [#uses=1]
%v_16 = fdiv double 1.000000e+00, %v_15, !dbg !93 ; <double> [#uses=3]
do.body: ; preds = %entry
%tmp = load i8** @kkkkkk, align 4
%tmp1 = load %struct.MMMMMMMMMMMM** %aidData.addr
- %eph = getelementptr inbounds %struct.MMMMMMMMMMMM* %tmp1, i32 0, i32 0
- %arrayidx = getelementptr inbounds [4 x %struct.RRRRRRRR]* %eph, i32 0, i32 0
+ %eph = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp1, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph, i32 0, i32 0
%tmp2 = bitcast %struct.RRRRRRRR* %agg.tmp to i8*
%tmp3 = bitcast %struct.RRRRRRRR* %arrayidx to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp3, i32 312, i32 4, i1 false)
%tmp5 = load %struct.MMMMMMMMMMMM** %aidData.addr
- %eph6 = getelementptr inbounds %struct.MMMMMMMMMMMM* %tmp5, i32 0, i32 0
- %arrayidx7 = getelementptr inbounds [4 x %struct.RRRRRRRR]* %eph6, i32 0, i32 1
+ %eph6 = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp5, i32 0, i32 0
+ %arrayidx7 = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph6, i32 0, i32 1
%tmp8 = bitcast %struct.RRRRRRRR* %agg.tmp4 to i8*
%tmp9 = bitcast %struct.RRRRRRRR* %arrayidx7 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp8, i8* %tmp9, i32 312, i32 4, i1 false)
%tmp11 = load %struct.MMMMMMMMMMMM** %aidData.addr
- %eph12 = getelementptr inbounds %struct.MMMMMMMMMMMM* %tmp11, i32 0, i32 0
- %arrayidx13 = getelementptr inbounds [4 x %struct.RRRRRRRR]* %eph12, i32 0, i32 2
+ %eph12 = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp11, i32 0, i32 0
+ %arrayidx13 = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph12, i32 0, i32 2
%tmp14 = bitcast %struct.RRRRRRRR* %agg.tmp10 to i8*
%tmp15 = bitcast %struct.RRRRRRRR* %arrayidx13 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp14, i8* %tmp15, i32 312, i32 4, i1 false)
%tmp17 = load %struct.MMMMMMMMMMMM** %aidData.addr
- %eph18 = getelementptr inbounds %struct.MMMMMMMMMMMM* %tmp17, i32 0, i32 0
- %arrayidx19 = getelementptr inbounds [4 x %struct.RRRRRRRR]* %eph18, i32 0, i32 3
+ %eph18 = getelementptr inbounds %struct.MMMMMMMMMMMM, %struct.MMMMMMMMMMMM* %tmp17, i32 0, i32 0
+ %arrayidx19 = getelementptr inbounds [4 x %struct.RRRRRRRR], [4 x %struct.RRRRRRRR]* %eph18, i32 0, i32 3
%tmp20 = bitcast %struct.RRRRRRRR* %agg.tmp16 to i8*
%tmp21 = bitcast %struct.RRRRRRRR* %arrayidx19 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp20, i8* %tmp21, i32 312, i32 4, i1 false)
; CHECK-NEXT: subs
; CHECK-NEXT: bl
%0 = load i32* %A, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i32 1
%1 = load i32* %arrayidx1, align 4
tail call void @bar(i32* %A, i32 %0, i32 %1) #2
ret void
define void @pass_C() #0 {
entry:
%c = alloca %struct.C, align 1
- %0 = getelementptr inbounds %struct.C* %c, i32 0, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 0, i32 0
call void @llvm.lifetime.start(i64 1000, i8* %0) #1
call void @use_C(%struct.C* byval %c) #3
call void @llvm.lifetime.end(i64 1000, i8* %0) #1
define void @adpcm_coder(i16* nocapture %indata, i8* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
entry:
- %0 = getelementptr %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
+ %0 = getelementptr %struct.adpcm_state, %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
%1 = load i16* %0, align 2 ; <i16> [#uses=1]
%2 = sext i16 %1 to i32 ; <i32> [#uses=2]
- %3 = getelementptr %struct.adpcm_state* %state, i32 0, i32 1 ; <i8*> [#uses=2]
+ %3 = getelementptr %struct.adpcm_state, %struct.adpcm_state* %state, i32 0, i32 1 ; <i8*> [#uses=2]
%4 = load i8* %3, align 2 ; <i8> [#uses=1]
%5 = sext i8 %4 to i32 ; <i32> [#uses=3]
- %6 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %5 ; <i32*> [#uses=1]
+ %6 = getelementptr [89 x i32], [89 x i32]* @stepsizeTable, i32 0, i32 %5 ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=1]
%8 = icmp sgt i32 %len, 0 ; <i1> [#uses=1]
br i1 %8, label %bb, label %bb27
%index.033 = phi i32 [ %5, %entry ], [ %index.2, %bb25 ] ; <i32> [#uses=1]
%valpred.132 = phi i32 [ %2, %entry ], [ %valpred.2, %bb25 ] ; <i32> [#uses=2]
%step.031 = phi i32 [ %7, %entry ], [ %36, %bb25 ] ; <i32> [#uses=5]
- %inp.038 = getelementptr i16* %indata, i32 %indvar ; <i16*> [#uses=1]
+ %inp.038 = getelementptr i16, i16* %indata, i32 %indvar ; <i16*> [#uses=1]
%9 = load i16* %inp.038, align 2 ; <i16> [#uses=1]
%10 = sext i16 %9 to i32 ; <i32> [#uses=1]
%11 = sub i32 %10, %valpred.132 ; <i32> [#uses=3]
%delta.1 = or i32 %21, %iftmp.1.0 ; <i32> [#uses=1]
%delta.2 = or i32 %delta.1, %25 ; <i32> [#uses=1]
%29 = xor i32 %delta.2, 1 ; <i32> [#uses=3]
- %30 = getelementptr [16 x i32]* @indexTable, i32 0, i32 %29 ; <i32*> [#uses=1]
+ %30 = getelementptr [16 x i32], [16 x i32]* @indexTable, i32 0, i32 %29 ; <i32*> [#uses=1]
%31 = load i32* %30, align 4 ; <i32> [#uses=1]
%32 = add i32 %31, %index.033 ; <i32> [#uses=2]
%33 = icmp slt i32 %32, 0 ; <i1> [#uses=1]
%index.1 = select i1 %33, i32 0, i32 %32 ; <i32> [#uses=2]
%34 = icmp sgt i32 %index.1, 88 ; <i1> [#uses=1]
%index.2 = select i1 %34, i32 88, i32 %index.1 ; <i32> [#uses=3]
- %35 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %index.2 ; <i32*> [#uses=1]
+ %35 = getelementptr [89 x i32], [89 x i32]* @stepsizeTable, i32 0, i32 %index.2 ; <i32*> [#uses=1]
%36 = load i32* %35, align 4 ; <i32> [#uses=1]
%37 = icmp eq i32 %bufferstep.035, 0 ; <i1> [#uses=1]
br i1 %37, label %bb24, label %bb23
%42 = trunc i32 %outputbuffer.134 to i8 ; <i8> [#uses=1]
%43 = or i8 %41, %42 ; <i8> [#uses=1]
store i8 %43, i8* %outp.136, align 1
- %44 = getelementptr i8* %outp.136, i32 1 ; <i8*> [#uses=1]
+ %44 = getelementptr i8, i8* %outp.136, i32 1 ; <i8*> [#uses=1]
br label %bb25
bb25: ; preds = %bb24, %bb23
define void @adpcm_decoder(i8* nocapture %indata, i16* nocapture %outdata, i32 %len, %struct.adpcm_state* nocapture %state) nounwind {
entry:
- %0 = getelementptr %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
+ %0 = getelementptr %struct.adpcm_state, %struct.adpcm_state* %state, i32 0, i32 0 ; <i16*> [#uses=2]
%1 = load i16* %0, align 2 ; <i16> [#uses=1]
%2 = sext i16 %1 to i32 ; <i32> [#uses=2]
- %3 = getelementptr %struct.adpcm_state* %state, i32 0, i32 1 ; <i8*> [#uses=2]
+ %3 = getelementptr %struct.adpcm_state, %struct.adpcm_state* %state, i32 0, i32 1 ; <i8*> [#uses=2]
%4 = load i8* %3, align 2 ; <i8> [#uses=1]
%5 = sext i8 %4 to i32 ; <i32> [#uses=3]
- %6 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %5 ; <i32*> [#uses=1]
+ %6 = getelementptr [89 x i32], [89 x i32]* @stepsizeTable, i32 0, i32 %5 ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=1]
%8 = icmp sgt i32 %len, 0 ; <i1> [#uses=1]
br i1 %8, label %bb, label %bb22
%index.026 = phi i32 [ %5, %entry ], [ %index.2, %bb20 ] ; <i32> [#uses=1]
%valpred.125 = phi i32 [ %2, %entry ], [ %valpred.2, %bb20 ] ; <i32> [#uses=1]
%step.024 = phi i32 [ %7, %entry ], [ %35, %bb20 ] ; <i32> [#uses=4]
- %outp.030 = getelementptr i16* %outdata, i32 %indvar ; <i16*> [#uses=1]
+ %outp.030 = getelementptr i16, i16* %outdata, i32 %indvar ; <i16*> [#uses=1]
%9 = icmp eq i32 %bufferstep.028, 0 ; <i1> [#uses=1]
br i1 %9, label %bb2, label %bb3
bb2: ; preds = %bb
%10 = load i8* %inp.131, align 1 ; <i8> [#uses=1]
%11 = sext i8 %10 to i32 ; <i32> [#uses=2]
- %12 = getelementptr i8* %inp.131, i32 1 ; <i8*> [#uses=1]
+ %12 = getelementptr i8, i8* %inp.131, i32 1 ; <i8*> [#uses=1]
%13 = ashr i32 %11, 4 ; <i32> [#uses=1]
br label %bb3
%inp.0 = phi i8* [ %12, %bb2 ], [ %inp.131, %bb ] ; <i8*> [#uses=1]
%delta.0 = and i32 %delta.0.in, 15 ; <i32> [#uses=1]
%tmp = xor i32 %bufferstep.028, 1 ; <i32> [#uses=1]
- %14 = getelementptr [16 x i32]* @indexTable, i32 0, i32 %delta.0 ; <i32*> [#uses=1]
+ %14 = getelementptr [16 x i32], [16 x i32]* @indexTable, i32 0, i32 %delta.0 ; <i32*> [#uses=1]
%15 = load i32* %14, align 4 ; <i32> [#uses=1]
%16 = add i32 %15, %index.026 ; <i32> [#uses=2]
%17 = icmp slt i32 %16, 0 ; <i1> [#uses=1]
bb20: ; preds = %bb19, %bb18, %bb13
%valpred.2 = phi i32 [ -32768, %bb19 ], [ 32767, %bb13 ], [ %valpred.0, %bb18 ] ; <i32> [#uses=3]
- %34 = getelementptr [89 x i32]* @stepsizeTable, i32 0, i32 %index.2 ; <i32*> [#uses=1]
+ %34 = getelementptr [89 x i32], [89 x i32]* @stepsizeTable, i32 0, i32 %index.2 ; <i32*> [#uses=1]
%35 = load i32* %34, align 4 ; <i32> [#uses=1]
%36 = trunc i32 %valpred.2 to i16 ; <i16> [#uses=1]
store i16 %36, i16* %outp.030, align 2
bb43:
call fastcc void @f1( float* %tmp8, float* null, i32 0 )
%tmp70 = load i32* null
- %tmp85 = getelementptr float* %tmp8, i32 0
+ %tmp85 = getelementptr float, float* %tmp8, i32 0
call fastcc void @f2( float* null, float* null, float* %tmp85, i32 %tmp70 )
ret void
%tmp6 = alloca i8, i32 %tmp5
%tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
%tmp6.len = call i32 @strlen( i8* %tmp6 )
- %tmp6.indexed = getelementptr i8* %tmp6, i32 %tmp6.len
+ %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8]* @str215, i32 0, i32 0), i32 2, i32 1, i1 false)
%tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
; CHECK: ldm r[[BASE:[0-9]]]!,
; CHECK-NEXT: mov r[[BASE]],
%0 = load i32* %A, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i32 1
%1 = load i32* %arrayidx1, align 4
%call = tail call i32 @bar(i32 %0, i32 %1, i32 %0, i32 %1) #2
%call2 = tail call i32 @bar(i32 %0, i32 %1, i32 %0, i32 %1) #2
; CHECK: adds r[[NSB:[0-9]]], r[[SB]], #4
; CHECK-NEXT: stm r[[NSB]]
%0 = load i32** @a, align 4
- %arrayidx = getelementptr inbounds i32* %0, i32 1
+ %arrayidx = getelementptr inbounds i32, i32* %0, i32 1
%1 = bitcast i32* %arrayidx to i8*
%2 = load i32** @b, align 4
- %arrayidx1 = getelementptr inbounds i32* %2, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %2, i32 1
%3 = bitcast i32* %arrayidx1 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %3, i32 24, i32 4, i1 false)
ret void
; CHECK-LABEL: f1:
; CHECK: ldr r0
%buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 0
+ %tmp = getelementptr [32 x i32], [32 x i32]* %buf, i32 0, i32 0
%tmp1 = load i32* %tmp
ret i32 %tmp1
}
; CHECK: mov r0
; CHECK: ldrb
%buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 0
+ %tmp = getelementptr [32 x i8], [32 x i8]* %buf, i32 0, i32 0
%tmp1 = load i8* %tmp
%tmp2 = zext i8 %tmp1 to i32
ret i32 %tmp2
; CHECK-LABEL: f3:
; CHECK: ldr r0
%buf = alloca [32 x i32], align 4
- %tmp = getelementptr [32 x i32]* %buf, i32 0, i32 32
+ %tmp = getelementptr [32 x i32], [32 x i32]* %buf, i32 0, i32 32
%tmp1 = load i32* %tmp
ret i32 %tmp1
}
; CHECK: mov r0
; CHECK: ldrb
%buf = alloca [32 x i8], align 4
- %tmp = getelementptr [32 x i8]* %buf, i32 0, i32 2
+ %tmp = getelementptr [32 x i8], [32 x i8]* %buf, i32 0, i32 2
%tmp1 = load i8* %tmp
%tmp2 = zext i8 %tmp1 to i32
ret i32 %tmp2
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %1)
- %2 = getelementptr inbounds [256 x i32]* %a1, i32 0, i32 0
+ %2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
call void @llvm.lifetime.end(i64 1024, i8* %1)
bb: ; preds = %bb, %entry
%a_addr.0 = phi i32 [ %a, %entry ], [ %tmp5, %bb ] ; <i32> [#uses=2]
%tmp = load volatile i8** %va ; <i8*> [#uses=2]
- %tmp2 = getelementptr i8* %tmp, i32 4 ; <i8*> [#uses=1]
+ %tmp2 = getelementptr i8, i8* %tmp, i32 4 ; <i8*> [#uses=1]
store volatile i8* %tmp2, i8** %va
%tmp5 = add i32 %a_addr.0, -1 ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp eq i32 %a_addr.0, 1 ; <i1> [#uses=1]
bb.i: ; preds = %bb.i, %bb1
%indvar.i = phi i32 [ 0, %bb1 ], [ %2, %bb.i ] ; <i32> [#uses=3]
%tmp39 = add i32 %indvar.i, %tmp38 ; <i32> [#uses=1]
- %p_addr.0.i = getelementptr i8* undef, i32 %tmp39 ; <i8*> [#uses=1]
+ %p_addr.0.i = getelementptr i8, i8* undef, i32 %tmp39 ; <i8*> [#uses=1]
%0 = load i8* %p_addr.0.i, align 1 ; <i8> [#uses=1]
%1 = icmp slt i8 %0, 0 ; <i1> [#uses=1]
%2 = add i32 %indvar.i, 1 ; <i32> [#uses=1]
read_uleb128.exit: ; preds = %bb.i
%.sum40 = add i32 %indvar.i, undef ; <i32> [#uses=1]
%.sum31 = add i32 %.sum40, 2 ; <i32> [#uses=1]
- %scevgep.i = getelementptr %struct.dwarf_cie* %cie, i32 0, i32 3, i32 %.sum31 ; <i8*> [#uses=1]
+ %scevgep.i = getelementptr %struct.dwarf_cie, %struct.dwarf_cie* %cie, i32 0, i32 3, i32 %.sum31 ; <i8*> [#uses=1]
%3 = call i8* @read_sleb128(i8* %scevgep.i, i32* undef) ; <i8*> [#uses=0]
unreachable
; CHECK-LABEL: t:
; CHECK: add r7, sp, #12
%1 = load i8** undef, align 4 ; <i8*> [#uses=3]
- %2 = getelementptr i8* %1, i32 4 ; <i8*> [#uses=1]
- %3 = getelementptr i8* %1, i32 8 ; <i8*> [#uses=1]
+ %2 = getelementptr i8, i8* %1, i32 4 ; <i8*> [#uses=1]
+ %3 = getelementptr i8, i8* %1, i32 8 ; <i8*> [#uses=1]
%4 = bitcast i8* %2 to i32* ; <i32*> [#uses=1]
%5 = load i32* %4, align 4 ; <i32> [#uses=1]
%6 = trunc i32 %5 to i8 ; <i8> [#uses=1]
- %7 = getelementptr i8* %1, i32 12 ; <i8*> [#uses=1]
+ %7 = getelementptr i8, i8* %1, i32 12 ; <i8*> [#uses=1]
%8 = bitcast i8* %3 to i32* ; <i32*> [#uses=1]
%9 = load i32* %8, align 4 ; <i32> [#uses=1]
%10 = trunc i32 %9 to i16 ; <i16> [#uses=1]
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=11]
%tmp39 = add i32 %indvar, 8 ; <i32> [#uses=0]
%tmp41 = add i32 %indvar, 16 ; <i32> [#uses=2]
- %scevgep42 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp41 ; <float*> [#uses=1]
+ %scevgep42 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp41 ; <float*> [#uses=1]
%tmp43 = add i32 %indvar, 24 ; <i32> [#uses=1]
- %scevgep44 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp43 ; <float*> [#uses=1]
+ %scevgep44 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp43 ; <float*> [#uses=1]
%tmp45 = add i32 %indvar, 32 ; <i32> [#uses=1]
- %scevgep46 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp45 ; <float*> [#uses=1]
+ %scevgep46 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp45 ; <float*> [#uses=1]
%tmp47 = add i32 %indvar, 40 ; <i32> [#uses=1]
- %scevgep48 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp47 ; <float*> [#uses=1]
+ %scevgep48 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp47 ; <float*> [#uses=1]
%tmp49 = add i32 %indvar, 48 ; <i32> [#uses=1]
- %scevgep50 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp49 ; <float*> [#uses=1]
+ %scevgep50 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp49 ; <float*> [#uses=1]
%tmp51 = add i32 %indvar, 56 ; <i32> [#uses=1]
- %scevgep52 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp51 ; <float*> [#uses=1]
- %wsptr.119 = getelementptr [64 x float]* %workspace, i32 0, i32 %indvar ; <float*> [#uses=1]
+ %scevgep52 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp51 ; <float*> [#uses=1]
+ %wsptr.119 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %indvar ; <float*> [#uses=1]
%tmp54 = shl i32 %indvar, 2 ; <i32> [#uses=1]
- %scevgep76 = getelementptr i8* undef, i32 %tmp54 ; <i8*> [#uses=1]
+ %scevgep76 = getelementptr i8, i8* undef, i32 %tmp54 ; <i8*> [#uses=1]
%quantptr.118 = bitcast i8* %scevgep76 to float* ; <float*> [#uses=1]
- %scevgep79 = getelementptr i16* %coef_block, i32 %tmp41 ; <i16*> [#uses=0]
- %inptr.117 = getelementptr i16* %coef_block, i32 %indvar ; <i16*> [#uses=1]
+ %scevgep79 = getelementptr i16, i16* %coef_block, i32 %tmp41 ; <i16*> [#uses=0]
+ %inptr.117 = getelementptr i16, i16* %coef_block, i32 %indvar ; <i16*> [#uses=1]
%1 = load i16* null, align 2 ; <i16> [#uses=1]
%2 = load i16* undef, align 2 ; <i16> [#uses=1]
%3 = load i16* %inptr.117, align 2 ; <i16> [#uses=1]
bb8: ; preds = %bb8, %bb6
%ctr.116 = phi i32 [ 0, %bb6 ], [ %88, %bb8 ] ; <i32> [#uses=3]
- %scevgep = getelementptr i8** %output_buf, i32 %ctr.116 ; <i8**> [#uses=1]
+ %scevgep = getelementptr i8*, i8** %output_buf, i32 %ctr.116 ; <i8**> [#uses=1]
%tmp = shl i32 %ctr.116, 3 ; <i32> [#uses=5]
%tmp2392 = or i32 %tmp, 4 ; <i32> [#uses=1]
- %scevgep24 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2392 ; <float*> [#uses=1]
+ %scevgep24 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2392 ; <float*> [#uses=1]
%tmp2591 = or i32 %tmp, 2 ; <i32> [#uses=1]
- %scevgep26 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2591 ; <float*> [#uses=1]
+ %scevgep26 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2591 ; <float*> [#uses=1]
%tmp2790 = or i32 %tmp, 6 ; <i32> [#uses=1]
- %scevgep28 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp2790 ; <float*> [#uses=1]
+ %scevgep28 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp2790 ; <float*> [#uses=1]
%tmp3586 = or i32 %tmp, 7 ; <i32> [#uses=0]
- %wsptr.215 = getelementptr [64 x float]* %workspace, i32 0, i32 %tmp ; <float*> [#uses=1]
+ %wsptr.215 = getelementptr [64 x float], [64 x float]* %workspace, i32 0, i32 %tmp ; <float*> [#uses=1]
%40 = load i8** %scevgep, align 4 ; <i8*> [#uses=4]
%41 = load float* %wsptr.215, align 4 ; <float> [#uses=1]
%42 = load float* %scevgep24, align 4 ; <float> [#uses=1]
%52 = lshr i32 %51, 3 ; <i32> [#uses=1]
%53 = and i32 %52, 1023 ; <i32> [#uses=1]
%.sum14 = add i32 %53, 128 ; <i32> [#uses=1]
- %54 = getelementptr i8* %0, i32 %.sum14 ; <i8*> [#uses=1]
+ %54 = getelementptr i8, i8* %0, i32 %.sum14 ; <i8*> [#uses=1]
%55 = load i8* %54, align 1 ; <i8> [#uses=1]
store i8 %55, i8* null, align 1
- %56 = getelementptr i8* %40, i32 %.sum10 ; <i8*> [#uses=1]
+ %56 = getelementptr i8, i8* %40, i32 %.sum10 ; <i8*> [#uses=1]
store i8 0, i8* %56, align 1
%57 = load i8* null, align 1 ; <i8> [#uses=1]
- %58 = getelementptr i8* %40, i32 %.sum8 ; <i8*> [#uses=1]
+ %58 = getelementptr i8, i8* %40, i32 %.sum8 ; <i8*> [#uses=1]
store i8 %57, i8* %58, align 1
%59 = fadd float undef, %48 ; <float> [#uses=1]
%60 = fptosi float %59 to i32 ; <i32> [#uses=1]
%62 = lshr i32 %61, 3 ; <i32> [#uses=1]
%63 = and i32 %62, 1023 ; <i32> [#uses=1]
%.sum7 = add i32 %63, 128 ; <i32> [#uses=1]
- %64 = getelementptr i8* %0, i32 %.sum7 ; <i8*> [#uses=1]
+ %64 = getelementptr i8, i8* %0, i32 %.sum7 ; <i8*> [#uses=1]
%65 = load i8* %64, align 1 ; <i8> [#uses=1]
- %66 = getelementptr i8* %40, i32 %.sum6 ; <i8*> [#uses=1]
+ %66 = getelementptr i8, i8* %40, i32 %.sum6 ; <i8*> [#uses=1]
store i8 %65, i8* %66, align 1
%67 = fptosi float undef to i32 ; <i32> [#uses=1]
%68 = add i32 %67, 4 ; <i32> [#uses=1]
%69 = lshr i32 %68, 3 ; <i32> [#uses=1]
%70 = and i32 %69, 1023 ; <i32> [#uses=1]
%.sum5 = add i32 %70, 128 ; <i32> [#uses=1]
- %71 = getelementptr i8* %0, i32 %.sum5 ; <i8*> [#uses=1]
+ %71 = getelementptr i8, i8* %0, i32 %.sum5 ; <i8*> [#uses=1]
%72 = load i8* %71, align 1 ; <i8> [#uses=1]
store i8 %72, i8* undef, align 1
%73 = fadd float %47, undef ; <float> [#uses=1]
%76 = lshr i32 %75, 3 ; <i32> [#uses=1]
%77 = and i32 %76, 1023 ; <i32> [#uses=1]
%.sum3 = add i32 %77, 128 ; <i32> [#uses=1]
- %78 = getelementptr i8* %0, i32 %.sum3 ; <i8*> [#uses=1]
+ %78 = getelementptr i8, i8* %0, i32 %.sum3 ; <i8*> [#uses=1]
%79 = load i8* %78, align 1 ; <i8> [#uses=1]
store i8 %79, i8* undef, align 1
%80 = fsub float %47, undef ; <float> [#uses=1]
%83 = lshr i32 %82, 3 ; <i32> [#uses=1]
%84 = and i32 %83, 1023 ; <i32> [#uses=1]
%.sum1 = add i32 %84, 128 ; <i32> [#uses=1]
- %85 = getelementptr i8* %0, i32 %.sum1 ; <i8*> [#uses=1]
+ %85 = getelementptr i8, i8* %0, i32 %.sum1 ; <i8*> [#uses=1]
%86 = load i8* %85, align 1 ; <i8> [#uses=1]
- %87 = getelementptr i8* %40, i32 %.sum ; <i8*> [#uses=1]
+ %87 = getelementptr i8, i8* %40, i32 %.sum ; <i8*> [#uses=1]
store i8 %86, i8* %87, align 1
%88 = add i32 %ctr.116, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %88, 8 ; <i1> [#uses=1]
br i1 undef, label %tbl.exit, label %bb.i.preheader
bb.i.preheader: ; preds = %bb4.preheader.i
- %line3.i.i.i = getelementptr [200 x i8]* %line.i.i.i, i32 0, i32 0 ; <i8*> [#uses=1]
+ %line3.i.i.i = getelementptr [200 x i8], [200 x i8]* %line.i.i.i, i32 0, i32 0 ; <i8*> [#uses=1]
br label %bb.i
bb.i: ; preds = %bb4.backedge.i, %bb.i.preheader
bb.i171.i.i: ; preds = %bb3.i176.i.i, %bb36.i.i.i, %bb5.i185.i.i
%2 = phi i32 [ %4, %bb3.i176.i.i ], [ 0, %bb36.i.i.i ], [ 0, %bb5.i185.i.i ] ; <i32> [#uses=6]
- %scevgep16.i.i.i = getelementptr [20 x i32]* @sep, i32 0, i32 %2 ; <i32*> [#uses=1]
- %scevgep18.i.i.i = getelementptr [20 x [10 x i8]]* @cll, i32 0, i32 %2, i32 0 ; <i8*> [#uses=0]
+ %scevgep16.i.i.i = getelementptr [20 x i32], [20 x i32]* @sep, i32 0, i32 %2 ; <i32*> [#uses=1]
+ %scevgep18.i.i.i = getelementptr [20 x [10 x i8]], [20 x [10 x i8]]* @cll, i32 0, i32 %2, i32 0 ; <i8*> [#uses=0]
store i32 -1, i32* %scevgep16.i.i.i, align 4
br label %bb1.i175.i.i
bb1.i175.i.i: ; preds = %bb1.i175.i.i, %bb.i171.i.i
%i.03.i172.i.i = phi i32 [ 0, %bb.i171.i.i ], [ %3, %bb1.i175.i.i ] ; <i32> [#uses=4]
- %scevgep11.i.i.i = getelementptr [100 x [20 x i32]]* @lefline, i32 0, i32 %i.03.i172.i.i, i32 %2 ; <i32*> [#uses=1]
- %scevgep12.i.i.i = getelementptr [100 x [20 x [4 x i8]]]* @vsize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0 ; <i8*> [#uses=1]
- %scevgep13.i.i.i = getelementptr [100 x [20 x [4 x i8]]]* @csize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0 ; <i8*> [#uses=0]
+ %scevgep11.i.i.i = getelementptr [100 x [20 x i32]], [100 x [20 x i32]]* @lefline, i32 0, i32 %i.03.i172.i.i, i32 %2 ; <i32*> [#uses=1]
+ %scevgep12.i.i.i = getelementptr [100 x [20 x [4 x i8]]], [100 x [20 x [4 x i8]]]* @vsize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0 ; <i8*> [#uses=1]
+ %scevgep13.i.i.i = getelementptr [100 x [20 x [4 x i8]]], [100 x [20 x [4 x i8]]]* @csize, i32 0, i32 %i.03.i172.i.i, i32 %2, i32 0 ; <i8*> [#uses=0]
store i8 0, i8* %scevgep12.i.i.i, align 1
store i32 0, i32* %scevgep11.i.i.i, align 4
store i32 108, i32* undef, align 4
define void @_ZN10xalanc_1_814FormatterToXML5cdataEPKtj(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length) {
entry:
- %0 = getelementptr %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 13 ; <i8*> [#uses=1]
+ %0 = getelementptr %"struct.xalanc_1_8::FormatterToXML", %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 13 ; <i8*> [#uses=1]
br i1 undef, label %bb4, label %bb
bb: ; preds = %entry
store i8 0, i8* %0, align 1
- %1 = getelementptr %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 0, i32 0, i32 0 ; <i32 (...)***> [#uses=1]
+ %1 = getelementptr %"struct.xalanc_1_8::FormatterToXML", %"struct.xalanc_1_8::FormatterToXML"* %this, i32 0, i32 0, i32 0, i32 0 ; <i32 (...)***> [#uses=1]
%2 = load i32 (...)*** %1, align 4 ; <i32 (...)**> [#uses=1]
- %3 = getelementptr i32 (...)** %2, i32 11 ; <i32 (...)**> [#uses=1]
+ %3 = getelementptr i32 (...)*, i32 (...)** %2, i32 11 ; <i32 (...)**> [#uses=1]
%4 = load i32 (...)** %3, align 4 ; <i32 (...)*> [#uses=1]
%5 = bitcast i32 (...)* %4 to void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)* ; <void (%"struct.xalanc_1_8::FormatterToXML"*, i16*, i32)*> [#uses=1]
tail call void %5(%"struct.xalanc_1_8::FormatterToXML"* %this, i16* %ch, i32 %length)
bb5: ; preds = %bb5, %entry
%.pn = phi %struct.rec* [ %y.0, %bb5 ], [ undef, %entry ] ; <%struct.rec*> [#uses=1]
- %y.0.in = getelementptr %struct.rec* %.pn, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
+ %y.0.in = getelementptr %struct.rec, %struct.rec* %.pn, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
%y.0 = load %struct.rec** %y.0.in ; <%struct.rec*> [#uses=2]
br i1 undef, label %bb5, label %bb6
%7 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
%8 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %7, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %5, i32 %6) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
- %9 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
+ %9 = getelementptr %struct.rec, %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
%10 = load i32* %9, align 4 ; <i32> [#uses=1]
%11 = sub i32 0, %10 ; <i32> [#uses=1]
%12 = load %struct.FILE** @out_fp, align 4 ; <%struct.FILE*> [#uses=1]
bb100.outer.outer: ; preds = %bb79.critedge, %bb1.i3, %FontName.exit
%x_addr.0.ph.ph = phi %struct.rec* [ %x, %FontName.exit ], [ null, %bb79.critedge ], [ null, %bb1.i3 ] ; <%struct.rec*> [#uses=1]
- %14 = getelementptr %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=0]
+ %14 = getelementptr %struct.rec, %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=0]
br label %bb100.outer
bb.i80: ; preds = %bb3.i85
define void @PS_PrintGraphicInclude(%struct.rec* %x, i32 %colmark, i32 %rowmark) nounwind {
entry:
%buff = alloca [512 x i8], align 4 ; <[512 x i8]*> [#uses=5]
- %0 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=2]
+ %0 = getelementptr %struct.rec, %struct.rec* %x, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=2]
%1 = load i8* %0, align 4 ; <i8> [#uses=1]
%2 = add i8 %1, -94 ; <i8> [#uses=1]
%3 = icmp ugt i8 %2, 1 ; <i1> [#uses=1]
br label %bb1
bb1: ; preds = %bb, %entry
- %4 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
+ %4 = getelementptr %struct.rec, %struct.rec* %x, i32 0, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
%5 = bitcast %struct.SECOND_UNION* %4 to %5* ; <%5*> [#uses=1]
- %6 = getelementptr %5* %5, i32 0, i32 1 ; <i8*> [#uses=1]
+ %6 = getelementptr %5, %5* %5, i32 0, i32 1 ; <i8*> [#uses=1]
%7 = load i8* %6, align 1 ; <i8> [#uses=1]
%8 = icmp eq i8 %7, 0 ; <i1> [#uses=1]
br i1 %8, label %bb2, label %bb3
bb6: ; preds = %bb5
%10 = load i8* %0, align 4 ; <i8> [#uses=1]
- %11 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=1]
+ %11 = getelementptr %struct.rec, %struct.rec* %y.0, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=1]
%12 = call %struct.FILE* @OpenIncGraphicFile(i8* undef, i8 zeroext %10, %struct.rec** null, %struct.FILE_POS* %11, i32* undef) nounwind ; <%struct.FILE*> [#uses=4]
br i1 false, label %bb7, label %bb8
unreachable
FontSize.exit: ; preds = %bb1.i
- %17 = getelementptr %struct.FONT_INFO* undef, i32 %16, i32 5 ; <%struct.rec**> [#uses=0]
+ %17 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* undef, i32 %16, i32 5 ; <%struct.rec**> [#uses=0]
%18 = load i32* undef, align 4 ; <i32> [#uses=1]
%19 = load i32* @currentfont, align 4 ; <i32> [#uses=2]
%20 = load i32* @font_count, align 4 ; <i32> [#uses=1]
FontName.exit: ; preds = %bb.i5, %FontSize.exit
%22 = phi %struct.FONT_INFO* [ undef, %bb.i5 ], [ undef, %FontSize.exit ] ; <%struct.FONT_INFO*> [#uses=1]
- %23 = getelementptr %struct.FONT_INFO* %22, i32 %19, i32 5 ; <%struct.rec**> [#uses=0]
+ %23 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* %22, i32 %19, i32 5 ; <%struct.rec**> [#uses=0]
%24 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([8 x i8]* @.str1822946, i32 0, i32 0), i32 %18, i8* null) nounwind ; <i32> [#uses=0]
br label %bb10
%28 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %27, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 undef, i32 %26) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
%29 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([17 x i8]* @.str192782, i32 0, i32 0), double 2.000000e+01, double 2.000000e+01) nounwind ; <i32> [#uses=0]
- %30 = getelementptr %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %30 = getelementptr %struct.rec, %struct.rec* %y.0, i32 0, i32 0, i32 3, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
%31 = load i32* %30, align 4 ; <i32> [#uses=1]
%32 = sub i32 0, %31 ; <i32> [#uses=1]
%33 = load i32* undef, align 4 ; <i32> [#uses=1]
%36 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* %35, i8* getelementptr ([17 x i8]* @.str212784, i32 0, i32 0), i32 %32, i32 %34) nounwind ; <i32> [#uses=0]
store i32 0, i32* @cpexists, align 4
%37 = load %struct.rec** null, align 4 ; <%struct.rec*> [#uses=1]
- %38 = getelementptr %struct.rec* %37, i32 0, i32 0, i32 4 ; <%struct.FOURTH_UNION*> [#uses=1]
+ %38 = getelementptr %struct.rec, %struct.rec* %37, i32 0, i32 0, i32 4 ; <%struct.FOURTH_UNION*> [#uses=1]
%39 = call i32 (%struct.FILE*, i8*, ...)* @fprintf(%struct.FILE* undef, i8* getelementptr ([23 x i8]* @.str1852949, i32 0, i32 0), %struct.FOURTH_UNION* %38) nounwind ; <i32> [#uses=0]
- %buff14 = getelementptr [512 x i8]* %buff, i32 0, i32 0 ; <i8*> [#uses=5]
+ %buff14 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 0 ; <i8*> [#uses=5]
%40 = call i8* @fgets(i8* %buff14, i32 512, %struct.FILE* %12) nounwind ; <i8*> [#uses=0]
%iftmp.506.0 = select i1 undef, i32 2, i32 0 ; <i32> [#uses=1]
- %41 = getelementptr [512 x i8]* %buff, i32 0, i32 26 ; <i8*> [#uses=1]
+ %41 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 26 ; <i8*> [#uses=1]
br label %bb100.outer.outer
bb100.outer.outer: ; preds = %bb83, %bb10
%state.0.ph.ph = phi i32 [ %iftmp.506.0, %bb10 ], [ undef, %bb83 ] ; <i32> [#uses=1]
%x_addr.0.ph.ph = phi %struct.rec* [ %x, %bb10 ], [ %71, %bb83 ] ; <%struct.rec*> [#uses=1]
- %42 = getelementptr %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=0]
+ %42 = getelementptr %struct.rec, %struct.rec* %x_addr.0.ph.ph, i32 0, i32 0, i32 1, i32 0 ; <%struct.FILE_POS*> [#uses=0]
br label %bb100.outer
bb.i80: ; preds = %bb3.i85
bb2.i84: ; preds = %bb100.outer, %bb.i80
%indvar.i81 = phi i32 [ %indvar.next.i79, %bb.i80 ], [ 0, %bb100.outer ] ; <i32> [#uses=3]
- %pp.0.i82 = getelementptr [27 x i8]* @.str141878, i32 0, i32 %indvar.i81 ; <i8*> [#uses=2]
- %sp.0.i83 = getelementptr [512 x i8]* %buff, i32 0, i32 %indvar.i81 ; <i8*> [#uses=1]
+ %pp.0.i82 = getelementptr [27 x i8], [27 x i8]* @.str141878, i32 0, i32 %indvar.i81 ; <i8*> [#uses=2]
+ %sp.0.i83 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 %indvar.i81 ; <i8*> [#uses=1]
%44 = load i8* %sp.0.i83, align 1 ; <i8> [#uses=2]
%45 = icmp eq i8 %44, 0 ; <i1> [#uses=1]
br i1 %45, label %StringBeginsWith.exit88thread-split, label %bb3.i85
br label %bb3.i77
bb3.i77: ; preds = %bb2.i75, %StringBeginsWith.exit88
- %sp.0.i76 = getelementptr [512 x i8]* %buff, i32 0, i32 undef ; <i8*> [#uses=1]
+ %sp.0.i76 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 undef ; <i8*> [#uses=1]
%49 = load i8* %sp.0.i76, align 1 ; <i8> [#uses=1]
%50 = icmp eq i8 %49, 0 ; <i1> [#uses=1]
br i1 %50, label %bb24, label %bb2.i.i68
%51 = call %struct.rec* @MakeWord(i32 11, i8* %41, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=0]
%52 = load i8* getelementptr ([150 x i8]* @zz_lengths, i32 0, i32 0), align 4 ; <i8> [#uses=1]
%53 = zext i8 %52 to i32 ; <i32> [#uses=2]
- %54 = getelementptr [524 x %struct.rec*]* @zz_free, i32 0, i32 %53 ; <%struct.rec**> [#uses=2]
+ %54 = getelementptr [524 x %struct.rec*], [524 x %struct.rec*]* @zz_free, i32 0, i32 %53 ; <%struct.rec**> [#uses=2]
%55 = load %struct.rec** %54, align 4 ; <%struct.rec*> [#uses=3]
%56 = icmp eq %struct.rec* %55, null ; <i1> [#uses=1]
br i1 %56, label %bb27, label %bb28
GetMemory.exit62: ; preds = %bb2.i60, %bb27
%57 = phi i8** [ %.pre1.i59, %bb2.i60 ], [ undef, %bb27 ] ; <i8**> [#uses=1]
- %58 = getelementptr i8** %57, i32 %53 ; <i8**> [#uses=1]
+ %58 = getelementptr i8*, i8** %57, i32 %53 ; <i8**> [#uses=1]
store i8** %58, i8*** @next_free.4772, align 4
store %struct.rec* undef, %struct.rec** @zz_hold, align 4
br label %bb29
br i1 %63, label %bb2.i51, label %bb2.i41
bb2.i51: ; preds = %bb.i47, %bb2.i.i68, %StringBeginsWith.exit88, %bb.i80
- %pp.0.i49 = getelementptr [17 x i8]* @.str1872951, i32 0, i32 0 ; <i8*> [#uses=1]
+ %pp.0.i49 = getelementptr [17 x i8], [17 x i8]* @.str1872951, i32 0, i32 0 ; <i8*> [#uses=1]
%64 = load i8* null, align 1 ; <i8> [#uses=1]
br i1 false, label %StringBeginsWith.exit55thread-split, label %bb3.i52
br i1 false, label %bb2.i41, label %bb2.i.i15
bb2.i.i15: ; preds = %bb2.i41
- %pp.0.i.i13 = getelementptr [6 x i8]* @.str742838, i32 0, i32 0 ; <i8*> [#uses=1]
+ %pp.0.i.i13 = getelementptr [6 x i8], [6 x i8]* @.str742838, i32 0, i32 0 ; <i8*> [#uses=1]
br i1 false, label %StringBeginsWith.exitthread-split.i18, label %bb3.i.i16
bb3.i.i16: ; preds = %bb2.i.i15
bb2.i6.i26: ; preds = %bb2.i6.i26, %StringBeginsWith.exit.i20
%indvar.i3.i23 = phi i32 [ %indvar.next.i1.i21, %bb2.i6.i26 ], [ 0, %StringBeginsWith.exit.i20 ] ; <i32> [#uses=3]
- %sp.0.i5.i25 = getelementptr [512 x i8]* %buff, i32 0, i32 %indvar.i3.i23 ; <i8*> [#uses=0]
- %pp.0.i4.i24 = getelementptr [10 x i8]* @.str752839, i32 0, i32 %indvar.i3.i23 ; <i8*> [#uses=1]
+ %sp.0.i5.i25 = getelementptr [512 x i8], [512 x i8]* %buff, i32 0, i32 %indvar.i3.i23 ; <i8*> [#uses=0]
+ %pp.0.i4.i24 = getelementptr [10 x i8], [10 x i8]* @.str752839, i32 0, i32 %indvar.i3.i23 ; <i8*> [#uses=1]
%68 = load i8* %pp.0.i4.i24, align 1 ; <i8> [#uses=0]
%indvar.next.i1.i21 = add i32 %indvar.i3.i23, 1 ; <i32> [#uses=1]
br i1 undef, label %bb2.i6.i26, label %bb55
%71 = call %struct.rec* @MakeWord(i32 11, i8* undef, %struct.FILE_POS* bitcast (%4* @no_file_pos to %struct.FILE_POS*)) nounwind ; <%struct.rec*> [#uses=4]
%72 = load i8* getelementptr ([150 x i8]* @zz_lengths, i32 0, i32 0), align 4 ; <i8> [#uses=1]
%73 = zext i8 %72 to i32 ; <i32> [#uses=2]
- %74 = getelementptr [524 x %struct.rec*]* @zz_free, i32 0, i32 %73 ; <%struct.rec**> [#uses=2]
+ %74 = getelementptr [524 x %struct.rec*], [524 x %struct.rec*]* @zz_free, i32 0, i32 %73 ; <%struct.rec**> [#uses=2]
%75 = load %struct.rec** %74, align 4 ; <%struct.rec*> [#uses=3]
%76 = icmp eq %struct.rec* %75, null ; <i1> [#uses=1]
br i1 %76, label %bb69, label %bb70
bb2.i4: ; preds = %bb1.i3, %bb.i2
%.pre1.i = phi i8** [ undef, %bb1.i3 ], [ %78, %bb.i2 ] ; <i8**> [#uses=1]
%79 = phi i8** [ undef, %bb1.i3 ], [ %78, %bb.i2 ] ; <i8**> [#uses=1]
- %80 = getelementptr i8** %79, i32 1020 ; <i8**> [#uses=1]
+ %80 = getelementptr i8*, i8** %79, i32 1020 ; <i8**> [#uses=1]
store i8** %80, i8*** @top_free.4773, align 4
br label %GetMemory.exit
GetMemory.exit: ; preds = %bb2.i4, %bb69
%81 = phi i8** [ %.pre1.i, %bb2.i4 ], [ undef, %bb69 ] ; <i8**> [#uses=2]
%82 = bitcast i8** %81 to %struct.rec* ; <%struct.rec*> [#uses=3]
- %83 = getelementptr i8** %81, i32 %73 ; <i8**> [#uses=1]
+ %83 = getelementptr i8*, i8** %81, i32 %73 ; <i8**> [#uses=1]
store i8** %83, i8*** @next_free.4772, align 4
store %struct.rec* %82, %struct.rec** @zz_hold, align 4
br label %bb71
bb71: ; preds = %bb70, %GetMemory.exit
%.pre185 = phi %struct.rec* [ %75, %bb70 ], [ %82, %GetMemory.exit ] ; <%struct.rec*> [#uses=8]
%85 = phi %struct.rec* [ %75, %bb70 ], [ %82, %GetMemory.exit ] ; <%struct.rec*> [#uses=1]
- %86 = getelementptr %struct.rec* %85, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=0]
- %87 = getelementptr %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 1 ; <%struct.rec**> [#uses=0]
- %88 = getelementptr %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
+ %86 = getelementptr %struct.rec, %struct.rec* %85, i32 0, i32 0, i32 1, i32 0, i32 0 ; <i8*> [#uses=0]
+ %87 = getelementptr %struct.rec, %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 1 ; <%struct.rec**> [#uses=0]
+ %88 = getelementptr %struct.rec, %struct.rec* %.pre185, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
store %struct.rec* %.pre185, %struct.rec** @xx_link, align 4
store %struct.rec* %.pre185, %struct.rec** @zz_res, align 4
%89 = load %struct.rec** @needs, align 4 ; <%struct.rec*> [#uses=2]
br i1 false, label %bb77, label %bb73
bb73: ; preds = %bb71
- %90 = getelementptr %struct.rec* %89, i32 0, i32 0, i32 0, i32 0, i32 0 ; <%struct.rec**> [#uses=1]
+ %90 = getelementptr %struct.rec, %struct.rec* %89, i32 0, i32 0, i32 0, i32 0, i32 0 ; <%struct.rec**> [#uses=1]
store %struct.rec* null, %struct.rec** @zz_tmp, align 4
store %struct.rec* %.pre185, %struct.rec** %90
store %struct.rec* %.pre185, %struct.rec** undef, align 4
br i1 undef, label %bb83, label %bb79
bb79: ; preds = %bb77
- %91 = getelementptr %struct.rec* %71, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
+ %91 = getelementptr %struct.rec, %struct.rec* %71, i32 0, i32 0, i32 0, i32 1, i32 0 ; <%struct.rec**> [#uses=1]
store %struct.rec* null, %struct.rec** @zz_tmp, align 4
%92 = load %struct.rec** %88, align 4 ; <%struct.rec*> [#uses=1]
store %struct.rec* %92, %struct.rec** %91
- %93 = getelementptr %struct.rec* undef, i32 0, i32 0, i32 0, i32 1, i32 1 ; <%struct.rec**> [#uses=1]
+ %93 = getelementptr %struct.rec, %struct.rec* undef, i32 0, i32 0, i32 0, i32 1, i32 1 ; <%struct.rec**> [#uses=1]
store %struct.rec* %71, %struct.rec** %93, align 4
store %struct.rec* %.pre185, %struct.rec** undef, align 4
br label %bb83
%0 = load float* undef, align 4 ; <float> [#uses=1]
%1 = fmul float undef, %0 ; <float> [#uses=2]
%tmp73 = add i32 0, 224 ; <i32> [#uses=1]
- %scevgep74 = getelementptr i8* null, i32 %tmp73 ; <i8*> [#uses=1]
+ %scevgep74 = getelementptr i8, i8* null, i32 %tmp73 ; <i8*> [#uses=1]
%scevgep7475 = bitcast i8* %scevgep74 to float* ; <float*> [#uses=1]
%2 = load float* null, align 4 ; <float> [#uses=1]
%3 = fmul float 0.000000e+00, %2 ; <float> [#uses=2]
%0 = load [4 x i8]** undef, align 4 ; <[4 x i8]*> [#uses=1]
%1 = load i8* undef, align 1 ; <i8> [#uses=1]
%2 = zext i8 %1 to i32 ; <i32> [#uses=1]
- %3 = getelementptr [4 x i8]* %0, i32 %v0, i32 0 ; <i8*> [#uses=1]
+ %3 = getelementptr [4 x i8], [4 x i8]* %0, i32 %v0, i32 0 ; <i8*> [#uses=1]
%4 = load i8* %3, align 1 ; <i8> [#uses=1]
%5 = zext i8 %4 to i32 ; <i32> [#uses=1]
%6 = sub i32 %5, %2 ; <i32> [#uses=1]
br label %bb5
bb: ; preds = %bb5
- %22 = getelementptr inbounds i32* %21, i32 %x.0 ; <i32*> [#uses=1]
+ %22 = getelementptr inbounds i32, i32* %21, i32 %x.0 ; <i32*> [#uses=1]
%23 = load i32* %22, align 4 ; <i32> [#uses=1]
%24 = icmp eq i32 %23, %16 ; <i1> [#uses=1]
br i1 %24, label %bb1, label %bb2
bb1: ; preds = %bb
%25 = load i8** @he, align 4 ; <i8*> [#uses=1]
- %26 = getelementptr inbounds i8* %25, i32 %x.0 ; <i8*> [#uses=1]
+ %26 = getelementptr inbounds i8, i8* %25, i32 %x.0 ; <i8*> [#uses=1]
%27 = load i8* %26, align 1 ; <i8> [#uses=1]
%28 = sext i8 %27 to i32 ; <i32> [#uses=1]
ret i32 %28
unreachable
if.end371: ; preds = %lor.end
- %arrayidx56.2.i = getelementptr [4 x %struct.pix_pos]* %pix_a.i294, i32 0, i32 2 ; <%struct.pix_pos*> [#uses=1]
- %arrayidx56.3.i = getelementptr [4 x %struct.pix_pos]* %pix_a.i294, i32 0, i32 3 ; <%struct.pix_pos*> [#uses=1]
+ %arrayidx56.2.i = getelementptr [4 x %struct.pix_pos], [4 x %struct.pix_pos]* %pix_a.i294, i32 0, i32 2 ; <%struct.pix_pos*> [#uses=1]
+ %arrayidx56.3.i = getelementptr [4 x %struct.pix_pos], [4 x %struct.pix_pos]* %pix_a.i294, i32 0, i32 3 ; <%struct.pix_pos*> [#uses=1]
br i1 undef, label %for.body1857, label %for.end4557
for.body1857: ; preds = %if.end371
if.then3689: ; preds = %for.cond2882.preheader
%add3695 = add nsw i32 %mul3693, %shl1959 ; <i32> [#uses=1]
%mul3697 = shl i32 %add3695, 2 ; <i32> [#uses=2]
- %arrayidx3705 = getelementptr inbounds i16* undef, i32 1 ; <i16*> [#uses=1]
+ %arrayidx3705 = getelementptr inbounds i16, i16* undef, i32 1 ; <i16*> [#uses=1]
%tmp3706 = load i16* %arrayidx3705 ; <i16> [#uses=1]
%conv3707 = sext i16 %tmp3706 to i32 ; <i32> [#uses=1]
%add3708 = add nsw i32 %conv3707, %mul3697 ; <i32> [#uses=1]
- %arrayidx3724 = getelementptr inbounds i16* null, i32 1 ; <i16*> [#uses=1]
+ %arrayidx3724 = getelementptr inbounds i16, i16* null, i32 1 ; <i16*> [#uses=1]
%tmp3725 = load i16* %arrayidx3724 ; <i16> [#uses=1]
%conv3726 = sext i16 %tmp3725 to i32 ; <i32> [#uses=1]
%add3727 = add nsw i32 %conv3726, %mul3697 ; <i32> [#uses=1]
%tmp3746 = load i16* undef ; <i16> [#uses=1]
%conv3747 = sext i16 %tmp3746 to i32 ; <i32> [#uses=1]
%add3748 = add nsw i32 %conv3747, %mul3737 ; <i32> [#uses=1]
- %arrayidx3765 = getelementptr inbounds i16* null, i32 1 ; <i16*> [#uses=1]
+ %arrayidx3765 = getelementptr inbounds i16, i16* null, i32 1 ; <i16*> [#uses=1]
%tmp3766 = load i16* %arrayidx3765 ; <i16> [#uses=1]
%conv3767 = sext i16 %tmp3766 to i32 ; <i32> [#uses=1]
%add3768 = add nsw i32 %conv3767, %mul3737 ; <i32> [#uses=1]
br i1 undef, label %bb8, label %bb7
bb7: ; preds = %bb6
- %1 = getelementptr inbounds %struct.SV* %0, i32 0, i32 0 ; <i8**> [#uses=1]
+ %1 = getelementptr inbounds %struct.SV, %struct.SV* %0, i32 0, i32 0 ; <i8**> [#uses=1]
%2 = load i8** %1, align 4 ; <i8*> [#uses=1]
- %3 = getelementptr inbounds i8* %2, i32 12 ; <i8*> [#uses=1]
+ %3 = getelementptr inbounds i8, i8* %2, i32 12 ; <i8*> [#uses=1]
%4 = bitcast i8* %3 to i32* ; <i32*> [#uses=1]
%5 = load i32* %4, align 4 ; <i32> [#uses=1]
%storemerge5 = xor i32 %5, -1 ; <i32> [#uses=1]
call void @Perl_sv_setiv(%struct.SV* undef, i32 %storemerge5) nounwind
- %6 = getelementptr inbounds %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
+ %6 = getelementptr inbounds %struct.SV, %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=1]
%8 = and i32 %7, 16384 ; <i32> [#uses=1]
%9 = icmp eq i32 %8, 0 ; <i1> [#uses=1]
br label %Perl_sv_setuv.exit
Perl_sv_setuv.exit: ; preds = %bb1.i, %bb.i
- %11 = getelementptr inbounds %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
+ %11 = getelementptr inbounds %struct.SV, %struct.SV* undef, i32 0, i32 2 ; <i32*> [#uses=1]
%12 = load i32* %11, align 4 ; <i32> [#uses=1]
%13 = and i32 %12, 16384 ; <i32> [#uses=1]
%14 = icmp eq i32 %13, 0 ; <i1> [#uses=1]
%34 = load i8** %dpt, align 4 ; <i8*> [#uses=1]
store i8 %33, i8* %34, align 1
%35 = load i8** %dpt, align 4 ; <i8*> [#uses=1]
- %36 = getelementptr inbounds i8* %35, i64 1 ; <i8*> [#uses=1]
+ %36 = getelementptr inbounds i8, i8* %35, i64 1 ; <i8*> [#uses=1]
store i8* %36, i8** %dpt, align 4
%37 = load i32* %j, align 4 ; <i32> [#uses=1]
%38 = add nsw i32 %37, 1 ; <i32> [#uses=1]
br label %bb11
bb7: ; preds = %bb11
- %2 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 1
+ %2 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 1
%3 = load %struct._opaque_pthread_t** %2, align 4
%4 = tail call i32 @pthread_equal(%struct._opaque_pthread_t* %3, %struct._opaque_pthread_t* %me.0) nounwind
%5 = icmp eq i32 %4, 0
br i1 %5, label %bb10, label %bb14
bb10: ; preds = %bb7
- %6 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 6
+ %6 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", %"struct.WTF::TCMalloc_ThreadCache"* %h.0, i32 0, i32 6
br label %bb11
bb11: ; preds = %bb10, %bb6
bb14: ; preds = %bb13, %bb7
%heap.1 = phi %"struct.WTF::TCMalloc_ThreadCache"* [ %8, %bb13 ], [ %h.0, %bb7 ] ; <%"struct.WTF::TCMalloc_ThreadCache"*> [#uses=4]
%9 = tail call i32 @pthread_mutex_unlock(%struct.PlatformMutex* getelementptr inbounds (%struct.SpinLock* @_ZN3WTFL13pageheap_lockE, i32 0, i32 0)) nounwind
- %10 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache"* %heap.1, i32 0, i32 2
+ %10 = getelementptr inbounds %"struct.WTF::TCMalloc_ThreadCache", %"struct.WTF::TCMalloc_ThreadCache"* %heap.1, i32 0, i32 2
%11 = load i8* %10, align 4
%toBool15not = icmp eq i8 %11, 0 ; <i1> [#uses=1]
br i1 %toBool15not, label %bb19, label %bb22
bb46: ; preds = %bb26, %bb10
%1 = bitcast double* %value to i16* ; <i16*> [#uses=1]
- %v47 = getelementptr inbounds [6 x i16]* %v, i32 0, i32 0 ; <i16*> [#uses=1]
+ %v47 = getelementptr inbounds [6 x i16], [6 x i16]* %v, i32 0, i32 0 ; <i16*> [#uses=1]
call void @etoe53(i16* %v47, i16* %1) nounwind
ret void
}
%4 = phi i8 [ %5, %bb345 ], [ undef, %bb339 ] ; <i8> [#uses=0]
%indvar670 = phi i32 [ %tmp673, %bb345 ], [ 0, %bb339 ] ; <i32> [#uses=1]
%tmp673 = add i32 %indvar670, 1 ; <i32> [#uses=2]
- %scevgep674 = getelementptr [256 x i8]* %last, i32 0, i32 %tmp673 ; <i8*> [#uses=1]
+ %scevgep674 = getelementptr [256 x i8], [256 x i8]* %last, i32 0, i32 %tmp673 ; <i8*> [#uses=1]
%5 = load i8* %scevgep674, align 1 ; <i8> [#uses=1]
br i1 undef, label %bb347, label %bb345
bb366: ; preds = %bb366, %bb360
%indvar662 = phi i32 [ %tmp665, %bb366 ], [ 0, %bb360 ] ; <i32> [#uses=1]
%tmp665 = add i32 %indvar662, 1 ; <i32> [#uses=2]
- %scevgep666 = getelementptr [256 x i8]* %last2, i32 0, i32 %tmp665 ; <i8*> [#uses=1]
+ %scevgep666 = getelementptr [256 x i8], [256 x i8]* %last2, i32 0, i32 %tmp665 ; <i8*> [#uses=1]
%6 = load i8* %scevgep666, align 1 ; <i8> [#uses=0]
br i1 false, label %bb368, label %bb366
bb395: ; preds = %bb394, %isdigit1498.exit83, %bb391
%storemerge14.sum = add i32 %indvar724, undef ; <i32> [#uses=1]
- %p.26 = getelementptr [256 x i8]* %line, i32 0, i32 %storemerge14.sum ; <i8*> [#uses=1]
+ %p.26 = getelementptr [256 x i8], [256 x i8]* %line, i32 0, i32 %storemerge14.sum ; <i8*> [#uses=1]
br i1 undef, label %bb400, label %isdigit1498.exit87
isdigit1498.exit87: ; preds = %bb395
br i1 undef, label %bb402, label %bb403
bb402: ; preds = %bb400
- %12 = getelementptr inbounds i8* %p.26, i32 undef ; <i8*> [#uses=1]
+ %12 = getelementptr inbounds i8, i8* %p.26, i32 undef ; <i8*> [#uses=1]
br label %bb403
bb403: ; preds = %bb402, %bb400
; CHECK: InlineAsm Start
define void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
entry:
- %tmp1 = getelementptr inbounds %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+ %tmp1 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
store volatile i32 1, i32* %tmp1, align 4
- %tmp12 = getelementptr inbounds %s1* %this, i32 0, i32 1
+ %tmp12 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 1
store i32 %levels, i32* %tmp12, align 4
- %tmp13 = getelementptr inbounds %s1* %this, i32 0, i32 3
+ %tmp13 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 3
store i8* %data, i8** %tmp13, align 4
- %tmp14 = getelementptr inbounds %s1* %this, i32 0, i32 4
+ %tmp14 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 4
store void (i8*, i8*)* %release, void (i8*, i8*)** %tmp14, align 4
- %tmp15 = getelementptr inbounds %s1* %this, i32 0, i32 5
+ %tmp15 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 5
store i8* %info, i8** %tmp15, align 4
- %tmp16 = getelementptr inbounds %s1* %this, i32 0, i32 6
+ %tmp16 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 6
store i32* null, i32** %tmp16, align 4
- %tmp17 = getelementptr inbounds %s1* %this, i32 0, i32 7
+ %tmp17 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 7
store i32* null, i32** %tmp17, align 4
- %tmp19 = getelementptr inbounds %s1* %this, i32 0, i32 10
+ %tmp19 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 10
store i64 0, i64* %tmp19, align 4
- %tmp20 = getelementptr inbounds %s1* %this, i32 0, i32 0
+ %tmp20 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 0
tail call void @f1(%s3* %tmp20, i32* %s) nounwind
%tmp21 = shl i32 %format, 6
%tmp22 = tail call zeroext i8 @f2(i32 %format) nounwind
%flags.0 = or i32 %tmp23, %tmp21
%tmp24 = shl i32 %flags.0, 16
%asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(i32* %tmp1, i32 %tmp24) nounwind
- %tmp25 = getelementptr inbounds %s1* %this, i32 0, i32 2, i32 0, i32 0
+ %tmp25 = getelementptr inbounds %s1, %s1* %this, i32 0, i32 2, i32 0, i32 0
store volatile i32 1, i32* %tmp25, align 4
%tmp26 = icmp eq i32 %levels, 0
br i1 %tmp26, label %return, label %bb4
bb4:
%l.09 = phi i32 [ %tmp28, %bb4 ], [ 0, %entry ]
- %scevgep = getelementptr %s1* %this, i32 0, i32 11, i32 %l.09
- %scevgep10 = getelementptr i32* %rowbytes, i32 %l.09
+ %scevgep = getelementptr %s1, %s1* %this, i32 0, i32 11, i32 %l.09
+ %scevgep10 = getelementptr i32, i32* %rowbytes, i32 %l.09
%tmp27 = load i32* %scevgep10, align 4
store i32 %tmp27, i32* %scevgep, align 4
%tmp28 = add i32 %l.09, 1
bb: ; preds = %entry
%1 = alloca [1000 x i8], align 4 ; <[1000 x i8]*> [#uses=1]
- %.sub = getelementptr inbounds [1000 x i8]* %1, i32 0, i32 0 ; <i8*> [#uses=2]
+ %.sub = getelementptr inbounds [1000 x i8], [1000 x i8]* %1, i32 0, i32 0 ; <i8*> [#uses=2]
%2 = call i32 (i8*, i32, i32, i8*, ...)* @__sprintf_chk(i8* %.sub, i32 0, i32 1000, i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %i) nounwind ; <i32> [#uses=0]
%3 = load i8* %.sub, align 4 ; <i8> [#uses=1]
%4 = sext i8 %3 to i32 ; <i32> [#uses=1]
br i1 undef, label %if.then67, label %if.end95
if.then67: ; preds = %if.then60
- %op_next71 = getelementptr inbounds %struct.op* %tmp27, i32 0, i32 0
+ %op_next71 = getelementptr inbounds %struct.op, %struct.op* %tmp27, i32 0, i32 0
store %struct.op* %tmp27, %struct.op** %op_next71, align 4
- %0 = getelementptr inbounds %struct.op* %tmp27, i32 1, i32 0
+ %0 = getelementptr inbounds %struct.op, %struct.op* %tmp27, i32 1, i32 0
br label %if.end95
if.end95: ; preds = %if.else92, %if.then67
bb: ; preds = %bb3
%Scan.0.idx7.val = load i8** undef, align 4
- %.idx = getelementptr i8* %Scan.0.idx7.val, i32 4
+ %.idx = getelementptr i8, i8* %Scan.0.idx7.val, i32 4
%0 = bitcast i8* %.idx to i8**
%.idx.val = load i8** %0, align 4
%1 = icmp eq i8* %.idx.val, %Key
br i1 %cmp, label %if.end11, label %if.end
if.end: ; preds = %tailrecurse
- %string = getelementptr inbounds %struct.Dict_node_struct* %dn.tr, i32 0, i32 0
+ %string = getelementptr inbounds %struct.Dict_node_struct, %struct.Dict_node_struct* %dn.tr, i32 0, i32 0
%0 = load i8** %string, align 4
br label %while.cond.i
br i1 %cmp4.i, label %while.body.i, label %while.end.i
while.body.i: ; preds = %land.end.i
- %incdec.ptr.i = getelementptr inbounds i8* %1, i32 1
- %incdec.ptr6.i = getelementptr inbounds i8* %storemerge.i, i32 1
+ %incdec.ptr.i = getelementptr inbounds i8, i8* %1, i32 1
+ %incdec.ptr6.i = getelementptr inbounds i8, i8* %storemerge.i, i32 1
br label %while.cond.i
while.end.i: ; preds = %land.end.i
; CHECK: cmp
; CHECK-NOT: cbnz
%storemerge1.i3 = phi i32 [ %sub.i, %dict_match.exit ], [ 0, %lor.lhs.false.i ], [ 0, %while.end.i ]
- %right = getelementptr inbounds %struct.Dict_node_struct* %dn.tr, i32 0, i32 4
+ %right = getelementptr inbounds %struct.Dict_node_struct, %struct.Dict_node_struct* %dn.tr, i32 0, i32 4
%4 = load %struct.Dict_node_struct** %right, align 4
tail call fastcc void @rdictionary_lookup(%struct.Dict_node_struct* %4, i8* %s)
%cmp4 = icmp eq i32 %storemerge1.i3, 0
%6 = bitcast %struct.Dict_node_struct* %dn.tr to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call6, i8* %6, i32 16, i32 4, i1 false)
%7 = load %struct.Dict_node_struct** @lookup_list, align 4
- %right7 = getelementptr inbounds i8* %call6, i32 16
+ %right7 = getelementptr inbounds i8, i8* %call6, i32 16
%8 = bitcast i8* %right7 to %struct.Dict_node_struct**
store %struct.Dict_node_struct* %7, %struct.Dict_node_struct** %8, align 4
store %struct.Dict_node_struct* %5, %struct.Dict_node_struct** @lookup_list, align 4
br i1 %cmp9, label %if.then10, label %if.end11
if.then10: ; preds = %if.end8, %if.then5, %dict_match.exit
- %left = getelementptr inbounds %struct.Dict_node_struct* %dn.tr, i32 0, i32 3
+ %left = getelementptr inbounds %struct.Dict_node_struct, %struct.Dict_node_struct* %dn.tr, i32 0, i32 3
%9 = load %struct.Dict_node_struct** %left, align 4
br label %tailrecurse
%struct.foo = type { i32, [40 x i8] }
define hidden void @func(i8* %Data) nounwind ssp {
- %1 = getelementptr inbounds i8* %Data, i32 12
+ %1 = getelementptr inbounds i8, i8* %Data, i32 12
%2 = bitcast i8* %1 to %"myclass"*
tail call void @abc(%"myclass"* %2) nounwind
tail call void @def(%"myclass"* %2) nounwind
- %3 = getelementptr inbounds i8* %Data, i32 8
+ %3 = getelementptr inbounds i8, i8* %Data, i32 8
%4 = bitcast i8* %3 to i8**
%5 = load i8** %4, align 4
tail call void @ghi(i8* %5) nounwind
%6 = bitcast i8* %Data to void (i8*)**
%7 = load void (i8*)** %6, align 4
- %8 = getelementptr inbounds i8* %Data, i32 4
+ %8 = getelementptr inbounds i8, i8* %Data, i32 4
%9 = bitcast i8* %8 to i8**
%10 = load i8** %9, align 4
%11 = icmp eq i8* %Data, null
store %class.RagDoll* %this1, %class.RagDoll** %retval
%0 = bitcast %class.RagDoll* %this1 to i8***
store i8** getelementptr inbounds ([4 x i8*]* @_ZTV7RagDoll, i64 0, i64 2), i8*** %0
- %m_ownerWorld = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%1 = load %class.btDynamicsWorld** %ownerWorld.addr, align 4
store %class.btDynamicsWorld* %1, %class.btDynamicsWorld** %m_ownerWorld, align 4
%call = call i8* @_ZN13btConvexShapenwEm(i32 56)
invoke.cont: ; preds = %entry
%5 = bitcast %class.btCapsuleShape* %2 to %class.btCollisionShape*
- %m_shapes = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes, i32 0, i32 0
+ %m_shapes = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes, i32 0, i32 0
store %class.btCollisionShape* %5, %class.btCollisionShape** %arrayidx, align 4
%call5 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%6 = bitcast i8* %call5 to %class.btCapsuleShape*
invoke.cont9: ; preds = %invoke.cont
%9 = bitcast %class.btCapsuleShape* %6 to %class.btCollisionShape*
- %m_shapes12 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx13 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes12, i32 0, i32 1
+ %m_shapes12 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx13 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes12, i32 0, i32 1
store %class.btCollisionShape* %9, %class.btCollisionShape** %arrayidx13, align 4
%call14 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%10 = bitcast i8* %call14 to %class.btCapsuleShape*
invoke.cont18: ; preds = %invoke.cont9
%13 = bitcast %class.btCapsuleShape* %10 to %class.btCollisionShape*
- %m_shapes21 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx22 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes21, i32 0, i32 2
+ %m_shapes21 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx22 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes21, i32 0, i32 2
store %class.btCollisionShape* %13, %class.btCollisionShape** %arrayidx22, align 4
%call23 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%14 = bitcast i8* %call23 to %class.btCapsuleShape*
invoke.cont27: ; preds = %invoke.cont18
%17 = bitcast %class.btCapsuleShape* %14 to %class.btCollisionShape*
- %m_shapes30 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx31 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes30, i32 0, i32 3
+ %m_shapes30 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx31 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes30, i32 0, i32 3
store %class.btCollisionShape* %17, %class.btCollisionShape** %arrayidx31, align 4
%call32 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%18 = bitcast i8* %call32 to %class.btCapsuleShape*
invoke.cont36: ; preds = %invoke.cont27
%21 = bitcast %class.btCapsuleShape* %18 to %class.btCollisionShape*
- %m_shapes39 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx40 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes39, i32 0, i32 4
+ %m_shapes39 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx40 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes39, i32 0, i32 4
store %class.btCollisionShape* %21, %class.btCollisionShape** %arrayidx40, align 4
%call41 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%22 = bitcast i8* %call41 to %class.btCapsuleShape*
invoke.cont45: ; preds = %invoke.cont36
%25 = bitcast %class.btCapsuleShape* %22 to %class.btCollisionShape*
- %m_shapes48 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx49 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes48, i32 0, i32 5
+ %m_shapes48 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx49 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes48, i32 0, i32 5
store %class.btCollisionShape* %25, %class.btCollisionShape** %arrayidx49, align 4
%call50 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%26 = bitcast i8* %call50 to %class.btCapsuleShape*
invoke.cont54: ; preds = %invoke.cont45
%29 = bitcast %class.btCapsuleShape* %26 to %class.btCollisionShape*
- %m_shapes57 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx58 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes57, i32 0, i32 6
+ %m_shapes57 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx58 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes57, i32 0, i32 6
store %class.btCollisionShape* %29, %class.btCollisionShape** %arrayidx58, align 4
%call59 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%30 = bitcast i8* %call59 to %class.btCapsuleShape*
invoke.cont63: ; preds = %invoke.cont54
%33 = bitcast %class.btCapsuleShape* %30 to %class.btCollisionShape*
- %m_shapes66 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx67 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes66, i32 0, i32 7
+ %m_shapes66 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx67 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes66, i32 0, i32 7
store %class.btCollisionShape* %33, %class.btCollisionShape** %arrayidx67, align 4
%call68 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%34 = bitcast i8* %call68 to %class.btCapsuleShape*
invoke.cont72: ; preds = %invoke.cont63
%37 = bitcast %class.btCapsuleShape* %34 to %class.btCollisionShape*
- %m_shapes75 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx76 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes75, i32 0, i32 8
+ %m_shapes75 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx76 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes75, i32 0, i32 8
store %class.btCollisionShape* %37, %class.btCollisionShape** %arrayidx76, align 4
%call77 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%38 = bitcast i8* %call77 to %class.btCapsuleShape*
invoke.cont81: ; preds = %invoke.cont72
%41 = bitcast %class.btCapsuleShape* %38 to %class.btCollisionShape*
- %m_shapes84 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx85 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes84, i32 0, i32 9
+ %m_shapes84 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx85 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes84, i32 0, i32 9
store %class.btCollisionShape* %41, %class.btCollisionShape** %arrayidx85, align 4
%call86 = call i8* @_ZN13btConvexShapenwEm(i32 56)
%42 = bitcast i8* %call86 to %class.btCapsuleShape*
invoke.cont90: ; preds = %invoke.cont81
%45 = bitcast %class.btCapsuleShape* %42 to %class.btCollisionShape*
- %m_shapes93 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx94 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes93, i32 0, i32 10
+ %m_shapes93 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx94 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes93, i32 0, i32 10
store %class.btCollisionShape* %45, %class.btCollisionShape** %arrayidx94, align 4
%call95 = call %class.btTransform* @_ZN11btTransformC1Ev(%class.btTransform* %offset)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %offset)
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp, float* %scale.addr, %class.btVector3* %ref.tmp97)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp102, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes103 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx104 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes103, i32 0, i32 0
+ %m_shapes103 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx104 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes103, i32 0, i32 0
%47 = load %class.btCollisionShape** %arrayidx104, align 4
%call105 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp102, %class.btCollisionShape* %47)
- %m_bodies = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx106 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies, i32 0, i32 0
+ %m_bodies = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx106 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies, i32 0, i32 0
store %class.btRigidBody* %call105, %class.btRigidBody** %arrayidx106, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0.000000e+00, float* %ref.tmp109, align 4
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp107, float* %scale.addr, %class.btVector3* %ref.tmp108)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp107)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp113, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes114 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx115 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes114, i32 0, i32 1
+ %m_shapes114 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx115 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes114, i32 0, i32 1
%48 = load %class.btCollisionShape** %arrayidx115, align 4
%call116 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp113, %class.btCollisionShape* %48)
- %m_bodies117 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx118 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies117, i32 0, i32 1
+ %m_bodies117 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx118 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies117, i32 0, i32 1
store %class.btRigidBody* %call116, %class.btRigidBody** %arrayidx118, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0.000000e+00, float* %ref.tmp121, align 4
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp119, float* %scale.addr, %class.btVector3* %ref.tmp120)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp119)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp125, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes126 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx127 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes126, i32 0, i32 2
+ %m_shapes126 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx127 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes126, i32 0, i32 2
%49 = load %class.btCollisionShape** %arrayidx127, align 4
%call128 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp125, %class.btCollisionShape* %49)
- %m_bodies129 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx130 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies129, i32 0, i32 2
+ %m_bodies129 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx130 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies129, i32 0, i32 2
store %class.btRigidBody* %call128, %class.btRigidBody** %arrayidx130, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0xBFC70A3D80000000, float* %ref.tmp133, align 4
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp131, float* %scale.addr, %class.btVector3* %ref.tmp132)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp131)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp137, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes138 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx139 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes138, i32 0, i32 3
+ %m_shapes138 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx139 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes138, i32 0, i32 3
%50 = load %class.btCollisionShape** %arrayidx139, align 4
%call140 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp137, %class.btCollisionShape* %50)
- %m_bodies141 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx142 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies141, i32 0, i32 3
+ %m_bodies141 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx142 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies141, i32 0, i32 3
store %class.btRigidBody* %call140, %class.btRigidBody** %arrayidx142, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0xBFC70A3D80000000, float* %ref.tmp145, align 4
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp143, float* %scale.addr, %class.btVector3* %ref.tmp144)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp143)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp149, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes150 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx151 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes150, i32 0, i32 4
+ %m_shapes150 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx151 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes150, i32 0, i32 4
%51 = load %class.btCollisionShape** %arrayidx151, align 4
%call152 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp149, %class.btCollisionShape* %51)
- %m_bodies153 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx154 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies153, i32 0, i32 4
+ %m_bodies153 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx154 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies153, i32 0, i32 4
store %class.btRigidBody* %call152, %class.btRigidBody** %arrayidx154, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0x3FC70A3D80000000, float* %ref.tmp157, align 4
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp155, float* %scale.addr, %class.btVector3* %ref.tmp156)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp155)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp161, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes162 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx163 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes162, i32 0, i32 5
+ %m_shapes162 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx163 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes162, i32 0, i32 5
%52 = load %class.btCollisionShape** %arrayidx163, align 4
%call164 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp161, %class.btCollisionShape* %52)
- %m_bodies165 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx166 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies165, i32 0, i32 5
+ %m_bodies165 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx166 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies165, i32 0, i32 5
store %class.btRigidBody* %call164, %class.btRigidBody** %arrayidx166, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0x3FC70A3D80000000, float* %ref.tmp169, align 4
call void @_ZmlRKfRK9btVector3(%class.btVector3* sret %ref.tmp167, float* %scale.addr, %class.btVector3* %ref.tmp168)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %transform, %class.btVector3* %ref.tmp167)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp173, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes174 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx175 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes174, i32 0, i32 6
+ %m_shapes174 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx175 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes174, i32 0, i32 6
%53 = load %class.btCollisionShape** %arrayidx175, align 4
%call176 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp173, %class.btCollisionShape* %53)
- %m_bodies177 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx178 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies177, i32 0, i32 6
+ %m_bodies177 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx178 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies177, i32 0, i32 6
store %class.btRigidBody* %call176, %class.btRigidBody** %arrayidx178, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0xBFD6666660000000, float* %ref.tmp181, align 4
%call185 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call185, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp186, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes187 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx188 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes187, i32 0, i32 7
+ %m_shapes187 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx188 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes187, i32 0, i32 7
%54 = load %class.btCollisionShape** %arrayidx188, align 4
%call189 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp186, %class.btCollisionShape* %54)
- %m_bodies190 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx191 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies190, i32 0, i32 7
+ %m_bodies190 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx191 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies190, i32 0, i32 7
store %class.btRigidBody* %call189, %class.btRigidBody** %arrayidx191, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0xBFE6666660000000, float* %ref.tmp194, align 4
%call198 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call198, float 0.000000e+00, float 0.000000e+00, float 0x3FF921FB60000000)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp199, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes200 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx201 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes200, i32 0, i32 8
+ %m_shapes200 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx201 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes200, i32 0, i32 8
%55 = load %class.btCollisionShape** %arrayidx201, align 4
%call202 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp199, %class.btCollisionShape* %55)
- %m_bodies203 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx204 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies203, i32 0, i32 8
+ %m_bodies203 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx204 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies203, i32 0, i32 8
store %class.btRigidBody* %call202, %class.btRigidBody** %arrayidx204, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0x3FD6666660000000, float* %ref.tmp207, align 4
%call211 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call211, float 0.000000e+00, float 0.000000e+00, float 0xBFF921FB60000000)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp212, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes213 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx214 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes213, i32 0, i32 9
+ %m_shapes213 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx214 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes213, i32 0, i32 9
%56 = load %class.btCollisionShape** %arrayidx214, align 4
%call215 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp212, %class.btCollisionShape* %56)
- %m_bodies216 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx217 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies216, i32 0, i32 9
+ %m_bodies216 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx217 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies216, i32 0, i32 9
store %class.btRigidBody* %call215, %class.btRigidBody** %arrayidx217, align 4
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %transform)
store float 0x3FE6666660000000, float* %ref.tmp220, align 4
%call224 = call %class.btMatrix3x3* @_ZN11btTransform8getBasisEv(%class.btTransform* %transform)
call void @_ZN11btMatrix3x311setEulerZYXEfff(%class.btMatrix3x3* %call224, float 0.000000e+00, float 0.000000e+00, float 0xBFF921FB60000000)
call void @_ZNK11btTransformmlERKS_(%class.btTransform* sret %ref.tmp225, %class.btTransform* %offset, %class.btTransform* %transform)
- %m_shapes226 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 2
- %arrayidx227 = getelementptr inbounds [11 x %class.btCollisionShape*]* %m_shapes226, i32 0, i32 10
+ %m_shapes226 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 2
+ %arrayidx227 = getelementptr inbounds [11 x %class.btCollisionShape*], [11 x %class.btCollisionShape*]* %m_shapes226, i32 0, i32 10
%57 = load %class.btCollisionShape** %arrayidx227, align 4
%call228 = call %class.btRigidBody* @_ZN7RagDoll20localCreateRigidBodyEfRK11btTransformP16btCollisionShape(%class.RagDoll* %this1, float 1.000000e+00, %class.btTransform* %ref.tmp225, %class.btCollisionShape* %57)
- %m_bodies229 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx230 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies229, i32 0, i32 10
+ %m_bodies229 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx230 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies229, i32 0, i32 10
store %class.btRigidBody* %call228, %class.btRigidBody** %arrayidx230, align 4
store i32 0, i32* %i, align 4
br label %for.cond
for.body: ; preds = %for.cond
%59 = load i32* %i, align 4
- %m_bodies231 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx232 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies231, i32 0, i32 %59
+ %m_bodies231 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx232 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies231, i32 0, i32 %59
%60 = load %class.btRigidBody** %arrayidx232, align 4
call void @_ZN11btRigidBody10setDampingEff(%class.btRigidBody* %60, float 0x3FA99999A0000000, float 0x3FEB333340000000)
%61 = load i32* %i, align 4
- %m_bodies233 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx234 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies233, i32 0, i32 %61
+ %m_bodies233 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx234 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies233, i32 0, i32 %61
%62 = load %class.btRigidBody** %arrayidx234, align 4
%63 = bitcast %class.btRigidBody* %62 to %class.btCollisionObject*
call void @_ZN17btCollisionObject19setDeactivationTimeEf(%class.btCollisionObject* %63, float 0x3FE99999A0000000)
%64 = load i32* %i, align 4
- %m_bodies235 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx236 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies235, i32 0, i32 %64
+ %m_bodies235 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx236 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies235, i32 0, i32 %64
%65 = load %class.btRigidBody** %arrayidx236, align 4
call void @_ZN11btRigidBody21setSleepingThresholdsEff(%class.btRigidBody* %65, float 0x3FF99999A0000000, float 2.500000e+00)
br label %for.inc
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp247)
%call253 = call noalias i8* @_Znwm(i32 780)
%100 = bitcast i8* %call253 to %class.btHingeConstraint*
- %m_bodies254 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx255 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies254, i32 0, i32 0
+ %m_bodies254 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx255 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies254, i32 0, i32 0
%101 = load %class.btRigidBody** %arrayidx255, align 4
- %m_bodies256 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx257 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies256, i32 0, i32 1
+ %m_bodies256 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx257 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies256, i32 0, i32 1
%102 = load %class.btRigidBody** %arrayidx257, align 4
%call260 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %100, %class.btRigidBody* %101, %class.btRigidBody* %102, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
to label %invoke.cont259 unwind label %lpad258
call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %103, float 0xBFE921FB60000000, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
%104 = load %class.btHingeConstraint** %hingeC, align 4
%105 = bitcast %class.btHingeConstraint* %104 to %class.btTypedConstraint*
- %m_joints = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx261 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints, i32 0, i32 0
+ %m_joints = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx261 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints, i32 0, i32 0
store %class.btTypedConstraint* %105, %class.btTypedConstraint** %arrayidx261, align 4
- %m_ownerWorld262 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld262 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%106 = load %class.btDynamicsWorld** %m_ownerWorld262, align 4
%107 = bitcast %class.btDynamicsWorld* %106 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %107
- %vfn = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable, i64 10
+ %vfn = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable, i64 10
%108 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn
- %m_joints263 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx264 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints263, i32 0, i32 0
+ %m_joints263 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx264 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints263, i32 0, i32 0
%109 = load %class.btTypedConstraint** %arrayidx264, align 4
call void %108(%class.btDynamicsWorld* %106, %class.btTypedConstraint* %109, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp273)
%call279 = call noalias i8* @_Znwm(i32 628)
%110 = bitcast i8* %call279 to %class.btConeTwistConstraint*
- %m_bodies280 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx281 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies280, i32 0, i32 1
+ %m_bodies280 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx281 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies280, i32 0, i32 1
%111 = load %class.btRigidBody** %arrayidx281, align 4
- %m_bodies282 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx283 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies282, i32 0, i32 2
+ %m_bodies282 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx283 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies282, i32 0, i32 2
%112 = load %class.btRigidBody** %arrayidx283, align 4
%call286 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %110, %class.btRigidBody* %111, %class.btRigidBody* %112, %class.btTransform* %localA, %class.btTransform* %localB)
to label %invoke.cont285 unwind label %lpad284
call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %113, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0x3FF921FB60000000, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
%114 = load %class.btConeTwistConstraint** %coneC, align 4
%115 = bitcast %class.btConeTwistConstraint* %114 to %class.btTypedConstraint*
- %m_joints287 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx288 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints287, i32 0, i32 1
+ %m_joints287 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx288 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints287, i32 0, i32 1
store %class.btTypedConstraint* %115, %class.btTypedConstraint** %arrayidx288, align 4
- %m_ownerWorld289 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld289 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%116 = load %class.btDynamicsWorld** %m_ownerWorld289, align 4
%117 = bitcast %class.btDynamicsWorld* %116 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable290 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %117
- %vfn291 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable290, i64 10
+ %vfn291 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable290, i64 10
%118 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn291
- %m_joints292 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx293 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints292, i32 0, i32 1
+ %m_joints292 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx293 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints292, i32 0, i32 1
%119 = load %class.btTypedConstraint** %arrayidx293, align 4
call void %118(%class.btDynamicsWorld* %116, %class.btTypedConstraint* %119, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp302)
%call308 = call noalias i8* @_Znwm(i32 628)
%120 = bitcast i8* %call308 to %class.btConeTwistConstraint*
- %m_bodies309 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx310 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies309, i32 0, i32 0
+ %m_bodies309 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx310 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies309, i32 0, i32 0
%121 = load %class.btRigidBody** %arrayidx310, align 4
- %m_bodies311 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx312 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies311, i32 0, i32 3
+ %m_bodies311 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx312 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies311, i32 0, i32 3
%122 = load %class.btRigidBody** %arrayidx312, align 4
%call315 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %120, %class.btRigidBody* %121, %class.btRigidBody* %122, %class.btTransform* %localA, %class.btTransform* %localB)
to label %invoke.cont314 unwind label %lpad313
call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %123, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
%124 = load %class.btConeTwistConstraint** %coneC, align 4
%125 = bitcast %class.btConeTwistConstraint* %124 to %class.btTypedConstraint*
- %m_joints316 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx317 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints316, i32 0, i32 2
+ %m_joints316 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx317 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints316, i32 0, i32 2
store %class.btTypedConstraint* %125, %class.btTypedConstraint** %arrayidx317, align 4
- %m_ownerWorld318 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld318 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%126 = load %class.btDynamicsWorld** %m_ownerWorld318, align 4
%127 = bitcast %class.btDynamicsWorld* %126 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable319 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %127
- %vfn320 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable319, i64 10
+ %vfn320 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable319, i64 10
%128 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn320
- %m_joints321 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx322 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints321, i32 0, i32 2
+ %m_joints321 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx322 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints321, i32 0, i32 2
%129 = load %class.btTypedConstraint** %arrayidx322, align 4
call void %128(%class.btDynamicsWorld* %126, %class.btTypedConstraint* %129, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp331)
%call337 = call noalias i8* @_Znwm(i32 780)
%130 = bitcast i8* %call337 to %class.btHingeConstraint*
- %m_bodies338 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx339 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies338, i32 0, i32 3
+ %m_bodies338 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx339 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies338, i32 0, i32 3
%131 = load %class.btRigidBody** %arrayidx339, align 4
- %m_bodies340 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx341 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies340, i32 0, i32 4
+ %m_bodies340 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx341 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies340, i32 0, i32 4
%132 = load %class.btRigidBody** %arrayidx341, align 4
%call344 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %130, %class.btRigidBody* %131, %class.btRigidBody* %132, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
to label %invoke.cont343 unwind label %lpad342
call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %133, float 0.000000e+00, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
%134 = load %class.btHingeConstraint** %hingeC, align 4
%135 = bitcast %class.btHingeConstraint* %134 to %class.btTypedConstraint*
- %m_joints345 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx346 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints345, i32 0, i32 3
+ %m_joints345 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx346 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints345, i32 0, i32 3
store %class.btTypedConstraint* %135, %class.btTypedConstraint** %arrayidx346, align 4
- %m_ownerWorld347 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld347 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%136 = load %class.btDynamicsWorld** %m_ownerWorld347, align 4
%137 = bitcast %class.btDynamicsWorld* %136 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable348 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %137
- %vfn349 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable348, i64 10
+ %vfn349 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable348, i64 10
%138 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn349
- %m_joints350 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx351 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints350, i32 0, i32 3
+ %m_joints350 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx351 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints350, i32 0, i32 3
%139 = load %class.btTypedConstraint** %arrayidx351, align 4
call void %138(%class.btDynamicsWorld* %136, %class.btTypedConstraint* %139, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp360)
%call366 = call noalias i8* @_Znwm(i32 628)
%140 = bitcast i8* %call366 to %class.btConeTwistConstraint*
- %m_bodies367 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx368 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies367, i32 0, i32 0
+ %m_bodies367 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx368 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies367, i32 0, i32 0
%141 = load %class.btRigidBody** %arrayidx368, align 4
- %m_bodies369 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx370 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies369, i32 0, i32 5
+ %m_bodies369 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx370 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies369, i32 0, i32 5
%142 = load %class.btRigidBody** %arrayidx370, align 4
%call373 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %140, %class.btRigidBody* %141, %class.btRigidBody* %142, %class.btTransform* %localA, %class.btTransform* %localB)
to label %invoke.cont372 unwind label %lpad371
call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %143, float 0x3FE921FB60000000, float 0x3FE921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
%144 = load %class.btConeTwistConstraint** %coneC, align 4
%145 = bitcast %class.btConeTwistConstraint* %144 to %class.btTypedConstraint*
- %m_joints374 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx375 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints374, i32 0, i32 4
+ %m_joints374 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx375 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints374, i32 0, i32 4
store %class.btTypedConstraint* %145, %class.btTypedConstraint** %arrayidx375, align 4
- %m_ownerWorld376 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld376 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%146 = load %class.btDynamicsWorld** %m_ownerWorld376, align 4
%147 = bitcast %class.btDynamicsWorld* %146 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable377 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %147
- %vfn378 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable377, i64 10
+ %vfn378 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable377, i64 10
%148 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn378
- %m_joints379 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx380 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints379, i32 0, i32 4
+ %m_joints379 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx380 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints379, i32 0, i32 4
%149 = load %class.btTypedConstraint** %arrayidx380, align 4
call void %148(%class.btDynamicsWorld* %146, %class.btTypedConstraint* %149, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp389)
%call395 = call noalias i8* @_Znwm(i32 780)
%150 = bitcast i8* %call395 to %class.btHingeConstraint*
- %m_bodies396 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx397 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies396, i32 0, i32 5
+ %m_bodies396 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx397 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies396, i32 0, i32 5
%151 = load %class.btRigidBody** %arrayidx397, align 4
- %m_bodies398 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx399 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies398, i32 0, i32 6
+ %m_bodies398 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx399 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies398, i32 0, i32 6
%152 = load %class.btRigidBody** %arrayidx399, align 4
%call402 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %150, %class.btRigidBody* %151, %class.btRigidBody* %152, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
to label %invoke.cont401 unwind label %lpad400
call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %153, float 0.000000e+00, float 0x3FF921FB60000000, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
%154 = load %class.btHingeConstraint** %hingeC, align 4
%155 = bitcast %class.btHingeConstraint* %154 to %class.btTypedConstraint*
- %m_joints403 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx404 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints403, i32 0, i32 5
+ %m_joints403 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx404 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints403, i32 0, i32 5
store %class.btTypedConstraint* %155, %class.btTypedConstraint** %arrayidx404, align 4
- %m_ownerWorld405 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld405 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%156 = load %class.btDynamicsWorld** %m_ownerWorld405, align 4
%157 = bitcast %class.btDynamicsWorld* %156 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable406 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %157
- %vfn407 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable406, i64 10
+ %vfn407 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable406, i64 10
%158 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn407
- %m_joints408 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx409 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints408, i32 0, i32 5
+ %m_joints408 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx409 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints408, i32 0, i32 5
%159 = load %class.btTypedConstraint** %arrayidx409, align 4
call void %158(%class.btDynamicsWorld* %156, %class.btTypedConstraint* %159, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp418)
%call424 = call noalias i8* @_Znwm(i32 628)
%160 = bitcast i8* %call424 to %class.btConeTwistConstraint*
- %m_bodies425 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx426 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies425, i32 0, i32 1
+ %m_bodies425 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx426 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies425, i32 0, i32 1
%161 = load %class.btRigidBody** %arrayidx426, align 4
- %m_bodies427 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx428 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies427, i32 0, i32 7
+ %m_bodies427 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx428 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies427, i32 0, i32 7
%162 = load %class.btRigidBody** %arrayidx428, align 4
%call431 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %160, %class.btRigidBody* %161, %class.btRigidBody* %162, %class.btTransform* %localA, %class.btTransform* %localB)
to label %invoke.cont430 unwind label %lpad429
call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %163, float 0x3FF921FB60000000, float 0x3FF921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
%164 = load %class.btConeTwistConstraint** %coneC, align 4
%165 = bitcast %class.btConeTwistConstraint* %164 to %class.btTypedConstraint*
- %m_joints432 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx433 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints432, i32 0, i32 6
+ %m_joints432 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx433 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints432, i32 0, i32 6
store %class.btTypedConstraint* %165, %class.btTypedConstraint** %arrayidx433, align 4
- %m_ownerWorld434 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld434 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%166 = load %class.btDynamicsWorld** %m_ownerWorld434, align 4
%167 = bitcast %class.btDynamicsWorld* %166 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable435 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %167
- %vfn436 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable435, i64 10
+ %vfn436 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable435, i64 10
%168 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn436
- %m_joints437 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx438 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints437, i32 0, i32 6
+ %m_joints437 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx438 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints437, i32 0, i32 6
%169 = load %class.btTypedConstraint** %arrayidx438, align 4
call void %168(%class.btDynamicsWorld* %166, %class.btTypedConstraint* %169, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp447)
%call453 = call noalias i8* @_Znwm(i32 780)
%170 = bitcast i8* %call453 to %class.btHingeConstraint*
- %m_bodies454 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx455 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies454, i32 0, i32 7
+ %m_bodies454 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx455 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies454, i32 0, i32 7
%171 = load %class.btRigidBody** %arrayidx455, align 4
- %m_bodies456 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx457 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies456, i32 0, i32 8
+ %m_bodies456 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx457 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies456, i32 0, i32 8
%172 = load %class.btRigidBody** %arrayidx457, align 4
%call460 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %170, %class.btRigidBody* %171, %class.btRigidBody* %172, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
to label %invoke.cont459 unwind label %lpad458
call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %173, float 0xBFF921FB60000000, float 0.000000e+00, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
%174 = load %class.btHingeConstraint** %hingeC, align 4
%175 = bitcast %class.btHingeConstraint* %174 to %class.btTypedConstraint*
- %m_joints461 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx462 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints461, i32 0, i32 7
+ %m_joints461 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx462 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints461, i32 0, i32 7
store %class.btTypedConstraint* %175, %class.btTypedConstraint** %arrayidx462, align 4
- %m_ownerWorld463 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld463 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%176 = load %class.btDynamicsWorld** %m_ownerWorld463, align 4
%177 = bitcast %class.btDynamicsWorld* %176 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable464 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %177
- %vfn465 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable464, i64 10
+ %vfn465 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable464, i64 10
%178 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn465
- %m_joints466 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx467 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints466, i32 0, i32 7
+ %m_joints466 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx467 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints466, i32 0, i32 7
%179 = load %class.btTypedConstraint** %arrayidx467, align 4
call void %178(%class.btDynamicsWorld* %176, %class.btTypedConstraint* %179, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp476)
%call482 = call noalias i8* @_Znwm(i32 628)
%180 = bitcast i8* %call482 to %class.btConeTwistConstraint*
- %m_bodies483 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx484 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies483, i32 0, i32 1
+ %m_bodies483 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx484 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies483, i32 0, i32 1
%181 = load %class.btRigidBody** %arrayidx484, align 4
- %m_bodies485 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx486 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies485, i32 0, i32 9
+ %m_bodies485 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx486 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies485, i32 0, i32 9
%182 = load %class.btRigidBody** %arrayidx486, align 4
%call489 = invoke %class.btConeTwistConstraint* @_ZN21btConeTwistConstraintC1ER11btRigidBodyS1_RK11btTransformS4_(%class.btConeTwistConstraint* %180, %class.btRigidBody* %181, %class.btRigidBody* %182, %class.btTransform* %localA, %class.btTransform* %localB)
to label %invoke.cont488 unwind label %lpad487
call void @_ZN21btConeTwistConstraint8setLimitEffffff(%class.btConeTwistConstraint* %183, float 0x3FF921FB60000000, float 0x3FF921FB60000000, float 0.000000e+00, float 1.000000e+00, float 0x3FD3333340000000, float 1.000000e+00)
%184 = load %class.btConeTwistConstraint** %coneC, align 4
%185 = bitcast %class.btConeTwistConstraint* %184 to %class.btTypedConstraint*
- %m_joints490 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx491 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints490, i32 0, i32 8
+ %m_joints490 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx491 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints490, i32 0, i32 8
store %class.btTypedConstraint* %185, %class.btTypedConstraint** %arrayidx491, align 4
- %m_ownerWorld492 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld492 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%186 = load %class.btDynamicsWorld** %m_ownerWorld492, align 4
%187 = bitcast %class.btDynamicsWorld* %186 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable493 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %187
- %vfn494 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable493, i64 10
+ %vfn494 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable493, i64 10
%188 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn494
- %m_joints495 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx496 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints495, i32 0, i32 8
+ %m_joints495 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx496 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints495, i32 0, i32 8
%189 = load %class.btTypedConstraint** %arrayidx496, align 4
call void %188(%class.btDynamicsWorld* %186, %class.btTypedConstraint* %189, i1 zeroext true)
call void @_ZN11btTransform11setIdentityEv(%class.btTransform* %localA)
call void @_ZN11btTransform9setOriginERK9btVector3(%class.btTransform* %localB, %class.btVector3* %ref.tmp505)
%call511 = call noalias i8* @_Znwm(i32 780)
%190 = bitcast i8* %call511 to %class.btHingeConstraint*
- %m_bodies512 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx513 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies512, i32 0, i32 9
+ %m_bodies512 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx513 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies512, i32 0, i32 9
%191 = load %class.btRigidBody** %arrayidx513, align 4
- %m_bodies514 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 3
- %arrayidx515 = getelementptr inbounds [11 x %class.btRigidBody*]* %m_bodies514, i32 0, i32 10
+ %m_bodies514 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 3
+ %arrayidx515 = getelementptr inbounds [11 x %class.btRigidBody*], [11 x %class.btRigidBody*]* %m_bodies514, i32 0, i32 10
%192 = load %class.btRigidBody** %arrayidx515, align 4
%call518 = invoke %class.btHingeConstraint* @_ZN17btHingeConstraintC1ER11btRigidBodyS1_RK11btTransformS4_b(%class.btHingeConstraint* %190, %class.btRigidBody* %191, %class.btRigidBody* %192, %class.btTransform* %localA, %class.btTransform* %localB, i1 zeroext false)
to label %invoke.cont517 unwind label %lpad516
call void @_ZN17btHingeConstraint8setLimitEfffff(%class.btHingeConstraint* %193, float 0xBFF921FB60000000, float 0.000000e+00, float 0x3FECCCCCC0000000, float 0x3FD3333340000000, float 1.000000e+00)
%194 = load %class.btHingeConstraint** %hingeC, align 4
%195 = bitcast %class.btHingeConstraint* %194 to %class.btTypedConstraint*
- %m_joints519 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx520 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints519, i32 0, i32 9
+ %m_joints519 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx520 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints519, i32 0, i32 9
store %class.btTypedConstraint* %195, %class.btTypedConstraint** %arrayidx520, align 4
- %m_ownerWorld521 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 1
+ %m_ownerWorld521 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 1
%196 = load %class.btDynamicsWorld** %m_ownerWorld521, align 4
%197 = bitcast %class.btDynamicsWorld* %196 to void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)***
%vtable522 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*** %197
- %vfn523 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable522, i64 10
+ %vfn523 = getelementptr inbounds void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)*, void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vtable522, i64 10
%198 = load void (%class.btDynamicsWorld*, %class.btTypedConstraint*, i1)** %vfn523
- %m_joints524 = getelementptr inbounds %class.RagDoll* %this1, i32 0, i32 4
- %arrayidx525 = getelementptr inbounds [10 x %class.btTypedConstraint*]* %m_joints524, i32 0, i32 9
+ %m_joints524 = getelementptr inbounds %class.RagDoll, %class.RagDoll* %this1, i32 0, i32 4
+ %arrayidx525 = getelementptr inbounds [10 x %class.btTypedConstraint*], [10 x %class.btTypedConstraint*]* %m_joints524, i32 0, i32 9
%199 = load %class.btTypedConstraint** %arrayidx525, align 4
call void %198(%class.btDynamicsWorld* %196, %class.btTypedConstraint* %199, i1 zeroext true)
%200 = load %class.RagDoll** %retval
bb: ; preds = %bb, %bb.nph
%0 = phi i32 [ 0, %bb.nph ], [ %1, %bb ] ; <i32> [#uses=4]
- %scevgep = getelementptr [16 x i32]* @sbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
- %scevgep5 = getelementptr [16 x i32]* @dbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
+ %scevgep = getelementptr [16 x i32], [16 x i32]* @sbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
+ %scevgep5 = getelementptr [16 x i32], [16 x i32]* @dbuf, i32 0, i32 %0 ; <i32*> [#uses=1]
store i32 %0, i32* %scevgep, align 4
store i32 -1, i32* %scevgep5, align 4
%1 = add nsw i32 %0, 1 ; <i32> [#uses=2]
%class = type { i8*, %class*, i32 }
define void @f11101911(%class* %this, i32 %num) ssp align 2 {
entry:
- %p1 = getelementptr inbounds %class* %this, i32 0, i32 1
- %p2 = getelementptr inbounds %class* %this, i32 0, i32 2
+ %p1 = getelementptr inbounds %class, %class* %this, i32 0, i32 1
+ %p2 = getelementptr inbounds %class, %class* %this, i32 0, i32 2
tail call void asm sideeffect "", "~{r1},~{r3},~{r5},~{r11},~{r13}"() nounwind
store %class* %this, %class** %p1, align 4
store i32 %num, i32* %p2, align 4
; CHECK-NOT: vmov.f32
; CHECK: blt
%tmp54 = add i32 0, %tmp53 ; <i32> [#uses=0]
- %fi.1 = getelementptr float* %fz, i32 undef ; <float*> [#uses=2]
+ %fi.1 = getelementptr float, float* %fz, i32 undef ; <float*> [#uses=2]
%tmp80 = add i32 0, %tmp79 ; <i32> [#uses=1]
- %scevgep81 = getelementptr float* %fz, i32 %tmp80 ; <float*> [#uses=1]
+ %scevgep81 = getelementptr float, float* %fz, i32 %tmp80 ; <float*> [#uses=1]
%2 = load float* undef, align 4 ; <float> [#uses=1]
%3 = fmul float %2, %1 ; <float> [#uses=1]
%4 = load float* null, align 4 ; <float> [#uses=2]
define void @vorbis_encode_noisebias_setup(i8* nocapture %vi.0.7.val, double %s, i32 %block, i32* nocapture %suppress, %struct.noise3* nocapture %in, %struct.noiseguard* nocapture %guard, double %userbias) nounwind {
entry:
- %0 = getelementptr %struct.noiseguard* %guard, i32 %block, i32 2; <i32*> [#uses=1]
+ %0 = getelementptr %struct.noiseguard, %struct.noiseguard* %guard, i32 %block, i32 2; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=1]
store i32 %1, i32* undef, align 4
unreachable
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%tmp5 = sub i32 1000, %indvar ; <i32> [#uses=1]
%1 = load i32** @array, align 4 ; <i32*> [#uses=1]
- %scevgep = getelementptr i32* %1, i32 %tmp5 ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %1, i32 %tmp5 ; <i32*> [#uses=1]
%2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
%3 = add nsw i32 %2, %0 ; <i32> [#uses=2]
store i32 %3, i32* @G, align 4
bb: ; preds = %bb, %bb.nph
%1 = phi i32 [ %.pre, %bb.nph ], [ %3, %bb ] ; <i32> [#uses=1]
%i.03 = phi i32 [ 0, %bb.nph ], [ %4, %bb ] ; <i32> [#uses=2]
- %scevgep = getelementptr i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %vals, i32 %i.03 ; <i32*> [#uses=1]
%2 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
%3 = add nsw i32 %1, %2 ; <i32> [#uses=2]
store i32 %3, i32* @GV, align 4
; CHECK: %bb1
%indvar = phi i32 [ %indvar.next, %bb1 ], [ 0, %entry ]
%tmp1 = shl i32 %indvar, 2
- %gep1 = getelementptr i8* %ptr1, i32 %tmp1
+ %gep1 = getelementptr i8, i8* %ptr1, i32 %tmp1
%tmp2 = call <4 x float> @llvm.arm.neon.vld1.v4f32(i8* %gep1, i32 1)
%tmp3 = call <4 x float> @llvm.arm.neon.vmaxs.v4f32(<4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, <4 x float> %tmp2)
- %gep2 = getelementptr i8* %ptr2, i32 %tmp1
+ %gep2 = getelementptr i8, i8* %ptr2, i32 %tmp1
call void @llvm.arm.neon.vst1.v4f32(i8* %gep2, <4 x float> %tmp3, i32 1)
%indvar.next = add i32 %indvar, 1
%cond = icmp eq i32 %indvar.next, 10
; CHECK-LABEL: atexit:
; CHECK: add r0, pc
%r = alloca %struct.one_atexit_routine, align 4 ; <%struct.one_atexit_routine*> [#uses=3]
- %0 = getelementptr %struct.one_atexit_routine* %r, i32 0, i32 0, i32 0 ; <void ()**> [#uses=1]
+ %0 = getelementptr %struct.one_atexit_routine, %struct.one_atexit_routine* %r, i32 0, i32 0, i32 0 ; <void ()**> [#uses=1]
store void ()* %func, void ()** %0, align 4
- %1 = getelementptr %struct.one_atexit_routine* %r, i32 0, i32 1 ; <i32*> [#uses=1]
+ %1 = getelementptr %struct.one_atexit_routine, %struct.one_atexit_routine* %r, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 0, i32* %1, align 4
%2 = call i32 @atexit_common(%struct.one_atexit_routine* %r, i8* bitcast ({ }* @__dso_handle to i8*)) nounwind ; <i32> [#uses=1]
ret i32 %2
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %1)
- %2 = getelementptr inbounds [256 x i32]* %a1, i32 0, i32 0
+ %2 = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
call void @llvm.lifetime.end(i64 1024, i8* %1)
br label %bb11
bb11: ; preds = %bb9, %bb7
- %1 = getelementptr i32* undef, i32 0
+ %1 = getelementptr i32, i32* undef, i32 0
store i32 0, i32* %1
ret void
}
entry:
; CHECK-LABEL: f2:
; CHECK: ldr.w r0, [r0, #4092]
- %tmp2 = getelementptr i32* %v, i32 1023
+ %tmp2 = getelementptr i32, i32* %v, i32 1023
%tmp = load i32* %tmp2
ret i32 %tmp
}
; CHECK-LABEL: f3:
; CHECK: mov.w r1, #4096
; CHECK: ldr r0, [r0, r1]
- %tmp2 = getelementptr i32* %v, i32 1024
+ %tmp2 = getelementptr i32, i32* %v, i32 1024
%tmp = load i32* %tmp2
ret i32 %tmp
}
; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2 %s -o - | FileCheck %s
define i32* @test1(i32* %X, i32* %dest) {
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
+ %Y = getelementptr i32, i32* %X, i32 4 ; <i32*> [#uses=2]
%A = load i32* %Y ; <i32> [#uses=1]
store i32 %A, i32* %dest
ret i32* %Y
; CHECK: ldr{{.*}}!
define i8* @test3(i8* %X, i32* %dest) {
- %tmp1 = getelementptr i8* %X, i32 4
+ %tmp1 = getelementptr i8, i8* %X, i32 4
%tmp2 = load i8* %tmp1
%tmp3 = sext i8 %tmp2 to i32
store i32 %tmp3, i32* %dest
entry:
; CHECK-LABEL: f2:
; CHECK: ldrb r0, [r0, #-1]
- %tmp2 = getelementptr i8* %v, i8 1023
+ %tmp2 = getelementptr i8, i8* %v, i8 1023
%tmp = load i8* %tmp2
ret i8 %tmp
}
entry:
; CHECK-LABEL: f2:
; CHECK: ldrh.w r0, [r0, #2046]
- %tmp2 = getelementptr i16* %v, i16 1023
+ %tmp2 = getelementptr i16, i16* %v, i16 1023
%tmp = load i16* %tmp2
ret i16 %tmp
}
; CHECK-LABEL: f3:
; CHECK: mov.w r1, #4096
; CHECK: ldrh r0, [r0, r1]
- %tmp2 = getelementptr i16* %v, i16 2048
+ %tmp2 = getelementptr i16, i16* %v, i16 2048
%tmp = load i16* %tmp2
ret i16 %tmp
}
define i32 @f2(i32 %a, i32* %v) {
; CHECK-LABEL: f2:
; CHECK: str.w r0, [r1, #4092]
- %tmp2 = getelementptr i32* %v, i32 1023
+ %tmp2 = getelementptr i32, i32* %v, i32 1023
store i32 %a, i32* %tmp2
ret i32 %a
}
define i32 @f2a(i32 %a, i32* %v) {
; CHECK-LABEL: f2a:
; CHECK: str r0, [r1, #-128]
- %tmp2 = getelementptr i32* %v, i32 -32
+ %tmp2 = getelementptr i32, i32* %v, i32 -32
store i32 %a, i32* %tmp2
ret i32 %a
}
; CHECK-LABEL: f3:
; CHECK: mov.w r2, #4096
; CHECK: str r0, [r1, r2]
- %tmp2 = getelementptr i32* %v, i32 1024
+ %tmp2 = getelementptr i32, i32* %v, i32 1024
store i32 %a, i32* %tmp2
ret i32 %a
}
; CHECK: test1
; CHECK: str r1, [r0, #16]!
%B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
+ %Y = getelementptr i32, i32* %X, i32 4 ; <i32*> [#uses=2]
store i32 %B, i32* %Y
store i32* %Y, i32** %dest
ret void
; CHECK: test2
; CHECK: strh r1, [r0, #8]!
%B = load i32* %A ; <i32> [#uses=1]
- %Y = getelementptr i16* %X, i32 4 ; <i16*> [#uses=2]
+ %Y = getelementptr i16, i16* %X, i32 4 ; <i16*> [#uses=2]
%tmp = trunc i32 %B to i16 ; <i16> [#uses=1]
store i16 %tmp, i16* %Y
ret i16* %Y
define i8 @f2(i8 %a, i8* %v) {
; CHECK-LABEL: f2:
; CHECK: strb.w r0, [r1, #4092]
- %tmp2 = getelementptr i8* %v, i32 4092
+ %tmp2 = getelementptr i8, i8* %v, i32 4092
store i8 %a, i8* %tmp2
ret i8 %a
}
define i8 @f2a(i8 %a, i8* %v) {
; CHECK-LABEL: f2a:
; CHECK: strb r0, [r1, #-128]
- %tmp2 = getelementptr i8* %v, i32 -128
+ %tmp2 = getelementptr i8, i8* %v, i32 -128
store i8 %a, i8* %tmp2
ret i8 %a
}
; CHECK-LABEL: f3:
; CHECK: mov.w r2, #4096
; CHECK: strb r0, [r1, r2]
- %tmp2 = getelementptr i8* %v, i32 4096
+ %tmp2 = getelementptr i8, i8* %v, i32 4096
store i8 %a, i8* %tmp2
ret i8 %a
}
define i16 @f2(i16 %a, i16* %v) {
; CHECK-LABEL: f2:
; CHECK: strh.w r0, [r1, #4092]
- %tmp2 = getelementptr i16* %v, i32 2046
+ %tmp2 = getelementptr i16, i16* %v, i32 2046
store i16 %a, i16* %tmp2
ret i16 %a
}
define i16 @f2a(i16 %a, i16* %v) {
; CHECK-LABEL: f2a:
; CHECK: strh r0, [r1, #-128]
- %tmp2 = getelementptr i16* %v, i32 -64
+ %tmp2 = getelementptr i16, i16* %v, i32 -64
store i16 %a, i16* %tmp2
ret i16 %a
}
; CHECK-LABEL: f3:
; CHECK: mov.w r2, #4096
; CHECK: strh r0, [r1, r2]
- %tmp2 = getelementptr i16* %v, i32 2048
+ %tmp2 = getelementptr i16, i16* %v, i32 2048
store i16 %a, i16* %tmp2
ret i16 %a
}
%inc.1 = add i32 %tmp.35, 1 ; <i32> [#uses=2]
store i32 %inc.1, i32* @last
%tmp.36 = load i8** @block ; <i8*> [#uses=1]
- %tmp.38 = getelementptr i8* %tmp.36, i32 %inc.1 ; <i8*> [#uses=1]
+ %tmp.38 = getelementptr i8, i8* %tmp.36, i32 %inc.1 ; <i8*> [#uses=1]
%tmp.40 = trunc i32 %tmp.21.reload to i8 ; <i8> [#uses=1]
store i8 %tmp.40, i8* %tmp.38
%tmp.910 = load i32* @last ; <i32> [#uses=1]
%tmp.upgrd.2 = load i8** @in5 ; <i8*> [#uses=2]
%tmp3 = load i32* %tmp1 ; <i32> [#uses=1]
%gep.upgrd.3 = zext i32 %tmp3 to i64 ; <i64> [#uses=1]
- %tmp4 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
+ %tmp4 = getelementptr i8, i8* %tmp.upgrd.2, i64 %gep.upgrd.3 ; <i8*> [#uses=2]
%tmp7 = load i32* %tmp ; <i32> [#uses=1]
%gep.upgrd.4 = zext i32 %tmp7 to i64 ; <i64> [#uses=1]
- %tmp8 = getelementptr i8* %tmp.upgrd.2, i64 %gep.upgrd.4 ; <i8*> [#uses=2]
+ %tmp8 = getelementptr i8, i8* %tmp.upgrd.2, i64 %gep.upgrd.4 ; <i8*> [#uses=2]
%tmp.upgrd.5 = tail call i32 @memcmp( i8* %tmp8, i8* %tmp4, i32 %tmp.upgrd.1 ) ; <i32> [#uses=1]
ret i32 %tmp.upgrd.5
}
define void @test() {
%tmp = load i16** @A ; <i16*> [#uses=1]
- %tmp1 = getelementptr i16* %tmp, i32 1 ; <i16*> [#uses=1]
+ %tmp1 = getelementptr i16, i16* %tmp, i32 1 ; <i16*> [#uses=1]
%tmp.upgrd.1 = load i16* %tmp1 ; <i16> [#uses=1]
%tmp3 = zext i16 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
%tmp.upgrd.2 = load i32* @B ; <i32> [#uses=1]
%tmp. = shl i32 %indvar, 2 ; <i32> [#uses=1]
%tmp.10 = add nsw i32 %tmp., 1 ; <i32> [#uses=2]
%tmp31 = add nsw i32 %tmp.10, -1 ; <i32> [#uses=4]
- %tmp32 = getelementptr i32* %mpp, i32 %tmp31 ; <i32*> [#uses=1]
+ %tmp32 = getelementptr i32, i32* %mpp, i32 %tmp31 ; <i32*> [#uses=1]
%tmp34 = bitcast i32* %tmp32 to <16 x i8>* ; <i8*> [#uses=1]
%tmp = load <16 x i8>* %tmp34, align 1
- %tmp42 = getelementptr i32* %tpmm, i32 %tmp31 ; <i32*> [#uses=1]
+ %tmp42 = getelementptr i32, i32* %tpmm, i32 %tmp31 ; <i32*> [#uses=1]
%tmp42.upgrd.1 = bitcast i32* %tmp42 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
%tmp46 = load <4 x i32>* %tmp42.upgrd.1 ; <<4 x i32>> [#uses=1]
%tmp54 = bitcast <16 x i8> %tmp to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp55 = add <4 x i32> %tmp54, %tmp46 ; <<4 x i32>> [#uses=2]
%tmp55.upgrd.2 = bitcast <4 x i32> %tmp55 to <2 x i64> ; <<2 x i64>> [#uses=1]
- %tmp62 = getelementptr i32* %ip, i32 %tmp31 ; <i32*> [#uses=1]
+ %tmp62 = getelementptr i32, i32* %ip, i32 %tmp31 ; <i32*> [#uses=1]
%tmp65 = bitcast i32* %tmp62 to <16 x i8>* ; <i8*> [#uses=1]
%tmp66 = load <16 x i8>* %tmp65, align 1
- %tmp73 = getelementptr i32* %tpim, i32 %tmp31 ; <i32*> [#uses=1]
+ %tmp73 = getelementptr i32, i32* %tpim, i32 %tmp31 ; <i32*> [#uses=1]
%tmp73.upgrd.3 = bitcast i32* %tmp73 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
%tmp77 = load <4 x i32>* %tmp73.upgrd.3 ; <<4 x i32>> [#uses=1]
%tmp87 = bitcast <16 x i8> %tmp66 to <4 x i32> ; <<4 x i32>> [#uses=1]
%tmp111 = and <2 x i64> %tmp110, %tmp55.upgrd.2 ; <<2 x i64>> [#uses=1]
%tmp121 = and <2 x i64> %tmp99.upgrd.5, %tmp88.upgrd.4 ; <<2 x i64>> [#uses=1]
%tmp131 = or <2 x i64> %tmp121, %tmp111 ; <<2 x i64>> [#uses=1]
- %tmp137 = getelementptr i32* %mc, i32 %tmp.10 ; <i32*> [#uses=1]
+ %tmp137 = getelementptr i32, i32* %mc, i32 %tmp.10 ; <i32*> [#uses=1]
%tmp137.upgrd.7 = bitcast i32* %tmp137 to <2 x i64>* ; <<2 x i64>*> [#uses=1]
store <2 x i64> %tmp131, <2 x i64>* %tmp137.upgrd.7
%tmp147 = add nsw i32 %tmp.10, 8 ; <i32> [#uses=1]
cond_next: ; preds = %0
%gep.upgrd.1 = zext i32 %tmp11 to i64 ; <i64> [#uses=1]
- %tmp17 = getelementptr %struct.expr** null, i64 %gep.upgrd.1 ; <%struct.expr**> [#uses=0]
+ %tmp17 = getelementptr %struct.expr*, %struct.expr** null, i64 %gep.upgrd.1 ; <%struct.expr**> [#uses=0]
ret void
return: ; preds = %0
cond_true3632: ; preds = %newFuncRoot
%tmp3378 = load i32* %tmp3629 ; <i32> [#uses=1]
%tmp3379 = add i32 %tmp3378, -1 ; <i32> [#uses=1]
- %tmp3381 = getelementptr %struct.varray_head_tag* %stack3023.6, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
+ %tmp3381 = getelementptr %struct.varray_head_tag, %struct.varray_head_tag* %stack3023.6, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
%tmp3382 = bitcast %struct.varray_data* %tmp3381 to [1 x i32]* ; <[1 x i32]*> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp3379 to i64 ; <i64> [#uses=1]
- %tmp3383 = getelementptr [1 x i32]* %tmp3382, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp3383 = getelementptr [1 x i32], [1 x i32]* %tmp3382, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
%tmp3384 = load i32* %tmp3383 ; <i32> [#uses=1]
%tmp3387 = load i32* %tmp3629 ; <i32> [#uses=1]
%tmp3388 = add i32 %tmp3387, -1 ; <i32> [#uses=1]
store i32 %tmp3388, i32* %tmp3629
%tmp3391 = load %struct.varray_head_tag** @basic_block_info ; <%struct.varray_head_tag*> [#uses=1]
- %tmp3393 = getelementptr %struct.varray_head_tag* %tmp3391, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
+ %tmp3393 = getelementptr %struct.varray_head_tag, %struct.varray_head_tag* %tmp3391, i32 0, i32 4 ; <%struct.varray_data*> [#uses=1]
%tmp3394 = bitcast %struct.varray_data* %tmp3393 to [1 x %struct.basic_block_def*]* ; <[1 x %struct.basic_block_def*]*> [#uses=1]
- %tmp3395 = getelementptr [1 x %struct.basic_block_def*]* %tmp3394, i32 0, i32 %tmp3384 ; <%struct.basic_block_def**> [#uses=1]
+ %tmp3395 = getelementptr [1 x %struct.basic_block_def*], [1 x %struct.basic_block_def*]* %tmp3394, i32 0, i32 %tmp3384 ; <%struct.basic_block_def**> [#uses=1]
%tmp3396 = load %struct.basic_block_def** %tmp3395 ; <%struct.basic_block_def*> [#uses=1]
- %tmp3397 = getelementptr %struct.basic_block_def* %tmp3396, i32 0, i32 3 ; <%struct.VEC_edge**> [#uses=1]
+ %tmp3397 = getelementptr %struct.basic_block_def, %struct.basic_block_def* %tmp3396, i32 0, i32 3 ; <%struct.VEC_edge**> [#uses=1]
br label %bb3502.exitStub
}
%tmp = load i32* null ; <i32> [#uses=1]
%tmp56 = and i32 %tmp, 255 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp56 to i64 ; <i64> [#uses=1]
- %tmp8 = getelementptr [0 x i32]* @tree_code_type, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp8 = getelementptr [0 x i32], [0 x i32]* @tree_code_type, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
%tmp9 = load i32* %tmp8 ; <i32> [#uses=1]
%tmp10 = add i32 %tmp9, -1 ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp ugt i32 %tmp10, 2 ; <i1> [#uses=1]
cond_next472: ; preds = %cond_next330
%tmp490 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
- %tmp492 = getelementptr %struct.tree_node* %tmp490, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
+ %tmp492 = getelementptr %struct.tree_node, %struct.tree_node* %tmp490, i32 0, i32 0, i32 0, i32 3 ; <i8*> [#uses=1]
%tmp492.upgrd.1 = bitcast i8* %tmp492 to i32* ; <i32*> [#uses=1]
%tmp493 = load i32* %tmp492.upgrd.1 ; <i32> [#uses=1]
%tmp495 = trunc i32 %tmp493 to i8 ; <i8> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store double 0x400921FB54442D18, double* %pi
%tmp.upgrd.1 = load double* %pi ; <double> [#uses=1]
- %real = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
+ %real = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %real
- %real3 = getelementptr { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
+ %real3 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
store double %tmp.upgrd.1, double* %real3
- %tmp.upgrd.2 = getelementptr { double, double }* %tmp, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp4 = getelementptr { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp.upgrd.2 = getelementptr { double, double }, { double, double }* %tmp, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp4 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 0 ; <double*> [#uses=1]
%tmp5 = load double* %tmp4 ; <double> [#uses=1]
store double %tmp5, double* %tmp.upgrd.2
- %tmp6 = getelementptr { double, double }* %tmp, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp7 = getelementptr { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp6 = getelementptr { double, double }, { double, double }* %tmp, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp7 = getelementptr { double, double }, { double, double }* %tmp1, i64 0, i32 1 ; <double*> [#uses=1]
%tmp8 = load double* %tmp7 ; <double> [#uses=1]
store double %tmp8, double* %tmp6
%tmp.upgrd.3 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp.upgrd.4 = getelementptr { i64, i64 }* %tmp.upgrd.3, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.4 = getelementptr { i64, i64 }, { i64, i64 }* %tmp.upgrd.3, i64 0, i32 0 ; <i64*> [#uses=1]
%tmp.upgrd.5 = load i64* %tmp.upgrd.4 ; <i64> [#uses=1]
%tmp9 = bitcast { double, double }* %tmp to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp10 = getelementptr { i64, i64 }* %tmp9, i64 0, i32 1 ; <i64*> [#uses=1]
+ %tmp10 = getelementptr { i64, i64 }, { i64, i64 }* %tmp9, i64 0, i32 1 ; <i64*> [#uses=1]
%tmp11 = load i64* %tmp10 ; <i64> [#uses=1]
call void @cexp( { double, double }* sret %tmp2, i64 %tmp.upgrd.5, i64 %tmp11 )
- %tmp12 = getelementptr { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
- %tmp13 = getelementptr { double, double }* %tmp2, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp12 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp13 = getelementptr { double, double }, { double, double }* %tmp2, i64 0, i32 0 ; <double*> [#uses=1]
%tmp14 = load double* %tmp13 ; <double> [#uses=1]
store double %tmp14, double* %tmp12
- %tmp15 = getelementptr { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
- %tmp16 = getelementptr { double, double }* %tmp2, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp15 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp16 = getelementptr { double, double }, { double, double }* %tmp2, i64 0, i32 1 ; <double*> [#uses=1]
%tmp17 = load double* %tmp16 ; <double> [#uses=1]
store double %tmp17, double* %tmp15
- %tmp18 = getelementptr { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
+ %tmp18 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 1 ; <double*> [#uses=1]
%tmp19 = load double* %tmp18 ; <double> [#uses=1]
- %tmp20 = getelementptr { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
+ %tmp20 = getelementptr { double, double }, { double, double }* %z, i64 0, i32 0 ; <double*> [#uses=1]
%tmp21 = load double* %tmp20 ; <double> [#uses=1]
- %tmp.upgrd.6 = getelementptr [9 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.upgrd.6 = getelementptr [9 x i8], [9 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
%tmp.upgrd.7 = call i32 (i8*, ...)* @printf( i8* %tmp.upgrd.6, double %tmp21, double %tmp19 ) ; <i32> [#uses=0]
br label %finish
finish:
define i1 @_ZNK12QImageWriter8canWriteEv() {
%tmp62 = load %struct.QImageWriterPrivate** null ; <%struct.QImageWriterPrivate*> [#uses=1]
- %tmp = getelementptr %struct.QImageWriterPrivate* %tmp62, i32 0, i32 9 ; <%struct.QString*> [#uses=1]
+ %tmp = getelementptr %struct.QImageWriterPrivate, %struct.QImageWriterPrivate* %tmp62, i32 0, i32 9 ; <%struct.QString*> [#uses=1]
%tmp75 = call %struct.QString* @_ZN7QStringaSERKS_( %struct.QString* %tmp, %struct.QString* null ) ; <%struct.QString*> [#uses=0]
call void asm sideeffect "lock\0Adecl $0\0Asetne 1", "=*m"( i32* null )
ret i1 false
bb:
%i = phi i64 [ 0, %entry ], [ %k, %bb ]
- %j = getelementptr double* %y, i64 %i
+ %j = getelementptr double, double* %y, i64 %i
store double 0.000000e+00, double* %j
%k = add i64 %i, 1
%n = icmp eq i64 %k, 0
%r24 = shl i32 %r23a, 0
%r25 = add i32 %r24, 0
%ras2 = alloca i8, i32 %r25, align 16
- %r28 = getelementptr i8* %ras2, i32 0
+ %r28 = getelementptr i8, i8* %ras2, i32 0
%r38 = shl i64 %r12, 0
%s2013 = add i64 %r38, 0
- %c22012 = getelementptr i8* %ras2, i64 %s2013
+ %c22012 = getelementptr i8, i8* %ras2, i64 %s2013
%r42 = shl i64 %r12, 0
%s2011 = add i64 %r42, 16
- %c22010 = getelementptr i8* %ras2, i64 %s2011
+ %c22010 = getelementptr i8, i8* %ras2, i64 %s2011
%r50 = add i64 %r16, 0
%r51 = icmp slt i64 %r50, 0
%r50sh = shl i64 %r50, 0
%r54 = select i1 %r51, i64 0, i64 %r50j
%r56 = mul i64 %r54, %r12
%r28s = add i64 %r56, 16
- %c2 = getelementptr i8* %ras2, i64 %r28s
+ %c2 = getelementptr i8, i8* %ras2, i64 %r28s
%r60 = sub i32 %r2, %r
%r61 = icmp slt i32 %r60, 0
br i1 %r61, label %a29b, label %b63
%d753 = bitcast i64 %w1874 to i64
%r343 = add i64 %s661, 0
%r346 = add i64 %r343, 0
- %r347 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r346
+ %r347 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r346
%r348 = load float* %r347
%r352 = add i64 %r343, 0
- %r353 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r352
+ %r353 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r352
%r354 = load float* %r353
%r362 = load float* bitcast ([128 x i64]* @i6000 to float*)
%r363 = fadd float 0.000000e+00, %r362
%s923 = phi i64 [ 0, %b514 ], [ %r799, %b712 ]
%s933 = phi i64 [ %r533, %b514 ], [ %r795, %b712 ]
%r538 = add i64 %w1855, 0
- %r539 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r538
+ %r539 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r538
%r540 = load float* %r539
%r551 = load float* bitcast ([128 x i64]* @i6000 to float*)
%r562 = sub i64 %s933, 0
a45b714:
%r717 = add i64 %e944, 0
%r720 = add i64 %r717, 0
- %r721 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r720
+ %r721 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r720
%r722 = load float* %r721
%r726 = add i64 %r717, 0
- %r727 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r726
+ %r727 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r726
%r728 = load float* %r727
%r732 = add i64 %r717, 0
- %r733 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r732
+ %r733 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r732
%r734 = load float* %r733
%r738 = add i64 %r717, 0
- %r739 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r738
+ %r739 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r738
%r740 = load float* %r739
%r744 = add i64 %r717, 0
- %r745 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r744
+ %r745 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r744
%r746 = load float* %r745
%r750 = add i64 %r717, 0
- %r751 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r750
+ %r751 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r750
%r752 = load float* %r751
%r753 = fadd float %r752, %r746
%r754 = fadd float %r728, %r722
%r757 = fadd float %r753, %r756
%r759 = fadd float %r757, %r540
%r770 = add i64 %r717, 0
- %r771 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r770
+ %r771 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r770
%r772 = load float* %r771
%r776 = add i64 %r717, 0
- %r777 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r776
+ %r777 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r776
%r778 = load float* %r777
%r781 = fadd float %r363, %r772
%r782 = fadd float %r781, %r778
%s1173 = add i64 %b1902, 0
%r859 = add i64 %r856, 0
%r862 = add i64 %w1891, 0
- %r863 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r862
+ %r863 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r862
%r864 = load float* %r863
%r868 = add i64 %w1891, 0
- %r869 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r868
+ %r869 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r868
%r870 = load float* %r869
%r873 = sub i64 %r859, 0
%r876 = sub i64 %s1173, 0
%r1022 = add i64 %r876, 0
%r1024 = bitcast i8* %c2 to float*
%r1025 = add i64 %r1022, 0
- %r1026 = getelementptr float* %r1024, i64 %r1025
+ %r1026 = getelementptr float, float* %r1024, i64 %r1025
%r1027 = load float* %r1026
%r1032 = add i64 %r873, 0
%r1033 = add i64 %r1032, 0
- %r1034 = getelementptr float* %r1024, i64 %r1033
+ %r1034 = getelementptr float, float* %r1024, i64 %r1033
%r1035 = load float* %r1034
%r1037 = bitcast i8* %c22010 to float*
- %r1040 = getelementptr float* %r1037, i64 %r1025
+ %r1040 = getelementptr float, float* %r1037, i64 %r1025
%r1044 = fadd float %r864, %r1035
%r1046 = fadd float %r870, %r1027
%r1047 = fadd float %r1044, %r1046
%d1353 = bitcast i64 %w1915 to i64
%r1120 = add i64 %s661, 0
%r1121 = add i64 %r1120, 0
- %r1122 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1121
+ %r1122 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1121
%r1123 = load float* %r1122
%r1132 = bitcast i8* %c22012 to float*
- %r1134 = getelementptr float* %r1132, i64 %w1915
+ %r1134 = getelementptr float, float* %r1132, i64 %w1915
%r1135 = load float* %r1134
%r1136 = fadd float %r1123, %r1135
%r1138 = icmp slt i64 %r1114, 0
%r1352 = add i64 %s1523, 0
%r1355 = sub i64 %r1352, 0
%r1370 = add i64 %d1533, 0
- %r1371 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1370
+ %r1371 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1370
%r1372 = load float* %r1371
br label %a74b
a74b:
%r1614 = mul i64 %r1613, 0
%r1622 = add i64 %r1614, 0
%r1754 = bitcast i8* %r28 to float*
- %r1756 = getelementptr float* %r1754, i64 %w1970
+ %r1756 = getelementptr float, float* %r1754, i64 %w1970
%r1757 = load float* %r1756
%r1761 = add i64 %r1622, 0
- %r1762 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1761
+ %r1762 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1761
%r1763 = load float* %r1762
%r1767 = add i64 %r1622, 0
- %r1768 = getelementptr float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1767
+ %r1768 = getelementptr float, float* bitcast ([128 x i64]* @i6000 to float*), i64 %r1767
%r1772 = fadd float %r1763, 0.000000e+00
%r1773 = fadd float %r1772, 0.000000e+00
%r1809 = fadd float %r1757, 0.000000e+00
%tmp3 = load float** %tmp2
%tmp132 = shl i32 %tmp12, 2 ; <i32> [#uses=1]
%tmp4 = bitcast float* %tmp3 to i8* ; <i8*> [#uses=1]
- %ctg2 = getelementptr i8* %tmp4, i32 %tmp132 ; <i8*> [#uses=1]
+ %ctg2 = getelementptr i8, i8* %tmp4, i32 %tmp132 ; <i8*> [#uses=1]
%tmp6 = ptrtoint i8* %ctg2 to i32 ; <i32> [#uses=1]
%tmp14 = or i32 %tmp6, 1 ; <i32> [#uses=1]
ret i32 %tmp14
ret i16 4
bb.i9.i.i932.ce: ; preds = %newFuncRoot
- %tmp1.i3.i.i930 = getelementptr %struct.list* %l_addr.01.0.i2.i.i929, i32 0, i32 0 ; <i8**> [#uses=1]
+ %tmp1.i3.i.i930 = getelementptr %struct.list, %struct.list* %l_addr.01.0.i2.i.i929, i32 0, i32 0 ; <i8**> [#uses=1]
%tmp2.i4.i.i931 = load i8** %tmp1.i3.i.i930 ; <i8*> [#uses=1]
%tmp66.i62.i = bitcast i8* %tmp2.i4.i.i931 to %struct.operator* ; <%struct.operator*> [#uses=7]
- %tmp1.i6.i = getelementptr %struct.operator* %tmp66.i62.i, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp1.i6.i = getelementptr %struct.operator, %struct.operator* %tmp66.i62.i, i32 0, i32 2 ; <i32*> [#uses=1]
%tmp2.i7.i = load i32* %tmp1.i6.i ; <i32> [#uses=1]
%tmp3.i8.i = load %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
%tmp5.i9.i = call i32 (%struct.FILE*, i8*, ...)* @fprintf( %struct.FILE* %tmp3.i8.i, i8* getelementptr ([11 x i8]* @str1, i32 0, i32 0), i32 %tmp2.i7.i ) ; <i32> [#uses=0]
- %tmp7.i10.i = getelementptr %struct.operator* %tmp66.i62.i, i32 0, i32 5 ; <i32*> [#uses=1]
+ %tmp7.i10.i = getelementptr %struct.operator, %struct.operator* %tmp66.i62.i, i32 0, i32 5 ; <i32*> [#uses=1]
%tmp8.i11.i = load i32* %tmp7.i10.i ; <i32> [#uses=7]
br label %NodeBlock5
bb: ; preds = %bb, %bb.preheader
%i.073.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%p_addr.076.0.rec = mul i32 %i.073.0, 9 ; <i32> [#uses=9]
- %p_addr.076.0 = getelementptr i8* %p, i32 %p_addr.076.0.rec ; <i8*> [#uses=1]
- %tmp2 = getelementptr i8** %buf, i32 %i.073.0 ; <i8**> [#uses=1]
+ %p_addr.076.0 = getelementptr i8, i8* %p, i32 %p_addr.076.0.rec ; <i8*> [#uses=1]
+ %tmp2 = getelementptr i8*, i8** %buf, i32 %i.073.0 ; <i8**> [#uses=1]
%tmp3 = load i8** %tmp2 ; <i8*> [#uses=8]
- %tmp5 = getelementptr i8* %tmp3, i32 %col ; <i8*> [#uses=1]
+ %tmp5 = getelementptr i8, i8* %tmp3, i32 %col ; <i8*> [#uses=1]
%tmp7 = load i8* %p_addr.076.0 ; <i8> [#uses=1]
store i8 %tmp7, i8* %tmp5
%p_addr.076.0.sum93 = add i32 %p_addr.076.0.rec, 1 ; <i32> [#uses=1]
- %tmp11 = getelementptr i8* %p, i32 %p_addr.076.0.sum93 ; <i8*> [#uses=1]
+ %tmp11 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum93 ; <i8*> [#uses=1]
%tmp13 = load i8* %tmp11 ; <i8> [#uses=1]
- %tmp15 = getelementptr i8* %tmp3, i32 %tmp5.sum72 ; <i8*> [#uses=1]
+ %tmp15 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum72 ; <i8*> [#uses=1]
store i8 %tmp13, i8* %tmp15
%p_addr.076.0.sum92 = add i32 %p_addr.076.0.rec, 2 ; <i32> [#uses=1]
- %tmp17 = getelementptr i8* %p, i32 %p_addr.076.0.sum92 ; <i8*> [#uses=1]
+ %tmp17 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum92 ; <i8*> [#uses=1]
%tmp19 = load i8* %tmp17 ; <i8> [#uses=1]
- %tmp21 = getelementptr i8* %tmp3, i32 %tmp5.sum71 ; <i8*> [#uses=1]
+ %tmp21 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum71 ; <i8*> [#uses=1]
store i8 %tmp19, i8* %tmp21
%p_addr.076.0.sum91 = add i32 %p_addr.076.0.rec, 3 ; <i32> [#uses=1]
- %tmp23 = getelementptr i8* %p, i32 %p_addr.076.0.sum91 ; <i8*> [#uses=1]
+ %tmp23 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum91 ; <i8*> [#uses=1]
%tmp25 = load i8* %tmp23 ; <i8> [#uses=1]
- %tmp27 = getelementptr i8* %tmp3, i32 %tmp5.sum70 ; <i8*> [#uses=1]
+ %tmp27 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum70 ; <i8*> [#uses=1]
store i8 %tmp25, i8* %tmp27
%p_addr.076.0.sum90 = add i32 %p_addr.076.0.rec, 4 ; <i32> [#uses=1]
- %tmp29 = getelementptr i8* %p, i32 %p_addr.076.0.sum90 ; <i8*> [#uses=1]
+ %tmp29 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum90 ; <i8*> [#uses=1]
%tmp31 = load i8* %tmp29 ; <i8> [#uses=1]
- %tmp33 = getelementptr i8* %tmp3, i32 %tmp5.sum69 ; <i8*> [#uses=2]
+ %tmp33 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum69 ; <i8*> [#uses=2]
store i8 %tmp31, i8* %tmp33
%p_addr.076.0.sum89 = add i32 %p_addr.076.0.rec, 5 ; <i32> [#uses=1]
- %tmp35 = getelementptr i8* %p, i32 %p_addr.076.0.sum89 ; <i8*> [#uses=1]
+ %tmp35 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum89 ; <i8*> [#uses=1]
%tmp37 = load i8* %tmp35 ; <i8> [#uses=1]
- %tmp39 = getelementptr i8* %tmp3, i32 %tmp5.sum68 ; <i8*> [#uses=1]
+ %tmp39 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum68 ; <i8*> [#uses=1]
store i8 %tmp37, i8* %tmp39
%p_addr.076.0.sum88 = add i32 %p_addr.076.0.rec, 6 ; <i32> [#uses=1]
- %tmp41 = getelementptr i8* %p, i32 %p_addr.076.0.sum88 ; <i8*> [#uses=1]
+ %tmp41 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum88 ; <i8*> [#uses=1]
%tmp43 = load i8* %tmp41 ; <i8> [#uses=1]
store i8 %tmp43, i8* %tmp33
%p_addr.076.0.sum87 = add i32 %p_addr.076.0.rec, 7 ; <i32> [#uses=1]
- %tmp47 = getelementptr i8* %p, i32 %p_addr.076.0.sum87 ; <i8*> [#uses=1]
+ %tmp47 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum87 ; <i8*> [#uses=1]
%tmp49 = load i8* %tmp47 ; <i8> [#uses=1]
- %tmp51 = getelementptr i8* %tmp3, i32 %tmp5.sum66 ; <i8*> [#uses=1]
+ %tmp51 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum66 ; <i8*> [#uses=1]
store i8 %tmp49, i8* %tmp51
%p_addr.076.0.sum = add i32 %p_addr.076.0.rec, 8 ; <i32> [#uses=1]
- %tmp53 = getelementptr i8* %p, i32 %p_addr.076.0.sum ; <i8*> [#uses=1]
+ %tmp53 = getelementptr i8, i8* %p, i32 %p_addr.076.0.sum ; <i8*> [#uses=1]
%tmp55 = load i8* %tmp53 ; <i8> [#uses=1]
- %tmp57 = getelementptr i8* %tmp3, i32 %tmp5.sum ; <i8*> [#uses=1]
+ %tmp57 = getelementptr i8, i8* %tmp3, i32 %tmp5.sum ; <i8*> [#uses=1]
store i8 %tmp55, i8* %tmp57
%indvar.next = add i32 %i.073.0, 1 ; <i32> [#uses=2]
icmp eq i32 %indvar.next, %size ; <i1>:1 [#uses=1]
bb32: ; preds = %bb32, %cond_true
%i.2115.0 = phi i32 [ 0, %cond_true ], [ %indvar.next127, %bb32 ] ; <i32> [#uses=1]
%c.2112.0 = phi i32 [ 0, %cond_true ], [ %tmp49, %bb32 ] ; <i32> [#uses=1]
- %tmp43 = getelementptr %struct.partition_def* %part, i32 0, i32 1, i32 %c.2112.0, i32 1 ; <%struct.partition_elem**> [#uses=1]
+ %tmp43 = getelementptr %struct.partition_def, %struct.partition_def* %part, i32 0, i32 1, i32 %c.2112.0, i32 1 ; <%struct.partition_elem**> [#uses=1]
%tmp44 = load %struct.partition_elem** %tmp43 ; <%struct.partition_elem*> [#uses=1]
%tmp4445 = ptrtoint %struct.partition_elem* %tmp44 to i32 ; <i32> [#uses=1]
%tmp48 = sub i32 %tmp4445, 0 ; <i32> [#uses=1]
bb.i: ; preds = %bb.i, %entry
%i.1.i1.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb.i ] ; <i32> [#uses=2]
%tmp1012.i = sext i32 %i.1.i1.0 to i64 ; <i64> [#uses=1]
- %tmp13.i = getelementptr %struct.A* @_ZN1A1aE, i32 0, i32 0, i64 %tmp1012.i ; <i8*> [#uses=1]
+ %tmp13.i = getelementptr %struct.A, %struct.A* @_ZN1A1aE, i32 0, i32 0, i64 %tmp1012.i ; <i8*> [#uses=1]
store i8 0, i8* %tmp13.i
%indvar.next = add i32 %i.1.i1.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, 1024 ; <i1> [#uses=1]
; RUN: llc < %s -march=x86 -mattr=+sse2
define void @test(<4 x float>* %arg) {
- %tmp89 = getelementptr <4 x float>* %arg, i64 3
+ %tmp89 = getelementptr <4 x float>, <4 x float>* %arg, i64 3
%tmp1144 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, zeroinitializer
store <4 x float> %tmp1144, <4 x float>* null
%tmp1149 = load <4 x float>* %tmp89
; CHECK: movq 8([[A0]]), %rax
define i64 @foo_0(<2 x i64>* %val) {
entry:
- %val12 = getelementptr <2 x i64>* %val, i32 0, i32 0 ; <i64*> [#uses=1]
+ %val12 = getelementptr <2 x i64>, <2 x i64>* %val, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp7 = load i64* %val12 ; <i64> [#uses=1]
ret i64 %tmp7
}
define i64 @foo_1(<2 x i64>* %val) {
entry:
- %tmp2.gep = getelementptr <2 x i64>* %val, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp2.gep = getelementptr <2 x i64>, <2 x i64>* %val, i32 0, i32 1 ; <i64*> [#uses=1]
%tmp4 = load i64* %tmp2.gep ; <i64> [#uses=1]
ret i64 %tmp4
}
bb4: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
%tmp5 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp6 = getelementptr i8** %tmp5, i64 2 ; <i8**> [#uses=1]
+ %tmp6 = getelementptr i8*, i8** %tmp5, i64 2 ; <i8**> [#uses=1]
%tmp7 = load i8** %tmp6 ; <i8*> [#uses=1]
%tmp78 = bitcast i8* %tmp7 to %struct._typeobject* ; <%struct._typeobject*> [#uses=1]
- %tmp9 = getelementptr %struct._typeobject* %tmp78, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
+ %tmp9 = getelementptr %struct._typeobject, %struct._typeobject* %tmp78, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
%tmp10 = load %struct.PyNumberMethods** %tmp9 ; <%struct.PyNumberMethods*> [#uses=1]
- %tmp11 = getelementptr %struct.PyNumberMethods* %tmp10, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
+ %tmp11 = getelementptr %struct.PyNumberMethods, %struct.PyNumberMethods* %tmp10, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
%tmp12 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp11 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
%tmp15 = call %struct.PyObject* %tmp12( %struct.PyObject* %a, %struct.PyObject* %b ) ; <%struct.PyObject*> [#uses=1]
ret %struct.PyObject* %tmp15
cond_next: ; preds = %bb17
%tmp22 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp23 = getelementptr i8** %tmp22, i64 10 ; <i8**> [#uses=1]
+ %tmp23 = getelementptr i8*, i8** %tmp22, i64 10 ; <i8**> [#uses=1]
%tmp24 = load i8** %tmp23 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to %struct._typeobject* ; <%struct._typeobject*> [#uses=1]
- %tmp26 = getelementptr %struct._typeobject* %tmp2425, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
+ %tmp26 = getelementptr %struct._typeobject, %struct._typeobject* %tmp2425, i32 0, i32 12 ; <%struct.PyNumberMethods**> [#uses=1]
%tmp27 = load %struct.PyNumberMethods** %tmp26 ; <%struct.PyNumberMethods*> [#uses=1]
- %tmp28 = getelementptr %struct.PyNumberMethods* %tmp27, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
+ %tmp28 = getelementptr %struct.PyNumberMethods, %struct.PyNumberMethods* %tmp27, i32 0, i32 5 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)**> [#uses=1]
%tmp29 = load %struct.PyObject* (%struct.PyObject*, %struct.PyObject*)** %tmp28 ; <%struct.PyObject* (%struct.PyObject*, %struct.PyObject*)*> [#uses=1]
%tmp32 = call %struct.PyObject* %tmp29( %struct.PyObject* %a, %struct.PyObject* %b ) ; <%struct.PyObject*> [#uses=1]
ret %struct.PyObject* %tmp32
bb35: ; preds = %_ubyte_convert2_to_ctypes.exit, %cond_next.i
%tmp36 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp37 = getelementptr i8** %tmp36, i64 27 ; <i8**> [#uses=1]
+ %tmp37 = getelementptr i8*, i8** %tmp36, i64 27 ; <i8**> [#uses=1]
%tmp38 = load i8** %tmp37 ; <i8*> [#uses=1]
%tmp3839 = bitcast i8* %tmp38 to void ()* ; <void ()*> [#uses=1]
call void %tmp3839( )
%out2.0 = phi i8 [ %tmp20.i, %cond_next17.i ], [ 0, %cond_true14.i ], [ 0, %cond_true.i200 ] ; <i8> [#uses=1]
%out.2 = phi i8 [ %out.1, %cond_next17.i ], [ %out.0, %cond_true14.i ], [ %out.0, %cond_true.i200 ] ; <i8> [#uses=1]
%tmp52 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp53 = getelementptr i8** %tmp52, i64 28 ; <i8**> [#uses=1]
+ %tmp53 = getelementptr i8*, i8** %tmp52, i64 28 ; <i8**> [#uses=1]
%tmp54 = load i8** %tmp53 ; <i8*> [#uses=1]
%tmp5455 = bitcast i8* %tmp54 to i32 ()* ; <i32 ()*> [#uses=1]
%tmp56 = call i32 %tmp5455( ) ; <i32> [#uses=2]
cond_true61: ; preds = %ubyte_ctype_remainder.exit
%tmp62 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp63 = getelementptr i8** %tmp62, i64 25 ; <i8**> [#uses=1]
+ %tmp63 = getelementptr i8*, i8** %tmp62, i64 25 ; <i8**> [#uses=1]
%tmp64 = load i8** %tmp63 ; <i8*> [#uses=1]
%tmp6465 = bitcast i8* %tmp64 to i32 (i8*, i32*, i32*, %struct.PyObject**)* ; <i32 (i8*, i32*, i32*, %struct.PyObject**)*> [#uses=1]
%tmp67 = call i32 %tmp6465( i8* getelementptr ([14 x i8]* @.str5, i32 0, i64 0), i32* %bufsize, i32* %errmask, %struct.PyObject** %errobj ) ; <i32> [#uses=1]
cond_next73: ; preds = %cond_true61
store i32 1, i32* %first, align 4
%tmp74 = load i8*** @PyUFunc_API, align 8 ; <i8**> [#uses=1]
- %tmp75 = getelementptr i8** %tmp74, i64 29 ; <i8**> [#uses=1]
+ %tmp75 = getelementptr i8*, i8** %tmp74, i64 29 ; <i8**> [#uses=1]
%tmp76 = load i8** %tmp75 ; <i8*> [#uses=1]
%tmp7677 = bitcast i8* %tmp76 to i32 (i32, %struct.PyObject*, i32, i32*)* ; <i32 (i32, %struct.PyObject*, i32, i32*)*> [#uses=1]
%tmp79 = load %struct.PyObject** %errobj, align 8 ; <%struct.PyObject*> [#uses=1]
cond_next97: ; preds = %cond_next89
%tmp98 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp99 = getelementptr i8** %tmp98, i64 25 ; <i8**> [#uses=1]
+ %tmp99 = getelementptr i8*, i8** %tmp98, i64 25 ; <i8**> [#uses=1]
%tmp100 = load i8** %tmp99 ; <i8*> [#uses=1]
%tmp100101 = bitcast i8* %tmp100 to %struct._typeobject* ; <%struct._typeobject*> [#uses=2]
- %tmp102 = getelementptr %struct._typeobject* %tmp100101, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
+ %tmp102 = getelementptr %struct._typeobject, %struct._typeobject* %tmp100101, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
%tmp103 = load %struct.PyObject* (%struct._typeobject*, i64)** %tmp102 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
%tmp108 = call %struct.PyObject* %tmp103( %struct._typeobject* %tmp100101, i64 0 ) ; <%struct.PyObject*> [#uses=3]
%tmp110 = icmp eq %struct.PyObject* %tmp108, null ; <i1> [#uses=1]
br i1 %tmp110, label %cond_true113, label %cond_next135
cond_true113: ; preds = %cond_next97
- %tmp115 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
+ %tmp115 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
%tmp116 = load i64* %tmp115 ; <i64> [#uses=1]
%tmp117 = add i64 %tmp116, -1 ; <i64> [#uses=2]
store i64 %tmp117, i64* %tmp115
br i1 %tmp123, label %cond_true126, label %UnifiedReturnBlock
cond_true126: ; preds = %cond_true113
- %tmp128 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
+ %tmp128 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
%tmp129 = load %struct._typeobject** %tmp128 ; <%struct._typeobject*> [#uses=1]
- %tmp130 = getelementptr %struct._typeobject* %tmp129, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
+ %tmp130 = getelementptr %struct._typeobject, %struct._typeobject* %tmp129, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
%tmp131 = load void (%struct.PyObject*)** %tmp130 ; <void (%struct.PyObject*)*> [#uses=1]
call void %tmp131( %struct.PyObject* %tmp90 )
ret %struct.PyObject* null
cond_next135: ; preds = %cond_next97
%tmp136137 = bitcast %struct.PyObject* %tmp108 to %struct.PyBoolScalarObject* ; <%struct.PyBoolScalarObject*> [#uses=1]
- %tmp139 = getelementptr %struct.PyBoolScalarObject* %tmp136137, i32 0, i32 2 ; <i8*> [#uses=1]
+ %tmp139 = getelementptr %struct.PyBoolScalarObject, %struct.PyBoolScalarObject* %tmp136137, i32 0, i32 2 ; <i8*> [#uses=1]
store i8 %out.2, i8* %tmp139
%tmp140141 = bitcast %struct.PyObject* %tmp90 to %struct.PyTupleObject* ; <%struct.PyTupleObject*> [#uses=2]
- %tmp143 = getelementptr %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 0 ; <%struct.PyObject**> [#uses=1]
+ %tmp143 = getelementptr %struct.PyTupleObject, %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 0 ; <%struct.PyObject**> [#uses=1]
store %struct.PyObject* %tmp108, %struct.PyObject** %tmp143
%tmp145 = load i8*** @PyArray_API, align 8 ; <i8**> [#uses=1]
- %tmp146 = getelementptr i8** %tmp145, i64 25 ; <i8**> [#uses=1]
+ %tmp146 = getelementptr i8*, i8** %tmp145, i64 25 ; <i8**> [#uses=1]
%tmp147 = load i8** %tmp146 ; <i8*> [#uses=1]
%tmp147148 = bitcast i8* %tmp147 to %struct._typeobject* ; <%struct._typeobject*> [#uses=2]
- %tmp149 = getelementptr %struct._typeobject* %tmp147148, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
+ %tmp149 = getelementptr %struct._typeobject, %struct._typeobject* %tmp147148, i32 0, i32 38 ; <%struct.PyObject* (%struct._typeobject*, i64)**> [#uses=1]
%tmp150 = load %struct.PyObject* (%struct._typeobject*, i64)** %tmp149 ; <%struct.PyObject* (%struct._typeobject*, i64)*> [#uses=1]
%tmp155 = call %struct.PyObject* %tmp150( %struct._typeobject* %tmp147148, i64 0 ) ; <%struct.PyObject*> [#uses=3]
%tmp157 = icmp eq %struct.PyObject* %tmp155, null ; <i1> [#uses=1]
br i1 %tmp157, label %cond_true160, label %cond_next182
cond_true160: ; preds = %cond_next135
- %tmp162 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
+ %tmp162 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 0 ; <i64*> [#uses=2]
%tmp163 = load i64* %tmp162 ; <i64> [#uses=1]
%tmp164 = add i64 %tmp163, -1 ; <i64> [#uses=2]
store i64 %tmp164, i64* %tmp162
br i1 %tmp170, label %cond_true173, label %UnifiedReturnBlock
cond_true173: ; preds = %cond_true160
- %tmp175 = getelementptr %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
+ %tmp175 = getelementptr %struct.PyObject, %struct.PyObject* %tmp90, i32 0, i32 1 ; <%struct._typeobject**> [#uses=1]
%tmp176 = load %struct._typeobject** %tmp175 ; <%struct._typeobject*> [#uses=1]
- %tmp177 = getelementptr %struct._typeobject* %tmp176, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
+ %tmp177 = getelementptr %struct._typeobject, %struct._typeobject* %tmp176, i32 0, i32 6 ; <void (%struct.PyObject*)**> [#uses=1]
%tmp178 = load void (%struct.PyObject*)** %tmp177 ; <void (%struct.PyObject*)*> [#uses=1]
call void %tmp178( %struct.PyObject* %tmp90 )
ret %struct.PyObject* null
cond_next182: ; preds = %cond_next135
%tmp183184 = bitcast %struct.PyObject* %tmp155 to %struct.PyBoolScalarObject* ; <%struct.PyBoolScalarObject*> [#uses=1]
- %tmp186 = getelementptr %struct.PyBoolScalarObject* %tmp183184, i32 0, i32 2 ; <i8*> [#uses=1]
+ %tmp186 = getelementptr %struct.PyBoolScalarObject, %struct.PyBoolScalarObject* %tmp183184, i32 0, i32 2 ; <i8*> [#uses=1]
store i8 %out2.0, i8* %tmp186
- %tmp190 = getelementptr %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 1 ; <%struct.PyObject**> [#uses=1]
+ %tmp190 = getelementptr %struct.PyTupleObject, %struct.PyTupleObject* %tmp140141, i32 0, i32 3, i64 1 ; <%struct.PyObject**> [#uses=1]
store %struct.PyObject* %tmp155, %struct.PyObject** %tmp190
ret %struct.PyObject* %tmp90
cond_true: ; preds = %entry
%tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp17 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
+ %tmp17 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
%tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
%tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
- %tmp24 = getelementptr i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
+ %tmp24 = getelementptr i8, i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
%tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
%tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp31 = getelementptr %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
+ %tmp31 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
store i32* %tmp2829, i32** %tmp31, align 8
br label %cond_next
cond_next: ; preds = %cond_true, %entry
%tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp46 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
+ %tmp46 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
%tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
%tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
- %tmp53 = getelementptr i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
+ %tmp53 = getelementptr i8, i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
%tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
%tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
%tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
cond_true63: ; preds = %cond_next
- %tmp65 = getelementptr %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
+ %tmp65 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
%tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
%tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
ret i32 %tmp69
bb171.preheader: ; preds = %entry
%tmp176 = fadd float 0.000000e+00, 1.000000e+00 ; <float> [#uses=2]
- %gi.1 = getelementptr float* %fz, i32 0 ; <float*> [#uses=2]
+ %gi.1 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=2]
%tmp240 = load float* %gi.1, align 4 ; <float> [#uses=1]
%tmp242 = fsub float %tmp240, 0.000000e+00 ; <float> [#uses=2]
- %tmp251 = getelementptr float* %fz, i32 0 ; <float*> [#uses=1]
+ %tmp251 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=1]
%tmp252 = load float* %tmp251, align 4 ; <float> [#uses=1]
- %tmp258 = getelementptr float* %fz, i32 0 ; <float*> [#uses=2]
+ %tmp258 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=2]
%tmp259 = load float* %tmp258, align 4 ; <float> [#uses=2]
%tmp261 = fmul float %tmp259, %tmp176 ; <float> [#uses=1]
%tmp262 = fsub float 0.000000e+00, %tmp261 ; <float> [#uses=2]
%tmp269 = fmul float %tmp252, %tmp176 ; <float> [#uses=1]
%tmp276 = fmul float %tmp259, 0.000000e+00 ; <float> [#uses=1]
%tmp277 = fadd float %tmp269, %tmp276 ; <float> [#uses=2]
- %tmp281 = getelementptr float* %fz, i32 0 ; <float*> [#uses=1]
+ %tmp281 = getelementptr float, float* %fz, i32 0 ; <float*> [#uses=1]
%tmp282 = load float* %tmp281, align 4 ; <float> [#uses=2]
%tmp284 = fsub float %tmp282, %tmp277 ; <float> [#uses=1]
%tmp291 = fadd float %tmp282, %tmp277 ; <float> [#uses=1]
%tmp269 = load i8** %byteptr, align 4 ; <i8*> [#uses=3]
%tmp270 = load i8* %tmp269, align 1 ; <i8> [#uses=1]
%tmp270271 = zext i8 %tmp270 to i32 ; <i32> [#uses=1]
- %tmp272 = getelementptr i8* %tmp269, i32 1 ; <i8*> [#uses=2]
+ %tmp272 = getelementptr i8, i8* %tmp269, i32 1 ; <i8*> [#uses=2]
store i8* %tmp272, i8** %byteptr, align 4
%tmp276 = load i8* %tmp272, align 1 ; <i8> [#uses=1]
- %tmp278 = getelementptr i8* %tmp269, i32 2 ; <i8*> [#uses=1]
+ %tmp278 = getelementptr i8, i8* %tmp269, i32 2 ; <i8*> [#uses=1]
store i8* %tmp278, i8** %byteptr, align 4
%tmp286 = icmp eq i32 %tmp270271, %markerPrefix ; <i1> [#uses=1]
%cond = icmp eq i8 %tmp276, 0 ; <i1> [#uses=1]
bb69.outer: ; preds = %bb76.split, %bb98.preheader
%from.0.reg2mem.0.ph.rec = phi i32 [ %tmp75.rec, %bb76.split ], [ 0, %bb98.preheader ] ; <i32> [#uses=1]
%tmp75.rec = add i32 %from.0.reg2mem.0.ph.rec, 1 ; <i32> [#uses=2]
- %tmp75 = getelementptr i8* null, i32 %tmp75.rec ; <i8*> [#uses=6]
+ %tmp75 = getelementptr i8, i8* null, i32 %tmp75.rec ; <i8*> [#uses=6]
br i1 false, label %bb69.us208, label %bb69.outer.split.split
bb69.us208: ; preds = %bb69.outer
bb18: ; preds = %bb18, %entry
%i.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp17, %bb18 ] ; <i32> [#uses=3]
%res.0.reg2mem.0 = phi float [ 0.000000e+00, %entry ], [ %tmp14, %bb18 ] ; <float> [#uses=1]
- %tmp3 = getelementptr i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr i32, i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
%tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
- %tmp8 = getelementptr float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
+ %tmp8 = getelementptr float, float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
%tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
%tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
ret void
bb1271: ; preds = %bb898
- %tmp1272 = getelementptr %struct.c34007g__pkg__parent* %x8, i32 0, i32 0 ; <i32**> [#uses=1]
+ %tmp1272 = getelementptr %struct.c34007g__pkg__parent, %struct.c34007g__pkg__parent* %x8, i32 0, i32 0 ; <i32**> [#uses=1]
%x82167 = bitcast %struct.c34007g__pkg__parent* %x8 to i64* ; <i64*> [#uses=1]
br i1 true, label %bb4668, label %bb848
define void @_ada_c34007g() {
entry:
%x8 = alloca %struct.c34007g__pkg__parent, align 8 ; <%struct.c34007g__pkg__parent*> [#uses=2]
- %tmp1272 = getelementptr %struct.c34007g__pkg__parent* %x8, i32 0, i32 0 ; <i32**> [#uses=1]
+ %tmp1272 = getelementptr %struct.c34007g__pkg__parent, %struct.c34007g__pkg__parent* %x8, i32 0, i32 0 ; <i32**> [#uses=1]
%x82167 = bitcast %struct.c34007g__pkg__parent* %x8 to i64* ; <i64*> [#uses=1]
br i1 true, label %bb4668, label %bb848
bb951: ; preds = %bb986, %entry
%tmp955 = sdiv i32 %offset, 2 ; <i32> [#uses=3]
- %tmp961 = getelementptr %struct.indexentry* null, i32 %tmp955, i32 0 ; <i32*> [#uses=1]
+ %tmp961 = getelementptr %struct.indexentry, %struct.indexentry* null, i32 %tmp955, i32 0 ; <i32*> [#uses=1]
br i1 %cond, label %bb986, label %bb967
bb967: ; preds = %bb951
define void @localize_local_bb19_bb(%struct.node_t** %cur_node) {
newFuncRoot:
%tmp1 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp2 = getelementptr %struct.node_t* %tmp1, i32 0, i32 4 ; <double**> [#uses=1]
+ %tmp2 = getelementptr %struct.node_t, %struct.node_t* %tmp1, i32 0, i32 4 ; <double**> [#uses=1]
%tmp3 = load double** %tmp2, align 4 ; <double*> [#uses=1]
%tmp4 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp5 = getelementptr %struct.node_t* %tmp4, i32 0, i32 4 ; <double**> [#uses=1]
+ %tmp5 = getelementptr %struct.node_t, %struct.node_t* %tmp4, i32 0, i32 4 ; <double**> [#uses=1]
store double* %tmp3, double** %tmp5, align 4
%tmp6 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp7 = getelementptr %struct.node_t* %tmp6, i32 0, i32 3 ; <double***> [#uses=1]
+ %tmp7 = getelementptr %struct.node_t, %struct.node_t* %tmp6, i32 0, i32 3 ; <double***> [#uses=1]
%tmp8 = load double*** %tmp7, align 4 ; <double**> [#uses=1]
%tmp9 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp10 = getelementptr %struct.node_t* %tmp9, i32 0, i32 3 ; <double***> [#uses=1]
+ %tmp10 = getelementptr %struct.node_t, %struct.node_t* %tmp9, i32 0, i32 3 ; <double***> [#uses=1]
store double** %tmp8, double*** %tmp10, align 4
%tmp11 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp12 = getelementptr %struct.node_t* %tmp11, i32 0, i32 0 ; <double**> [#uses=1]
+ %tmp12 = getelementptr %struct.node_t, %struct.node_t* %tmp11, i32 0, i32 0 ; <double**> [#uses=1]
%tmp13 = load double** %tmp12, align 4 ; <double*> [#uses=1]
%tmp14 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp15 = getelementptr %struct.node_t* %tmp14, i32 0, i32 0 ; <double**> [#uses=1]
+ %tmp15 = getelementptr %struct.node_t, %struct.node_t* %tmp14, i32 0, i32 0 ; <double**> [#uses=1]
store double* %tmp13, double** %tmp15, align 4
%tmp16 = load %struct.node_t** %cur_node, align 4 ; <%struct.node_t*> [#uses=1]
- %tmp17 = getelementptr %struct.node_t* %tmp16, i32 0, i32 1 ; <%struct.node_t**> [#uses=1]
+ %tmp17 = getelementptr %struct.node_t, %struct.node_t* %tmp16, i32 0, i32 1 ; <%struct.node_t**> [#uses=1]
%tmp18 = load %struct.node_t** %tmp17, align 4 ; <%struct.node_t*> [#uses=1]
store %struct.node_t* %tmp18, %struct.node_t** %cur_node, align 4
ret void
%memtmp = alloca { double, double }, align 8 ; <{ double, double }*> [#uses=3]
%tmp4 = fsub double -0.000000e+00, %z.1 ; <double> [#uses=1]
call void @casinh( { double, double }* sret %memtmp, double %tmp4, double %z.0 ) nounwind
- %tmp19 = getelementptr { double, double }* %memtmp, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp19 = getelementptr { double, double }, { double, double }* %memtmp, i32 0, i32 0 ; <double*> [#uses=1]
%tmp20 = load double* %tmp19, align 8 ; <double> [#uses=1]
- %tmp22 = getelementptr { double, double }* %memtmp, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp22 = getelementptr { double, double }, { double, double }* %memtmp, i32 0, i32 1 ; <double*> [#uses=1]
%tmp23 = load double* %tmp22, align 8 ; <double> [#uses=1]
%tmp32 = fsub double -0.000000e+00, %tmp20 ; <double> [#uses=1]
- %tmp37 = getelementptr { double, double }* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp37 = getelementptr { double, double }, { double, double }* %agg.result, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmp23, double* %tmp37, align 8
- %tmp40 = getelementptr { double, double }* %agg.result, i32 0, i32 1 ; <double*> [#uses=1]
+ %tmp40 = getelementptr { double, double }, { double, double }* %agg.result, i32 0, i32 1 ; <double*> [#uses=1]
store double %tmp32, double* %tmp40, align 8
ret void
}
define void @minmax(float* %result) nounwind optsize {
entry:
%tmp2 = load float* %result, align 4 ; <float> [#uses=6]
- %tmp4 = getelementptr float* %result, i32 2 ; <float*> [#uses=5]
+ %tmp4 = getelementptr float, float* %result, i32 2 ; <float*> [#uses=5]
%tmp5 = load float* %tmp4, align 4 ; <float> [#uses=10]
- %tmp7 = getelementptr float* %result, i32 4 ; <float*> [#uses=5]
+ %tmp7 = getelementptr float, float* %result, i32 4 ; <float*> [#uses=5]
%tmp8 = load float* %tmp7, align 4 ; <float> [#uses=8]
- %tmp10 = getelementptr float* %result, i32 6 ; <float*> [#uses=3]
+ %tmp10 = getelementptr float, float* %result, i32 6 ; <float*> [#uses=3]
%tmp11 = load float* %tmp10, align 4 ; <float> [#uses=8]
%tmp12 = fcmp olt float %tmp8, %tmp11 ; <i1> [#uses=5]
br i1 %tmp12, label %bb, label %bb21
bb226: ; preds = %bb218, %bb195, %bb187, %bb165, %bb155, %bb141
%iftmp.7.0.in = phi float* [ %tmp10, %bb218 ], [ %result, %bb141 ], [ %result, %bb155 ], [ %result, %bb165 ], [ %tmp4.mux789, %bb195 ], [ %tmp4.mux791, %bb187 ] ; <float*> [#uses=1]
%iftmp.7.0 = load float* %iftmp.7.0.in ; <float> [#uses=1]
- %tmp229 = getelementptr float* %result, i32 1 ; <float*> [#uses=7]
+ %tmp229 = getelementptr float, float* %result, i32 1 ; <float*> [#uses=7]
%tmp230 = load float* %tmp229, align 4 ; <float> [#uses=6]
- %tmp232 = getelementptr float* %result, i32 3 ; <float*> [#uses=5]
+ %tmp232 = getelementptr float, float* %result, i32 3 ; <float*> [#uses=5]
%tmp233 = load float* %tmp232, align 4 ; <float> [#uses=10]
- %tmp235 = getelementptr float* %result, i32 5 ; <float*> [#uses=5]
+ %tmp235 = getelementptr float, float* %result, i32 5 ; <float*> [#uses=5]
%tmp236 = load float* %tmp235, align 4 ; <float> [#uses=8]
- %tmp238 = getelementptr float* %result, i32 7 ; <float*> [#uses=3]
+ %tmp238 = getelementptr float, float* %result, i32 7 ; <float*> [#uses=3]
%tmp239 = load float* %tmp238, align 4 ; <float> [#uses=8]
%tmp240 = fcmp olt float %tmp236, %tmp239 ; <i1> [#uses=5]
br i1 %tmp240, label %bb243, label %bb251
define void @test2(i16* %block, i8* %pixels, i32 %line_size) nounwind {
entry:
- %tmp1 = getelementptr i16* %block, i32 64 ; <i16*> [#uses=1]
+ %tmp1 = getelementptr i16, i16* %block, i32 64 ; <i16*> [#uses=1]
%tmp3 = tail call i8* asm sideeffect "b: $0 $1 $2", "=r,r,0,~{dirflag},~{fpsr},~{flags},~{ax}"( i16* %tmp1, i8* %pixels ) nounwind ; <i8*> [#uses=0]
ret void
}
store i32 %dst_stride, i32* %dst_stride_addr
store i32 %src_stride, i32* %src_stride_addr
%tmp = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8* %tmp, i32 0 ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %tmp, i32 0 ; <i8*> [#uses=1]
%tmp12 = bitcast i8* %tmp1 to i32* ; <i32*> [#uses=1]
%tmp3 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
%tmp4 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp5 = getelementptr i8* %tmp3, i32 %tmp4 ; <i8*> [#uses=1]
+ %tmp5 = getelementptr i8, i8* %tmp3, i32 %tmp4 ; <i8*> [#uses=1]
%tmp56 = bitcast i8* %tmp5 to i32* ; <i32*> [#uses=1]
%tmp7 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp8 = mul i32 %tmp7, 2 ; <i32> [#uses=1]
%tmp9 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp10 = getelementptr i8* %tmp9, i32 %tmp8 ; <i8*> [#uses=1]
+ %tmp10 = getelementptr i8, i8* %tmp9, i32 %tmp8 ; <i8*> [#uses=1]
%tmp1011 = bitcast i8* %tmp10 to i32* ; <i32*> [#uses=1]
%tmp13 = load i32* %dst_stride_addr, align 4 ; <i32> [#uses=1]
%tmp14 = mul i32 %tmp13, 3 ; <i32> [#uses=1]
%tmp15 = load i8** %dst_addr, align 4 ; <i8*> [#uses=1]
- %tmp16 = getelementptr i8* %tmp15, i32 %tmp14 ; <i8*> [#uses=1]
+ %tmp16 = getelementptr i8, i8* %tmp15, i32 %tmp14 ; <i8*> [#uses=1]
%tmp1617 = bitcast i8* %tmp16 to i32* ; <i32*> [#uses=1]
%tmp18 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp19 = getelementptr i8* %tmp18, i32 0 ; <i8*> [#uses=1]
+ %tmp19 = getelementptr i8, i8* %tmp18, i32 0 ; <i8*> [#uses=1]
%tmp1920 = bitcast i8* %tmp19 to i32* ; <i32*> [#uses=1]
%tmp21 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
%tmp22 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
- %tmp23 = getelementptr i8* %tmp21, i32 %tmp22 ; <i8*> [#uses=1]
+ %tmp23 = getelementptr i8, i8* %tmp21, i32 %tmp22 ; <i8*> [#uses=1]
%tmp2324 = bitcast i8* %tmp23 to i32* ; <i32*> [#uses=1]
%tmp25 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp26 = mul i32 %tmp25, 2 ; <i32> [#uses=1]
%tmp27 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp28 = getelementptr i8* %tmp27, i32 %tmp26 ; <i8*> [#uses=1]
+ %tmp28 = getelementptr i8, i8* %tmp27, i32 %tmp26 ; <i8*> [#uses=1]
%tmp2829 = bitcast i8* %tmp28 to i32* ; <i32*> [#uses=1]
%tmp30 = load i32* %src_stride_addr, align 4 ; <i32> [#uses=1]
%tmp31 = mul i32 %tmp30, 3 ; <i32> [#uses=1]
%tmp32 = load i8** %src_addr, align 4 ; <i8*> [#uses=1]
- %tmp33 = getelementptr i8* %tmp32, i32 %tmp31 ; <i8*> [#uses=1]
+ %tmp33 = getelementptr i8, i8* %tmp32, i32 %tmp31 ; <i8*> [#uses=1]
%tmp3334 = bitcast i8* %tmp33 to i32* ; <i32*> [#uses=1]
call void asm sideeffect "movd $4, %mm0 \0A\09movd $5, %mm1 \0A\09movd $6, %mm2 \0A\09movd $7, %mm3 \0A\09punpcklbw %mm1, %mm0 \0A\09punpcklbw %mm3, %mm2 \0A\09movq %mm0, %mm1 \0A\09punpcklwd %mm2, %mm0 \0A\09punpckhwd %mm2, %mm1 \0A\09movd %mm0, $0 \0A\09punpckhdq %mm0, %mm0 \0A\09movd %mm0, $1 \0A\09movd %mm1, $2 \0A\09punpckhdq %mm1, %mm1 \0A\09movd %mm1, $3 \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* %tmp12, i32* %tmp56, i32* %tmp1011, i32* %tmp1617, i32* %tmp1920, i32* %tmp2324, i32* %tmp2829, i32* %tmp3334 ) nounwind
br label %return
define void @h264_h_loop_filter_luma_mmx2(i8* %pix, i32 %stride, i32 %alpha, i32 %beta, i8* %tc0) nounwind {
entry:
- %tmp164 = getelementptr [16 x i32]* null, i32 0, i32 11 ; <i32*> [#uses=1]
- %tmp169 = getelementptr [16 x i32]* null, i32 0, i32 13 ; <i32*> [#uses=1]
- %tmp174 = getelementptr [16 x i32]* null, i32 0, i32 15 ; <i32*> [#uses=1]
+ %tmp164 = getelementptr [16 x i32], [16 x i32]* null, i32 0, i32 11 ; <i32*> [#uses=1]
+ %tmp169 = getelementptr [16 x i32], [16 x i32]* null, i32 0, i32 13 ; <i32*> [#uses=1]
+ %tmp174 = getelementptr [16 x i32], [16 x i32]* null, i32 0, i32 15 ; <i32*> [#uses=1]
%tmp154.sum317 = add i32 0, %stride ; <i32> [#uses=1]
%tmp154.sum315 = mul i32 %stride, 6 ; <i32> [#uses=1]
%tmp154.sum = mul i32 %stride, 7 ; <i32> [#uses=1]
bb32: ; preds = %entry
%pix_addr.0327.sum340 = add i32 %pix_addr.0327.rec, 0 ; <i32> [#uses=1]
- %tmp154 = getelementptr i8* %pix, i32 %pix_addr.0327.sum340 ; <i8*> [#uses=1]
+ %tmp154 = getelementptr i8, i8* %pix, i32 %pix_addr.0327.sum340 ; <i8*> [#uses=1]
%tmp177178 = bitcast i8* %tmp154 to i32* ; <i32*> [#uses=1]
%pix_addr.0327.sum339 = add i32 %pix_addr.0327.rec, %tmp154.sum317 ; <i32> [#uses=1]
- %tmp181 = getelementptr i8* %pix, i32 %pix_addr.0327.sum339 ; <i8*> [#uses=1]
+ %tmp181 = getelementptr i8, i8* %pix, i32 %pix_addr.0327.sum339 ; <i8*> [#uses=1]
%tmp181182 = bitcast i8* %tmp181 to i32* ; <i32*> [#uses=1]
%pix_addr.0327.sum338 = add i32 %pix_addr.0327.rec, %tmp154.sum315 ; <i32> [#uses=1]
- %tmp186 = getelementptr i8* %pix, i32 %pix_addr.0327.sum338 ; <i8*> [#uses=1]
+ %tmp186 = getelementptr i8, i8* %pix, i32 %pix_addr.0327.sum338 ; <i8*> [#uses=1]
%tmp186187 = bitcast i8* %tmp186 to i32* ; <i32*> [#uses=1]
%pix_addr.0327.sum337 = add i32 %pix_addr.0327.rec, %tmp154.sum ; <i32> [#uses=1]
- %tmp191 = getelementptr i8* %pix, i32 %pix_addr.0327.sum337 ; <i8*> [#uses=1]
+ %tmp191 = getelementptr i8, i8* %pix, i32 %pix_addr.0327.sum337 ; <i8*> [#uses=1]
%tmp191192 = bitcast i8* %tmp191 to i32* ; <i32*> [#uses=1]
call void asm sideeffect "movd $4, %mm0 \0A\09movd $5, %mm1 \0A\09movd $6, %mm2 \0A\09movd $7, %mm3 \0A\09punpcklbw %mm1, %mm0 \0A\09punpcklbw %mm3, %mm2 \0A\09movq %mm0, %mm1 \0A\09punpcklwd %mm2, %mm0 \0A\09punpckhwd %mm2, %mm1 \0A\09movd %mm0, $0 \0A\09punpckhdq %mm0, %mm0 \0A\09movd %mm0, $1 \0A\09movd %mm1, $2 \0A\09punpckhdq %mm1, %mm1 \0A\09movd %mm1, $3 \0A\09", "=*m,=*m,=*m,=*m,*m,*m,*m,*m,~{dirflag},~{fpsr},~{flags}"( i32* null, i32* %tmp164, i32* %tmp169, i32* %tmp174, i32* %tmp177178, i32* %tmp181182, i32* %tmp186187, i32* %tmp191192 ) nounwind
unreachable
%tmp55 = call %struct.YY** @AA( i64 1, %struct.XX* %uen ) ; <%struct.YY**> [#uses=3]
%tmp2728128 = load %struct.XX** null ; <%struct.XX*> [#uses=1]
%tmp61 = load %struct.YY** %tmp55, align 8 ; <%struct.YY*> [#uses=1]
- %tmp62 = getelementptr %struct.YY* %tmp61, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp62 = getelementptr %struct.YY, %struct.YY* %tmp61, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp63 = load i64* %tmp62, align 8 ; <i64> [#uses=1]
%tmp6566 = zext i16 %tmp45 to i64 ; <i64> [#uses=1]
%tmp67 = shl i64 %tmp6566, 1 ; <i64> [#uses=1]
bb24: ; preds = %bb24, %entry
%tmp9.0.reg2mem.0.rec = phi i32 [ %indvar.next, %bb24 ], [ 0, %entry ] ; <i32> [#uses=3]
- %tmp3.i.i = getelementptr %struct.CompAtom* %tmp1819, i32 %tmp9.0.reg2mem.0.rec, i32 0, i32 1 ; <double*> [#uses=0]
- %tmp5.i.i = getelementptr %struct.CompAtom* %tmp1819, i32 %tmp9.0.reg2mem.0.rec, i32 0, i32 2 ; <double*> [#uses=1]
+ %tmp3.i.i = getelementptr %struct.CompAtom, %struct.CompAtom* %tmp1819, i32 %tmp9.0.reg2mem.0.rec, i32 0, i32 1 ; <double*> [#uses=0]
+ %tmp5.i.i = getelementptr %struct.CompAtom, %struct.CompAtom* %tmp1819, i32 %tmp9.0.reg2mem.0.rec, i32 0, i32 2 ; <double*> [#uses=1]
store double -9.999900e+04, double* %tmp5.i.i, align 4
%indvar.next = add i32 %tmp9.0.reg2mem.0.rec, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %n ; <i1> [#uses=1]
%tmp55 = srem i32 %i, 3 ; <i32> [#uses=1]
%tmp56 = add i32 %tmp55, -1 ; <i32> [#uses=1]
%tmp5657 = sitofp i32 %tmp56 to double ; <double> [#uses=1]
- %tmp15.i49 = getelementptr %struct.Lattice* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp15.i49 = getelementptr %struct.Lattice, %struct.Lattice* %this, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
%tmp16.i50 = load double* %tmp15.i49, align 4 ; <double> [#uses=1]
%tmp17.i = fmul double %tmp5657, %tmp16.i50 ; <double> [#uses=1]
%tmp20.i39 = fadd double %tmp17.i, %tmp17.i63 ; <double> [#uses=1]
br label %bb58
bb58: ; preds = %bb58, %bb58.preheader
- %tmp20.i7 = getelementptr %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
- %tmp25.i = getelementptr %struct.CompAtom* %tmp1819, i32 0, i32 2 ; <i32*> [#uses=2]
+ %tmp20.i7 = getelementptr %struct.CompAtom, %struct.CompAtom* %d, i32 0, i32 2 ; <i32*> [#uses=2]
+ %tmp25.i = getelementptr %struct.CompAtom, %struct.CompAtom* %tmp1819, i32 0, i32 2 ; <i32*> [#uses=2]
%tmp74.i = load i32* %tmp20.i7, align 1 ; <i32> [#uses=1]
%tmp82.i = and i32 %tmp74.i, 134217728 ; <i32> [#uses=1]
%tmp85.i = or i32 0, %tmp82.i ; <i32> [#uses=1]
ret void
bb1163: ; preds = %newFuncRoot
%tmp1164 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
- %tmp1165 = getelementptr %struct.rec* %tmp1164, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1165 = getelementptr %struct.rec, %struct.rec* %tmp1164, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11651166 = bitcast %struct.head_type* %tmp1165 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
- %tmp1167 = getelementptr %struct.symbol_type* %tmp11651166, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
+ %tmp1167 = getelementptr %struct.symbol_type, %struct.symbol_type* %tmp11651166, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
%tmp1168 = load %struct.rec** %tmp1167, align 1 ; <%struct.rec*> [#uses=2]
%tmp1169 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
- %tmp1170 = getelementptr %struct.rec* %tmp1169, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1170 = getelementptr %struct.rec, %struct.rec* %tmp1169, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11701171 = bitcast %struct.head_type* %tmp1170 to %struct.symbol_type* ; <%struct.symbol_type*> [#uses=1]
- %tmp1172 = getelementptr %struct.symbol_type* %tmp11701171, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
+ %tmp1172 = getelementptr %struct.symbol_type, %struct.symbol_type* %tmp11701171, i32 0, i32 3 ; <%struct.rec**> [#uses=1]
%tmp1173 = load %struct.rec** %tmp1172, align 1 ; <%struct.rec*> [#uses=2]
- %tmp1174 = getelementptr %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1174 = getelementptr %struct.rec, %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11741175 = bitcast %struct.head_type* %tmp1174 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1176 = getelementptr %struct.word_type* %tmp11741175, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1177 = getelementptr %struct.SECOND_UNION* %tmp1176, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
+ %tmp1176 = getelementptr %struct.word_type, %struct.word_type* %tmp11741175, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
+ %tmp1177 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1176, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp11771178 = bitcast { i16, i8, i8 }* %tmp1177 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1179 = getelementptr <{ i8, i8, i8, i8 }>* %tmp11771178, i32 0, i32 2 ; <i8*> [#uses=2]
+ %tmp1179 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11771178, i32 0, i32 2 ; <i8*> [#uses=2]
%mask1180 = and i8 1, 1 ; <i8> [#uses=2]
%tmp1181 = load i8* %tmp1179, align 1 ; <i8> [#uses=1]
%tmp1182 = shl i8 %mask1180, 7 ; <i8> [#uses=1]
%tmp1184 = or i8 %tmp1183, %tmp1182 ; <i8> [#uses=1]
store i8 %tmp1184, i8* %tmp1179, align 1
%mask1185 = and i8 %mask1180, 1 ; <i8> [#uses=0]
- %tmp1186 = getelementptr %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1186 = getelementptr %struct.rec, %struct.rec* %tmp1173, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11861187 = bitcast %struct.head_type* %tmp1186 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1188 = getelementptr %struct.word_type* %tmp11861187, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1189 = getelementptr %struct.SECOND_UNION* %tmp1188, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
+ %tmp1188 = getelementptr %struct.word_type, %struct.word_type* %tmp11861187, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
+ %tmp1189 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1188, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp11891190 = bitcast { i16, i8, i8 }* %tmp1189 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1191 = getelementptr <{ i8, i8, i8, i8 }>* %tmp11891190, i32 0, i32 2 ; <i8*> [#uses=1]
+ %tmp1191 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11891190, i32 0, i32 2 ; <i8*> [#uses=1]
%tmp1192 = load i8* %tmp1191, align 1 ; <i8> [#uses=1]
%tmp1193 = lshr i8 %tmp1192, 7 ; <i8> [#uses=1]
%mask1194 = and i8 %tmp1193, 1 ; <i8> [#uses=2]
%mask1195 = and i8 %mask1194, 1 ; <i8> [#uses=0]
- %tmp1196 = getelementptr %struct.rec* %tmp1168, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1196 = getelementptr %struct.rec, %struct.rec* %tmp1168, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp11961197 = bitcast %struct.head_type* %tmp1196 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1198 = getelementptr %struct.word_type* %tmp11961197, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1199 = getelementptr %struct.SECOND_UNION* %tmp1198, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
+ %tmp1198 = getelementptr %struct.word_type, %struct.word_type* %tmp11961197, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
+ %tmp1199 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1198, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp11991200 = bitcast { i16, i8, i8 }* %tmp1199 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1201 = getelementptr <{ i8, i8, i8, i8 }>* %tmp11991200, i32 0, i32 1 ; <i8*> [#uses=2]
+ %tmp1201 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp11991200, i32 0, i32 1 ; <i8*> [#uses=2]
%mask1202 = and i8 %mask1194, 1 ; <i8> [#uses=2]
%tmp1203 = load i8* %tmp1201, align 1 ; <i8> [#uses=1]
%tmp1204 = shl i8 %mask1202, 1 ; <i8> [#uses=1]
%tmp1207 = or i8 %tmp1206, %tmp1205 ; <i8> [#uses=1]
store i8 %tmp1207, i8* %tmp1201, align 1
%mask1208 = and i8 %mask1202, 1 ; <i8> [#uses=0]
- %tmp1209 = getelementptr %struct.rec* %tmp1168, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1209 = getelementptr %struct.rec, %struct.rec* %tmp1168, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp12091210 = bitcast %struct.head_type* %tmp1209 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1211 = getelementptr %struct.word_type* %tmp12091210, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1212 = getelementptr %struct.SECOND_UNION* %tmp1211, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
+ %tmp1211 = getelementptr %struct.word_type, %struct.word_type* %tmp12091210, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
+ %tmp1212 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1211, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp12121213 = bitcast { i16, i8, i8 }* %tmp1212 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1214 = getelementptr <{ i8, i8, i8, i8 }>* %tmp12121213, i32 0, i32 1 ; <i8*> [#uses=1]
+ %tmp1214 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp12121213, i32 0, i32 1 ; <i8*> [#uses=1]
%tmp1215 = load i8* %tmp1214, align 1 ; <i8> [#uses=1]
%tmp1216 = shl i8 %tmp1215, 6 ; <i8> [#uses=1]
%tmp1217 = lshr i8 %tmp1216, 7 ; <i8> [#uses=1]
%mask1218 = and i8 %tmp1217, 1 ; <i8> [#uses=2]
%mask1219 = and i8 %mask1218, 1 ; <i8> [#uses=0]
%tmp1220 = load %struct.rec** %s, align 4 ; <%struct.rec*> [#uses=1]
- %tmp1221 = getelementptr %struct.rec* %tmp1220, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
+ %tmp1221 = getelementptr %struct.rec, %struct.rec* %tmp1220, i32 0, i32 0 ; <%struct.head_type*> [#uses=1]
%tmp12211222 = bitcast %struct.head_type* %tmp1221 to %struct.word_type* ; <%struct.word_type*> [#uses=1]
- %tmp1223 = getelementptr %struct.word_type* %tmp12211222, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
- %tmp1224 = getelementptr %struct.SECOND_UNION* %tmp1223, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
+ %tmp1223 = getelementptr %struct.word_type, %struct.word_type* %tmp12211222, i32 0, i32 2 ; <%struct.SECOND_UNION*> [#uses=1]
+ %tmp1224 = getelementptr %struct.SECOND_UNION, %struct.SECOND_UNION* %tmp1223, i32 0, i32 0 ; <{ i16, i8, i8 }*> [#uses=1]
%tmp12241225 = bitcast { i16, i8, i8 }* %tmp1224 to <{ i8, i8, i8, i8 }>* ; <<{ i8, i8, i8, i8 }>*> [#uses=1]
- %tmp1226 = getelementptr <{ i8, i8, i8, i8 }>* %tmp12241225, i32 0, i32 1 ; <i8*> [#uses=2]
+ %tmp1226 = getelementptr <{ i8, i8, i8, i8 }>, <{ i8, i8, i8, i8 }>* %tmp12241225, i32 0, i32 1 ; <i8*> [#uses=2]
%mask1227 = and i8 %mask1218, 1 ; <i8> [#uses=2]
%tmp1228 = load i8* %tmp1226, align 1 ; <i8> [#uses=1]
%tmp1229 = and i8 %mask1227, 1 ; <i8> [#uses=1]
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
- %tmp1 = getelementptr %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.__res_state, %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp1, align 4
br label %return
return: ; preds = %entry
%retval = alloca i32 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = load %struct.__res_state** @__libc_resp, align 4 ; <%struct.__res_state*> [#uses=1]
- %tmp1 = getelementptr %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.__res_state, %struct.__res_state* %tmp, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 4
br label %return
return: ; preds = %entry
define i64 @____wcstoll_l_internal(i32* %nptr, i32** %endptr, i32 %base, i32 %group, %struct.__locale_struct* %loc) nounwind {
entry:
%tmp27 = load i32* null, align 4 ; <i32> [#uses=1]
- %tmp83 = getelementptr i32* %nptr, i32 1 ; <i32*> [#uses=1]
+ %tmp83 = getelementptr i32, i32* %nptr, i32 1 ; <i32*> [#uses=1]
%tmp233 = add i32 0, -48 ; <i32> [#uses=1]
br label %bb271.us
bb271.us: ; preds = %entry
%tmp373.reg2mem.0.ph = add i64 %tmp370371552.pn, %tmp369551.pn ; <i64> [#uses=1]
br label %bb374.us
bb374.us: ; preds = %bb314.us, %bb374.outer
- %tmp376.us = getelementptr i32* %s.5.ph, i32 0 ; <i32*> [#uses=3]
+ %tmp376.us = getelementptr i32, i32* %s.5.ph, i32 0 ; <i32*> [#uses=3]
%tmp378.us = load i32* %tmp376.us, align 4 ; <i32> [#uses=2]
%tmp302.us = icmp eq i32* %tmp376.us, %tmp83 ; <i1> [#uses=1]
%bothcond484.us = or i1 false, %tmp302.us ; <i1> [#uses=1]
cond_true: ; preds = %entry
%tmp1415 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp17 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
+ %tmp17 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
%tmp18 = load %struct.ComponentParameters** %tmp17, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp1920 = bitcast %struct.ComponentParameters* %tmp18 to i8* ; <i8*> [#uses=1]
%tmp212223 = sext i16 %tmp1415 to i64 ; <i64> [#uses=1]
- %tmp24 = getelementptr i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
+ %tmp24 = getelementptr i8, i8* %tmp1920, i64 %tmp212223 ; <i8*> [#uses=1]
%tmp2425 = bitcast i8* %tmp24 to i64* ; <i64*> [#uses=1]
%tmp28 = load i64* %tmp2425, align 8 ; <i64> [#uses=1]
%tmp2829 = inttoptr i64 %tmp28 to i32* ; <i32*> [#uses=1]
- %tmp31 = getelementptr %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
+ %tmp31 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 2 ; <i32**> [#uses=1]
store i32* %tmp2829, i32** %tmp31, align 8
br label %cond_next
cond_next: ; preds = %cond_true, %entry
%tmp4243 = shl i16 %param, 3 ; <i16> [#uses=1]
- %tmp46 = getelementptr %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
+ %tmp46 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 1 ; <%struct.ComponentParameters**> [#uses=1]
%tmp47 = load %struct.ComponentParameters** %tmp46, align 8 ; <%struct.ComponentParameters*> [#uses=1]
%tmp4849 = bitcast %struct.ComponentParameters* %tmp47 to i8* ; <i8*> [#uses=1]
%tmp505152 = sext i16 %tmp4243 to i64 ; <i64> [#uses=1]
- %tmp53 = getelementptr i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
+ %tmp53 = getelementptr i8, i8* %tmp4849, i64 %tmp505152 ; <i8*> [#uses=1]
%tmp5354 = bitcast i8* %tmp53 to i64* ; <i64*> [#uses=1]
%tmp58 = load i64* %tmp5354, align 8 ; <i64> [#uses=1]
%tmp59 = icmp eq i64 %tmp58, 0 ; <i1> [#uses=1]
br i1 %tmp59, label %UnifiedReturnBlock, label %cond_true63
cond_true63: ; preds = %cond_next
- %tmp65 = getelementptr %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
+ %tmp65 = getelementptr %struct.AGenericCall, %struct.AGenericCall* %this, i32 0, i32 0 ; <%struct.AGenericManager**> [#uses=1]
%tmp66 = load %struct.AGenericManager** %tmp65, align 8 ; <%struct.AGenericManager*> [#uses=1]
%tmp69 = tail call i32 @_ZN15AGenericManager24DefaultComponentInstanceERP23ComponentInstanceRecord( %struct.AGenericManager* %tmp66, %struct.ComponentInstanceRecord** %instance ) ; <i32> [#uses=1]
ret i32 %tmp69
call void @_ZNSt8ios_baseC2Ev( %"struct.std::ios_base"* null ) nounwind
store i32 (...)** getelementptr ([4 x i32 (...)*]* @_ZTVSt9basic_iosIcSt11char_traitsIcEE, i32 0, i32 2), i32 (...)*** null, align 4
store i32 (...)** null, i32 (...)*** null, align 4
- %ctg2242.i.i163.i = getelementptr i8* %tmp96.i.i142.i, i32 0 ; <i8*> [#uses=1]
+ %ctg2242.i.i163.i = getelementptr i8, i8* %tmp96.i.i142.i, i32 0 ; <i8*> [#uses=1]
%tmp150.i.i164.i = load i8** getelementptr ([4 x i8*]* @_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE, i32 0, i64 2), align 4 ; <i8*> [#uses=1]
%tmp150151.i.i165.i = bitcast i8* %tmp150.i.i164.i to i32 (...)** ; <i32 (...)**> [#uses=1]
%tmp153.i.i166.i = bitcast i8* %ctg2242.i.i163.i to i32 (...)*** ; <i32 (...)***> [#uses=1]
bb28: ; preds = %entry
br i1 false, label %bb37, label %done
bb37: ; preds = %bb28
- %tmp46 = getelementptr %struct.GENV_t* %tmp12, i32 0, i32 10 ; <i16*> [#uses=1]
+ %tmp46 = getelementptr %struct.GENV_t, %struct.GENV_t* %tmp12, i32 0, i32 10 ; <i16*> [#uses=1]
store i16 0, i16* %tmp46, align 4
br i1 false, label %bb74, label %bb92
bb74: ; preds = %bb37
br i1 false, label %bb23830, label %bb23827
bb23827: ; preds = %bb23821
- %tmp23829 = getelementptr %struct.V* null, i32 0, i32 42 ; <i32*> [#uses=0]
+ %tmp23829 = getelementptr %struct.V, %struct.V* null, i32 0, i32 42 ; <i32*> [#uses=0]
br label %bb23830
bb23830: ; preds = %bb23827, %bb23821, %bb23816.preheader
%d_addr = alloca i8 ; <i8*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = bitcast %struct.BoundaryAlignment* %str_addr to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp1 = getelementptr { i64, i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp1 = getelementptr { i64, i64 }, { i64, i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %str.0, i64* %tmp1
%tmp2 = bitcast %struct.BoundaryAlignment* %str_addr to { i64, i64 }* ; <{ i64, i64 }*> [#uses=1]
- %tmp3 = getelementptr { i64, i64 }* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp3 = getelementptr { i64, i64 }, { i64, i64 }* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
%bc = bitcast i64* %tmp3 to i8* ; <i8*> [#uses=2]
%byte = trunc i64 %str.1 to i8 ; <i8> [#uses=1]
store i8 %byte, i8* %bc
%shft = lshr i64 %str.1, 8 ; <i64> [#uses=2]
- %Loc = getelementptr i8* %bc, i32 1 ; <i8*> [#uses=2]
+ %Loc = getelementptr i8, i8* %bc, i32 1 ; <i8*> [#uses=2]
%byte4 = trunc i64 %shft to i8 ; <i8> [#uses=1]
store i8 %byte4, i8* %Loc
%shft5 = lshr i64 %shft, 8 ; <i64> [#uses=2]
- %Loc6 = getelementptr i8* %Loc, i32 1 ; <i8*> [#uses=2]
+ %Loc6 = getelementptr i8, i8* %Loc, i32 1 ; <i8*> [#uses=2]
%byte7 = trunc i64 %shft5 to i8 ; <i8> [#uses=1]
store i8 %byte7, i8* %Loc6
%shft8 = lshr i64 %shft5, 8 ; <i64> [#uses=2]
- %Loc9 = getelementptr i8* %Loc6, i32 1 ; <i8*> [#uses=2]
+ %Loc9 = getelementptr i8, i8* %Loc6, i32 1 ; <i8*> [#uses=2]
%byte10 = trunc i64 %shft8 to i8 ; <i8> [#uses=1]
store i8 %byte10, i8* %Loc9
%shft11 = lshr i64 %shft8, 8 ; <i64> [#uses=0]
- %Loc12 = getelementptr i8* %Loc9, i32 1 ; <i8*> [#uses=0]
+ %Loc12 = getelementptr i8, i8* %Loc9, i32 1 ; <i8*> [#uses=0]
store i16 %s, i16* %s_addr
store i32 %j, i32* %j_addr
store i8 %c, i8* %c_addr
store i16 %t, i16* %t_addr
store i16 %u, i16* %u_addr
store i8 %d, i8* %d_addr
- %tmp13 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 0 ; <[3 x i8]*> [#uses=1]
+ %tmp13 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 0 ; <[3 x i8]*> [#uses=1]
%tmp1314 = bitcast [3 x i8]* %tmp13 to i32* ; <i32*> [#uses=1]
%tmp15 = load i32* %tmp1314, align 4 ; <i32> [#uses=1]
%tmp16 = shl i32 %tmp15, 14 ; <i32> [#uses=1]
unreachable
bb27: ; preds = %entry
- %tmp28 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 1 ; <i8*> [#uses=1]
+ %tmp28 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 1 ; <i8*> [#uses=1]
%tmp29 = load i8* %tmp28, align 4 ; <i8> [#uses=1]
%tmp30 = load i8* %c_addr, align 1 ; <i8> [#uses=1]
%tmp31 = icmp ne i8 %tmp29, %tmp30 ; <i1> [#uses=1]
unreachable
bb35: ; preds = %bb27
- %tmp36 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 2 ; <i16*> [#uses=1]
+ %tmp36 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 2 ; <i16*> [#uses=1]
%tmp37 = load i16* %tmp36, align 4 ; <i16> [#uses=1]
%tmp38 = shl i16 %tmp37, 7 ; <i16> [#uses=1]
%tmp39 = ashr i16 %tmp38, 7 ; <i16> [#uses=1]
unreachable
bb51: ; preds = %bb35
- %tmp52 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 3 ; <i16*> [#uses=1]
+ %tmp52 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 3 ; <i16*> [#uses=1]
%tmp53 = load i16* %tmp52, align 4 ; <i16> [#uses=1]
%tmp54 = shl i16 %tmp53, 7 ; <i16> [#uses=1]
%tmp55 = ashr i16 %tmp54, 7 ; <i16> [#uses=1]
unreachable
bb67: ; preds = %bb51
- %tmp68 = getelementptr %struct.BoundaryAlignment* %str_addr, i32 0, i32 4 ; <i8*> [#uses=1]
+ %tmp68 = getelementptr %struct.BoundaryAlignment, %struct.BoundaryAlignment* %str_addr, i32 0, i32 4 ; <i8*> [#uses=1]
%tmp69 = load i8* %tmp68, align 4 ; <i8> [#uses=1]
%tmp70 = load i8* %d_addr, align 1 ; <i8> [#uses=1]
%tmp71 = icmp ne i8 %tmp69, %tmp70 ; <i1> [#uses=1]
br i1 %foo, label %bb507, label %bb841
bb507: ; preds = %bb502
- %tmp517 = getelementptr %struct.tree_node* %last.0, i32 0, i32 0 ; <%struct.tree_function_decl*> [#uses=1]
+ %tmp517 = getelementptr %struct.tree_node, %struct.tree_node* %last.0, i32 0, i32 0 ; <%struct.tree_function_decl*> [#uses=1]
%tmp517518 = bitcast %struct.tree_function_decl* %tmp517 to %struct.tree_common* ; <%struct.tree_common*> [#uses=1]
- %tmp519 = getelementptr %struct.tree_common* %tmp517518, i32 0, i32 0 ; <%struct.tree_node**> [#uses=1]
+ %tmp519 = getelementptr %struct.tree_common, %struct.tree_common* %tmp517518, i32 0, i32 0 ; <%struct.tree_node**> [#uses=1]
store %struct.tree_node* null, %struct.tree_node** %tmp519, align 4
br label %bb841
br i1 false, label %bb39, label %bb49.outer
bb49.outer: ; preds = %bb39, %bb40.preheader
- getelementptr %struct.res_state* %state, i32 0, i32 3 ; <i32*>:1 [#uses=0]
- getelementptr %struct.res_state* %state, i32 0, i32 7 ; <i32*>:2 [#uses=0]
+ getelementptr %struct.res_state, %struct.res_state* %state, i32 0, i32 3 ; <i32*>:1 [#uses=0]
+ getelementptr %struct.res_state, %struct.res_state* %state, i32 0, i32 7 ; <i32*>:2 [#uses=0]
%base10.1 = select i1 false, float* null, float* null ; <float*> [#uses=1]
br label %bb74
bb73: ; preds = %bb71
%.rec = add i32 %base10.2.ph.rec, 1 ; <i32> [#uses=2]
- getelementptr float* %base10.1, i32 %.rec ; <float*>:4 [#uses=1]
+ getelementptr float, float* %base10.1, i32 %.rec ; <float*>:4 [#uses=1]
br label %bb74
bb74: ; preds = %bb73, %bb71, %bb49.outer
define i32 @vorbis_comment_query_count(%struct.vorbis_comment* %vc, i8* %tag) nounwind {
entry:
%strlen = call i32 @strlen( i8* null ) ; <i32> [#uses=1]
- %endptr = getelementptr i8* null, i32 %strlen ; <i8*> [#uses=0]
+ %endptr = getelementptr i8, i8* null, i32 %strlen ; <i8*> [#uses=0]
unreachable
}
define i32 @get(%struct.foo* %c, i8* %state) nounwind {
entry:
- %0 = getelementptr %struct.foo* %c, i32 0, i32 0 ; <i32*> [#uses=2]
- %1 = getelementptr %struct.foo* %c, i32 0, i32 1 ; <i32*> [#uses=2]
- %2 = getelementptr %struct.foo* %c, i32 0, i32 2 ; <i8**> [#uses=2]
+ %0 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 0 ; <i32*> [#uses=2]
+ %1 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 1 ; <i32*> [#uses=2]
+ %2 = getelementptr %struct.foo, %struct.foo* %c, i32 0, i32 2 ; <i8**> [#uses=2]
%3 = load i32* %0, align 4 ; <i32> [#uses=1]
%4 = load i32* %1, align 4 ; <i32> [#uses=1]
%5 = load i8* %state, align 1 ; <i8> [#uses=1]
br label %bb35.outer
bb34: ; preds = %bb35, %bb35, %bb35, %bb35
- %8 = getelementptr i8* %bufptr.0.lcssa, i32 %totalLength.0.ph ; <i8*> [#uses=1]
+ %8 = getelementptr i8, i8* %bufptr.0.lcssa, i32 %totalLength.0.ph ; <i8*> [#uses=1]
store i8 92, i8* %8, align 1
br label %bb35.outer
define i32 @foo(%struct.x* %p) nounwind {
entry:
- %0 = getelementptr %struct.x* %p, i32 0, i32 0 ; <i8*> [#uses=1]
+ %0 = getelementptr %struct.x, %struct.x* %p, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 55, i8* %0, align 1
%1 = bitcast %struct.x* %p to i32* ; <i32*> [#uses=1]
%2 = load i32* %1, align 1 ; <i32> [#uses=1]
bb1: ; preds = %bb, %entry
%P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
- %P.0 = getelementptr i8* %Q, i32 %P.0.rec ; <i8*> [#uses=2]
+ %P.0 = getelementptr i8, i8* %Q, i32 %P.0.rec ; <i8*> [#uses=2]
%0 = load i8* %P.0, align 1 ; <i8> [#uses=1]
switch i8 %0, label %bb3 [
i8 12, label %bb
bb3: ; preds = %bb1
%P.0.sum = add i32 %P.0.rec, 2 ; <i32> [#uses=1]
- %1 = getelementptr i8* %Q, i32 %P.0.sum ; <i8*> [#uses=1]
+ %1 = getelementptr i8, i8* %Q, i32 %P.0.sum ; <i8*> [#uses=1]
store i8 4, i8* %1, align 1
ret i8* %P.0
}
%4 = ptrtoint i8* %L to i32
%5 = add i32 %4, %3
%6 = add i32 %5, %1 ; <i32> [#uses=1]
- %7 = getelementptr i8* %a, i32 %6 ; <i8*> [#uses=1]
+ %7 = getelementptr i8, i8* %a, i32 %6 ; <i8*> [#uses=1]
br label %return
return: ; preds = %bb3
%2 = sub i32 %1, %0
%3 = ptrtoint i8* %L to i32
%4 = sub i32 %2, %3 ; <i32> [#uses=1]
- %5 = getelementptr i8* %a, i32 %4 ; <i8*> [#uses=1]
+ %5 = getelementptr i8, i8* %a, i32 %4 ; <i8*> [#uses=1]
br label %return
return: ; preds = %bb3
%Y = alloca i32
call void @frob(i32* %Y) nounwind
%Y3 = bitcast i32* %Y to i8*
- %ctg2 = getelementptr i8* %Y3, i32 ptrtoint ([0 x i32]* @X to i32)
+ %ctg2 = getelementptr i8, i8* %Y3, i32 ptrtoint ([0 x i32]* @X to i32)
%0 = ptrtoint i8* %ctg2 to i32
call void @borf(i32 %0) nounwind
ret void
define void @bar(i32 %i) nounwind {
entry:
%Y = alloca [10 x i32]
- %0 = getelementptr [10 x i32]* %Y, i32 0, i32 0
+ %0 = getelementptr [10 x i32], [10 x i32]* %Y, i32 0, i32 0
call void @frob(i32* %0) nounwind
- %1 = getelementptr [0 x i32]* @X, i32 0, i32 %i
- %2 = getelementptr [10 x i32]* %Y, i32 0, i32 0
+ %1 = getelementptr [0 x i32], [0 x i32]* @X, i32 0, i32 %i
+ %2 = getelementptr [10 x i32], [10 x i32]* %Y, i32 0, i32 0
%3 = ptrtoint i32* %2 to i32
%4 = bitcast i32* %1 to i8*
- %ctg2 = getelementptr i8* %4, i32 %3
+ %ctg2 = getelementptr i8, i8* %4, i32 %3
%5 = ptrtoint i8* %ctg2 to i32
call void @borf(i32 %5) nounwind
ret void
%indvar202 = trunc i64 %p_addr.0.pn.rec to i32 ; <i32>[#uses=1]
%frac_bits.0 = mul i32 %indvar202, %shift.0 ; <i32>[#uses=1]
%p_addr.6.rec = add i64 %p_addr.0.pn.rec, 1 ; <i64>[#uses=2]
- %p_addr.6 = getelementptr i8* null, i64 %p_addr.6.rec ; <i8*>[#uses=1]
+ %p_addr.6 = getelementptr i8, i8* null, i64 %p_addr.6.rec ; <i8*>[#uses=1]
br i1 false, label %bb66, label %bb62
bb62: ; preds = %bb56
define i32 @alac_decode_frame() nounwind {
entry:
%tmp2 = load i8** null ; <i8*> [#uses=2]
- %tmp34 = getelementptr i8* %tmp2, i32 4 ; <i8*> [#uses=2]
+ %tmp34 = getelementptr i8, i8* %tmp2, i32 4 ; <i8*> [#uses=2]
%tmp5.i424 = bitcast i8* %tmp34 to i8** ; <i8**> [#uses=2]
- %tmp15.i = getelementptr i8* %tmp2, i32 12 ; <i8*> [#uses=1]
+ %tmp15.i = getelementptr i8, i8* %tmp2, i32 12 ; <i8*> [#uses=1]
%0 = bitcast i8* %tmp15.i to i32* ; <i32*> [#uses=1]
br i1 false, label %if.then43, label %if.end47
%19 = bitcast i8* %18 to [0 x i8]*, !dbg !13 ; <[0 x i8]*> [#uses=1]
store [0 x i8]* %19, [0 x i8]** %str.0, align 8, !dbg !13
%20 = load [0 x i8]** %str.0, align 8, !dbg !15 ; <[0 x i8]*> [#uses=1]
- %21 = getelementptr inbounds [0 x i8]* %20, i64 0, i64 0, !dbg !15 ; <i8*> [#uses=1]
+ %21 = getelementptr inbounds [0 x i8], [0 x i8]* %20, i64 0, i64 0, !dbg !15 ; <i8*> [#uses=1]
store i8 0, i8* %21, align 1, !dbg !15
%22 = load [0 x i8]** %str.0, align 8, !dbg !16 ; <[0 x i8]*> [#uses=1]
- %23 = getelementptr inbounds [0 x i8]* %22, i64 0, i64 0, !dbg !16 ; <i8*> [#uses=1]
+ %23 = getelementptr inbounds [0 x i8], [0 x i8]* %22, i64 0, i64 0, !dbg !16 ; <i8*> [#uses=1]
%24 = load i8* %23, align 1, !dbg !16 ; <i8> [#uses=1]
%25 = sext i8 %24 to i32, !dbg !16 ; <i32> [#uses=1]
store i32 %25, i32* %0, align 4, !dbg !16
%3 = load i32* null, align 4 ; <i32> [#uses=1]
%4 = uitofp i32 %3 to float ; <float> [#uses=1]
%.sum13.i = add i64 0, 4 ; <i64> [#uses=1]
- %5 = getelementptr i8* null, i64 %.sum13.i ; <i8*> [#uses=1]
+ %5 = getelementptr i8, i8* null, i64 %.sum13.i ; <i8*> [#uses=1]
%6 = bitcast i8* %5 to i32* ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=1]
%8 = uitofp i32 %7 to float ; <float> [#uses=1]
%.sum.i = add i64 0, 8 ; <i64> [#uses=1]
- %9 = getelementptr i8* null, i64 %.sum.i ; <i8*> [#uses=1]
+ %9 = getelementptr i8, i8* null, i64 %.sum.i ; <i8*> [#uses=1]
%10 = bitcast i8* %9 to i32* ; <i32*> [#uses=1]
%11 = load i32* %10, align 4 ; <i32> [#uses=1]
%12 = uitofp i32 %11 to float ; <float> [#uses=1]
%0 = ptrtoint i8** %h to i32 ; <i32> [#uses=2]
%1 = and i32 %0, -4096 ; <i32> [#uses=1]
%2 = inttoptr i32 %1 to %struct.HandleBlock* ; <%struct.HandleBlock*> [#uses=3]
- %3 = getelementptr %struct.HandleBlock* %2, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %3 = getelementptr %struct.HandleBlock, %struct.HandleBlock* %2, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
%4 = load i32* %3, align 4096 ; <i32> [#uses=1]
%5 = icmp eq i32 %4, 1751280747 ; <i1> [#uses=1]
br i1 %5, label %bb, label %bb1
bb: ; preds = %entry
- %6 = getelementptr %struct.HandleBlock* %2, i32 0, i32 1 ; <[990 x i8*]*> [#uses=1]
+ %6 = getelementptr %struct.HandleBlock, %struct.HandleBlock* %2, i32 0, i32 1 ; <[990 x i8*]*> [#uses=1]
%7 = ptrtoint [990 x i8*]* %6 to i32 ; <i32> [#uses=1]
%8 = sub i32 %0, %7 ; <i32> [#uses=2]
%9 = lshr i32 %8, 2 ; <i32> [#uses=1]
%10 = ashr i32 %8, 7 ; <i32> [#uses=1]
%11 = and i32 %10, 134217727 ; <i32> [#uses=1]
- %12 = getelementptr %struct.HandleBlock* %2, i32 0, i32 0, i32 %11 ; <i32*> [#uses=1]
+ %12 = getelementptr %struct.HandleBlock, %struct.HandleBlock* %2, i32 0, i32 0, i32 %11 ; <i32*> [#uses=1]
%not.i = and i32 %9, 31 ; <i32> [#uses=1]
%13 = xor i32 %not.i, 31 ; <i32> [#uses=1]
%14 = shl i32 1, %13 ; <i32> [#uses=1]
%8 = sext i32 %5 to i64 ; <i64> [#uses=1]
%9 = sext i32 %7 to i64 ; <i64> [#uses=1]
%10 = sub i64 %8, %9 ; <i64> [#uses=1]
- %11 = getelementptr i8* %0, i64 %10 ; <i8*> [#uses=1]
+ %11 = getelementptr i8, i8* %0, i64 %10 ; <i8*> [#uses=1]
%12 = icmp sgt i32 %7, 0 ; <i1> [#uses=1]
br i1 %12, label %13, label %14
define void @off01(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %.sum
store i32* %0, i32** @ptr, align 8
ret void
}
%xxx = alloca %struct.X ; <%struct.X*> [#uses=6]
%0 = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %1 = getelementptr %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
- %2 = getelementptr [32 x i8]* %1, i32 0, i32 31 ; <i8*> [#uses=1]
+ %1 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
+ %2 = getelementptr [32 x i8], [32 x i8]* %1, i32 0, i32 31 ; <i8*> [#uses=1]
store i8 48, i8* %2, align 1
- %3 = getelementptr %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
- %4 = getelementptr [32 x i8]* %3, i32 0, i32 31 ; <i8*> [#uses=1]
+ %3 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
+ %4 = getelementptr [32 x i8], [32 x i8]* %3, i32 0, i32 31 ; <i8*> [#uses=1]
%5 = load i8* %4, align 1 ; <i8> [#uses=1]
- %6 = getelementptr %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
- %7 = getelementptr [32 x i8]* %6, i32 0, i32 0 ; <i8*> [#uses=1]
+ %6 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 1 ; <[32 x i8]*> [#uses=1]
+ %7 = getelementptr [32 x i8], [32 x i8]* %6, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 %5, i8* %7, align 1
- %8 = getelementptr %struct.X* %xxx, i32 0, i32 0 ; <i8*> [#uses=1]
+ %8 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 15, i8* %8, align 1
%9 = call i32 (...)* bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval align 4 %xxx, %struct.X* byval align 4 %xxx) nounwind ; <i32> [#uses=1]
store i32 %9, i32* %0, align 4
%23 = add i32 0, 12 ; <i32> [#uses=1]
%24 = and i32 %23, 12 ; <i32> [#uses=1]
%25 = zext i32 %24 to i64 ; <i64> [#uses=1]
- %26 = getelementptr [16 x i64]* null, i64 0, i64 %25 ; <i64*> [#uses=0]
+ %26 = getelementptr [16 x i64], [16 x i64]* null, i64 0, i64 %25 ; <i64*> [#uses=0]
%27 = add i64 0, %e.0489 ; <i64> [#uses=1]
%28 = add i64 %27, 0 ; <i64> [#uses=1]
%29 = add i64 %28, 0 ; <i64> [#uses=1]
%43 = or i32 0, 6 ; <i32> [#uses=1]
%44 = and i32 %43, 14 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr [16 x i64]* null, i64 0, i64 %45 ; <i64*> [#uses=1]
+ %46 = getelementptr [16 x i64], [16 x i64]* null, i64 0, i64 %45 ; <i64*> [#uses=1]
%not417 = xor i64 %42, -1 ; <i64> [#uses=1]
%47 = and i64 %20, %not417 ; <i64> [#uses=1]
%48 = xor i64 0, %47 ; <i64> [#uses=1]
- %49 = getelementptr [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
+ %49 = getelementptr [80 x i64], [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
%50 = load i64* %49, align 8 ; <i64> [#uses=1]
%51 = add i64 %48, 0 ; <i64> [#uses=1]
%52 = add i64 %51, 0 ; <i64> [#uses=1]
%60 = or i32 0, 7 ; <i32> [#uses=1]
%61 = and i32 %60, 15 ; <i32> [#uses=1]
%62 = zext i32 %61 to i64 ; <i64> [#uses=1]
- %63 = getelementptr [16 x i64]* null, i64 0, i64 %62 ; <i64*> [#uses=2]
+ %63 = getelementptr [16 x i64], [16 x i64]* null, i64 0, i64 %62 ; <i64*> [#uses=2]
%64 = load i64* null, align 8 ; <i64> [#uses=1]
%65 = lshr i64 %64, 6 ; <i64> [#uses=1]
%66 = xor i64 0, %65 ; <i64> [#uses=1]
%not429 = xor i64 %57, -1 ; <i64> [#uses=1]
%76 = and i64 %33, %not429 ; <i64> [#uses=1]
%77 = xor i64 %75, %76 ; <i64> [#uses=1]
- %78 = getelementptr [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
+ %78 = getelementptr [80 x i64], [80 x i64]* @K512, i64 0, i64 0 ; <i64*> [#uses=1]
%79 = load i64* %78, align 16 ; <i64> [#uses=1]
%80 = add i64 %77, %20 ; <i64> [#uses=1]
%81 = add i64 %80, %72 ; <i64> [#uses=1]
entry:
%Opq.sa.calc = add i32 0, 2 ; <i32> [#uses=2]
%0 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=3]
- %1 = getelementptr %struct.ImageParameters* %0, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %1 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%2 = load %struct.Macroblock** %1, align 8 ; <%struct.Macroblock*> [#uses=24]
%3 = zext i32 %curr_mb_nr to i64 ; <i64> [#uses=24]
%4 = sext i32 %is_chroma to i64 ; <i64> [#uses=8]
entry.fragment: ; preds = %meshBB392
%Opq.sa.calc747 = add i32 %Opq.sa.calc921, 70 ; <i32> [#uses=0]
- %5 = getelementptr %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 0 ; <i32*> [#uses=1]
+ %5 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 0 ; <i32*> [#uses=1]
%6 = load i32* %5, align 4 ; <i32> [#uses=2]
- %7 = getelementptr %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 1 ; <i32*> [#uses=1]
+ %7 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %0, i64 0, i32 119, i64 %4, i64 1 ; <i32*> [#uses=1]
%8 = load i32* %7, align 4 ; <i32> [#uses=5]
br label %entry.fragment181
entry.fragment181: ; preds = %entry.fragment
%Opq.sa.calc863 = add i32 %Opq.sa.calc921, -50 ; <i32> [#uses=4]
- %9 = getelementptr %struct.PixelPos* %pix, i64 0, i32 0 ; <i32*> [#uses=4]
+ %9 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 0 ; <i32*> [#uses=4]
store i32 0, i32* %9, align 4
%10 = add i32 %8, -1 ; <i32> [#uses=6]
%11 = icmp slt i32 %10, %yN ; <i1> [#uses=1]
bb5: ; preds = %meshBB428
%Opq.sa.calc470 = sub i32 %Opq.sa.calc897, -49 ; <i32> [#uses=1]
- %17 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
+ %17 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
%18 = load i32* %17, align 4 ; <i32> [#uses=1]
br label %bb5.fragment
bb7: ; preds = %bb6
%Opq.sa.calc476 = add i32 %Opq.sa.calc873, -58 ; <i32> [#uses=1]
- %22 = getelementptr %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
+ %22 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
%23 = load i32* %22, align 8 ; <i32> [#uses=1]
%24 = add i32 %23, 1 ; <i32> [#uses=1]
- %25 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %25 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %meshBB388
bb7.fragment: ; preds = %meshBB388
%Opq.sa.calc707 = add i32 %Opq.sa.calc709, %Opq.sa.calc886 ; <i32> [#uses=1]
%Opq.sa.calc708 = xor i32 %Opq.sa.calc707, 474 ; <i32> [#uses=0]
store i32 %.SV194.phi, i32* %.SV196.phi, align 4
- %26 = getelementptr %struct.Macroblock* %.load17.SV.phi, i64 %.load36.SV.phi, i32 29 ; <i32*> [#uses=1]
+ %26 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load17.SV.phi, i64 %.load36.SV.phi, i32 29 ; <i32*> [#uses=1]
%27 = load i32* %26, align 8 ; <i32> [#uses=2]
store i32 %27, i32* %.load67.SV.phi, align 4
br label %bb96
bb8: ; preds = %meshBB348
%Opq.sa.calc479 = sub i32 %Opq.sa.calc805, 141 ; <i32> [#uses=1]
- %28 = getelementptr %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=2]
+ %28 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=2]
%29 = load i32* %28, align 4 ; <i32> [#uses=2]
- %30 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
+ %30 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
br label %meshBB368
bb8.fragment: ; preds = %meshBB368
%Opq.sa.calc765 = sub i32 %Opq.sa.calc768, -115 ; <i32> [#uses=2]
store i32 %.SV198.phi, i32* %.SV200.phi, align 4
- %31 = getelementptr %struct.Macroblock* %.load16.SV.phi, i64 %.load35.SV.phi, i32 26 ; <i32*> [#uses=2]
+ %31 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load16.SV.phi, i64 %.load35.SV.phi, i32 26 ; <i32*> [#uses=2]
%32 = load i32* %31, align 4 ; <i32> [#uses=4]
store i32 %32, i32* %.load66.SV.phi, align 4
%33 = load i32* %31, align 4 ; <i32> [#uses=1]
bb9: ; preds = %bb8.fragment
%Opq.sa.calc482 = xor i32 %Opq.sa.calc765, 163 ; <i32> [#uses=0]
%35 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %36 = getelementptr %struct.ImageParameters* %35, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %36 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %35, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%37 = load %struct.Macroblock** %36, align 8 ; <%struct.Macroblock*> [#uses=1]
%38 = load i32* %.SV76.phi, align 4 ; <i32> [#uses=1]
br label %bb9.fragment
bb9.fragment: ; preds = %bb9
%Opq.sa.calc999 = add i32 %Opq.sa.calc765, -44 ; <i32> [#uses=1]
%39 = sext i32 %38 to i64 ; <i64> [#uses=1]
- %40 = getelementptr %struct.Macroblock* %37, i64 %39, i32 20 ; <i32*> [#uses=1]
+ %40 = getelementptr %struct.Macroblock, %struct.Macroblock* %37, i64 %39, i32 20 ; <i32*> [#uses=1]
%41 = load i32* %40, align 4 ; <i32> [#uses=1]
%42 = icmp eq i32 %41, 0 ; <i1> [#uses=1]
br i1 %42, label %bb96, label %bb11
%Opq.sa.calc490 = xor i32 %Opq.sa.calc873, 175 ; <i32> [#uses=1]
%Opq.sa.calc488 = sub i32 %Opq.sa.calc490, %Opq.sa.calc873 ; <i32> [#uses=1]
%Opq.sa.calc489 = sub i32 %Opq.sa.calc488, 133 ; <i32> [#uses=1]
- %46 = getelementptr %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
+ %46 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 25 ; <i32*> [#uses=1]
br label %meshBB360
bb13.fragment: ; preds = %meshBB360
bb14: ; preds = %bb13.fragment
%Opq.sa.calc493 = add i32 %Opq.sa.calc870, 103 ; <i32> [#uses=1]
- %48 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
+ %48 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
store i32 %47, i32* %48, align 4
- %49 = getelementptr %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=2]
+ %49 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=2]
br label %bb14.fragment
bb14.fragment: ; preds = %bb14
bb15: ; preds = %bb14.fragment
%Opq.sa.calc496 = sub i32 %Opq.sa.calc723, -8 ; <i32> [#uses=1]
%53 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %54 = getelementptr %struct.ImageParameters* %53, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %54 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %53, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%55 = load %struct.Macroblock** %54, align 8 ; <%struct.Macroblock*> [#uses=1]
%56 = load i32* %.SV208.phi, align 8 ; <i32> [#uses=1]
br label %meshBB324
bb15.fragment: ; preds = %meshBB324
%Opq.sa.calc925 = xor i32 %Opq.sa.calc750, 215 ; <i32> [#uses=2]
%57 = sext i32 %.SV214.phi to i64 ; <i64> [#uses=1]
- %58 = getelementptr %struct.Macroblock* %.SV212.phi, i64 %57, i32 20 ; <i32*> [#uses=1]
+ %58 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV212.phi, i64 %57, i32 20 ; <i32*> [#uses=1]
%59 = load i32* %58, align 4 ; <i32> [#uses=1]
%60 = icmp eq i32 %59, 0 ; <i1> [#uses=1]
br i1 %60, label %bb16, label %bb96
bb19: ; preds = %meshBB412
%Opq.sa.calc502 = sub i32 %Opq.sa.calc932, -94 ; <i32> [#uses=0]
%63 = add i32 %.SV87.phi1030, 1 ; <i32> [#uses=1]
- %64 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %64 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %bb19.fragment
bb19.fragment: ; preds = %bb19
%Opq.sa.calc880 = xor i32 %Opq.sa.calc932, 246 ; <i32> [#uses=0]
store i32 %63, i32* %64, align 4
- %65 = getelementptr %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=1]
+ %65 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 29 ; <i32*> [#uses=1]
%66 = load i32* %65, align 8 ; <i32> [#uses=2]
store i32 %66, i32* %.SV52.phi1186, align 4
br label %bb96
bb23: ; preds = %meshBB360
%Opq.sa.calc509 = xor i32 %Opq.sa.calc866, 70 ; <i32> [#uses=1]
%Opq.sa.calc508 = sub i32 %Opq.sa.calc509, -19 ; <i32> [#uses=0]
- %67 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
+ %67 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
%68 = load i32* %67, align 4 ; <i32> [#uses=1]
%69 = icmp eq i32 %68, 0 ; <i1> [#uses=1]
%70 = and i32 %curr_mb_nr, 1 ; <i32> [#uses=1]
bb23.fragment: ; preds = %bb23
%Opq.sa.calc847 = sub i32 %Opq.sa.calc866, -9 ; <i32> [#uses=2]
- %72 = getelementptr %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=3]
+ %72 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 22 ; <i32*> [#uses=3]
%73 = load i32* %72, align 4 ; <i32> [#uses=3]
- %74 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
+ %74 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
store i32 %73, i32* %74, align 4
br label %bb23.fragment182
%Opq.sa.calc744 = xor i32 %Opq.sa.calc847, 152 ; <i32> [#uses=4]
%Opq.sa.calc742 = add i32 %Opq.sa.calc744, %Opq.sa.calc847 ; <i32> [#uses=1]
%Opq.sa.calc743 = add i32 %Opq.sa.calc742, -149 ; <i32> [#uses=2]
- %75 = getelementptr %struct.Macroblock* %2, i64 %3, i32 26 ; <i32*> [#uses=2]
+ %75 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 26 ; <i32*> [#uses=2]
%76 = load i32* %75, align 4 ; <i32> [#uses=3]
store i32 %76, i32* %.SV52.phi1113, align 4
%77 = load i32* %75, align 4 ; <i32> [#uses=1]
%Opq.sa.calc519 = xor i32 %Opq.sa.calc515, 23 ; <i32> [#uses=2]
%Opq.sa.calc518 = xor i32 %Opq.sa.calc519, 84 ; <i32> [#uses=1]
%79 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %80 = getelementptr %struct.ImageParameters* %79, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %80 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %79, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%81 = load %struct.Macroblock** %80, align 8 ; <%struct.Macroblock*> [#uses=1]
%82 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
br label %meshBB340
%Opq.sa.calc916 = add i32 %Opq.sa.calc918, %Opq.sa.calc754 ; <i32> [#uses=1]
%Opq.sa.calc917 = add i32 %Opq.sa.calc916, -237 ; <i32> [#uses=1]
%83 = sext i32 %.SV230.phi to i64 ; <i64> [#uses=1]
- %84 = getelementptr %struct.Macroblock* %.SV228.phi, i64 %83, i32 20 ; <i32*> [#uses=1]
+ %84 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV228.phi, i64 %83, i32 20 ; <i32*> [#uses=1]
%85 = load i32* %84, align 4 ; <i32> [#uses=1]
%86 = icmp eq i32 %85, 0 ; <i1> [#uses=1]
br i1 %86, label %meshBB420, label %meshBB356
bb33: ; preds = %bb32
%Opq.sa.calc534 = sub i32 %Opq.sa.calc512, -75 ; <i32> [#uses=2]
%92 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %93 = getelementptr %struct.ImageParameters* %92, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %93 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %92, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%94 = load %struct.Macroblock** %93, align 8 ; <%struct.Macroblock*> [#uses=1]
%95 = load i32* %.SV99.phi, align 4 ; <i32> [#uses=1]
br label %bb33.fragment
bb33.fragment: ; preds = %bb33
%Opq.sa.calc712 = add i32 %Opq.sa.calc534, -109 ; <i32> [#uses=3]
%96 = sext i32 %95 to i64 ; <i64> [#uses=1]
- %97 = getelementptr %struct.Macroblock* %94, i64 %96, i32 20 ; <i32*> [#uses=1]
+ %97 = getelementptr %struct.Macroblock, %struct.Macroblock* %94, i64 %96, i32 20 ; <i32*> [#uses=1]
%98 = load i32* %97, align 4 ; <i32> [#uses=1]
%99 = icmp eq i32 %98, 0 ; <i1> [#uses=1]
br i1 %99, label %bb34, label %meshBB
bb41: ; preds = %meshBB336
%Opq.sa.calc557 = sub i32 %Opq.sa.calc979, 143 ; <i32> [#uses=1]
%108 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %109 = getelementptr %struct.ImageParameters* %108, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %109 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %108, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%110 = load %struct.Macroblock** %109, align 8 ; <%struct.Macroblock*> [#uses=1]
%111 = load i32* %.SV99.phi1128, align 4 ; <i32> [#uses=1]
br label %bb41.fragment
bb41.fragment: ; preds = %bb41
%Opq.sa.calc987 = xor i32 %Opq.sa.calc557, 213 ; <i32> [#uses=4]
%112 = sext i32 %111 to i64 ; <i64> [#uses=1]
- %113 = getelementptr %struct.Macroblock* %110, i64 %112, i32 20 ; <i32*> [#uses=1]
+ %113 = getelementptr %struct.Macroblock, %struct.Macroblock* %110, i64 %112, i32 20 ; <i32*> [#uses=1]
%114 = load i32* %113, align 4 ; <i32> [#uses=1]
%115 = icmp eq i32 %114, 0 ; <i1> [#uses=1]
br i1 %115, label %bb42, label %bb96
bb49: ; preds = %bb48
%Opq.sa.calc572 = add i32 %Opq.sa.calc798, 84 ; <i32> [#uses=0]
%122 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %123 = getelementptr %struct.ImageParameters* %122, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %123 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %122, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%124 = load %struct.Macroblock** %123, align 8 ; <%struct.Macroblock*> [#uses=1]
%125 = load i32* %.SV99.phi1037, align 4 ; <i32> [#uses=1]
br label %bb49.fragment
bb49.fragment: ; preds = %bb49
%Opq.sa.calc860 = sub i32 %Opq.sa.calc569, 114 ; <i32> [#uses=5]
%126 = sext i32 %125 to i64 ; <i64> [#uses=1]
- %127 = getelementptr %struct.Macroblock* %124, i64 %126, i32 20 ; <i32*> [#uses=1]
+ %127 = getelementptr %struct.Macroblock, %struct.Macroblock* %124, i64 %126, i32 20 ; <i32*> [#uses=1]
%128 = load i32* %127, align 4 ; <i32> [#uses=1]
%129 = icmp eq i32 %128, 0 ; <i1> [#uses=1]
br i1 %129, label %bb50, label %meshBB380
bb59: ; preds = %bb58
%Opq.sa.calc599 = add i32 %Opq.sa.calc1002, 151 ; <i32> [#uses=0]
- %141 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
+ %141 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
%142 = load i32* %141, align 4 ; <i32> [#uses=1]
br label %bb59.fragment
bb61: ; preds = %bb60
%Opq.sa.calc605 = xor i32 %Opq.sa.calc731, 57 ; <i32> [#uses=1]
- %146 = getelementptr %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=2]
+ %146 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=2]
%147 = load i32* %146, align 8 ; <i32> [#uses=3]
- %148 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
+ %148 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=3]
br label %bb61.fragment
bb61.fragment: ; preds = %bb61
%Opq.sa.calc700 = sub i32 %Opq.sa.calc605, 108 ; <i32> [#uses=3]
store i32 %147, i32* %148, align 4
- %149 = getelementptr %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=4]
+ %149 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=4]
%150 = load i32* %149, align 8 ; <i32> [#uses=1]
%151 = icmp eq i32 %150, 0 ; <i1> [#uses=1]
br i1 %151, label %bb65, label %bb62
bb62: ; preds = %bb61.fragment
%Opq.sa.calc608 = add i32 %Opq.sa.calc700, -94 ; <i32> [#uses=1]
%152 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=2]
- %153 = getelementptr %struct.ImageParameters* %152, i64 0, i32 45 ; <i32*> [#uses=1]
+ %153 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %152, i64 0, i32 45 ; <i32*> [#uses=1]
%154 = load i32* %153, align 4 ; <i32> [#uses=1]
%155 = icmp eq i32 %154, 1 ; <i1> [#uses=1]
br i1 %155, label %bb63, label %bb64
bb63: ; preds = %bb62
%Opq.sa.calc611 = add i32 %Opq.sa.calc700, -101 ; <i32> [#uses=2]
- %156 = getelementptr %struct.ImageParameters* %152, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %156 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %152, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%157 = load %struct.Macroblock** %156, align 8 ; <%struct.Macroblock*> [#uses=1]
%158 = load i32* %146, align 8 ; <i32> [#uses=1]
br label %meshBB452
%Opq.sa.calc891 = add i32 %Opq.link.mask823, 18 ; <i32> [#uses=2]
%Opq.sa.calc890 = add i32 %Opq.sa.calc891, -3 ; <i32> [#uses=2]
%159 = sext i32 %.SV266.phi to i64 ; <i64> [#uses=1]
- %160 = getelementptr %struct.Macroblock* %.SV264.phi, i64 %159, i32 20 ; <i32*> [#uses=1]
+ %160 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV264.phi, i64 %159, i32 20 ; <i32*> [#uses=1]
%161 = load i32* %160, align 4 ; <i32> [#uses=1]
%162 = icmp eq i32 %161, 0 ; <i1> [#uses=1]
br i1 %162, label %bb64, label %meshBB456
bb66: ; preds = %bb60
%Opq.sa.calc621 = add i32 %Opq.sa.calc602, -217 ; <i32> [#uses=1]
%165 = add i32 %curr_mb_nr, -1 ; <i32> [#uses=1]
- %166 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %166 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %meshBB420
bb66.fragment: ; preds = %meshBB420
bb68: ; preds = %bb59.fragment
%Opq.sa.calc624 = sub i32 %Opq.sa.calc731, 229 ; <i32> [#uses=3]
- %167 = getelementptr %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
+ %167 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
br label %meshBB344
bb68.fragment: ; preds = %meshBB344
bb69: ; preds = %bb68.fragment
%Opq.sa.calc627 = add i32 %Opq.sa.calc784, 163 ; <i32> [#uses=0]
- %169 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
+ %169 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
store i32 %168, i32* %169, align 4
- %170 = getelementptr %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=2]
+ %170 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=2]
br label %bb69.fragment
bb69.fragment: ; preds = %bb69
bb70: ; preds = %meshBB400
%Opq.sa.calc630 = add i32 %Opq.sa.calc824, -203 ; <i32> [#uses=2]
%174 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %175 = getelementptr %struct.ImageParameters* %174, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %175 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %174, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%176 = load %struct.Macroblock** %175, align 8 ; <%struct.Macroblock*> [#uses=1]
%177 = load i32* %.SV156.phi, align 8 ; <i32> [#uses=1]
br label %meshBB428
%Opq.sa.calc739 = xor i32 %Opq.sa.calc897, 213 ; <i32> [#uses=2]
%Opq.sa.calc738 = sub i32 %Opq.sa.calc739, 1 ; <i32> [#uses=2]
%178 = sext i32 %.SV280.phi to i64 ; <i64> [#uses=1]
- %179 = getelementptr %struct.Macroblock* %.SV278.phi, i64 %178, i32 20 ; <i32*> [#uses=1]
+ %179 = getelementptr %struct.Macroblock, %struct.Macroblock* %.SV278.phi, i64 %178, i32 20 ; <i32*> [#uses=1]
%180 = load i32* %179, align 4 ; <i32> [#uses=1]
%181 = icmp eq i32 %180, 0 ; <i1> [#uses=1]
br i1 %181, label %meshBB452, label %meshBB356
bb74: ; preds = %meshBB412
%Opq.sa.calc636 = xor i32 %Opq.sa.calc932, 233 ; <i32> [#uses=1]
%184 = add i32 %.SV158.phi1063, 1 ; <i32> [#uses=1]
- %185 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %185 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %bb74.fragment
bb74.fragment: ; preds = %bb74
%Opq.sa.calc1011 = sub i32 %Opq.sa.calc636, -19 ; <i32> [#uses=0]
store i32 %184, i32* %185, align 4
- %186 = getelementptr %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=1]
+ %186 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 27 ; <i32*> [#uses=1]
%187 = load i32* %186, align 8 ; <i32> [#uses=2]
store i32 %187, i32* %.SV52.phi1186, align 4
br label %bb96
bb77: ; preds = %bb76
%Opq.sa.calc643 = add i32 %Opq.sa.calc640, 2 ; <i32> [#uses=2]
%189 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %190 = getelementptr %struct.ImageParameters* %189, i64 0, i32 45 ; <i32*> [#uses=1]
+ %190 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %189, i64 0, i32 45 ; <i32*> [#uses=1]
%191 = load i32* %190, align 4 ; <i32> [#uses=1]
%192 = icmp eq i32 %191, 2 ; <i1> [#uses=1]
br i1 %192, label %meshBB416, label %bb79
bb78: ; preds = %meshBB416
%Opq.sa.calc647 = xor i32 %Opq.sa.calc971, 25 ; <i32> [#uses=2]
%Opq.sa.calc646 = sub i32 %Opq.sa.calc647, 29 ; <i32> [#uses=0]
- %193 = getelementptr %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
+ %193 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 23 ; <i32*> [#uses=1]
%194 = load i32* %193, align 8 ; <i32> [#uses=1]
%195 = add i32 %194, 1 ; <i32> [#uses=1]
br label %bb78.fragment
bb78.fragment: ; preds = %bb78
%Opq.sa.calc850 = sub i32 %Opq.sa.calc647, -93 ; <i32> [#uses=0]
- %196 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %196 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
store i32 %195, i32* %196, align 4
store i32 1, i32* %.SV52.phi1200, align 4
%197 = add i32 %yN, -1 ; <i32> [#uses=1]
bb81: ; preds = %meshBB456
%Opq.sa.calc655 = add i32 %Opq.sa.calc816, 56 ; <i32> [#uses=0]
- %198 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %198 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
store i32 %curr_mb_nr, i32* %198, align 4
store i32 1, i32* %.SV52.phi1136, align 4
br label %bb98
bb84: ; preds = %bb83
%Opq.sa.calc661 = xor i32 %Opq.sa.calc658, 22 ; <i32> [#uses=1]
- %199 = getelementptr %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
+ %199 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 20 ; <i32*> [#uses=1]
%200 = load i32* %199, align 4 ; <i32> [#uses=1]
br label %meshBB400
bb86: ; preds = %meshBB336
%Opq.sa.calc670 = sub i32 %Opq.sa.calc979, 35 ; <i32> [#uses=1]
- %204 = getelementptr %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=1]
+ %204 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=1]
%205 = load i32* %204, align 4 ; <i32> [#uses=1]
%206 = add i32 %205, 1 ; <i32> [#uses=1]
- %207 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %207 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %bb86.fragment
bb86.fragment: ; preds = %bb86
%Opq.sa.calc943 = xor i32 %Opq.sa.calc670, 123 ; <i32> [#uses=2]
store i32 %206, i32* %207, align 4
- %208 = getelementptr %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=1]
+ %208 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=1]
%209 = load i32* %208, align 4 ; <i32> [#uses=2]
store i32 %209, i32* %.SV52.phi1234, align 4
br label %meshBB424
bb89: ; preds = %bb84.fragment
%Opq.sa.calc677 = sub i32 %Opq.sa.calc802, -183 ; <i32> [#uses=1]
- %210 = getelementptr %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=2]
+ %210 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 24 ; <i32*> [#uses=2]
br label %bb89.fragment
bb89.fragment: ; preds = %bb89
bb90: ; preds = %bb89.fragment
%Opq.sa.calc680 = xor i32 %Opq.sa.calc962, 92 ; <i32> [#uses=1]
- %212 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
+ %212 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=2]
store i32 %211, i32* %212, align 4
- %213 = getelementptr %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=2]
+ %213 = getelementptr %struct.Macroblock, %struct.Macroblock* %2, i64 %3, i32 28 ; <i32*> [#uses=2]
br label %bb90.fragment
bb90.fragment: ; preds = %bb90
bb91: ; preds = %meshBB368
%Opq.sa.calc683 = sub i32 %Opq.sa.calc768, -7 ; <i32> [#uses=0]
%217 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %218 = getelementptr %struct.ImageParameters* %217, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
+ %218 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %217, i64 0, i32 39 ; <%struct.Macroblock**> [#uses=1]
%219 = load %struct.Macroblock** %218, align 8 ; <%struct.Macroblock*> [#uses=1]
%220 = load i32* %.SV170.phi, align 4 ; <i32> [#uses=1]
br label %bb91.fragment
bb91.fragment: ; preds = %bb91
%Opq.sa.calc853 = xor i32 %Opq.sa.calc768, 8 ; <i32> [#uses=1]
%221 = sext i32 %220 to i64 ; <i64> [#uses=1]
- %222 = getelementptr %struct.Macroblock* %219, i64 %221, i32 20 ; <i32*> [#uses=1]
+ %222 = getelementptr %struct.Macroblock, %struct.Macroblock* %219, i64 %221, i32 20 ; <i32*> [#uses=1]
%223 = load i32* %222, align 4 ; <i32> [#uses=1]
%224 = icmp eq i32 %223, 0 ; <i1> [#uses=1]
br i1 %224, label %bb92, label %bb96
bb95: ; preds = %meshBB408
%Opq.sa.calc689 = xor i32 %Opq.sa.calc912, 207 ; <i32> [#uses=3]
%227 = add i32 %.SV172.phi1074, 1 ; <i32> [#uses=1]
- %228 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %228 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
br label %meshBB384
bb95.fragment: ; preds = %meshBB384
%Opq.sa.calc841 = sub i32 %Opq.sa.calc901, 76 ; <i32> [#uses=0]
store i32 %.SV306.phi, i32* %.SV308.phi, align 4
- %229 = getelementptr %struct.Macroblock* %.load.SV.phi, i64 %.load20.SV.phi, i32 28 ; <i32*> [#uses=1]
+ %229 = getelementptr %struct.Macroblock, %struct.Macroblock* %.load.SV.phi, i64 %.load20.SV.phi, i32 28 ; <i32*> [#uses=1]
%230 = load i32* %229, align 4 ; <i32> [#uses=2]
store i32 %230, i32* %.load53.SV.phi, align 4
br label %bb96
%yM.0.reg2mem.0.SV.phi = phi i32 [ -1, %meshBB424 ], [ -1, %meshBB408 ], [ -1, %meshBB352 ], [ %yM.0.SV.phi, %bb96 ], [ -1, %bb21 ] ; <i32> [#uses=1]
%Opq.sa.calc694 = xor i32 0, 243 ; <i32> [#uses=1]
%232 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %233 = getelementptr %struct.ImageParameters* %232, i64 0, i32 45 ; <i32*> [#uses=1]
+ %233 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %232, i64 0, i32 45 ; <i32*> [#uses=1]
br label %bb97.fragment
bb97.fragment: ; preds = %bb97
%yM.0.reg2mem.1.SV.phi1068 = phi i32 [ %yN, %meshBB444 ], [ %yM.0.reg2mem.1.SV.phi1077, %meshBB404 ], [ %yM.0.reg2mem.0.SV.phi, %bb97.fragment ], [ %yN, %bb81 ], [ %197, %bb78.fragment ] ; <i32> [#uses=1]
%Opq.sa.calc695 = xor i32 0, 23 ; <i32> [#uses=2]
%236 = and i32 %.SV70.phi1091, %xN ; <i32> [#uses=1]
- %237 = getelementptr %struct.PixelPos* %pix, i64 0, i32 2 ; <i32*> [#uses=2]
+ %237 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 2 ; <i32*> [#uses=2]
store i32 %236, i32* %237, align 4
%238 = and i32 %yM.0.reg2mem.1.SV.phi1068, %.SV68.phi1092 ; <i32> [#uses=1]
- %239 = getelementptr %struct.PixelPos* %pix, i64 0, i32 3 ; <i32*> [#uses=2]
+ %239 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 3 ; <i32*> [#uses=2]
store i32 %238, i32* %239, align 4
- %240 = getelementptr %struct.PixelPos* %pix, i64 0, i32 5 ; <i32*> [#uses=1]
+ %240 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 5 ; <i32*> [#uses=1]
br label %meshBB376
bb98.fragment: ; preds = %meshBB376
%Opq.sa.calc1008 = sub i32 %Opq.link.mask911, 13 ; <i32> [#uses=1]
- %241 = getelementptr %struct.PixelPos* %pix, i64 0, i32 4 ; <i32*> [#uses=4]
- %242 = getelementptr %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
+ %241 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 4 ; <i32*> [#uses=4]
+ %242 = getelementptr %struct.PixelPos, %struct.PixelPos* %pix, i64 0, i32 1 ; <i32*> [#uses=1]
%243 = load i32* %242, align 4 ; <i32> [#uses=1]
%244 = load void (i32, i32*, i32*)** @get_mb_block_pos, align 8 ; <void (i32, i32*, i32*)*> [#uses=1]
tail call void %244(i32 %243, i32* %241, i32* %.SV317.phi) nounwind
%245 = load i32* %241, align 4 ; <i32> [#uses=1]
%246 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %247 = getelementptr %struct.ImageParameters* %246, i64 0, i32 119, i64 %.load39.SV.phi, i64 0 ; <i32*> [#uses=1]
+ %247 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %246, i64 0, i32 119, i64 %.load39.SV.phi, i64 0 ; <i32*> [#uses=1]
%248 = load i32* %247, align 4 ; <i32> [#uses=1]
%249 = mul i32 %248, %245 ; <i32> [#uses=2]
store i32 %249, i32* %241, align 4
%Opq.sa.calc776 = sub i32 %Opq.sa.calc777, 46 ; <i32> [#uses=0]
%250 = load i32* %.SV317.phi, align 4 ; <i32> [#uses=1]
%251 = load %struct.ImageParameters** @img, align 8 ; <%struct.ImageParameters*> [#uses=1]
- %252 = getelementptr %struct.ImageParameters* %251, i64 0, i32 119, i64 %.load39.SV.phi, i64 1 ; <i32*> [#uses=1]
+ %252 = getelementptr %struct.ImageParameters, %struct.ImageParameters* %251, i64 0, i32 119, i64 %.load39.SV.phi, i64 1 ; <i32*> [#uses=1]
%253 = load i32* %252, align 4 ; <i32> [#uses=1]
%254 = mul i32 %253, %250 ; <i32> [#uses=1]
%255 = load i32* %.SV313.phi, align 4 ; <i32> [#uses=1]
define void @cpuid(i32* %data) nounwind {
entry:
- %arrayidx = getelementptr i32* %data, i32 1 ; <i32*> [#uses=1]
- %arrayidx2 = getelementptr i32* %data, i32 2 ; <i32*> [#uses=1]
- %arrayidx4 = getelementptr i32* %data, i32 3 ; <i32*> [#uses=1]
- %arrayidx6 = getelementptr i32* %data, i32 4 ; <i32*> [#uses=1]
- %arrayidx8 = getelementptr i32* %data, i32 5 ; <i32*> [#uses=1]
+ %arrayidx = getelementptr i32, i32* %data, i32 1 ; <i32*> [#uses=1]
+ %arrayidx2 = getelementptr i32, i32* %data, i32 2 ; <i32*> [#uses=1]
+ %arrayidx4 = getelementptr i32, i32* %data, i32 3 ; <i32*> [#uses=1]
+ %arrayidx6 = getelementptr i32, i32* %data, i32 4 ; <i32*> [#uses=1]
+ %arrayidx8 = getelementptr i32, i32* %data, i32 5 ; <i32*> [#uses=1]
%tmp9 = load i32* %arrayidx8 ; <i32> [#uses=1]
- %arrayidx11 = getelementptr i32* %data, i32 6 ; <i32*> [#uses=1]
+ %arrayidx11 = getelementptr i32, i32* %data, i32 6 ; <i32*> [#uses=1]
%tmp12 = load i32* %arrayidx11 ; <i32> [#uses=1]
- %arrayidx14 = getelementptr i32* %data, i32 7 ; <i32*> [#uses=1]
+ %arrayidx14 = getelementptr i32, i32* %data, i32 7 ; <i32*> [#uses=1]
%tmp15 = load i32* %arrayidx14 ; <i32> [#uses=1]
- %arrayidx17 = getelementptr i32* %data, i32 8 ; <i32*> [#uses=1]
+ %arrayidx17 = getelementptr i32, i32* %data, i32 8 ; <i32*> [#uses=1]
%tmp18 = load i32* %arrayidx17 ; <i32> [#uses=1]
%0 = call i32 asm "cpuid", "={ax},=*{bx},=*{cx},=*{dx},{ax},{bx},{cx},{dx},~{dirflag},~{fpsr},~{flags}"(i32* %arrayidx2, i32* %arrayidx4, i32* %arrayidx6, i32 %tmp9, i32 %tmp12, i32 %tmp15, i32 %tmp18) nounwind ; <i32> [#uses=1]
store i32 %0, i32* %arrayidx
bb1: ; preds = %entry
%5 = load i8* null, align 4 ; <i8> [#uses=2]
%6 = zext i8 %5 to i32 ; <i32> [#uses=2]
- %7 = getelementptr %struct.pf_state_key* %b, i32 0, i32 3 ; <i8*> [#uses=1]
+ %7 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %b, i32 0, i32 3 ; <i8*> [#uses=1]
%8 = load i8* %7, align 4 ; <i8> [#uses=2]
%9 = zext i8 %8 to i32 ; <i32> [#uses=1]
%10 = sub i32 %6, %9 ; <i32> [#uses=1]
br i1 false, label %bb23, label %bb79
bb21: ; preds = %bb3
- %31 = getelementptr %struct.pf_state_key* %a, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
+ %31 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %a, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
%32 = load i32* %31, align 4 ; <i32> [#uses=2]
- %33 = getelementptr %struct.pf_state_key* %b, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
+ %33 = getelementptr %struct.pf_state_key, %struct.pf_state_key* %b, i32 0, i32 1, i32 1, i32 0 ; <i32*> [#uses=1]
%34 = load i32* %33, align 4 ; <i32> [#uses=2]
%35 = sub i32 %32, %34 ; <i32> [#uses=1]
%36 = icmp eq i32 %32, %34 ; <i1> [#uses=1]
br i1 %or.cond.i, label %bb5.i, label %bb3.i
bb5.i: ; preds = %bb3.i
- %4 = getelementptr i8* %0, i64 0 ; <i8*> [#uses=1]
+ %4 = getelementptr i8, i8* %0, i64 0 ; <i8*> [#uses=1]
store i8 0, i8* %4, align 1
- %5 = getelementptr i8* %0, i64 0 ; <i8*> [#uses=1]
+ %5 = getelementptr i8, i8* %0, i64 0 ; <i8*> [#uses=1]
store i8 0, i8* %5, align 1
%6 = add i32 %1, 2 ; <i32> [#uses=1]
%7 = zext i32 %6 to i64 ; <i64> [#uses=2]
- %8 = getelementptr i8* %0, i64 %7 ; <i8*> [#uses=1]
+ %8 = getelementptr i8, i8* %0, i64 %7 ; <i8*> [#uses=1]
%9 = lshr i32 %size_addr.0.i, 8 ; <i32> [#uses=1]
%10 = trunc i32 %9 to i8 ; <i8> [#uses=1]
store i8 %10, i8* %8, align 1
%.sum31.i = add i64 %7, 1 ; <i64> [#uses=1]
- %11 = getelementptr i8* %0, i64 %.sum31.i ; <i8*> [#uses=1]
+ %11 = getelementptr i8, i8* %0, i64 %.sum31.i ; <i8*> [#uses=1]
store i8 0, i8* %11, align 1
br label %bb11.outer.i
br label %bb2
bb2: ; preds = %bb, %bb12.i
- %14 = getelementptr %struct.MemPage* %pPage, i64 0, i32 1 ; <i8*> [#uses=1]
+ %14 = getelementptr %struct.MemPage, %struct.MemPage* %pPage, i64 0, i32 1 ; <i8*> [#uses=1]
store i8 1, i8* %14, align 1
ret void
}
%0 = load i32* null, align 4 ; <i32> [#uses=1]
%1 = lshr i32 %0, 8 ; <i32> [#uses=1]
%2 = and i32 %1, 255 ; <i32> [#uses=1]
- %3 = getelementptr %struct.array* null, i32 0, i32 3 ; <[256 x %struct.pair]*> [#uses=1]
- %4 = getelementptr [256 x %struct.pair]* %3, i32 0, i32 %2 ; <%struct.pair*> [#uses=1]
- %5 = getelementptr %struct.pair* %4, i32 0, i32 1 ; <i64*> [#uses=1]
+ %3 = getelementptr %struct.array, %struct.array* null, i32 0, i32 3 ; <[256 x %struct.pair]*> [#uses=1]
+ %4 = getelementptr [256 x %struct.pair], [256 x %struct.pair]* %3, i32 0, i32 %2 ; <%struct.pair*> [#uses=1]
+ %5 = getelementptr %struct.pair, %struct.pair* %4, i32 0, i32 1 ; <i64*> [#uses=1]
%6 = load i64* %5, align 4 ; <i64> [#uses=1]
%7 = xor i64 0, %6 ; <i64> [#uses=1]
%8 = xor i64 %7, 0 ; <i64> [#uses=1]
bb54.i: ; preds = %newFuncRoot
%1 = zext i32 %.reload51 to i64 ; <i64> [#uses=1]
- %2 = getelementptr i32* %0, i64 %1 ; <i32*> [#uses=1]
+ %2 = getelementptr i32, i32* %0, i64 %1 ; <i32*> [#uses=1]
%3 = load i32* %2, align 4 ; <i32> [#uses=2]
%4 = lshr i32 %3, 8 ; <i32> [#uses=1]
%5 = and i32 %3, 255 ; <i32> [#uses=1]
%6 = add i32 %5, 4 ; <i32> [#uses=1]
%7 = zext i32 %4 to i64 ; <i64> [#uses=1]
- %8 = getelementptr i32* %0, i64 %7 ; <i32*> [#uses=1]
+ %8 = getelementptr i32, i32* %0, i64 %7 ; <i32*> [#uses=1]
%9 = load i32* %8, align 4 ; <i32> [#uses=2]
%10 = and i32 %9, 255 ; <i32> [#uses=1]
%11 = lshr i32 %9, 8 ; <i32> [#uses=1]
while.body: ; preds = %for.end, %bb.nph
%indvar2787 = phi i64 [ 0, %bb.nph ], [ %indvar.next2788, %for.end ] ; <i64> [#uses=2]
%tmp2791 = mul i64 %indvar2787, 44 ; <i64> [#uses=0]
- %ctg22996 = getelementptr i8* %in, i64 0 ; <i8*> [#uses=1]
+ %ctg22996 = getelementptr i8, i8* %in, i64 0 ; <i8*> [#uses=1]
%conv = zext i32 undef to i64 ; <i64> [#uses=1]
%conv11 = zext i32 undef to i64 ; <i64> [#uses=1]
%tmp18 = load i32* undef ; <i32> [#uses=1]
br i1 undef, label %while.end, label %bb.nph
bb.nph: ; preds = %entry
- %arrayidx5 = getelementptr i32* %arr, i64 1 ; <i32*> [#uses=1]
- %arrayidx9 = getelementptr i32* %arr, i64 2 ; <i32*> [#uses=2]
- %arrayidx13 = getelementptr i32* %arr, i64 3 ; <i32*> [#uses=2]
- %arrayidx25 = getelementptr i32* %arr, i64 6 ; <i32*> [#uses=1]
- %arrayidx29 = getelementptr i32* %arr, i64 7 ; <i32*> [#uses=1]
+ %arrayidx5 = getelementptr i32, i32* %arr, i64 1 ; <i32*> [#uses=1]
+ %arrayidx9 = getelementptr i32, i32* %arr, i64 2 ; <i32*> [#uses=2]
+ %arrayidx13 = getelementptr i32, i32* %arr, i64 3 ; <i32*> [#uses=2]
+ %arrayidx25 = getelementptr i32, i32* %arr, i64 6 ; <i32*> [#uses=1]
+ %arrayidx29 = getelementptr i32, i32* %arr, i64 7 ; <i32*> [#uses=1]
br label %while.body
while.body: ; preds = %for.end, %bb.nph
br i1 %tmp3240, label %bb974, label %bb269
bb269:
- %tmp3424 = getelementptr %struct.rec* %x, i32 0, i32 0, i32 0, i32 0, i32 1 ; <%struct.rec**> [#uses=0]
+ %tmp3424 = getelementptr %struct.rec, %struct.rec* %x, i32 0, i32 0, i32 0, i32 0, i32 1 ; <%struct.rec**> [#uses=0]
unreachable
bb974:
br i1 undef, label %return, label %if.end
if.end: ; preds = %entry
- %tmp35 = getelementptr %struct.re_pattern_buffer* %bufp, i64 0, i32 3 ; <i64*> [#uses=1]
+ %tmp35 = getelementptr %struct.re_pattern_buffer, %struct.re_pattern_buffer* %bufp, i64 0, i32 3 ; <i64*> [#uses=1]
store i64 %syntax, i64* %tmp35
store i32 undef, i32* undef
br i1 undef, label %if.then66, label %if.end102
%startoffset.0.ph = phi i32 [ 0, %cond.end834 ], [ 0, %land.lhs.true838 ], [ %conv851, %if.then842 ] ; <i32> [#uses=2]
%laststart.7.ph = phi i8* [ %laststart.2, %cond.end834 ], [ %laststart.2, %land.lhs.true838 ], [ %laststart.2, %if.then842 ] ; <i8*> [#uses=3]
%b.4.ph = phi i8* [ %b.1, %cond.end834 ], [ %b.1, %land.lhs.true838 ], [ %b.1, %if.then842 ] ; <i8*> [#uses=3]
- %ctg29688 = getelementptr i8* %b.4.ph, i64 6 ; <i8*> [#uses=1]
+ %ctg29688 = getelementptr i8, i8* %b.4.ph, i64 6 ; <i8*> [#uses=1]
br label %while.cond979
while.cond979: ; preds = %if.end1006, %while.cond979.preheader
if.then1091: ; preds = %while.end1088
store i8 undef, i8* undef
%idx.ext1132.pre = zext i32 %startoffset.0.ph to i64 ; <i64> [#uses=1]
- %add.ptr1133.pre = getelementptr i8* %laststart.7.ph, i64 %idx.ext1132.pre ; <i8*> [#uses=1]
+ %add.ptr1133.pre = getelementptr i8, i8* %laststart.7.ph, i64 %idx.ext1132.pre ; <i8*> [#uses=1]
%sub.ptr.lhs.cast1135.pre = ptrtoint i8* %add.ptr1133.pre to i64 ; <i64> [#uses=1]
br label %if.end1126
if.else1101: ; preds = %while.end1088
%cond1109 = select i1 undef, i32 18, i32 14 ; <i32> [#uses=1]
%idx.ext1112 = zext i32 %startoffset.0.ph to i64 ; <i64> [#uses=1]
- %add.ptr1113 = getelementptr i8* %laststart.7.ph, i64 %idx.ext1112 ; <i8*> [#uses=2]
+ %add.ptr1113 = getelementptr i8, i8* %laststart.7.ph, i64 %idx.ext1112 ; <i8*> [#uses=2]
%sub.ptr.rhs.cast1121 = ptrtoint i8* %add.ptr1113 to i64 ; <i64> [#uses=1]
call fastcc void @insert_op1(i32 %cond1109, i8* %add.ptr1113, i32 undef, i8* %b.4.ph) ssp
br label %if.end1126
if.end1126: ; preds = %if.else1101, %if.then1091
%sub.ptr.lhs.cast1135.pre-phi = phi i64 [ %sub.ptr.rhs.cast1121, %if.else1101 ], [ %sub.ptr.lhs.cast1135.pre, %if.then1091 ] ; <i64> [#uses=1]
- %add.ptr1128 = getelementptr i8* %b.4.ph, i64 3 ; <i8*> [#uses=1]
+ %add.ptr1128 = getelementptr i8, i8* %b.4.ph, i64 3 ; <i8*> [#uses=1]
%sub.ptr.rhs.cast1136 = ptrtoint i8* %add.ptr1128 to i64 ; <i64> [#uses=1]
%sub.ptr.sub1137 = sub i64 %sub.ptr.lhs.cast1135.pre-phi, %sub.ptr.rhs.cast1136 ; <i64> [#uses=1]
%sub.ptr.sub11378527 = trunc i64 %sub.ptr.sub1137 to i32 ; <i32> [#uses=1]
if.end78: ; preds = %if.then28, %entry
%level.1 = phi i32 [ %tmp, %if.then28 ], [ 0, %entry ] ; <i32> [#uses=1]
- %add.ptr1 = getelementptr [64 x i16]* null, i32 0, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr2 = getelementptr [64 x i16]* null, i32 1, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr3 = getelementptr [64 x i16]* null, i32 2, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr4 = getelementptr [64 x i16]* null, i32 3, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr5 = getelementptr [64 x i16]* null, i32 4, i32 %qscale ; <i16*> [#uses=1]
- %add.ptr6 = getelementptr [64 x i16]* null, i32 5, i32 %qscale ; <i16*> [#uses=1]
+ %add.ptr1 = getelementptr [64 x i16], [64 x i16]* null, i32 0, i32 %qscale ; <i16*> [#uses=1]
+ %add.ptr2 = getelementptr [64 x i16], [64 x i16]* null, i32 1, i32 %qscale ; <i16*> [#uses=1]
+ %add.ptr3 = getelementptr [64 x i16], [64 x i16]* null, i32 2, i32 %qscale ; <i16*> [#uses=1]
+ %add.ptr4 = getelementptr [64 x i16], [64 x i16]* null, i32 3, i32 %qscale ; <i16*> [#uses=1]
+ %add.ptr5 = getelementptr [64 x i16], [64 x i16]* null, i32 4, i32 %qscale ; <i16*> [#uses=1]
+ %add.ptr6 = getelementptr [64 x i16], [64 x i16]* null, i32 5, i32 %qscale ; <i16*> [#uses=1]
%tmp1 = call i32 asm sideeffect "nop", "={ax},r,r,r,r,r,0,~{dirflag},~{fpsr},~{flags}"(i16* %add.ptr6, i16* %add.ptr5, i16* %add.ptr4, i16* %add.ptr3, i16* %add.ptr2, i16* %add.ptr1) nounwind ; <i32> [#uses=0]
ret i32 %level.1
}
primitiveTextureFetchBlock: ; preds = %indexCheckBlock
%pointerArithmeticTmp = bitcast %0* %shaderExecutionStatePtr to i8* ; <i8*> [#uses=1]
- %pointerArithmeticTmp1 = getelementptr i8* %pointerArithmeticTmp, i64 1808 ; <i8*> [#uses=1]
+ %pointerArithmeticTmp1 = getelementptr i8, i8* %pointerArithmeticTmp, i64 1808 ; <i8*> [#uses=1]
%pointerArithmeticTmp2 = bitcast i8* %pointerArithmeticTmp1 to %1** ; <%1**> [#uses=1]
%primitivePtr = load %1** %pointerArithmeticTmp2 ; <%1*> [#uses=1]
%pointerArithmeticTmp3 = bitcast %1* %primitivePtr to i8* ; <i8*> [#uses=1]
- %pointerArithmeticTmp4 = getelementptr i8* %pointerArithmeticTmp3, i64 19408 ; <i8*> [#uses=1]
+ %pointerArithmeticTmp4 = getelementptr i8, i8* %pointerArithmeticTmp3, i64 19408 ; <i8*> [#uses=1]
%pointerArithmeticTmp5 = bitcast i8* %pointerArithmeticTmp4 to %1** ; <%1**> [#uses=1]
- %primitiveTexturePtr = getelementptr %1** %pointerArithmeticTmp5, i32 %index ; <%1**> [#uses=1]
+ %primitiveTexturePtr = getelementptr %1*, %1** %pointerArithmeticTmp5, i32 %index ; <%1**> [#uses=1]
%primitiveTexturePtr6 = load %1** %primitiveTexturePtr ; <%1*> [#uses=2]
br label %textureCheckBlock
rhoCalculateBlock: ; preds = %textureCheckBlock
%pointerArithmeticTmp7 = bitcast %1* %primitiveTexturePtr6 to i8* ; <i8*> [#uses=1]
- %pointerArithmeticTmp8 = getelementptr i8* %pointerArithmeticTmp7, i64 640 ; <i8*> [#uses=1]
+ %pointerArithmeticTmp8 = getelementptr i8, i8* %pointerArithmeticTmp7, i64 640 ; <i8*> [#uses=1]
%pointerArithmeticTmp9 = bitcast i8* %pointerArithmeticTmp8 to <4 x float>* ; <<4 x float>*> [#uses=1]
%dimensionsPtr = load <4 x float>* %pointerArithmeticTmp9, align 1 ; <<4 x float>> [#uses=2]
%texDiffDX = fsub <4 x float> %texCoordDX, %texCoord ; <<4 x float>> [#uses=1]
to label %invcont1 unwind label %lpad ; <i8> [#uses=0]
invcont1: ; preds = %invcont
- %6 = getelementptr inbounds %struct.ComplexType* %2, i64 0, i32 0 ; <i32*> [#uses=1]
+ %6 = getelementptr inbounds %struct.ComplexType, %struct.ComplexType* %2, i64 0, i32 0 ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=1]
invoke void @booleanAndDataReply(i32 %7, i32 undef, i32 %requestID, i32 undef, i64 undef, i32 undef)
to label %invcont2 unwind label %lpad
br i1 %tmp2, label %UnifiedReturnBlock, label %cond_next
cond_next: ; preds = %entry
- %tmp6 = getelementptr %struct.rtx_def* %x, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp6 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 0 ; <i16*> [#uses=1]
%tmp7 = load i16* %tmp6 ; <i16> [#uses=2]
%tmp78 = zext i16 %tmp7 to i32 ; <i32> [#uses=2]
%tmp10 = icmp eq i16 %tmp7, 54 ; <i1> [#uses=1]
br i1 %tmp10, label %cond_true13, label %cond_next32
cond_true13: ; preds = %cond_next
- %tmp15 = getelementptr %struct.rtx_def* %x, i32 0, i32 3 ; <[1 x %struct..0anon]*> [#uses=1]
+ %tmp15 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3 ; <[1 x %struct..0anon]*> [#uses=1]
%tmp1718 = bitcast [1 x %struct..0anon]* %tmp15 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
%tmp19 = load %struct.rtx_def** %tmp1718 ; <%struct.rtx_def*> [#uses=1]
- %tmp20 = getelementptr %struct.rtx_def* %tmp19, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp20 = getelementptr %struct.rtx_def, %struct.rtx_def* %tmp19, i32 0, i32 0 ; <i16*> [#uses=1]
%tmp21 = load i16* %tmp20 ; <i16> [#uses=1]
%tmp22 = icmp eq i16 %tmp21, 57 ; <i1> [#uses=1]
br i1 %tmp22, label %cond_true25, label %cond_next32
ret %struct.rtx_def* %tmp29
cond_next32: ; preds = %cond_true13, %cond_next
- %tmp34 = getelementptr [116 x i8*]* @rtx_format, i32 0, i32 %tmp78 ; <i8**> [#uses=1]
+ %tmp34 = getelementptr [116 x i8*], [116 x i8*]* @rtx_format, i32 0, i32 %tmp78 ; <i8**> [#uses=1]
%tmp35 = load i8** %tmp34, align 4 ; <i8*> [#uses=1]
- %tmp37 = getelementptr [117 x i32]* @rtx_length, i32 0, i32 %tmp78 ; <i32*> [#uses=1]
+ %tmp37 = getelementptr [117 x i32], [117 x i32]* @rtx_length, i32 0, i32 %tmp78 ; <i32*> [#uses=1]
%tmp38 = load i32* %tmp37, align 4 ; <i32> [#uses=1]
%i.011 = add i32 %tmp38, -1 ; <i32> [#uses=2]
%tmp12513 = icmp sgt i32 %i.011, -1 ; <i1> [#uses=1]
bb: ; preds = %bb123, %cond_next32
%indvar = phi i32 [ %indvar.next26, %bb123 ], [ 0, %cond_next32 ] ; <i32> [#uses=2]
%i.01.0 = sub i32 %i.011, %indvar ; <i32> [#uses=5]
- %tmp42 = getelementptr i8* %tmp35, i32 %i.01.0 ; <i8*> [#uses=2]
+ %tmp42 = getelementptr i8, i8* %tmp35, i32 %i.01.0 ; <i8*> [#uses=2]
%tmp43 = load i8* %tmp42 ; <i8> [#uses=1]
switch i8 %tmp43, label %bb123 [
i8 101, label %cond_true47
]
cond_true47: ; preds = %bb
- %tmp52 = getelementptr %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
+ %tmp52 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
%tmp5354 = bitcast %struct..0anon* %tmp52 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
%tmp55 = load %struct.rtx_def** %tmp5354 ; <%struct.rtx_def*> [#uses=1]
%tmp58 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp55, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
- %tmp62 = getelementptr %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0, i32 0 ; <i32*> [#uses=1]
+ %tmp62 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0, i32 0 ; <i32*> [#uses=1]
%tmp58.c = ptrtoint %struct.rtx_def* %tmp58 to i32 ; <i32> [#uses=1]
store i32 %tmp58.c, i32* %tmp62
%tmp6816 = load i8* %tmp42 ; <i8> [#uses=1]
br i1 %tmp6917, label %bb105.preheader, label %bb123
bb105.preheader: ; preds = %cond_true47, %bb
- %tmp11020 = getelementptr %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
+ %tmp11020 = getelementptr %struct.rtx_def, %struct.rtx_def* %x, i32 0, i32 3, i32 %i.01.0 ; <%struct..0anon*> [#uses=1]
%tmp11111221 = bitcast %struct..0anon* %tmp11020 to %struct.rtvec_def** ; <%struct.rtvec_def**> [#uses=3]
%tmp11322 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
- %tmp11423 = getelementptr %struct.rtvec_def* %tmp11322, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp11423 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp11322, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp11524 = load i32* %tmp11423 ; <i32> [#uses=1]
%tmp11625 = icmp eq i32 %tmp11524, 0 ; <i1> [#uses=1]
br i1 %tmp11625, label %bb123, label %bb73
bb73: ; preds = %bb73, %bb105.preheader
%j.019 = phi i32 [ %tmp104, %bb73 ], [ 0, %bb105.preheader ] ; <i32> [#uses=3]
%tmp81 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=2]
- %tmp92 = getelementptr %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019 ; <%struct..0anon*> [#uses=1]
+ %tmp92 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019 ; <%struct..0anon*> [#uses=1]
%tmp9394 = bitcast %struct..0anon* %tmp92 to %struct.rtx_def** ; <%struct.rtx_def**> [#uses=1]
%tmp95 = load %struct.rtx_def** %tmp9394 ; <%struct.rtx_def*> [#uses=1]
%tmp98 = tail call %struct.rtx_def* @walk_fixup_memory_subreg( %struct.rtx_def* %tmp95, %struct.rtx_def* %insn ) nounwind ; <%struct.rtx_def*> [#uses=1]
- %tmp101 = getelementptr %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019, i32 0 ; <i32*> [#uses=1]
+ %tmp101 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp81, i32 0, i32 1, i32 %j.019, i32 0 ; <i32*> [#uses=1]
%tmp98.c = ptrtoint %struct.rtx_def* %tmp98 to i32 ; <i32> [#uses=1]
store i32 %tmp98.c, i32* %tmp101
%tmp104 = add i32 %j.019, 1 ; <i32> [#uses=2]
%tmp113 = load %struct.rtvec_def** %tmp11111221 ; <%struct.rtvec_def*> [#uses=1]
- %tmp114 = getelementptr %struct.rtvec_def* %tmp113, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp114 = getelementptr %struct.rtvec_def, %struct.rtvec_def* %tmp113, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp115 = load i32* %tmp114 ; <i32> [#uses=1]
%tmp116 = icmp ult i32 %tmp104, %tmp115 ; <i1> [#uses=1]
br i1 %tmp116, label %bb73, label %bb123
%i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%sum.04 = phi i32 [ 0, %entry ], [ %10, %bb ] ; <i32> [#uses=1]
%1 = mul i32 %i.03, %As ; <i32> [#uses=1]
- %2 = getelementptr i16* %A, i32 %1 ; <i16*> [#uses=1]
+ %2 = getelementptr i16, i16* %A, i32 %1 ; <i16*> [#uses=1]
%3 = load i16* %2, align 2 ; <i16> [#uses=1]
%4 = sext i16 %3 to i32 ; <i32> [#uses=1]
%5 = mul i32 %i.03, %Bs ; <i32> [#uses=1]
- %6 = getelementptr i16* %B, i32 %5 ; <i16*> [#uses=1]
+ %6 = getelementptr i16, i16* %B, i32 %5 ; <i16*> [#uses=1]
%7 = load i16* %6, align 2 ; <i16> [#uses=1]
%8 = sext i16 %7 to i32 ; <i32> [#uses=1]
%9 = mul i32 %8, %4 ; <i32> [#uses=1]
%8 = lshr i64 %5, 8 ; <i64> [#uses=1]
%9 = trunc i64 %8 to i8 ; <i8> [#uses=1]
%.sum4 = add i64 %4, 6 ; <i64> [#uses=1]
- %10 = getelementptr inbounds i8* %0, i64 %.sum4 ; <i8*> [#uses=1]
+ %10 = getelementptr inbounds i8, i8* %0, i64 %.sum4 ; <i8*> [#uses=1]
store i8 %9, i8* %10, align 1
- %11 = getelementptr inbounds %struct.Rtree* %pRtree, i64 0, i32 3 ; <i32*> [#uses=1]
+ %11 = getelementptr inbounds %struct.Rtree, %struct.Rtree* %pRtree, i64 0, i32 3 ; <i32*> [#uses=1]
br i1 undef, label %bb.nph, label %bb2
bb.nph: ; preds = %entry
bb: ; preds = %bb, %bb.nph
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i64> [#uses=3]
- %scevgep = getelementptr %struct.RtreeCell* %pCell, i64 0, i32 1, i64 %indvar ; <%union.RtreeCoord*> [#uses=1]
+ %scevgep = getelementptr %struct.RtreeCell, %struct.RtreeCell* %pCell, i64 0, i32 1, i64 %indvar ; <%union.RtreeCoord*> [#uses=1]
%scevgep12 = bitcast %union.RtreeCoord* %scevgep to i32* ; <i32*> [#uses=1]
%tmp = shl i64 %indvar, 2 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp, %tmp25 ; <i64> [#uses=1]
- %scevgep27 = getelementptr i8* %0, i64 %tmp26 ; <i8*> [#uses=1]
+ %scevgep27 = getelementptr i8, i8* %0, i64 %tmp26 ; <i8*> [#uses=1]
%12 = load i32* %scevgep12, align 4 ; <i32> [#uses=1]
%13 = lshr i32 %12, 24 ; <i32> [#uses=1]
%14 = trunc i32 %13 to i8 ; <i8> [#uses=1]
br i1 %17, label %bb, label %bb2
bb2: ; preds = %bb, %entry
- %18 = getelementptr inbounds %struct.RtreeNode* %pNode, i64 0, i32 3 ; <i32*> [#uses=1]
+ %18 = getelementptr inbounds %struct.RtreeNode, %struct.RtreeNode* %pNode, i64 0, i32 3 ; <i32*> [#uses=1]
store i32 1, i32* %18, align 4
ret void
}
define fastcc void @insert_picture_in_dpb(%struct.FrameStore* nocapture %fs, %struct.StorablePicture* %p) nounwind ssp {
entry:
- %0 = getelementptr inbounds %struct.FrameStore* %fs, i64 0, i32 12 ; <%struct.StorablePicture**> [#uses=1]
+ %0 = getelementptr inbounds %struct.FrameStore, %struct.FrameStore* %fs, i64 0, i32 12 ; <%struct.StorablePicture**> [#uses=1]
%1 = icmp eq i32 undef, 0 ; <i1> [#uses=1]
br i1 %1, label %bb.i, label %bb36.i
%23 = phi %struct.StorablePicture* [ %40, %bb66.i ], [ %12, %bb67.preheader.i ] ; <%struct.StorablePicture*> [#uses=1]
%indvar248.i = phi i64 [ %indvar.next249.i, %bb66.i ], [ 0, %bb67.preheader.i ] ; <i64> [#uses=3]
%storemerge52.i = trunc i64 %indvar248.i to i32 ; <i32> [#uses=1]
- %24 = getelementptr inbounds %struct.StorablePicture* %23, i64 0, i32 19 ; <i32*> [#uses=0]
+ %24 = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %23, i64 0, i32 19 ; <i32*> [#uses=0]
br i1 undef, label %bb.nph51.i, label %bb66.i
bb.nph51.i: ; preds = %bb38.i
br i1 undef, label %bb45.i, label %bb47.i
bb45.i: ; preds = %bb41.i
- %33 = getelementptr inbounds %struct.StorablePicture* %26, i64 0, i32 5, i64 undef, i64 %32, i64 undef ; <i64*> [#uses=1]
+ %33 = getelementptr inbounds %struct.StorablePicture, %struct.StorablePicture* %26, i64 0, i32 5, i64 undef, i64 %32, i64 undef ; <i64*> [#uses=1]
%34 = load i64* %33, align 8 ; <i64> [#uses=1]
br label %bb47.i
bb47.i: ; preds = %bb45.i, %bb41.i
%storemerge11.i = phi i64 [ %34, %bb45.i ], [ 0, %bb41.i ] ; <i64> [#uses=0]
- %scevgep246.i = getelementptr i64* undef, i64 undef ; <i64*> [#uses=0]
+ %scevgep246.i = getelementptr i64, i64* undef, i64 undef ; <i64*> [#uses=0]
br label %bb64.i
bb57.i: ; preds = %bb40.i, %bb39.i
bb60.i: ; preds = %bb58.i, %bb57.i
%35 = load i64*** undef, align 8 ; <i64**> [#uses=1]
- %scevgep256.i = getelementptr i64** %35, i64 %indvar248.i ; <i64**> [#uses=1]
+ %scevgep256.i = getelementptr i64*, i64** %35, i64 %indvar248.i ; <i64**> [#uses=1]
%36 = load i64** %scevgep256.i, align 8 ; <i64*> [#uses=1]
- %scevgep243.i = getelementptr i64* %36, i64 undef ; <i64*> [#uses=1]
+ %scevgep243.i = getelementptr i64, i64* %36, i64 undef ; <i64*> [#uses=1]
store i64 -1, i64* %scevgep243.i, align 8
br label %bb64.i
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i64> [#uses=2]
%tmp9 = shl i64 %indvar, 2 ; <i64> [#uses=4]
%tmp1016 = or i64 %tmp9, 1 ; <i64> [#uses=1]
- %scevgep = getelementptr float* %x, i64 %tmp1016 ; <float*> [#uses=1]
+ %scevgep = getelementptr float, float* %x, i64 %tmp1016 ; <float*> [#uses=1]
%tmp1117 = or i64 %tmp9, 2 ; <i64> [#uses=1]
- %scevgep12 = getelementptr float* %x, i64 %tmp1117 ; <float*> [#uses=1]
+ %scevgep12 = getelementptr float, float* %x, i64 %tmp1117 ; <float*> [#uses=1]
%tmp1318 = or i64 %tmp9, 3 ; <i64> [#uses=1]
- %scevgep14 = getelementptr float* %x, i64 %tmp1318 ; <float*> [#uses=1]
- %x_addr.03 = getelementptr float* %x, i64 %tmp9 ; <float*> [#uses=1]
+ %scevgep14 = getelementptr float, float* %x, i64 %tmp1318 ; <float*> [#uses=1]
+ %x_addr.03 = getelementptr float, float* %x, i64 %tmp9 ; <float*> [#uses=1]
%1 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 0), align 16 ; <float> [#uses=1]
store float %1, float* %x_addr.03, align 4
%2 = load float* getelementptr inbounds ([4 x float]* @g, i64 0, i64 1), align 4 ; <float> [#uses=1]
; CHECK: movups L_str+12(%rip), %xmm0
; CHECK: movups L_str(%rip), %xmm1
%tmp0 = alloca [60 x i8], align 1
- %tmp1 = getelementptr inbounds [60 x i8]* %tmp0, i64 0, i64 0
+ %tmp1 = getelementptr inbounds [60 x i8], [60 x i8]* %tmp0, i64 0, i64 0
br label %bb1
bb1:
ret i8 1
bb27.outer108: ; preds = %bb13, %bb27.outer
- %I.2.ph109 = getelementptr i8* %I.2.ph, i64 undef ; <i8*> [#uses=1]
- %scevgep = getelementptr i8* %I.2.ph, i64 undef ; <i8*> [#uses=0]
+ %I.2.ph109 = getelementptr i8, i8* %I.2.ph, i64 undef ; <i8*> [#uses=1]
+ %scevgep = getelementptr i8, i8* %I.2.ph, i64 undef ; <i8*> [#uses=0]
br label %bb8
bb56: ; preds = %bb10, %bb8, %bb8, %entry
%r2 = load %test** %p, align 8 ; <%test*> [#uses=1]
%r3 = ptrtoint %test* %r2 to i64 ; <i64> [#uses=1]
%r4 = inttoptr i64 %r3 to %link** ; <%link**> [#uses=1]
- %r5 = getelementptr %link** %r4, i64 1 ; <%link**> [#uses=1]
+ %r5 = getelementptr %link*, %link** %r4, i64 1 ; <%link**> [#uses=1]
store %link* %r1, %link** %r5, align 8
br label %"@CFE_debug_label_3"
%r6 = load %test** %p, align 8 ; <%test*> [#uses=1]
%r7 = ptrtoint %test* %r6 to i64 ; <i64> [#uses=1]
%r8 = inttoptr i64 %r7 to %link* ; <%link*> [#uses=1]
- %r9 = getelementptr %link* %r8, i64 1 ; <%link*> [#uses=1]
+ %r9 = getelementptr %link, %link* %r8, i64 1 ; <%link*> [#uses=1]
store %link* %r9, %link** bitcast ([1 x i64]* @link_ptr to %link**), align 8
br label %"@CFE_debug_label_4"
entry:
%call = tail call i8* @_Z15uprv_malloc_4_2v()
%0 = bitcast i8* %call to double*
- %tmp = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 3
+ %tmp = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 3
%tmp2 = load i16* %tmp
- %tmp525 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 0
+ %tmp525 = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 0
%tmp626 = load i16* %tmp525
%cmp27 = icmp slt i16 %tmp2, %tmp626
br i1 %cmp27, label %bb.nph, label %for.end
br i1 %cmp, label %for.body, label %for.end
bb.nph:
- %tmp10 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 2
- %tmp17 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 1
- %tmp5 = getelementptr inbounds %class.OlsonTimeZone* %this, i32 0, i32 0
+ %tmp10 = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 2
+ %tmp17 = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 1
+ %tmp5 = getelementptr inbounds %class.OlsonTimeZone, %class.OlsonTimeZone* %this, i32 0, i32 0
%tmp29 = sext i16 %tmp2 to i32
%tmp31 = add i16 %tmp2, 1
%tmp32 = zext i16 %tmp31 to i32
%tmp33 = add i32 %indvar, %tmp32
%inc = trunc i32 %tmp33 to i16
%tmp11 = load i8** %tmp10
- %arrayidx = getelementptr i8* %tmp11, i32 %tmp30
+ %arrayidx = getelementptr i8, i8* %tmp11, i32 %tmp30
%tmp12 = load i8* %arrayidx
br label %for.cond
%r1295 = extractelement <4 x i32> %r1258, i32 3 ; <i32> [#uses=1]
%r1296 = sext i32 %r1295 to i64 ; <i64> [#uses=1]
%r1297 = add i64 %r1296, -1 ; <i64> [#uses=1]
- %r1298183 = getelementptr [0 x i32]* %ismbs, i64 0, i64 %r1297 ; <i32*> [#uses=1]
+ %r1298183 = getelementptr [0 x i32], [0 x i32]* %ismbs, i64 0, i64 %r1297 ; <i32*> [#uses=1]
%r1298184 = load i32* %r1298183, align 4 ; <i32> [#uses=1]
%r1301 = extractelement <4 x i32> %r1037, i32 3 ; <i32> [#uses=1]
%r1302 = mul i32 %r1298184, %r1301 ; <i32> [#uses=1]
%0 = alloca double ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.declare(metadata %struct.Rect* %my_r0, metadata !0, metadata !{!"0x102"}), !dbg !15
- %1 = getelementptr inbounds %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
- %2 = getelementptr inbounds %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
+ %1 = getelementptr inbounds %struct.Rect, %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
+ %2 = getelementptr inbounds %struct.Pt, %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
%3 = load double* %2, align 8, !dbg !16 ; <double> [#uses=1]
store double %3, double* %0, align 8, !dbg !16
%4 = load double* %0, align 8, !dbg !16 ; <double> [#uses=1]
define void @t(i32 %cNum, i64 %max) nounwind optsize ssp noimplicitfloat {
entry:
%0 = load %struct.b_t** null, align 4 ; <%struct.b_t*> [#uses=1]
- %1 = getelementptr inbounds %struct.b_t* %0, i32 %cNum, i32 5 ; <i64*> [#uses=1]
+ %1 = getelementptr inbounds %struct.b_t, %struct.b_t* %0, i32 %cNum, i32 5 ; <i64*> [#uses=1]
%2 = load i64* %1, align 4 ; <i64> [#uses=1]
%3 = icmp ult i64 %2, %max ; <i1> [#uses=1]
- %4 = getelementptr inbounds %struct.a_t* null, i32 0, i32 7 ; <i64**> [#uses=1]
+ %4 = getelementptr inbounds %struct.a_t, %struct.a_t* null, i32 0, i32 7 ; <i64**> [#uses=1]
%5 = load i64** %4, align 4 ; <i64*> [#uses=0]
%6 = load i64* null, align 4 ; <i64> [#uses=1]
br i1 %3, label %bb2, label %bb
define fastcc void @l186(%tupl* %r1) noreturn nounwind {
entry:
- %ptr1 = getelementptr %tupl* %r1, i32 0, i32 0
+ %ptr1 = getelementptr %tupl, %tupl* %r1, i32 0, i32 0
%r2 = load i32* %ptr1
- %ptr3 = getelementptr %tupl* %r1, i32 0, i32 1
+ %ptr3 = getelementptr %tupl, %tupl* %r1, i32 0, i32 1
%r3 = load i32* %ptr3
- %ptr5 = getelementptr %tupl* %r1, i32 0, i32 2
+ %ptr5 = getelementptr %tupl, %tupl* %r1, i32 0, i32 2
%r4 = load i32* %ptr5
- %ptr7 = getelementptr %tupl* %r1, i32 0, i32 3
+ %ptr7 = getelementptr %tupl, %tupl* %r1, i32 0, i32 3
%r5 = load i32* %ptr7
- %ptr9 = getelementptr %tupl* %r1, i32 0, i32 4
+ %ptr9 = getelementptr %tupl, %tupl* %r1, i32 0, i32 4
%r6 = load i32* %ptr9
- %ptr11 = getelementptr %tupl* %r1, i32 0, i32 5
+ %ptr11 = getelementptr %tupl, %tupl* %r1, i32 0, i32 5
%r7 = load i32* %ptr11
- %ptr13 = getelementptr %tupl* %r1, i32 0, i32 6
+ %ptr13 = getelementptr %tupl, %tupl* %r1, i32 0, i32 6
%r8 = load i32* %ptr13
- %ptr15 = getelementptr %tupl* %r1, i32 0, i32 7
+ %ptr15 = getelementptr %tupl, %tupl* %r1, i32 0, i32 7
%r9 = load i32* %ptr15
- %ptr17 = getelementptr %tupl* %r1, i32 0, i32 8
+ %ptr17 = getelementptr %tupl, %tupl* %r1, i32 0, i32 8
%r10 = load i32* %ptr17
%cond = icmp eq i32 %r10, 3
br i1 %cond, label %true, label %false
bb1.i: ; preds = %bb1.i, %bb2.outer.i
%indvar5.i = phi i64 [ %tmp, %bb1.i ], [ 0, %bb2.outer.i ] ; <i64> [#uses=1]
%tmp = add i64 %indvar5.i, 1 ; <i64> [#uses=2]
- %scevgep.i = getelementptr double* undef, i64 %tmp ; <double*> [#uses=0]
+ %scevgep.i = getelementptr double, double* undef, i64 %tmp ; <double*> [#uses=0]
br i1 undef, label %bb1.i, label %bb5.preheader.i
bb5.preheader.i: ; preds = %bb1.i, %bb2.outer.i
ret i32* null
bb.nph380: ; preds = %entry
- %scevgep403 = getelementptr %struct.PPOperation* %operation, i32 0, i32 1, i32 0, i32 2 ; <i32*> [#uses=1]
+ %scevgep403 = getelementptr %struct.PPOperation, %struct.PPOperation* %operation, i32 0, i32 1, i32 0, i32 2 ; <i32*> [#uses=1]
%3 = ashr i32 %2, 1 ; <i32> [#uses=1]
%tmp405 = and i32 %3, -2 ; <i32> [#uses=1]
- %scevgep408 = getelementptr %struct.PPOperation* %operation, i32 0, i32 1, i32 0, i32 1 ; <i16*> [#uses=1]
+ %scevgep408 = getelementptr %struct.PPOperation, %struct.PPOperation* %operation, i32 0, i32 1, i32 0, i32 1 ; <i16*> [#uses=1]
%tmp410 = and i32 %2, -4 ; <i32> [#uses=1]
br label %bb169
%index.6379 = phi i32 [ 0, %bb.nph380 ], [ %4, %bb169 ] ; <i32> [#uses=3]
%tmp404 = mul i32 %index.6379, -2 ; <i32> [#uses=1]
%tmp406 = add i32 %tmp405, %tmp404 ; <i32> [#uses=1]
- %scevgep407 = getelementptr i32* %scevgep403, i32 %tmp406 ; <i32*> [#uses=1]
+ %scevgep407 = getelementptr i32, i32* %scevgep403, i32 %tmp406 ; <i32*> [#uses=1]
%tmp409 = mul i32 %index.6379, -4 ; <i32> [#uses=1]
%tmp411 = add i32 %tmp410, %tmp409 ; <i32> [#uses=1]
- %scevgep412 = getelementptr i16* %scevgep408, i32 %tmp411 ; <i16*> [#uses=1]
+ %scevgep412 = getelementptr i16, i16* %scevgep408, i32 %tmp411 ; <i16*> [#uses=1]
store i16 undef, i16* %scevgep412, align 2
store i32 undef, i32* %scevgep407, align 4
%4 = add nsw i32 %index.6379, 1 ; <i32> [#uses=1]
for.body261.i: ; preds = %for.body261.i, %for.body190
%line.3300.i = phi i32 [ undef, %for.body190 ], [ %add292.i, %for.body261.i ] ; <i32> [#uses=3]
%conv268.i = and i32 %line.3300.i, 255 ; <i32> [#uses=1]
- %tmp278.i = getelementptr [2 x [256 x %struct.bufBit_s]]* %colourLines, i32 undef, i32 %pen.1100, i32 %conv268.i, i32 0 ; <i8**> [#uses=1]
+ %tmp278.i = getelementptr [2 x [256 x %struct.bufBit_s]], [2 x [256 x %struct.bufBit_s]]* %colourLines, i32 undef, i32 %pen.1100, i32 %conv268.i, i32 0 ; <i8**> [#uses=1]
store i8* undef, i8** %tmp278.i
%tmp338 = shl i32 %line.3300.i, 3 ; <i32> [#uses=1]
%tmp339 = and i32 %tmp338, 2040 ; <i32> [#uses=1]
- %tmp285.i = getelementptr i8* %scevgep328, i32 %tmp339 ; <i8*> [#uses=1]
+ %tmp285.i = getelementptr i8, i8* %scevgep328, i32 %tmp339 ; <i8*> [#uses=1]
store i8 undef, i8* %tmp285.i
%add292.i = add nsw i32 0, %line.3300.i ; <i32> [#uses=1]
br i1 undef, label %for.body190, label %for.body261.i
for.body190: ; preds = %for.body261.i, %for.body190, %bb.nph104
%pen.1100 = phi i32 [ 0, %entry ], [ %inc230, %for.body261.i ], [ %inc230, %for.body190 ] ; <i32> [#uses=3]
- %scevgep328 = getelementptr [2 x [256 x %struct.bufBit_s]]* %colourLines, i32 undef, i32 %pen.1100, i32 0, i32 1 ; <i8*> [#uses=1]
+ %scevgep328 = getelementptr [2 x [256 x %struct.bufBit_s]], [2 x [256 x %struct.bufBit_s]]* %colourLines, i32 undef, i32 %pen.1100, i32 0, i32 1 ; <i8*> [#uses=1]
%inc230 = add i32 %pen.1100, 1 ; <i32> [#uses=2]
br i1 undef, label %for.body190, label %for.body261.i
}
; CHECK-LABEL: t:
; CHECK: addq $12, %rsi
%BitValueArray = alloca [32 x i32], align 4
- %tmp2 = getelementptr inbounds %struct.F* %this, i64 0, i32 0
+ %tmp2 = getelementptr inbounds %struct.F, %struct.F* %this, i64 0, i32 0
%tmp3 = load %struct.FC** %tmp2, align 8
- %tmp4 = getelementptr inbounds %struct.FC* %tmp3, i64 0, i32 1, i64 0
+ %tmp4 = getelementptr inbounds %struct.FC, %struct.FC* %tmp3, i64 0, i32 1, i64 0
%tmp5 = bitcast [32 x i32]* %BitValueArray to i8*
%tmp6 = bitcast i32* %tmp4 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp5, i8* %tmp6, i64 128, i32 4, i1 false)
unreachable
finally.end: ; preds = %cleanup.end10, %cleanup.switch9
- %tmp11 = getelementptr inbounds %struct.S* %s1, i32 0, i32 0 ; <[2 x i8*]*> [#uses=1]
- %arraydecay = getelementptr inbounds [2 x i8*]* %tmp11, i32 0, i32 0 ; <i8**> [#uses=1]
- %arrayidx = getelementptr inbounds i8** %arraydecay, i32 1 ; <i8**> [#uses=1]
+ %tmp11 = getelementptr inbounds %struct.S, %struct.S* %s1, i32 0, i32 0 ; <[2 x i8*]*> [#uses=1]
+ %arraydecay = getelementptr inbounds [2 x i8*], [2 x i8*]* %tmp11, i32 0, i32 0 ; <i8**> [#uses=1]
+ %arrayidx = getelementptr inbounds i8*, i8** %arraydecay, i32 1 ; <i8**> [#uses=1]
%tmp12 = load i8** %arrayidx ; <i8*> [#uses=1]
store i8* %tmp12, i8** %retval
%9 = load i8** %retval ; <i8*> [#uses=1]
define i8* @bar(%struct.a* %myvar) nounwind optsize noinline ssp {
entry:
tail call void @llvm.dbg.value(metadata %struct.a* %myvar, i64 0, metadata !8, metadata !{!"0x102"})
- %0 = getelementptr inbounds %struct.a* %myvar, i64 0, i32 0, !dbg !28 ; <i32*> [#uses=1]
+ %0 = getelementptr inbounds %struct.a, %struct.a* %myvar, i64 0, i32 0, !dbg !28 ; <i32*> [#uses=1]
%1 = load i32* %0, align 8, !dbg !28 ; <i32> [#uses=1]
tail call void @foo(i32 %1) nounwind optsize noinline ssp, !dbg !28
%2 = bitcast %struct.a* %myvar to i8*, !dbg !30 ; <i8*> [#uses=1]
define i32 @func(%struct.type* %s) nounwind optsize ssp {
entry:
- %tmp1 = getelementptr inbounds %struct.type* %s, i32 0, i32 1
+ %tmp1 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 1
%tmp2 = load i32* %tmp1, align 8
%tmp3 = icmp eq i32 %tmp2, 10
- %tmp4 = getelementptr inbounds %struct.type* %s, i32 0, i32 40
+ %tmp4 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 40
br i1 %tmp3, label %bb, label %entry.bb1_crit_edge
entry.bb1_crit_edge:
%tmp5 = bitcast i32* %tmp4 to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 84, i32 4, i1 false)
- %tmp6 = getelementptr inbounds %struct.type* %s, i32 0, i32 62
+ %tmp6 = getelementptr inbounds %struct.type, %struct.type* %s, i32 0, i32 62
store i32* null, i32** %tmp6, align 8
br label %bb1
br i1 %0, label %bb, label %bb1, !dbg !27
bb: ; preds = %entry
- %1 = getelementptr inbounds %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !29 ; <i32*> [#uses=1]
%2 = load i32* %1, align 8, !dbg !29 ; <i32> [#uses=1]
%3 = add i32 %2, %i, !dbg !29 ; <i32> [#uses=1]
br label %bb2, !dbg !29
bb1: ; preds = %entry
- %4 = getelementptr inbounds %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
+ %4 = getelementptr inbounds %struct.SVal, %struct.SVal* %location, i32 0, i32 1, !dbg !30 ; <i32*> [#uses=1]
%5 = load i32* %4, align 8, !dbg !30 ; <i32> [#uses=1]
%6 = sub i32 %5, 1, !dbg !30 ; <i32> [#uses=1]
br label %bb2, !dbg !30
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.value(metadata %struct.SVal* %this, i64 0, metadata !31, metadata !{!"0x102"}), !dbg !34
- %0 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 0, !dbg !34 ; <i8**> [#uses=1]
+ %0 = getelementptr inbounds %struct.SVal, %struct.SVal* %this, i32 0, i32 0, !dbg !34 ; <i8**> [#uses=1]
store i8* null, i8** %0, align 8, !dbg !34
- %1 = getelementptr inbounds %struct.SVal* %this, i32 0, i32 1, !dbg !34 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %this, i32 0, i32 1, !dbg !34 ; <i32*> [#uses=1]
store i32 0, i32* %1, align 8, !dbg !34
br label %return, !dbg !34
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.declare(metadata %struct.SVal* %v, metadata !38, metadata !{!"0x102"}), !dbg !41
call void @_ZN4SValC1Ev(%struct.SVal* %v) nounwind, !dbg !41
- %1 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 1, !dbg !42 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !42 ; <i32*> [#uses=1]
store i32 1, i32* %1, align 8, !dbg !42
- %2 = getelementptr inbounds %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
- %3 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
+ %3 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 0, !dbg !43 ; <i8**> [#uses=1]
%4 = load i8** %3, align 8, !dbg !43 ; <i8*> [#uses=1]
store i8* %4, i8** %2, align 8, !dbg !43
- %5 = getelementptr inbounds %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
- %6 = getelementptr inbounds %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
+ %5 = getelementptr inbounds %struct.SVal, %struct.SVal* %0, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
+ %6 = getelementptr inbounds %struct.SVal, %struct.SVal* %v, i32 0, i32 1, !dbg !43 ; <i32*> [#uses=1]
%7 = load i32* %6, align 8, !dbg !43 ; <i32> [#uses=1]
store i32 %7, i32* %5, align 8, !dbg !43
%8 = call i32 @_Z3fooi4SVal(i32 2, %struct.SVal* noalias %0) nounwind, !dbg !43 ; <i32> [#uses=0]
br i1 undef, label %while.cond.preheader, label %sem_check_validity.exit
while.cond.preheader: ; preds = %entry
- %tmp4 = getelementptr inbounds %struct._sem* %sem, i64 0, i32 1, i32 1
+ %tmp4 = getelementptr inbounds %struct._sem, %struct._sem* %sem, i64 0, i32 1, i32 1
br label %while.cond
sem_check_validity.exit: ; preds = %entry
define fastcc i32 @cli_magic_scandesc(i8* %in) nounwind ssp {
entry:
%a = alloca [64 x i8]
- %b = getelementptr inbounds [64 x i8]* %a, i64 0, i32 0
- %c = getelementptr inbounds [64 x i8]* %a, i64 0, i32 30
+ %b = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i32 0
+ %c = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i32 30
%d = load i8* %b, align 8
%e = load i8* %c, align 8
%f = bitcast [64 x i8]* %a to i8*
"2": ; preds = %entry
%3 = bitcast i8* %0 to <2 x i32>*
- %4 = getelementptr inbounds %0* %1, i32 0, i32 0
+ %4 = getelementptr inbounds %0, %0* %1, i32 0, i32 0
%5 = bitcast %"int[]"* %4 to <4 x float>*
%6 = load <4 x float>* %5, align 16
%7 = bitcast <2 x i32>* %3 to <2 x float>*
%11 = insertelement <2 x double> %10, double undef, i32 1
%12 = bitcast <2 x double> %11 to <4 x float>
%13 = shufflevector <4 x float> %6, <4 x float> %12, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
- %14 = getelementptr inbounds %0* %1, i32 0, i32 0
+ %14 = getelementptr inbounds %0, %0* %1, i32 0, i32 0
%15 = bitcast %"int[]"* %14 to <4 x float>*
store <4 x float> %13, <4 x float>* %15, align 16
%16 = bitcast i8* %0 to <2 x i32>*
%17 = bitcast <2 x i32>* %16 to i8*
- %18 = getelementptr i8* %17, i64 8
+ %18 = getelementptr i8, i8* %17, i64 8
%19 = bitcast i8* %18 to <2 x i32>*
- %20 = getelementptr inbounds %0* %2, i32 0, i32 0
+ %20 = getelementptr inbounds %0, %0* %2, i32 0, i32 0
%21 = bitcast %"int[]"* %20 to <4 x float>*
%22 = load <4 x float>* %21, align 16
%23 = bitcast <2 x i32>* %19 to <2 x float>*
%27 = insertelement <2 x double> %26, double undef, i32 1
%28 = bitcast <2 x double> %27 to <4 x float>
%29 = shufflevector <4 x float> %22, <4 x float> %28, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
- %30 = getelementptr inbounds %0* %2, i32 0, i32 0
+ %30 = getelementptr inbounds %0, %0* %2, i32 0, i32 0
%31 = bitcast %"int[]"* %30 to <4 x float>*
store <4 x float> %29, <4 x float>* %31, align 16
br label %return
if.then758:
%add761 = add i32 %call747, 4
%add763 = add i32 %add761, %call747
- %add.ptr768 = getelementptr inbounds [516 x i8]* null, i32 0, i32 %add761
+ %add.ptr768 = getelementptr inbounds [516 x i8], [516 x i8]* null, i32 0, i32 %add761
br i1 undef, label %cond.false783, label %cond.true771
cond.true771:
cond.end791:
%conv801 = trunc i32 %call747 to i8
%add.ptr822.sum = add i32 %call747, 3
- %arrayidx833 = getelementptr inbounds [516 x i8]* null, i32 0, i32 %add.ptr822.sum
+ %arrayidx833 = getelementptr inbounds [516 x i8], [516 x i8]* null, i32 0, i32 %add.ptr822.sum
store i8 %conv801, i8* %arrayidx833, align 1
%cmp841 = icmp eq i8* undef, null
br i1 %cmp841, label %if.end849, label %if.then843
%6 = lshr i32 %5, 3
%bf.clear = and i32 %6, 15
%conv = sitofp i32 %bf.clear to float
- %f = getelementptr inbounds %struct.anon* %F, i32 0, i32 0
+ %f = getelementptr inbounds %struct.anon, %struct.anon* %F, i32 0, i32 0
%tmp = load float* %f, align 4
%sub = fsub float %tmp, %conv
store float %sub, float* %f, align 4
- %ld = getelementptr inbounds %struct.anon* %F, i32 0, i32 1
+ %ld = getelementptr inbounds %struct.anon, %struct.anon* %F, i32 0, i32 1
%tmp1 = load x86_fp80* %ld, align 16
%7 = bitcast %0* %K to i32*
%8 = load i32* %7, align 4
%tmp1 = load i8* %sp, align 1
%div = udiv i8 %tmp1, 10
%rem = urem i8 %div, 10
- %arrayidx.i = getelementptr inbounds [2 x i8]* %temp.i, i32 0, i32 0
+ %arrayidx.i = getelementptr inbounds [2 x i8], [2 x i8]* %temp.i, i32 0, i32 0
store i8 %rem, i8* %arrayidx.i, align 1
%call.i = call fastcc i8* @save_string(i8* %sp, i8* %arrayidx.i) nounwind
ret i32 undef
do.body27: ; preds = %_ZN3JSC7JSValue19equalSlowCaseInlineEPNS_9ExecStateES0_S0_.exit
%tmp30 = bitcast i8* %1 to %"class.JSC::JSGlobalData"*
- %2 = getelementptr inbounds i8** %args, i64 -1
+ %2 = getelementptr inbounds i8*, i8** %args, i64 -1
%3 = bitcast i8** %2 to %"class.JSC::FunctionPtr"*
tail call fastcc void @_ZN3JSCL23returnToThrowTrampolineEPNS_12JSGlobalDataENS_16ReturnAddressPtrERS2_(%"class.JSC::JSGlobalData"* %tmp30, i8* undef, %"class.JSC::FunctionPtr"* %3)
unreachable
cond.end166.i: ; preds = %cond.false156.i, %cond.true138.i
%idxprom1113.i = phi i64 [ %idxprom1114.i, %cond.false156.i ], [ undef, %cond.true138.i ]
%tmp235.i = load %struct.state** getelementptr inbounds (%struct.dfa* @aux_temp, i64 0, i32 2), align 8
- %att.i = getelementptr inbounds %struct.state* %tmp235.i, i64 %idxprom1113.i, i32 0
+ %att.i = getelementptr inbounds %struct.state, %struct.state* %tmp235.i, i64 %idxprom1113.i, i32 0
store i32 0, i32* %att.i, align 4
ret void
}
%shr.i14 = lshr i32 %tmp6.i12, 8
%and14.i = and i32 %shr.i14, 255
%idxprom15.i = zext i32 %and14.i to i64
- %arrayidx16.i = getelementptr inbounds [256 x i32]* @bit_count, i64 0, i64 %idxprom15.i
+ %arrayidx16.i = getelementptr inbounds [256 x i32], [256 x i32]* @bit_count, i64 0, i64 %idxprom15.i
%tmp17.i15 = load i32* %arrayidx16.i, align 4
%add.i = add i32 0, %tmp3524.i
%add24.i = add i32 %add.i, %tmp17.i15
%mul = mul nsw i32 %y, %x
%sub = add nsw i32 %mul, -1
%idxprom = sext i32 %sub to i64
- %arrayidx = getelementptr inbounds i64* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i64, i64* %a, i64 %idxprom
%tmp4 = load i64* %arrayidx, align 8
; CHECK: fildll
%conv = sitofp i64 %tmp4 to float
br label %bb8
bb8: ; preds = %bb23, %bb
- %tmp15 = getelementptr inbounds %3* %tmp7, i32 0, i32 4
+ %tmp15 = getelementptr inbounds %3, %3* %tmp7, i32 0, i32 4
store i8* bitcast (%0* @0 to i8*), i8** %tmp15
%tmp16 = bitcast %3* %tmp7 to void ()*
store void ()* %tmp16, void ()** %tmp6, align 8
%tmp17 = load void ()** %tmp6, align 8
%tmp18 = bitcast void ()* %tmp17 to %6*
- %tmp19 = getelementptr inbounds %6* %tmp18, i32 0, i32 3
+ %tmp19 = getelementptr inbounds %6, %6* %tmp18, i32 0, i32 3
%tmp20 = bitcast %6* %tmp18 to i8*
%tmp21 = load i8** %tmp19
%tmp22 = bitcast i8* %tmp21 to void (i8*)*
%l.tr = phi i32 [ %l, %entry ], [ %i.1, %do.cond ]
%r.tr = phi i32 [ %r, %entry ], [ %l.tr, %do.cond ]
%idxprom12 = sext i32 %r.tr to i64
- %arrayidx14 = getelementptr inbounds i32* %a, i64 %idxprom12
+ %arrayidx14 = getelementptr inbounds i32, i32* %a, i64 %idxprom12
br label %do.body
do.body: ; preds = %do.cond, %tailrecurse
call void @bar([39 x i8]* %stack_main)
%tmp6 = add i64 %a, -2147483647
%.sum = add i64 %tmp6, %b
- %tmp8 = getelementptr inbounds [39 x i8]* %stack_main, i64 0, i64 %.sum
+ %tmp8 = getelementptr inbounds [39 x i8], [39 x i8]* %stack_main, i64 0, i64 %.sum
%tmp9 = load i8* %tmp8, align 1
%tmp10 = sext i8 %tmp9 to i32
ret i32 %tmp10
%bf.clear = and i32 %2, 255
%idxprom = sext i32 %bf.clear to i64
%3 = load %struct.optab** getelementptr inbounds ([49 x %struct.optab*]* @optab_table, i32 0, i64 0), align 8
- %handlers = getelementptr inbounds %struct.optab* %3, i32 0, i32 1
- %arrayidx = getelementptr inbounds [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom
- %insn_code = getelementptr inbounds %struct.anon.3* %arrayidx, i32 0, i32 0
+ %handlers = getelementptr inbounds %struct.optab, %struct.optab* %3, i32 0, i32 1
+ %arrayidx = getelementptr inbounds [59 x %struct.anon.3], [59 x %struct.anon.3]* %handlers, i32 0, i64 %idxprom
+ %insn_code = getelementptr inbounds %struct.anon.3, %struct.anon.3* %arrayidx, i32 0, i32 0
%4 = load i32* %insn_code, align 4
%cmp = icmp eq i32 %4, 1317
br i1 %cmp, label %if.then, label %lor.lhs.false
lor.lhs.false: ; preds = %entry
%idxprom1 = sext i32 %4 to i64
- %arrayidx2 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1
- %operand = getelementptr inbounds %struct.insn_data* %arrayidx2, i32 0, i32 3
+ %arrayidx2 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom1
+ %operand = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx2, i32 0, i32 3
%5 = load %struct.insn_operand_data** %operand, align 8
- %arrayidx3 = getelementptr inbounds %struct.insn_operand_data* %5, i64 0
- %predicate = getelementptr inbounds %struct.insn_operand_data* %arrayidx3, i32 0, i32 0
+ %arrayidx3 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %5, i64 0
+ %predicate = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx3, i32 0, i32 0
%6 = load i32 (%struct.rtx_def*, i32)** %predicate, align 8
%idxprom4 = sext i32 %4 to i64
- %arrayidx5 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4
- %operand6 = getelementptr inbounds %struct.insn_data* %arrayidx5, i32 0, i32 3
+ %arrayidx5 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom4
+ %operand6 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx5, i32 0, i32 3
%7 = load %struct.insn_operand_data** %operand6, align 8
- %arrayidx7 = getelementptr inbounds %struct.insn_operand_data* %7, i64 0
+ %arrayidx7 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %7, i64 0
%8 = bitcast %struct.insn_operand_data* %arrayidx7 to i8*
- %bf.field.offs = getelementptr i8* %8, i32 16
+ %bf.field.offs = getelementptr i8, i8* %8, i32 16
%9 = bitcast i8* %bf.field.offs to i32*
%10 = load i32* %9, align 8
%bf.clear8 = and i32 %10, 65535
lor.lhs.false9: ; preds = %lor.lhs.false
%idxprom10 = sext i32 %4 to i64
- %arrayidx11 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10
- %operand12 = getelementptr inbounds %struct.insn_data* %arrayidx11, i32 0, i32 3
+ %arrayidx11 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom10
+ %operand12 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx11, i32 0, i32 3
%11 = load %struct.insn_operand_data** %operand12, align 8
- %arrayidx13 = getelementptr inbounds %struct.insn_operand_data* %11, i64 1
- %predicate14 = getelementptr inbounds %struct.insn_operand_data* %arrayidx13, i32 0, i32 0
+ %arrayidx13 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %11, i64 1
+ %predicate14 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx13, i32 0, i32 0
%12 = load i32 (%struct.rtx_def*, i32)** %predicate14, align 8
%idxprom15 = sext i32 %4 to i64
- %arrayidx16 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15
- %operand17 = getelementptr inbounds %struct.insn_data* %arrayidx16, i32 0, i32 3
+ %arrayidx16 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom15
+ %operand17 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx16, i32 0, i32 3
%13 = load %struct.insn_operand_data** %operand17, align 8
- %arrayidx18 = getelementptr inbounds %struct.insn_operand_data* %13, i64 1
+ %arrayidx18 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %13, i64 1
%14 = bitcast %struct.insn_operand_data* %arrayidx18 to i8*
- %bf.field.offs19 = getelementptr i8* %14, i32 16
+ %bf.field.offs19 = getelementptr i8, i8* %14, i32 16
%15 = bitcast i8* %bf.field.offs19 to i32*
%16 = load i32* %15, align 8
%bf.clear20 = and i32 %16, 65535
lor.lhs.false23: ; preds = %lor.lhs.false9
%idxprom24 = sext i32 %4 to i64
- %arrayidx25 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24
- %operand26 = getelementptr inbounds %struct.insn_data* %arrayidx25, i32 0, i32 3
+ %arrayidx25 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom24
+ %operand26 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx25, i32 0, i32 3
%17 = load %struct.insn_operand_data** %operand26, align 8
- %arrayidx27 = getelementptr inbounds %struct.insn_operand_data* %17, i64 2
- %predicate28 = getelementptr inbounds %struct.insn_operand_data* %arrayidx27, i32 0, i32 0
+ %arrayidx27 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %17, i64 2
+ %predicate28 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %arrayidx27, i32 0, i32 0
%18 = load i32 (%struct.rtx_def*, i32)** %predicate28, align 8
%idxprom29 = sext i32 %4 to i64
- %arrayidx30 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29
- %operand31 = getelementptr inbounds %struct.insn_data* %arrayidx30, i32 0, i32 3
+ %arrayidx30 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom29
+ %operand31 = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx30, i32 0, i32 3
%19 = load %struct.insn_operand_data** %operand31, align 8
- %arrayidx32 = getelementptr inbounds %struct.insn_operand_data* %19, i64 2
+ %arrayidx32 = getelementptr inbounds %struct.insn_operand_data, %struct.insn_operand_data* %19, i64 2
%20 = bitcast %struct.insn_operand_data* %arrayidx32 to i8*
- %bf.field.offs33 = getelementptr i8* %20, i32 16
+ %bf.field.offs33 = getelementptr i8, i8* %20, i32 16
%21 = bitcast i8* %bf.field.offs33 to i32*
%22 = load i32* %21, align 8
%bf.clear34 = and i32 %22, 65535
if.end: ; preds = %lor.lhs.false23
%idxprom37 = sext i32 %4 to i64
- %arrayidx38 = getelementptr inbounds [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37
- %genfun = getelementptr inbounds %struct.insn_data* %arrayidx38, i32 0, i32 2
+ %arrayidx38 = getelementptr inbounds [0 x %struct.insn_data], [0 x %struct.insn_data]* @insn_data, i32 0, i64 %idxprom37
+ %genfun = getelementptr inbounds %struct.insn_data, %struct.insn_data* %arrayidx38, i32 0, i32 2
%23 = load %struct.rtx_def* (%struct.rtx_def*, ...)** %genfun, align 8
%call39 = tail call %struct.rtx_def* (%struct.rtx_def*, ...)* %23(%struct.rtx_def* %r0, %struct.rtx_def* %r1, %struct.rtx_def* %c)
br label %return
%tmp12 = ptrtoint i8* %tmp10 to i32
%tmp13 = bitcast i8* %tmp10 to i32*
%tmp14 = shl i32 %tmp8, 2
- %tmp15 = getelementptr i32* %tmp13, i32 undef
- %tmp16 = getelementptr i32* %tmp13, i32 undef
+ %tmp15 = getelementptr i32, i32* %tmp13, i32 undef
+ %tmp16 = getelementptr i32, i32* %tmp13, i32 undef
%tmp17 = zext i32 %tmp9 to i64
%tmp18 = add i64 %tmp17, -1
%tmp19 = icmp ugt i64 %tmp18, 4294967295
%tmp50 = phi i32 [ %tmp55, %bb49 ], [ 0, %bb48 ]
%tmp51 = add i32 %tmp50, undef
%tmp52 = add i32 %tmp50, undef
- %tmp53 = getelementptr i32* %tmp13, i32 %tmp52
+ %tmp53 = getelementptr i32, i32* %tmp13, i32 %tmp52
%tmp54 = load i32* %tmp53, align 4
%tmp55 = add i32 %tmp50, 1
%tmp56 = icmp eq i32 %tmp55, %tmp8
bb61: ; preds = %bb61, %bb59
%tmp62 = phi i32 [ %tmp65, %bb61 ], [ 0, %bb59 ]
%tmp63 = add i32 %tmp62, %tmp14
- %tmp64 = getelementptr i32* %tmp13, i32 %tmp63
+ %tmp64 = getelementptr i32, i32* %tmp13, i32 %tmp63
store i32 0, i32* %tmp64, align 4
%tmp65 = add i32 %tmp62, 1
%tmp66 = icmp eq i32 %tmp65, %tmp8
indirectbr i8* undef, [label %return, label %if.end]
if.end: ; preds = %entry
- %size5 = getelementptr inbounds %struct.ref_s* %op, i64 0, i32 2
+ %size5 = getelementptr inbounds %struct.ref_s, %struct.ref_s* %op, i64 0, i32 2
%tmp6 = load i16* %size5, align 2
%tobool1 = icmp eq i16 %tmp6, 0
%1 = select i1 %tobool1, i32 1396, i32 -1910
%tmp4 = zext i16 %tmp6 to i64
%index13 = add i32 %index10, 1658
%2 = sext i32 %index13 to i64
- %3 = getelementptr [3891 x i64]* @table, i64 0, i64 %2
+ %3 = getelementptr [3891 x i64], [3891 x i64]* @table, i64 0, i64 %2
%blockaddress14 = load i64* %3, align 8
%4 = inttoptr i64 %blockaddress14 to i8*
indirectbr i8* %4, [label %while.body]
while.body: ; preds = %while.body, %while.body.lr.ph
%index7 = phi i32 [ %index15, %while.body ], [ %index13, %while.body.lr.ph ]
%indvar = phi i64 [ %indvar.next, %while.body ], [ 0, %while.body.lr.ph ]
- %type_attrs = getelementptr %struct.ref_s* %tmp9, i64 %indvar, i32 1
+ %type_attrs = getelementptr %struct.ref_s, %struct.ref_s* %tmp9, i64 %indvar, i32 1
store i16 32, i16* %type_attrs, align 2
%indvar.next = add i64 %indvar, 1
%exitcond5 = icmp eq i64 %indvar.next, %tmp4
%index15 = add i32 %index7, %tmp7
%tmp8 = select i1 %exitcond5, i64 13, i64 0
%5 = sext i32 %index15 to i64
- %6 = getelementptr [3891 x i64]* @table, i64 0, i64 %5
+ %6 = getelementptr [3891 x i64], [3891 x i64]* @table, i64 0, i64 %5
%blockaddress16 = load i64* %6, align 8
%7 = inttoptr i64 %blockaddress16 to i8*
indirectbr i8* %7, [label %return, label %while.body]
define void @func() nounwind ssp {
%tmp = load <4 x float>* null, align 1
- %tmp14 = getelementptr <4 x float>* null, i32 2
+ %tmp14 = getelementptr <4 x float>, <4 x float>* null, i32 2
%tmp15 = load <4 x float>* %tmp14, align 1
%tmp16 = shufflevector <4 x float> %tmp, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 4, i32 4, i32 4>
%tmp17 = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> %tmp16, <4 x float> undef, i8 1)
store i8* %0, i8** %2
%3 = load i8** %2
%4 = bitcast i8* %3 to { i32, i32 }*
- %5 = getelementptr { i32, i32 }* %4, i32 0, i32 0
+ %5 = getelementptr { i32, i32 }, { i32, i32 }* %4, i32 0, i32 0
%6 = load i32* %5
%7 = srem i32 %6, 2
%8 = icmp slt i32 %6, 2
; <label>:11 ; preds = %1
%12 = zext i1 %10 to i32
- %13 = getelementptr [4 x i32]* @JT, i32 0, i32 %12
+ %13 = getelementptr [4 x i32], [4 x i32]* @JT, i32 0, i32 %12
%14 = load i32* %13
%15 = add i32 %14, ptrtoint (i8* blockaddress(@h, %11) to i32)
%16 = inttoptr i32 %15 to i8*
define i32 @t(%TRp* inreg %rp) nounwind optsize ssp {
entry:
- %handler = getelementptr inbounds %TRp* %rp, i32 0, i32 1
+ %handler = getelementptr inbounds %TRp, %TRp* %rp, i32 0, i32 1
%0 = load %TRH** %handler, align 4
- %sync = getelementptr inbounds %TRH* %0, i32 0, i32 4
+ %sync = getelementptr inbounds %TRH, %TRH* %0, i32 0, i32 4
%sync12 = load {}** %sync, align 4
%1 = bitcast {}* %sync12 to i32 (%TRp*)*
%call = tail call i32 %1(%TRp* inreg %rp) nounwind optsize
define { <2 x float>, <2 x float> } @t2(%btConeShape* %this) unnamed_addr uwtable ssp align 2 {
entry:
- %0 = getelementptr inbounds %btConeShape* %this, i64 0, i32 0
+ %0 = getelementptr inbounds %btConeShape, %btConeShape* %this, i64 0, i32 0
br i1 undef, label %if.then, label %if.end17
if.then: ; preds = %entry
define fastcc void @bar(%struct.pluto.0* %arg) nounwind uwtable ssp align 2 {
bb:
%tmp1 = alloca %struct.widget.375, align 8
- %tmp2 = getelementptr inbounds %struct.pluto.0* %arg, i64 0, i32 1
+ %tmp2 = getelementptr inbounds %struct.pluto.0, %struct.pluto.0* %arg, i64 0, i32 1
%tmp3 = load %struct.hoge.368** %tmp2, align 8
store %struct.pluto.0* %arg, %struct.pluto.0** undef, align 8
- %tmp = getelementptr inbounds %struct.widget.375* %tmp1, i64 0, i32 2
- %tmp4 = getelementptr %struct.pluto.0* %arg, i64 0, i32 0, i32 0
+ %tmp = getelementptr inbounds %struct.widget.375, %struct.widget.375* %tmp1, i64 0, i32 2
+ %tmp4 = getelementptr %struct.pluto.0, %struct.pluto.0* %arg, i64 0, i32 0, i32 0
%tmp5 = load %i8** %tmp4, align 8
store %i8* %tmp5, %i8** %tmp, align 8
- %tmp6 = getelementptr inbounds %struct.widget.375* %tmp1, i64 0, i32 3
+ %tmp6 = getelementptr inbounds %struct.widget.375, %struct.widget.375* %tmp1, i64 0, i32 3
store %struct.hoge.368* %tmp3, %struct.hoge.368** %tmp6, align 8
br i1 undef, label %bb8, label %bb7
store i8* undef, i8** undef
%tmp30 = load i32* null
%tmp31 = icmp eq i32 %tmp30, 0
- %tmp32 = getelementptr inbounds [411 x i8]* @global, i32 0, i32 undef
+ %tmp32 = getelementptr inbounds [411 x i8], [411 x i8]* @global, i32 0, i32 undef
%tmp33 = load i8* %tmp32, align 1
- %tmp34 = getelementptr inbounds [411 x i8]* @global, i32 0, i32 0
+ %tmp34 = getelementptr inbounds [411 x i8], [411 x i8]* @global, i32 0, i32 0
%tmp35 = load i8* %tmp34, align 1
%tmp36 = select i1 %tmp31, i8 %tmp35, i8 %tmp33
%tmp37 = select i1 undef, i8 %tmp29, i8 %tmp36
%tmp38 = zext i8 %tmp37 to i32
%tmp39 = select i1 undef, i32 0, i32 %tmp38
- %tmp40 = getelementptr inbounds i32* null, i32 %tmp39
+ %tmp40 = getelementptr inbounds i32, i32* null, i32 %tmp39
%tmp41 = load i32* %tmp40, align 4
%tmp42 = load i32* undef, align 4
%tmp43 = load i32* undef
%ret0 = call i32 @foo([10 x i32]* %object1) nounwind
- %O1_1 = getelementptr [10 x i32]* %object1, i64 0, i32 1
- %O1_2 = getelementptr [10 x i32]* %object1, i64 0, i32 2
- %O1_3 = getelementptr [10 x i32]* %object1, i64 0, i32 3
- %O1_4 = getelementptr [10 x i32]* %object1, i64 0, i32 4
- %ld_ptr = getelementptr [10 x i32]* %object1, i64 0, i32 9
+ %O1_1 = getelementptr [10 x i32], [10 x i32]* %object1, i64 0, i32 1
+ %O1_2 = getelementptr [10 x i32], [10 x i32]* %object1, i64 0, i32 2
+ %O1_3 = getelementptr [10 x i32], [10 x i32]* %object1, i64 0, i32 3
+ %O1_4 = getelementptr [10 x i32], [10 x i32]* %object1, i64 0, i32 4
+ %ld_ptr = getelementptr [10 x i32], [10 x i32]* %object1, i64 0, i32 9
store i32 0, i32* %O1_1
store i32 0, i32* %O1_2
; CHECK: ret
define i32 @merge_stores_cant([10 x i32]* %in0, [10 x i32]* %in1) nounwind ssp {
- %O1_1 = getelementptr [10 x i32]* %in1, i64 0, i32 1
- %O1_2 = getelementptr [10 x i32]* %in1, i64 0, i32 2
- %O1_3 = getelementptr [10 x i32]* %in1, i64 0, i32 3
- %O1_4 = getelementptr [10 x i32]* %in1, i64 0, i32 4
- %ld_ptr = getelementptr [10 x i32]* %in0, i64 0, i32 2
+ %O1_1 = getelementptr [10 x i32], [10 x i32]* %in1, i64 0, i32 1
+ %O1_2 = getelementptr [10 x i32], [10 x i32]* %in1, i64 0, i32 2
+ %O1_3 = getelementptr [10 x i32], [10 x i32]* %in1, i64 0, i32 3
+ %O1_4 = getelementptr [10 x i32], [10 x i32]* %in1, i64 0, i32 4
+ %ld_ptr = getelementptr [10 x i32], [10 x i32]* %in0, i64 0, i32 2
store i32 0, i32* %O1_1
store i32 0, i32* %O1_2
define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double %tolsq, %struct.hgstruct.2.29* nocapture byval align 8 %hg) nounwind uwtable readonly ssp {
entry:
call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !{!"0x102"})
- %type = getelementptr inbounds %struct.node.0.27* %p, i64 0, i32 0
+ %type = getelementptr inbounds %struct.node.0.27, %struct.node.0.27* %p, i64 0, i32 0
%0 = load i16* %type, align 2
%cmp = icmp eq i16 %0, 1
br i1 %cmp, label %return, label %for.cond.preheader
for.cond.preheader: ; preds = %entry
- %arrayidx6.1 = getelementptr inbounds %struct.hgstruct.2.29* %hg, i64 0, i32 1, i64 1
+ %arrayidx6.1 = getelementptr inbounds %struct.hgstruct.2.29, %struct.hgstruct.2.29* %hg, i64 0, i32 1, i64 1
%cmp22 = fcmp olt double 0.000000e+00, %dsq
%conv24 = zext i1 %cmp22 to i16
br label %return
if.then4073: ; preds = %if.then3344
call void @llvm.dbg.declare(metadata [20 x i8]* %num14075, metadata !4, metadata !{!"0x102"})
- %arraydecay4078 = getelementptr inbounds [20 x i8]* %num14075, i64 0, i64 0
+ %arraydecay4078 = getelementptr inbounds [20 x i8], [20 x i8]* %num14075, i64 0, i64 0
%0 = load i32* undef, align 4
%add4093 = add nsw i32 %0, 0
%conv4094 = sitofp i32 %add4093 to float
cond.end: ; preds = %entry
call void @llvm.dbg.declare(metadata %"class.__gnu_cxx::hash_map"* %X, metadata !31, metadata !{!"0x102"})
- %_M_num_elements.i.i.i.i = getelementptr inbounds %"class.__gnu_cxx::hash_map"* %X, i64 0, i32 0, i32 5
+ %_M_num_elements.i.i.i.i = getelementptr inbounds %"class.__gnu_cxx::hash_map", %"class.__gnu_cxx::hash_map"* %X, i64 0, i32 0, i32 5
invoke void @_Znwm()
to label %exit.i unwind label %lpad2.i.i.i.i
if.end: ; preds = %entry
call void @llvm.dbg.declare(metadata %struct.btCompoundLeafCallback* %callback, metadata !3, metadata !{!"0x102"})
- %m = getelementptr inbounds %struct.btCompoundLeafCallback* %callback, i64 0, i32 1
+ %m = getelementptr inbounds %struct.btCompoundLeafCallback, %struct.btCompoundLeafCallback* %callback, i64 0, i32 1
store i32 0, i32* undef, align 8
%cmp12447 = icmp sgt i32 undef, 0
br i1 %cmp12447, label %for.body.lr.ph, label %invoke.cont44
;CHECK: ret
define i32 @foo (i64* %so) nounwind uwtable ssp {
entry:
- %used = getelementptr inbounds i64* %so, i32 3
+ %used = getelementptr inbounds i64, i64* %so, i32 3
store i64 0, i64* %used, align 8
- %fill = getelementptr inbounds i64* %so, i32 2
+ %fill = getelementptr inbounds i64, i64* %so, i32 2
%L = load i64* %fill, align 8
store i64 0, i64* %fill, align 8
%cmp28 = icmp sgt i64 %L, 0
; CHECK: ret
define void @multiple_stores_on_chain(i16 * %A) {
entry:
- %a0 = getelementptr inbounds i16* %A, i64 0
- %a1 = getelementptr inbounds i16* %A, i64 1
- %a2 = getelementptr inbounds i16* %A, i64 2
- %a3 = getelementptr inbounds i16* %A, i64 3
- %a4 = getelementptr inbounds i16* %A, i64 4
- %a5 = getelementptr inbounds i16* %A, i64 5
- %a6 = getelementptr inbounds i16* %A, i64 6
- %a7 = getelementptr inbounds i16* %A, i64 7
+ %a0 = getelementptr inbounds i16, i16* %A, i64 0
+ %a1 = getelementptr inbounds i16, i16* %A, i64 1
+ %a2 = getelementptr inbounds i16, i16* %A, i64 2
+ %a3 = getelementptr inbounds i16, i16* %A, i64 3
+ %a4 = getelementptr inbounds i16, i16* %A, i64 4
+ %a5 = getelementptr inbounds i16, i16* %A, i64 5
+ %a6 = getelementptr inbounds i16, i16* %A, i64 6
+ %a7 = getelementptr inbounds i16, i16* %A, i64 7
store i16 0, i16* %a0
store i16 1, i16* %a1
; CHECK-NOT: xmm
; CHECK: ret
%0 = load %struct1** undef, align 8
- %1 = getelementptr inbounds %struct1* %0, i64 0, i32 0
+ %1 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 0
store i32* null, i32** %1, align 8
- %2 = getelementptr inbounds %struct1* %0, i64 0, i32 1
+ %2 = getelementptr inbounds %struct1, %struct1* %0, i64 0, i32 1
store i32* null, i32** %2, align 8
ret void
}
%iv.i = phi i64 [ -5, %0 ], [ %iv.next.i, %print_shadow_bytes.exit.i ]
%reg15 = icmp eq i64 %iv.i, 0
%.str..str1.i = select i1 %reg15, [3 x i8]* @.str, [3 x i8]* @.str1
- %reg16 = getelementptr inbounds [3 x i8]* %.str..str1.i, i64 0, i64 0
+ %reg16 = getelementptr inbounds [3 x i8], [3 x i8]* %.str..str1.i, i64 0, i64 0
%reg17 = shl i64 %iv.i, 1
%reg19 = inttoptr i64 %reg17 to i8*
call void (i64*, i8*, ...)* @append(i64* %str.i, i8* getelementptr inbounds ([6 x i8]* @.str2, i64 0, i64 0), i8* %reg16, i8* %reg19)
declare %list* @llvm.gcread(%list*, %list**)
define %list* @tl(%list* %l) gc "example" {
- %hd.ptr = getelementptr %list* %l, i32 0, i32 0
+ %hd.ptr = getelementptr %list, %list* %l, i32 0, i32 0
%hd = call %list* @llvm.gcread(%list* %l, %list** %hd.ptr)
ret i32 %tmp
}
%tmp = call i8* @gcalloc(i32 bitcast(%list* getelementptr(%list* null, i32 1) to i32))
%cell = bitcast i8* %tmp to %list*
- %hd.ptr = getelementptr %list* %cell, i32 0, i32 0
+ %hd.ptr = getelementptr %list, %list* %cell, i32 0, i32 0
store i32 %hd, i32* %hd.ptr
- %tl.ptr = getelementptr %list* %cell, i32 0, i32 0
+ %tl.ptr = getelementptr %list, %list* %cell, i32 0, i32 0
call void @llvm.gcwrite(%list* %tl, %list* %cell, %list** %tl.ptr)
ret %cell.2
%obj = call %IntArray* @h( ) ; <%IntArray*> [#uses=2]
%obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj.2, i8** %root
- %Length.ptr = getelementptr %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
+ %Length.ptr = getelementptr %IntArray, %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
%Length = load i32* %Length.ptr ; <i32> [#uses=1]
ret i32 %Length
}
%obj = call %IntArray* @h( ) ; <%IntArray*> [#uses=2]
%obj.2 = bitcast %IntArray* %obj to i8* ; <i8*> [#uses=1]
store i8* %obj.2, i8** %root
- %Length.ptr = getelementptr %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
+ %Length.ptr = getelementptr %IntArray, %IntArray* %obj, i32 0, i32 0 ; <i32*> [#uses=1]
%Length = load i32* %Length.ptr ; <i32> [#uses=1]
ret i32 %Length
}
%b.i = alloca [16 x <2 x double>], align 16
%conv = bitcast i8* %_stubArgs to i32*
%tmp1 = load i32* %conv, align 4
- %ptr8 = getelementptr i8* %_stubArgs, i64 16
+ %ptr8 = getelementptr i8, i8* %_stubArgs, i64 16
%i4 = bitcast i8* %ptr8 to <2 x double>*
- %ptr20 = getelementptr i8* %_stubArgs, i64 48
+ %ptr20 = getelementptr i8, i8* %_stubArgs, i64 48
%i7 = bitcast i8* %ptr20 to <2 x double> addrspace(1)**
%tmp21 = load <2 x double> addrspace(1)** %i7, align 8
- %ptr28 = getelementptr i8* %_stubArgs, i64 64
+ %ptr28 = getelementptr i8, i8* %_stubArgs, i64 64
%i9 = bitcast i8* %ptr28 to i32*
%tmp29 = load i32* %i9, align 4
- %ptr32 = getelementptr i8* %_stubArgs, i64 68
+ %ptr32 = getelementptr i8, i8* %_stubArgs, i64 68
%i10 = bitcast i8* %ptr32 to i32*
%tmp33 = load i32* %i10, align 4
%tmp17.i = mul i32 10, 20
%tmp42.i = add i32 %tmp6.i, 17
%tmp44.i = insertelement <2 x i32> undef, i32 %tmp42.i, i32 1
%tmp96676677.i = or i32 17, -4
- %ptr4438.i = getelementptr inbounds [16 x <2 x double>]* %b.i, i64 0, i64 0
- %arrayidx4506.i = getelementptr [16 x <2 x double>]* %b.i, i64 0, i64 4
+ %ptr4438.i = getelementptr inbounds [16 x <2 x double>], [16 x <2 x double>]* %b.i, i64 0, i64 0
+ %arrayidx4506.i = getelementptr [16 x <2 x double>], [16 x <2 x double>]* %b.i, i64 0, i64 4
%tmp52.i = insertelement <2 x i32> %tmp44.i, i32 0, i32 0
%tmp78.i = extractelement <2 x i32> %tmp44.i, i32 1
%tmp97.i = add i32 %tmp78.i, %tmp96676677.i
%i39 = add i32 %tmp158.i, %i38
%conv160.i = zext i32 %i39 to i64
%tmp22.sum652.i = add i64 %conv160.i, %conv21.i
- %arrayidx161.i = getelementptr <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
+ %arrayidx161.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum652.i
%tmp162.i = load <2 x double> addrspace(1)* %arrayidx161.i, align 16
%tmp222.i = add i32 %tmp154.i, 1
%i43 = mul i32 %tmp222.i, %tmp29
%i44 = add i32 %tmp158.i, %i43
%conv228.i = zext i32 %i44 to i64
%tmp22.sum656.i = add i64 %conv228.i, %conv21.i
- %arrayidx229.i = getelementptr <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
+ %arrayidx229.i = getelementptr <2 x double>, <2 x double> addrspace(1)* %tmp21, i64 %tmp22.sum656.i
%tmp230.i = load <2 x double> addrspace(1)* %arrayidx229.i, align 16
%cmp432.i = icmp ult i32 %tmp156.i, %tmp1
.lr.ph:
%i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
%.01 = phi %struct.A* [ %11, %.lr.ph ], [ %p, %0 ]
- %2 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 1, i8* %2, align 1
- %3 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1
+ %3 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
store i8 2, i8* %3, align 1
- %4 = getelementptr inbounds %struct.A* %.01, i64 0, i32 2
+ %4 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 2
store i8 3, i8* %4, align 1
- %5 = getelementptr inbounds %struct.A* %.01, i64 0, i32 3
+ %5 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 3
store i8 4, i8* %5, align 1
- %6 = getelementptr inbounds %struct.A* %.01, i64 0, i32 4
+ %6 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 4
store i8 5, i8* %6, align 1
- %7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 5
+ %7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 5
store i8 6, i8* %7, align 1
- %8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 6
+ %8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 6
store i8 7, i8* %8, align 1
- %9 = getelementptr inbounds %struct.A* %.01, i64 0, i32 7
+ %9 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 7
store i8 8, i8* %9, align 1
%10 = add nsw i32 %i.02, 1
- %11 = getelementptr inbounds %struct.A* %.01, i64 1
+ %11 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
%exitcond = icmp eq i32 %10, %count
br i1 %exitcond, label %._crit_edge, label %.lr.ph
._crit_edge:
.lr.ph:
%i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
%.01 = phi %struct.B* [ %11, %.lr.ph ], [ %p, %0 ]
- %2 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
store i32 0, i32* %2, align 4
- %3 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1
+ %3 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
store i32 0, i32* %3, align 4
- %4 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2
+ %4 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
store i32 0, i32* %4, align 4
- %5 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3
+ %5 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
store i32 0, i32* %5, align 4
- %6 = getelementptr inbounds %struct.B* %.01, i64 0, i32 4
+ %6 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 4
store i32 0, i32* %6, align 4
- %7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 5
+ %7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 5
store i32 0, i32* %7, align 4
- %8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 6
+ %8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 6
store i32 0, i32* %8, align 4
- %9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 7
+ %9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 7
store i32 0, i32* %9, align 4
%10 = add nsw i32 %i.02, 1
- %11 = getelementptr inbounds %struct.B* %.01, i64 1
+ %11 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
%exitcond = icmp eq i32 %10, %count
br i1 %exitcond, label %._crit_edge, label %.lr.ph
._crit_edge:
.lr.ph:
%i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
%.01 = phi %struct.B* [ %11, %.lr.ph ], [ %p, %0 ]
- %2 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
store i32 0, i32* %2, align 4
- %3 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1
+ %3 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
store i32 0, i32* %3, align 4
- %4 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2
+ %4 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
store i32 0, i32* %4, align 4
- %5 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3
+ %5 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
store i32 0, i32* %5, align 4
- %6 = getelementptr inbounds %struct.B* %.01, i64 0, i32 4
+ %6 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 4
store i32 0, i32* %6, align 4
- %7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 5
+ %7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 5
store i32 0, i32* %7, align 4
- %8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 6
+ %8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 6
store i32 0, i32* %8, align 4
- %9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 7
+ %9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 7
store i32 0, i32* %9, align 4
%10 = add nsw i32 %i.02, 1
- %11 = getelementptr inbounds %struct.B* %.01, i64 1
+ %11 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
%exitcond = icmp eq i32 %10, %count
br i1 %exitcond, label %._crit_edge, label %.lr.ph
._crit_edge:
.lr.ph:
%i.02 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
%.01 = phi %struct.A* [ %11, %.lr.ph ], [ %p, %0 ]
- %2 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0
+ %2 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 1, i8* %2, align 1
- %3 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1
+ %3 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
store i8 2, i8* %3, align 1
- %4 = getelementptr inbounds %struct.A* %.01, i64 0, i32 2
+ %4 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 2
store i8 3, i8* %4, align 1
- %5 = getelementptr inbounds %struct.A* %.01, i64 0, i32 3
+ %5 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 3
store i8 4, i8* %5, align 1
- %6 = getelementptr inbounds %struct.A* %.01, i64 0, i32 4
+ %6 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 4
store i8 %zz, i8* %6, align 1 ; <----------- Not a const;
- %7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 5
+ %7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 5
store i8 6, i8* %7, align 1
- %8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 6
+ %8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 6
store i8 7, i8* %8, align 1
- %9 = getelementptr inbounds %struct.A* %.01, i64 0, i32 7
+ %9 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 7
store i8 8, i8* %9, align 1
%10 = add nsw i32 %i.02, 1
- %11 = getelementptr inbounds %struct.A* %.01, i64 1
+ %11 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
%exitcond = icmp eq i32 %10, %count
br i1 %exitcond, label %._crit_edge, label %.lr.ph
._crit_edge:
br i1 %1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0
- %2 = getelementptr inbounds %struct.A* %q, i64 0, i32 0
- %3 = getelementptr inbounds %struct.A* %q, i64 0, i32 1
+ %2 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 0
+ %3 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 1
br label %4
; <label>:4 ; preds = %4, %.lr.ph
%.01 = phi %struct.A* [ %p, %.lr.ph ], [ %10, %4 ]
%5 = load i8* %2, align 1
%6 = load i8* %3, align 1
- %7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0
+ %7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 %5, i8* %7, align 1
- %8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1
+ %8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
store i8 %6, i8* %8, align 1
%9 = add nsw i32 %i.02, 1
- %10 = getelementptr inbounds %struct.A* %.01, i64 1
+ %10 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
%exitcond = icmp eq i32 %9, %count
br i1 %exitcond, label %._crit_edge, label %4
br i1 %1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0
- %2 = getelementptr inbounds %struct.A* %q, i64 0, i32 0
- %3 = getelementptr inbounds %struct.A* %q, i64 0, i32 1
+ %2 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 0
+ %3 = getelementptr inbounds %struct.A, %struct.A* %q, i64 0, i32 1
br label %a4
a4: ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %a9, %a4 ]
%.01 = phi %struct.A* [ %p, %.lr.ph ], [ %a10, %a4 ]
%a5 = load i8* %2, align 1
- %a7 = getelementptr inbounds %struct.A* %.01, i64 0, i32 0
+ %a7 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 0
store i8 %a5, i8* %a7, align 1
- %a8 = getelementptr inbounds %struct.A* %.01, i64 0, i32 1
+ %a8 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 0, i32 1
%a6 = load i8* %3, align 1
store i8 %a6, i8* %a8, align 1
%a9 = add nsw i32 %i.02, 1
- %a10 = getelementptr inbounds %struct.A* %.01, i64 1
+ %a10 = getelementptr inbounds %struct.A, %struct.A* %.01, i64 1
%exitcond = icmp eq i32 %a9, %count
br i1 %exitcond, label %._crit_edge, label %a4
br i1 %1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0
- %2 = getelementptr inbounds %struct.B* %q, i64 0, i32 0
- %3 = getelementptr inbounds %struct.B* %q, i64 0, i32 1
+ %2 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 0
+ %3 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 1
br label %4
; <label>:4 ; preds = %4, %.lr.ph
%.01 = phi %struct.B* [ %p, %.lr.ph ], [ %10, %4 ]
%5 = load i32* %2
%6 = load i32* %3
- %7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0
+ %7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
store i32 %5, i32* %7
- %8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1
+ %8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
store i32 %6, i32* %8
%9 = add nsw i32 %i.02, 1
- %10 = getelementptr inbounds %struct.B* %.01, i64 1
+ %10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
%exitcond = icmp eq i32 %9, %count
br i1 %exitcond, label %._crit_edge, label %4
br i1 %a1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0
- %a2 = getelementptr inbounds %struct.B* %q, i64 0, i32 0
- %a3 = getelementptr inbounds %struct.B* %q, i64 0, i32 1
- %a4 = getelementptr inbounds %struct.B* %q, i64 0, i32 2
- %a5 = getelementptr inbounds %struct.B* %q, i64 0, i32 3
+ %a2 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 0
+ %a3 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 1
+ %a4 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 2
+ %a5 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 3
br label %block4
block4: ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %c9, %block4 ]
%.01 = phi %struct.B* [ %p, %.lr.ph ], [ %c10, %block4 ]
- %a7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0
- %a8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1
- %a9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2
- %a10 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3
+ %a7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
+ %a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
+ %a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
+ %a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
%b1 = load i32* %a2
%b2 = load i32* %a3
%b3 = load i32* %a4
store i32 %b3, i32* %a9
store i32 %b4, i32* %a10
%c9 = add nsw i32 %i.02, 1
- %c10 = getelementptr inbounds %struct.B* %.01, i64 1
+ %c10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
%exitcond = icmp eq i32 %c9, %count
br i1 %exitcond, label %._crit_edge, label %block4
br i1 %a1, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0
- %a2 = getelementptr inbounds %struct.B* %q, i64 0, i32 0
- %a3 = getelementptr inbounds %struct.B* %q, i64 0, i32 1
- %a4 = getelementptr inbounds %struct.B* %q, i64 0, i32 2
- %a5 = getelementptr inbounds %struct.B* %q, i64 0, i32 3
+ %a2 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 0
+ %a3 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 1
+ %a4 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 2
+ %a5 = getelementptr inbounds %struct.B, %struct.B* %q, i64 0, i32 3
br label %block4
block4: ; preds = %4, %.lr.ph
%i.02 = phi i32 [ 0, %.lr.ph ], [ %c9, %block4 ]
%.01 = phi %struct.B* [ %p, %.lr.ph ], [ %c10, %block4 ]
- %a7 = getelementptr inbounds %struct.B* %.01, i64 0, i32 0
- %a8 = getelementptr inbounds %struct.B* %.01, i64 0, i32 1
- %a9 = getelementptr inbounds %struct.B* %.01, i64 0, i32 2
- %a10 = getelementptr inbounds %struct.B* %.01, i64 0, i32 3
+ %a7 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 0
+ %a8 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 1
+ %a9 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 2
+ %a10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 0, i32 3
%b1 = load i32* %a2, align 1
%b2 = load i32* %a3, align 1
%b3 = load i32* %a4, align 1
store i32 %b3, i32* %a9, align 1
store i32 %b4, i32* %a10, align 1
%c9 = add nsw i32 %i.02, 1
- %c10 = getelementptr inbounds %struct.B* %.01, i64 1
+ %c10 = getelementptr inbounds %struct.B, %struct.B* %.01, i64 1
%exitcond = icmp eq i32 %c9, %count
br i1 %exitcond, label %._crit_edge, label %block4
%.09 = phi i32 [ %n, %0 ], [ %11, %1 ]
%.08 = phi i8* [ %b, %0 ], [ %10, %1 ]
%.0 = phi i64* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i64* %.0, i64 1
+ %2 = getelementptr inbounds i64, i64* %.0, i64 1
%3 = load i64* %.0, align 1
- %4 = getelementptr inbounds i8* %c, i64 %3
+ %4 = getelementptr inbounds i8, i8* %c, i64 %3
%5 = load i8* %4, align 1
%6 = add i64 %3, 1
- %7 = getelementptr inbounds i8* %c, i64 %6
+ %7 = getelementptr inbounds i8, i8* %c, i64 %6
%8 = load i8* %7, align 1
store i8 %5, i8* %.08, align 1
- %9 = getelementptr inbounds i8* %.08, i64 1
+ %9 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %8, i8* %9, align 1
- %10 = getelementptr inbounds i8* %.08, i64 2
+ %10 = getelementptr inbounds i8, i8* %.08, i64 2
%11 = add nsw i32 %.09, -1
%12 = icmp eq i32 %11, 0
br i1 %12, label %13, label %1
%.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i8* %.0, i64 1
+ %2 = getelementptr inbounds i8, i8* %.0, i64 1
%3 = load i8* %.0, align 1
%4 = sext i8 %3 to i64
- %5 = getelementptr inbounds i8* %c, i64 %4
+ %5 = getelementptr inbounds i8, i8* %c, i64 %4
%6 = load i8* %5, align 1
%7 = add i64 %4, 1
- %8 = getelementptr inbounds i8* %c, i64 %7
+ %8 = getelementptr inbounds i8, i8* %c, i64 %7
%9 = load i8* %8, align 1
store i8 %6, i8* %.08, align 1
- %10 = getelementptr inbounds i8* %.08, i64 1
+ %10 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %9, i8* %10, align 1
- %11 = getelementptr inbounds i8* %.08, i64 2
+ %11 = getelementptr inbounds i8, i8* %.08, i64 2
%12 = add nsw i32 %.09, -1
%13 = icmp eq i32 %12, 0
br i1 %13, label %14, label %1
%.09 = phi i32 [ %n, %0 ], [ %12, %1 ]
%.08 = phi i8* [ %b, %0 ], [ %11, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i8* %.0, i64 1
+ %2 = getelementptr inbounds i8, i8* %.0, i64 1
%3 = load i8* %.0, align 1
%4 = sext i8 %3 to i64
- %5 = getelementptr inbounds i8* %c, i64 %4
+ %5 = getelementptr inbounds i8, i8* %c, i64 %4
%6 = load i8* %5, align 1
%7 = add i8 %3, 1
%wrap.4 = sext i8 %7 to i64
- %8 = getelementptr inbounds i8* %c, i64 %wrap.4
+ %8 = getelementptr inbounds i8, i8* %c, i64 %wrap.4
%9 = load i8* %8, align 1
store i8 %6, i8* %.08, align 1
- %10 = getelementptr inbounds i8* %.08, i64 1
+ %10 = getelementptr inbounds i8, i8* %.08, i64 1
store i8 %9, i8* %10, align 1
- %11 = getelementptr inbounds i8* %.08, i64 2
+ %11 = getelementptr inbounds i8, i8* %.08, i64 2
%12 = add nsw i32 %.09, -1
%13 = icmp eq i32 %12, 0
br i1 %13, label %14, label %1
%vecext5 = extractelement <8 x float> %v, i32 5
%vecext6 = extractelement <8 x float> %v, i32 6
%vecext7 = extractelement <8 x float> %v, i32 7
- %arrayidx1 = getelementptr inbounds float* %ptr, i64 1
- %arrayidx2 = getelementptr inbounds float* %ptr, i64 2
- %arrayidx3 = getelementptr inbounds float* %ptr, i64 3
- %arrayidx4 = getelementptr inbounds float* %ptr, i64 4
- %arrayidx5 = getelementptr inbounds float* %ptr, i64 5
- %arrayidx6 = getelementptr inbounds float* %ptr, i64 6
- %arrayidx7 = getelementptr inbounds float* %ptr, i64 7
+ %arrayidx1 = getelementptr inbounds float, float* %ptr, i64 1
+ %arrayidx2 = getelementptr inbounds float, float* %ptr, i64 2
+ %arrayidx3 = getelementptr inbounds float, float* %ptr, i64 3
+ %arrayidx4 = getelementptr inbounds float, float* %ptr, i64 4
+ %arrayidx5 = getelementptr inbounds float, float* %ptr, i64 5
+ %arrayidx6 = getelementptr inbounds float, float* %ptr, i64 6
+ %arrayidx7 = getelementptr inbounds float, float* %ptr, i64 7
store float %vecext0, float* %ptr, align 4
store float %vecext1, float* %arrayidx1, align 4
store float %vecext2, float* %arrayidx2, align 4
; We could merge stores (and loads) like this...
define void @merge_vec_element_and_scalar_load([6 x i64]* %array) {
- %idx0 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 0
- %idx1 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 1
- %idx4 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 4
- %idx5 = getelementptr inbounds [6 x i64]* %array, i64 0, i64 5
+ %idx0 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 0
+ %idx1 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 1
+ %idx4 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 4
+ %idx5 = getelementptr inbounds [6 x i64], [6 x i64]* %array, i64 0, i64 5
%a0 = load i64* %idx0, align 8
store i64 %a0, i64* %idx4, align 8
entry:
%x.i = alloca i8, align 1
%y.i = alloca [256 x i8], align 16
- %0 = getelementptr inbounds [256 x i8]* %y.i, i64 0, i64 0
+ %0 = getelementptr inbounds [256 x i8], [256 x i8]* %y.i, i64 0, i64 0
br label %for.body
for.body:
%a8 = bitcast [4 x %struct.Klass]* %a.i to i8*
%b8 = bitcast [4 x %struct.Klass]* %b.i to i8*
; I am used outside the lifetime zone below:
- %z2 = getelementptr inbounds [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0
+ %z2 = getelementptr inbounds [4 x %struct.Klass], [4 x %struct.Klass]* %a.i, i64 0, i64 0, i32 0
call void @llvm.lifetime.start(i64 -1, i8* %a8)
call void @llvm.lifetime.start(i64 -1, i8* %b8)
%z3 = load i32* %z2, align 16
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%CurPtr_addr.0.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %CurPtr_addr.0 = getelementptr i8* %CurPtr, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
+ %CurPtr_addr.0 = getelementptr i8, i8* %CurPtr, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
%tmp = load i8* %CurPtr_addr.0 ; <i8> [#uses=3]
%tmp2.rec = add i32 %CurPtr_addr.0.rec, 1 ; <i32> [#uses=1]
- %tmp2 = getelementptr i8* %CurPtr, i32 %tmp2.rec ; <i8*> [#uses=1]
+ %tmp2 = getelementptr i8, i8* %CurPtr, i32 %tmp2.rec ; <i8*> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
switch i8 %tmp, label %bb [
i8 0, label %bb7
entry:
%0 = load i32** @ptr, align 8
%1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 16), align 4
- %2 = getelementptr i32* %0, i64 16
+ %2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux02:
; LINUX-64-STATIC: movl src+64(%rip), [[EAX:%e.x]]
entry:
%0 = load i32** @ptr, align 8
%1 = load i32* getelementptr ([32 x i32]* @xsrc, i32 0, i64 16), align 4
- %2 = getelementptr i32* %0, i64 16
+ %2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qxx02:
; LINUX-64-STATIC: movl xsrc+64(%rip), [[EAX:%e.x]]
entry:
%0 = load i32** @dptr, align 8
%1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 16), align 32
- %2 = getelementptr i32* %0, i64 16
+ %2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux05:
; LINUX-64-STATIC: movl dsrc+64(%rip), [[EAX:%e.x]]
entry:
%0 = load i32** @lptr, align 8
%1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 16), align 4
- %2 = getelementptr i32* %0, i64 16
+ %2 = getelementptr i32, i32* %0, i64 16
store i32 %1, i32* %2, align 4
; LINUX-64-STATIC-LABEL: qux08:
; LINUX-64-STATIC: movl lsrc+64(%rip), [[EAX:%e.x]]
define void @ind00(i64 %i) nounwind {
entry:
- %0 = getelementptr [131072 x i32]* @src, i64 0, i64 %i
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %i
%1 = load i32* %0, align 4
- %2 = getelementptr [131072 x i32]* @dst, i64 0, i64 %i
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: ind00:
define void @ixd00(i64 %i) nounwind {
entry:
- %0 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %i
+ %0 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %i
%1 = load i32* %0, align 4
- %2 = getelementptr [32 x i32]* @xdst, i64 0, i64 %i
+ %2 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: ixd00:
define void @ind01(i64 %i) nounwind {
entry:
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %i
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %i
store i32* %0, i32** @ptr, align 8
ret void
; LINUX-64-STATIC-LABEL: ind01:
define void @ixd01(i64 %i) nounwind {
entry:
- %0 = getelementptr [32 x i32]* @xdst, i64 0, i64 %i
+ %0 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %i
store i32* %0, i32** @ptr, align 8
ret void
; LINUX-64-STATIC-LABEL: ixd01:
define void @ind02(i64 %i) nounwind {
entry:
%0 = load i32** @ptr, align 8
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %i
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %i
%2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
+ %3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: ind02:
define void @ixd02(i64 %i) nounwind {
entry:
%0 = load i32** @ptr, align 8
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %i
+ %1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %i
%2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
+ %3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: ixd02:
define void @ind03(i64 %i) nounwind {
entry:
- %0 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %i
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %i
%1 = load i32* %0, align 4
- %2 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %i
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: ind03:
define void @ind04(i64 %i) nounwind {
entry:
- %0 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %i
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %i
store i32* %0, i32** @dptr, align 8
ret void
; LINUX-64-STATIC-LABEL: ind04:
define void @ind05(i64 %i) nounwind {
entry:
%0 = load i32** @dptr, align 8
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %i
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %i
%2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
+ %3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: ind05:
define void @ind06(i64 %i) nounwind {
entry:
- %0 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %i
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %i
%1 = load i32* %0, align 4
- %2 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %i
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %i
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: ind06:
define void @ind07(i64 %i) nounwind {
entry:
- %0 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %i
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %i
store i32* %0, i32** @lptr, align 8
ret void
; LINUX-64-STATIC-LABEL: ind07:
define void @ind08(i64 %i) nounwind {
entry:
%0 = load i32** @lptr, align 8
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %i
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %i
%2 = load i32* %1, align 4
- %3 = getelementptr i32* %0, i64 %i
+ %3 = getelementptr i32, i32* %0, i64 %i
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: ind08:
define void @off00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
+ %3 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: off00:
define void @oxf00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %0
+ %1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [32 x i32]* @xdst, i64 0, i64 %0
+ %3 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: oxf00:
define void @off01(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %.sum
store i32* %0, i32** @ptr, align 8
ret void
; LINUX-64-STATIC-LABEL: off01:
define void @oxf01(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 16
- %0 = getelementptr [32 x i32]* @xdst, i64 0, i64 %.sum
+ %0 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %.sum
store i32* %0, i32** @ptr, align 8
ret void
; LINUX-64-STATIC-LABEL: oxf01:
entry:
%0 = load i32** @ptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr [131072 x i32]* @src, i64 0, i64 %1
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: off02:
entry:
%0 = load i32** @ptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %1
+ %2 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: oxf02:
define void @off03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
+ %3 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: off03:
define void @off04(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %.sum
store i32* %0, i32** @dptr, align 8
ret void
; LINUX-64-STATIC-LABEL: off04:
entry:
%0 = load i32** @dptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %1
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: off05:
define void @off06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
+ %3 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: off06:
define void @off07(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 16
- %0 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %.sum
store i32* %0, i32** @lptr, align 8
ret void
; LINUX-64-STATIC-LABEL: off07:
entry:
%0 = load i32** @lptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %1
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: off08:
entry:
%0 = load i32** @ptr, align 8
%1 = load i32* getelementptr ([131072 x i32]* @src, i32 0, i64 65536), align 4
- %2 = getelementptr i32* %0, i64 65536
+ %2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: moo02:
entry:
%0 = load i32** @dptr, align 8
%1 = load i32* getelementptr ([131072 x i32]* @dsrc, i32 0, i64 65536), align 32
- %2 = getelementptr i32* %0, i64 65536
+ %2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: moo05:
entry:
%0 = load i32** @lptr, align 8
%1 = load i32* getelementptr ([131072 x i32]* @lsrc, i32 0, i64 65536), align 4
- %2 = getelementptr i32* %0, i64 65536
+ %2 = getelementptr i32, i32* %0, i64 65536
store i32 %1, i32* %2, align 4
ret void
; LINUX-64-STATIC-LABEL: moo08:
define void @big00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
+ %3 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: big00:
define void @big01(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 65536
- %0 = getelementptr [131072 x i32]* @dst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %.sum
store i32* %0, i32** @ptr, align 8
ret void
; LINUX-64-STATIC-LABEL: big01:
entry:
%0 = load i32** @ptr, align 8
%1 = add i64 %i, 65536
- %2 = getelementptr [131072 x i32]* @src, i64 0, i64 %1
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: big02:
define void @big03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
+ %3 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: big03:
define void @big04(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 65536
- %0 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %.sum
store i32* %0, i32** @dptr, align 8
ret void
; LINUX-64-STATIC-LABEL: big04:
entry:
%0 = load i32** @dptr, align 8
%1 = add i64 %i, 65536
- %2 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %1
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: big05:
define void @big06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
%2 = load i32* %1, align 4
- %3 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
+ %3 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
store i32 %2, i32* %3, align 4
ret void
; LINUX-64-STATIC-LABEL: big06:
define void @big07(i64 %i) nounwind {
entry:
%.sum = add i64 %i, 65536
- %0 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %.sum
+ %0 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %.sum
store i32* %0, i32** @lptr, align 8
ret void
; LINUX-64-STATIC-LABEL: big07:
entry:
%0 = load i32** @lptr, align 8
%1 = add i64 %i, 65536
- %2 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %1
+ %2 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %1
%3 = load i32* %2, align 4
- %4 = getelementptr i32* %0, i64 %1
+ %4 = getelementptr i32, i32* %0, i64 %1
store i32 %3, i32* %4, align 4
ret void
; LINUX-64-STATIC-LABEL: big08:
define i8* @bat02() nounwind {
entry:
%0 = load i32** @ptr, align 8
- %1 = getelementptr i32* %0, i64 16
+ %1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: bat02:
define i8* @bat05() nounwind {
entry:
%0 = load i32** @dptr, align 8
- %1 = getelementptr i32* %0, i64 16
+ %1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: bat05:
define i8* @bat08() nounwind {
entry:
%0 = load i32** @lptr, align 8
- %1 = getelementptr i32* %0, i64 16
+ %1 = getelementptr i32, i32* %0, i64 16
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: bat08:
define i8* @bam02() nounwind {
entry:
%0 = load i32** @ptr, align 8
- %1 = getelementptr i32* %0, i64 65536
+ %1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: bam02:
define i8* @bam05() nounwind {
entry:
%0 = load i32** @dptr, align 8
- %1 = getelementptr i32* %0, i64 65536
+ %1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: bam05:
define i8* @bam08() nounwind {
entry:
%0 = load i32** @lptr, align 8
- %1 = getelementptr i32* %0, i64 65536
+ %1 = getelementptr i32, i32* %0, i64 65536
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: bam08:
define i8* @cat00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cat00:
define i8* @cxt00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %0
+ %1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cxt00:
define i8* @cat01(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cat01:
define i8* @cxt01(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [32 x i32]* @xdst, i64 0, i64 %0
+ %1 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cxt01:
entry:
%0 = load i32** @ptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr i32* %0, i64 %1
+ %2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
ret i8* %3
; LINUX-64-STATIC-LABEL: cat02:
define i8* @cat03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cat03:
define i8* @cat04(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cat04:
entry:
%0 = load i32** @dptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr i32* %0, i64 %1
+ %2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
ret i8* %3
; LINUX-64-STATIC-LABEL: cat05:
define i8* @cat06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cat06:
define i8* @cat07(i64 %i) nounwind {
entry:
%0 = add i64 %i, 16
- %1 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cat07:
entry:
%0 = load i32** @lptr, align 8
%1 = add i64 %i, 16
- %2 = getelementptr i32* %0, i64 %1
+ %2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
ret i8* %3
; LINUX-64-STATIC-LABEL: cat08:
define i8* @cam00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @src, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @src, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cam00:
define i8* @cxm00(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [32 x i32]* @xsrc, i64 0, i64 %0
+ %1 = getelementptr [32 x i32], [32 x i32]* @xsrc, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cxm00:
define i8* @cam01(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @dst, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cam01:
define i8* @cxm01(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [32 x i32]* @xdst, i64 0, i64 %0
+ %1 = getelementptr [32 x i32], [32 x i32]* @xdst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cxm01:
entry:
%0 = load i32** @ptr, align 8
%1 = add i64 %i, 65536
- %2 = getelementptr i32* %0, i64 %1
+ %2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
ret i8* %3
; LINUX-64-STATIC-LABEL: cam02:
define i8* @cam03(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @dsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @dsrc, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cam03:
define i8* @cam04(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @ddst, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @ddst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cam04:
entry:
%0 = load i32** @dptr, align 8
%1 = add i64 %i, 65536
- %2 = getelementptr i32* %0, i64 %1
+ %2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
ret i8* %3
; LINUX-64-STATIC-LABEL: cam05:
define i8* @cam06(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @lsrc, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @lsrc, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cam06:
define i8* @cam07(i64 %i) nounwind {
entry:
%0 = add i64 %i, 65536
- %1 = getelementptr [131072 x i32]* @ldst, i64 0, i64 %0
+ %1 = getelementptr [131072 x i32], [131072 x i32]* @ldst, i64 0, i64 %0
%2 = bitcast i32* %1 to i8*
ret i8* %2
; LINUX-64-STATIC-LABEL: cam07:
entry:
%0 = load i32** @lptr, align 8
%1 = add i64 %i, 65536
- %2 = getelementptr i32* %0, i64 %1
+ %2 = getelementptr i32, i32* %0, i64 %1
%3 = bitcast i32* %2 to i8*
ret i8* %3
; LINUX-64-STATIC-LABEL: cam08:
; %load1 = (load (and (shl %xor, 2), 1020))
%tmp1701 = shl i32 %xor, 2
%tmp1702 = and i32 %tmp1701, 1020
- %tmp1703 = getelementptr inbounds [1028 x i8]* null, i32 0, i32 %tmp1702
+ %tmp1703 = getelementptr inbounds [1028 x i8], [1028 x i8]* null, i32 0, i32 %tmp1702
%tmp1704 = bitcast i8* %tmp1703 to i32*
%load1 = load i32* %tmp1704, align 4
; %load2 = (load (shl (and %xor, 255), 2))
%tmp1698 = and i32 %xor, 255
%tmp1706 = shl i32 %tmp1698, 2
- %tmp1707 = getelementptr inbounds [1028 x i8]* null, i32 0, i32 %tmp1706
+ %tmp1707 = getelementptr inbounds [1028 x i8], [1028 x i8]* null, i32 0, i32 %tmp1706
%tmp1708 = bitcast i8* %tmp1707 to i32*
%load2 = load i32* %tmp1708, align 4
; references in MatchScope and RecordedNodes stale.
%tmp1711 = xor i32 %load1, %tmp1710
- %tmp1744 = getelementptr inbounds [256 x i32]* null, i32 0, i32 %tmp1711
+ %tmp1744 = getelementptr inbounds [256 x i32], [256 x i32]* null, i32 0, i32 %tmp1711
store i32 0, i32* %tmp1744, align 4
%tmp1745 = add i32 %tmp1694, 1
indirectbr i8* undef, [label %bb1756, label %bb1692]
define void @bar(%struct.Baz* byval nocapture readnone align 8 %x, ...) {
entry:
%va = alloca [1 x %struct.__va_list_tag], align 16
- %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0
%arraydecay1 = bitcast [1 x %struct.__va_list_tag]* %va to i8*
call void @llvm.va_start(i8* %arraydecay1)
- %overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i64 0, i64 0, i32 2
+ %overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0, i32 2
%overflow_arg_area = load i8** %overflow_arg_area_p, align 8
- %overflow_arg_area.next = getelementptr i8* %overflow_arg_area, i64 24
+ %overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i64 24
store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
; X32: leal 68(%esp), [[REG:%.*]]
; X32: movl [[REG]], 16(%esp)
define i8 @run_test(i8* %rd_p) {
entry:
- %incdec.ptr = getelementptr inbounds i8* %rd_p, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %rd_p, i64 1
%ld1 = load i8* %rd_p, align 1
- %incdec.ptr1 = getelementptr inbounds i8* %rd_p, i64 2
+ %incdec.ptr1 = getelementptr inbounds i8, i8* %rd_p, i64 2
%ld2 = load i8* %incdec.ptr, align 1
%x4 = xor i8 %ld1, -1
%x5 = xor i8 %ld2, -1
for.body:
%i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %array, i32 %i.06
+ %arrayidx = getelementptr inbounds i32, i32* %array, i32 %i.06
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.05
%inc = add nsw i32 %i.06, 1
entry:
%n = alloca %struct.node_t, align 4
call void bitcast (void (%struct.node_t*, ...)* @getnode to void (%struct.node_t*)*)(%struct.node_t* sret %n)
- %array = getelementptr inbounds %struct.node_t* %n, i32 0, i32 4
+ %array = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 4
%0 = load i32** %array, align 4
%cmp = icmp eq i32* %0, null
br i1 %cmp, label %if.end, label %land.lhs.true
land.lhs.true:
- %p = getelementptr inbounds %struct.node_t* %n, i32 0, i32 3
+ %p = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 3
%1 = load i32* %p, align 4
%cmp1 = icmp sgt i32 %1, 0
br i1 %cmp1, label %land.lhs.true2, label %if.end
land.lhs.true2:
- %k = getelementptr inbounds %struct.node_t* %n, i32 0, i32 0
+ %k = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 0
%2 = load i32* %k, align 4
%cmp3 = icmp sgt i32 %2, 0
br i1 %cmp3, label %land.lhs.true4, label %if.end
land.lhs.true4:
- %n5 = getelementptr inbounds %struct.node_t* %n, i32 0, i32 2
+ %n5 = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 2
%3 = load i32* %n5, align 4
%cmp6 = icmp sgt i32 %3, 0
br i1 %cmp6, label %land.lhs.true7, label %if.end
land.lhs.true7:
- %m = getelementptr inbounds %struct.node_t* %n, i32 0, i32 1
+ %m = getelementptr inbounds %struct.node_t, %struct.node_t* %n, i32 0, i32 1
%4 = load i32* %m, align 4
%cmp8 = icmp sgt i32 %4, 0
br i1 %cmp8, label %if.then, label %if.end
%5 = ptrtoint i32* %0 to i32
%add15 = add nsw i32 %1, %5
%6 = inttoptr i32 %add15 to i32*
- %arrayidx = getelementptr inbounds i32* %6, i32 %add12
+ %arrayidx = getelementptr inbounds i32, i32* %6, i32 %add12
%7 = load i32* %arrayidx, align 4
br label %if.end
%sum.010 = phi i32 [ 0, %for.body.lr.ph ], [ %add3, %for.body ]
%j.09 = phi i32 [ 0, %for.body.lr.ph ], [ %inc1, %for.body ]
%inc1 = add nsw i32 %j.09, 1
- %arrayidx = getelementptr inbounds i32* %array2, i32 %j.09
+ %arrayidx = getelementptr inbounds i32, i32* %array2, i32 %j.09
%1 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %1
store i32 %add, i32* %m, align 4
- %arrayidx2 = getelementptr inbounds i32* %array, i32 %inc1
+ %arrayidx2 = getelementptr inbounds i32, i32* %array, i32 %inc1
%2 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %2, %sum.010
%exitcond = icmp eq i32 %inc1, %n
%this.addr = alloca %struct.ValueWrapper.6*, align 8
store %struct.ValueWrapper.6* %this, %struct.ValueWrapper.6** %this.addr, align 8
%this1 = load %struct.ValueWrapper.6** %this.addr
- %value = getelementptr inbounds %struct.ValueWrapper.6* %this1, i32 0, i32 0
+ %value = getelementptr inbounds %struct.ValueWrapper.6, %struct.ValueWrapper.6* %this1, i32 0, i32 0
call void @_ZN12ValueWrapperIS_IS_IdEEEC2Ev(%struct.ValueWrapper.7* %value)
ret void
}
; CHECK: call
; CHECK-NOT: lea
%arr = alloca [1024 x i8], align 16
- %arr_ptr = getelementptr inbounds [1024 x i8]* %arr, i8 0, i8 0
+ %arr_ptr = getelementptr inbounds [1024 x i8], [1024 x i8]* %arr, i8 0, i8 0
call void @use_arr(i8* %arr_ptr)
ret void
}
define void @test(i8** %a, i64* %b, i64 %c, i64 %d) nounwind {
entry:
%ptrtoarg4 = load i8** %a, align 8
- %brglist1 = getelementptr i8** %a, i64 1
+ %brglist1 = getelementptr i8*, i8** %a, i64 1
%ptrtoarg25 = load i8** %brglist1, align 8
%0 = load i64* %b, align 8
%1 = mul i64 %0, 4
- %scevgep = getelementptr i8* %ptrtoarg25, i64 %1
+ %scevgep = getelementptr i8, i8* %ptrtoarg25, i64 %1
%2 = mul i64 %d, 4
br label %loop.cond
%17 = atomicrmw min i32 addrspace(1)* %8, i32 %extract15vector_func.i seq_cst
store <8 x i32> %vectorvector_func.i, <8 x i32> addrspace(1)* %asr.iv911, align 4
%asr.iv.next = add i64 %asr.iv, -1
- %scevgep10 = getelementptr i8* %asr.iv9, i64 32
+ %scevgep10 = getelementptr i8, i8* %asr.iv9, i64 32
%dim_0_vector_cmp.to.max.i = icmp eq i64 %asr.iv.next, 0
br i1 %dim_0_vector_cmp.to.max.i, label %scalarIf.i, label %vector_kernel_entry.i
%asr.iv12 = phi i64 [ %asr.iv.next13, %scalar_kernel_entry.i ], [ %22, %dim_0_pre_head.i ]
%23 = addrspacecast i8* %asr.iv6 to i32 addrspace(1)*
%24 = addrspacecast i8* %ptrtoarg4 to i32 addrspace(1)*
- %scevgep16 = getelementptr i32 addrspace(1)* %23, i64 %asr.iv12
+ %scevgep16 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
%25 = load i32 addrspace(1)* %scevgep16, align 4
%26 = atomicrmw min i32 addrspace(1)* %24, i32 %25 seq_cst
- %scevgep15 = getelementptr i32 addrspace(1)* %23, i64 %asr.iv12
+ %scevgep15 = getelementptr i32, i32 addrspace(1)* %23, i64 %asr.iv12
store i32 %21, i32 addrspace(1)* %scevgep15, align 4
%asr.iv.next13 = add i64 %asr.iv12, 1
%dim_0_cmp.to.max.i = icmp eq i64 %5, %asr.iv.next13
%27 = bitcast i8* %asr.iv6 to i1*
%28 = add i64 %iv, %d
store i64 %28, i64* %b, align 8
- %scevgep8 = getelementptr i1* %27, i64 %2
+ %scevgep8 = getelementptr i1, i1* %27, i64 %2
%29 = bitcast i1* %scevgep8 to i8*
br label %loop.cond
%indvar = phi i32 [ 0, %bb2.preheader.us ], [ %indvar.next, %bb1.us ] ; <i32> [#uses=2]
%tmp17 = add i32 %indvar, %tmp16 ; <i32> [#uses=1]
%tmp. = zext i32 %tmp17 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32* %2, i64 %tmp. ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32* %2, i64 %tmp. ; <i32*> [#uses=1]
%4 = load i32* %3, align 4 ; <i32> [#uses=2]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %b ; <i1> [#uses=1]
bb1: ; preds = %bb, %entry
%P.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
- %P.0 = getelementptr i8* %tmp1, i32 %P.0.rec ; <i8*> [#uses=3]
+ %P.0 = getelementptr i8, i8* %tmp1, i32 %P.0.rec ; <i8*> [#uses=3]
%tmp2 = load i8* %P.0, align 1 ; <i8> [#uses=1]
switch i8 %tmp2, label %bb4 [
i8 12, label %bb
bb4: ; preds = %bb1
%tmp3 = ptrtoint i8* %P.0 to i32 ; <i32> [#uses=1]
%tmp4 = sub i32 %tmp3, %tmp ; <i32> [#uses=1]
- %tmp5 = getelementptr [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
+ %tmp5 = getelementptr [100 x i32], [100 x i32]* @A, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
store i32 4, i32* %tmp5, align 4
ret i8* %P.0
}
; CHECK-NOT: phi
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
%tmp = add nsw i64 %indvars.iv, -1
- %arrayidx = getelementptr inbounds double* %b, i64 %tmp
+ %arrayidx = getelementptr inbounds double, double* %b, i64 %tmp
%tmp1 = load double* %arrayidx, align 8
; The induction variable should carry the scaling factor: 1.
; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1
%indvars.iv.next = add i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
+ %arrayidx2 = getelementptr inbounds double, double* %c, i64 %indvars.iv.next
%tmp2 = load double* %arrayidx2, align 8
%mul = fmul double %tmp1, %tmp2
- %arrayidx4 = getelementptr inbounds double* %a, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %a, i64 %indvars.iv
store double %mul, double* %arrayidx4, align 8
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; Comparison should be 19 * 1 = 19.
allocas:
%ptrcast.i33.i = bitcast [0 x float]* %aFOO to i32*
%val.i34.i = load i32* %ptrcast.i33.i, align 4
- %ptroffset.i22.i992 = getelementptr [0 x float]* %aFOO, i64 0, i64 1
+ %ptroffset.i22.i992 = getelementptr [0 x float], [0 x float]* %aFOO, i64 0, i64 1
%ptrcast.i23.i = bitcast float* %ptroffset.i22.i992 to i32*
%val.i24.i = load i32* %ptrcast.i23.i, align 4
%updatedret.i30.i = insertelement <8 x i32> undef, i32 %val.i34.i, i32 1
br i1 undef, label %__load_and_broadcast_32.exit1249, label %load.i1247
load.i1247: ; preds = %for_exit499
- %ptr1227 = getelementptr [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1
+ %ptr1227 = getelementptr [18 x [18 x float]], [18 x [18 x float]]* %udx495, i64 0, i64 1, i64 1
%ptr.i1237 = bitcast float* %ptr1227 to i32*
%val.i1238 = load i32* %ptr.i1237, align 4
%ret6.i1245 = insertelement <8 x i32> undef, i32 %val.i1238, i32 6
; PR15462
define void @t9(i64* %p) {
store i64 0, i64* %p
- %q = getelementptr i64* %p, i64 1
+ %q = getelementptr i64, i64* %p, i64 1
store i64 0, i64* %q
- %r = getelementptr i64* %p, i64 2
+ %r = getelementptr i64, i64* %p, i64 2
store i64 0, i64* %r
- %s = getelementptr i64* %p, i64 3
+ %s = getelementptr i64, i64* %p, i64 3
store i64 0, i64* %s
ret void
; CHECK-NOT: vmovaps
; CHECK: vinsertf128
entry:
- %add.ptr = getelementptr inbounds float* %f, i64 4
+ %add.ptr = getelementptr inbounds float, float* %f, i64 4
%0 = bitcast float* %add.ptr to <4 x float>*
%1 = load <4 x float>* %0, align 16
%2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
; CHECK-NOT: vmovups
; CHECK: vinsertf128
entry:
- %add.ptr = getelementptr inbounds float* %f, i64 4
+ %add.ptr = getelementptr inbounds float, float* %f, i64 4
%0 = bitcast float* %add.ptr to <4 x float>*
%1 = load <4 x float>* %0, align 8
%2 = tail call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %1, i8 1)
;; Try to match a bit more of the instr, since we need the load's offset.
; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
; CHECK-NEXT: ret
- %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
+ %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
ret <4 x float> %3
; CHECK-NOT: mov
; CHECK: insertps $48
; CHECK-NEXT: ret
- %1 = getelementptr inbounds float* %fb, i64 %index
+ %1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
; CHECK: vaddps
; CHECK: vaddps
; CHECK-NEXT: ret
- %1 = getelementptr inbounds float* %fb, i64 %index
+ %1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
define <2 x double> @test_rsqrt28_sd_maskz_mem_offset(<2 x double> %a0, double* %ptr ) {
; CHECK: vrsqrt28sd 144(%rdi), %xmm0, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xcd,0x47,0x12]
- %ptr1 = getelementptr double* %ptr, i32 18
+ %ptr1 = getelementptr double, double* %ptr, i32 18
%mem = load double * %ptr1, align 8
%mem_v = insertelement <2 x double> undef, double %mem, i32 0
%res = call <2 x double> @llvm.x86.avx512.rsqrt28.sd(<2 x double> %a0, <2 x double> %mem_v, <2 x double> zeroinitializer, i8 7, i32 4) ;
; CHECK: %then5
entry:
- %gep1 = getelementptr i32* %a, i32 1
+ %gep1 = getelementptr i32, i32* %a, i32 1
%val1 = load i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then1, label %else1, !prof !0
br label %else1
else1:
- %gep2 = getelementptr i32* %a, i32 2
+ %gep2 = getelementptr i32, i32* %a, i32 2
%val2 = load i32* %gep2
%cond2 = icmp ugt i32 %val2, 2
br i1 %cond2, label %then2, label %else2, !prof !0
br label %else2
else2:
- %gep3 = getelementptr i32* %a, i32 3
+ %gep3 = getelementptr i32, i32* %a, i32 3
%val3 = load i32* %gep3
%cond3 = icmp ugt i32 %val3, 3
br i1 %cond3, label %then3, label %else3, !prof !0
br label %else3
else3:
- %gep4 = getelementptr i32* %a, i32 4
+ %gep4 = getelementptr i32, i32* %a, i32 4
%val4 = load i32* %gep4
%cond4 = icmp ugt i32 %val4, 4
br i1 %cond4, label %then4, label %else4, !prof !0
br label %else4
else4:
- %gep5 = getelementptr i32* %a, i32 3
+ %gep5 = getelementptr i32, i32* %a, i32 3
%val5 = load i32* %gep5
%cond5 = icmp ugt i32 %val5, 3
br i1 %cond5, label %then5, label %exit, !prof !0
br label %body3
body3:
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
ret i32 -3
body4:
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
br i1 %exitcond, label %exit, label %body1
body1:
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%bailcond1 = icmp eq i32 %sum, 42
body0:
%iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
%base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%bailcond1 = icmp eq i32 %sum, 42
body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%0 = load i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
loop.body.1:
%iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
%bidx = load i32* %arrayidx
br label %inner.loop.body
%inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
%base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
%scaled_idx = mul i32 %bidx, %iv
- %inner.arrayidx = getelementptr inbounds i32* %b, i32 %scaled_idx
+ %inner.arrayidx = getelementptr inbounds i32, i32* %b, i32 %scaled_idx
%0 = load i32* %inner.arrayidx
%sum = add nsw i32 %0, %base
%inner.next = add i32 %iv, 1
br i1 undef, label %loop.body4, label %loop.body3
loop.body3:
- %ptr1 = getelementptr inbounds i32* %val0, i32 0
+ %ptr1 = getelementptr inbounds i32, i32* %val0, i32 0
%castptr1 = bitcast i32* %ptr1 to i32**
%val1 = load i32** %castptr1, align 4
br label %loop.inner1.begin
br i1 %comp1, label %loop.inner1.end, label %loop.body4
loop.inner1.end:
- %ptr2 = getelementptr inbounds i32* %valphi, i32 0
+ %ptr2 = getelementptr inbounds i32, i32* %valphi, i32 0
%castptr2 = bitcast i32* %ptr2 to i32**
%val2 = load i32** %castptr2, align 4
br label %loop.inner1.begin
br label %loop3
loop2b:
- %gep = getelementptr inbounds i32* %var.phi, i32 0
+ %gep = getelementptr inbounds i32, i32* %var.phi, i32 0
%next.ptr = bitcast i32* %gep to i32**
store i32* %next.phi, i32** %next.ptr
br label %loop3
entry:
%shr = ashr i32 %n, 1
%add = add nsw i32 %shr, 1
- %arrayidx3 = getelementptr inbounds double* %ra, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %ra, i64 1
br label %for.cond
for.cond:
if.then:
%dec = add nsw i32 %l.0, -1
%idxprom = sext i32 %dec to i64
- %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
+ %arrayidx = getelementptr inbounds double, double* %ra, i64 %idxprom
%0 = load double* %arrayidx, align 8
br label %if.end10
if.else:
%idxprom1 = sext i32 %ir.0 to i64
- %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds double, double* %ra, i64 %idxprom1
%1 = load double* %arrayidx2, align 8
%2 = load double* %arrayidx3, align 8
store double %2, double* %arrayidx2, align 8
land.lhs.true:
%idxprom13 = sext i32 %j.0 to i64
- %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
+ %arrayidx14 = getelementptr inbounds double, double* %ra, i64 %idxprom13
%3 = load double* %arrayidx14, align 8
%add15 = add nsw i32 %j.0, 1
%idxprom16 = sext i32 %add15 to i64
- %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
+ %arrayidx17 = getelementptr inbounds double, double* %ra, i64 %idxprom16
%4 = load double* %arrayidx17, align 8
%cmp18 = fcmp olt double %3, %4
br i1 %cmp18, label %if.then19, label %if.end20
if.end20:
%j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
%idxprom21 = sext i32 %j.1 to i64
- %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
+ %arrayidx22 = getelementptr inbounds double, double* %ra, i64 %idxprom21
%5 = load double* %arrayidx22, align 8
%cmp23 = fcmp olt double %rra.0, %5
br i1 %cmp23, label %if.then24, label %while.cond
if.then24:
%idxprom27 = sext i32 %j.0.ph.in to i64
- %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
+ %arrayidx28 = getelementptr inbounds double, double* %ra, i64 %idxprom27
store double %5, double* %arrayidx28, align 8
br label %while.cond.outer
while.end:
%idxprom33 = sext i32 %j.0.ph.in to i64
- %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
+ %arrayidx34 = getelementptr inbounds double, double* %ra, i64 %idxprom33
store double %rra.0, double* %arrayidx34, align 8
br label %for.cond
}
; CHECK: %then
entry:
- %gep1 = getelementptr i32* %a, i32 1
+ %gep1 = getelementptr i32, i32* %a, i32 1
%val1 = load i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
br label %exit
else:
- %gep2 = getelementptr i32* %a, i32 2
+ %gep2 = getelementptr i32, i32* %a, i32 2
%val2 = load i32* %gep2
br label %exit
for.body3:
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @v, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @v, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%conv = sitofp i32 %0 to double
- %arrayidx5 = getelementptr inbounds [1024 x double]* @x, i64 0, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds [1024 x double], [1024 x double]* @x, i64 0, i64 %indvars.iv
%1 = load double* %arrayidx5, align 8
%mul = fmul double %conv, %1
- %arrayidx7 = getelementptr inbounds [1024 x double]* @y, i64 0, i64 %indvars.iv
+ %arrayidx7 = getelementptr inbounds [1024 x double], [1024 x double]* @y, i64 0, i64 %indvars.iv
%2 = load double* %arrayidx7, align 8
%mul8 = fmul double %mul, %2
- %arrayidx10 = getelementptr inbounds [1024 x double]* @z, i64 0, i64 %indvars.iv
+ %arrayidx10 = getelementptr inbounds [1024 x double], [1024 x double]* @z, i64 0, i64 %indvars.iv
%3 = load double* %arrayidx10, align 8
%mul11 = fmul double %mul8, %3
- %arrayidx13 = getelementptr inbounds [1024 x double]* @w, i64 0, i64 %indvars.iv
+ %arrayidx13 = getelementptr inbounds [1024 x double], [1024 x double]* @w, i64 0, i64 %indvars.iv
store double %mul11, double* %arrayidx13, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1024
; CHECK: andq $-64, %rsp
%s1 = alloca %struct.S ; <%struct.S*> [#uses=4]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = getelementptr inbounds %struct.S* %s1, i32 0, i32 0 ; <i32*> [#uses=1]
+ %0 = getelementptr inbounds %struct.S, %struct.S* %s1, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %0, align 4
call void @aligned_func(%struct.S* byval align 64 %s1) nounwind
br label %return
define i64 @f(%struct.s* byval %a) {
entry:
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 0
+ %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
%tmp3 = load i64* %tmp2, align 8
ret i64 %tmp3
}
define void @g(i64 %a, i64 %b, i64 %c) {
entry:
%d = alloca %struct.s, align 16
- %tmp = getelementptr %struct.s* %d, i32 0, i32 0
+ %tmp = getelementptr %struct.s, %struct.s* %d, i32 0, i32 0
store i64 %a, i64* %tmp, align 16
- %tmp2 = getelementptr %struct.s* %d, i32 0, i32 1
+ %tmp2 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 1
store i64 %b, i64* %tmp2, align 16
- %tmp4 = getelementptr %struct.s* %d, i32 0, i32 2
+ %tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2
store i64 %c, i64* %tmp4, align 16
call void @f( %struct.s*byval %d )
call void @f( %struct.s*byval %d )
define void @g(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) nounwind {
entry:
%d = alloca %struct.s, align 16
- %tmp = getelementptr %struct.s* %d, i32 0, i32 0
+ %tmp = getelementptr %struct.s, %struct.s* %d, i32 0, i32 0
store i32 %a1, i32* %tmp, align 16
- %tmp2 = getelementptr %struct.s* %d, i32 0, i32 1
+ %tmp2 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 1
store i32 %a2, i32* %tmp2, align 16
- %tmp4 = getelementptr %struct.s* %d, i32 0, i32 2
+ %tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2
store i32 %a3, i32* %tmp4, align 16
- %tmp6 = getelementptr %struct.s* %d, i32 0, i32 3
+ %tmp6 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 3
store i32 %a4, i32* %tmp6, align 16
- %tmp8 = getelementptr %struct.s* %d, i32 0, i32 4
+ %tmp8 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 4
store i32 %a5, i32* %tmp8, align 16
- %tmp10 = getelementptr %struct.s* %d, i32 0, i32 5
+ %tmp10 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 5
store i32 %a6, i32* %tmp10, align 16
call void @f( %struct.s* byval %d)
call void @f( %struct.s* byval %d)
i16 signext %a4, i16 signext %a5, i16 signext %a6) nounwind {
entry:
%a = alloca %struct.s, align 16
- %tmp = getelementptr %struct.s* %a, i32 0, i32 0
+ %tmp = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
store i16 %a1, i16* %tmp, align 16
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
+ %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
store i16 %a2, i16* %tmp2, align 16
- %tmp4 = getelementptr %struct.s* %a, i32 0, i32 2
+ %tmp4 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 2
store i16 %a3, i16* %tmp4, align 16
- %tmp6 = getelementptr %struct.s* %a, i32 0, i32 3
+ %tmp6 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 3
store i16 %a4, i16* %tmp6, align 16
- %tmp8 = getelementptr %struct.s* %a, i32 0, i32 4
+ %tmp8 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 4
store i16 %a5, i16* %tmp8, align 16
- %tmp10 = getelementptr %struct.s* %a, i32 0, i32 5
+ %tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5
store i16 %a6, i16* %tmp10, align 16
call void @f( %struct.s* byval %a )
call void @f( %struct.s* byval %a )
i8 signext %a4, i8 signext %a5, i8 signext %a6) {
entry:
%a = alloca %struct.s
- %tmp = getelementptr %struct.s* %a, i32 0, i32 0
+ %tmp = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
store i8 %a1, i8* %tmp, align 8
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
+ %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
store i8 %a2, i8* %tmp2, align 8
- %tmp4 = getelementptr %struct.s* %a, i32 0, i32 2
+ %tmp4 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 2
store i8 %a3, i8* %tmp4, align 8
- %tmp6 = getelementptr %struct.s* %a, i32 0, i32 3
+ %tmp6 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 3
store i8 %a4, i8* %tmp6, align 8
- %tmp8 = getelementptr %struct.s* %a, i32 0, i32 4
+ %tmp8 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 4
store i8 %a5, i8* %tmp8, align 8
- %tmp10 = getelementptr %struct.s* %a, i32 0, i32 5
+ %tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5
store i8 %a6, i8* %tmp10, align 8
call void @f( %struct.s* byval %a )
call void @f( %struct.s* byval %a )
; CHECK: rep;movsl
; CHECK: movl $1, (%esp)
%s = alloca %struct.S ; <%struct.S*> [#uses=2]
- %tmp15 = getelementptr %struct.S* %s, i32 0, i32 0 ; <<2 x i64>*> [#uses=1]
+ %tmp15 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <<2 x i64>*> [#uses=1]
store <2 x i64> < i64 8589934595, i64 1 >, <2 x i64>* %tmp15, align 16
call void @t( i32 1, %struct.S* byval %s ) nounwind
ret i32 0
; CHECK: jmp
; CHECK: popl
entry:
- %tmp2 = getelementptr %struct.decode_t* %decode, i32 0, i32 4 ; <i16*> [#uses=1]
+ %tmp2 = getelementptr %struct.decode_t, %struct.decode_t* %decode, i32 0, i32 4 ; <i16*> [#uses=1]
%tmp23 = bitcast i16* %tmp2 to i32* ; <i32*> [#uses=1]
%tmp4 = load i32* %tmp23 ; <i32> [#uses=1]
%tmp514 = lshr i32 %tmp4, 24 ; <i32> [#uses=1]
define void @cftx020(double* nocapture %a) {
entry:
%0 = load double* %a, align 8
- %arrayidx1 = getelementptr inbounds double* %a, i64 2
+ %arrayidx1 = getelementptr inbounds double, double* %a, i64 2
%1 = load double* %arrayidx1, align 8
- %arrayidx2 = getelementptr inbounds double* %a, i64 1
+ %arrayidx2 = getelementptr inbounds double, double* %a, i64 1
%2 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %a, i64 3
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 3
%3 = load double* %arrayidx3, align 8
%4 = insertelement <2 x double> undef, double %0, i32 0
%5 = insertelement <2 x double> %4, double %3, i32 1
bb2: ; preds = %bb2, %bb2.outer
%indvar = phi i64 [ 0, %bb2.outer ], [ %indvar.next, %bb2 ] ; <i64> [#uses=3]
%indvar16 = trunc i64 %indvar to i16 ; <i16> [#uses=1]
- %ctg2 = getelementptr i8* %out, i64 %tmp21 ; <i8*> [#uses=1]
+ %ctg2 = getelementptr i8, i8* %out, i64 %tmp21 ; <i8*> [#uses=1]
%tmp22 = ptrtoint i8* %ctg2 to i64 ; <i64> [#uses=1]
%tmp24 = sub i64 %tmp22, %indvar ; <i64> [#uses=1]
%out_addr.0.reg2mem.0 = inttoptr i64 %tmp24 to i8* ; <i8*> [#uses=1]
%3 = add i32 %1, %2 ; <i32> [#uses=9]
%4 = add i32 %3, -481 ; <i32> [#uses=1]
%5 = zext i32 %4 to i64 ; <i64> [#uses=1]
- %6 = getelementptr i8* %in, i64 %5 ; <i8*> [#uses=1]
+ %6 = getelementptr i8, i8* %in, i64 %5 ; <i8*> [#uses=1]
%7 = load i8* %6, align 1 ; <i8> [#uses=1]
%8 = add i32 %3, -480 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr i8* %in, i64 %9 ; <i8*> [#uses=1]
+ %10 = getelementptr i8, i8* %in, i64 %9 ; <i8*> [#uses=1]
%11 = load i8* %10, align 1 ; <i8> [#uses=1]
%12 = add i32 %3, -479 ; <i32> [#uses=1]
%13 = zext i32 %12 to i64 ; <i64> [#uses=1]
- %14 = getelementptr i8* %in, i64 %13 ; <i8*> [#uses=1]
+ %14 = getelementptr i8, i8* %in, i64 %13 ; <i8*> [#uses=1]
%15 = load i8* %14, align 1 ; <i8> [#uses=1]
%16 = add i32 %3, -1 ; <i32> [#uses=1]
%17 = zext i32 %16 to i64 ; <i64> [#uses=1]
- %18 = getelementptr i8* %in, i64 %17 ; <i8*> [#uses=1]
+ %18 = getelementptr i8, i8* %in, i64 %17 ; <i8*> [#uses=1]
%19 = load i8* %18, align 1 ; <i8> [#uses=1]
%20 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %21 = getelementptr i8* %in, i64 %20 ; <i8*> [#uses=1]
+ %21 = getelementptr i8, i8* %in, i64 %20 ; <i8*> [#uses=1]
%22 = load i8* %21, align 1 ; <i8> [#uses=1]
%23 = add i32 %3, 1 ; <i32> [#uses=1]
%24 = zext i32 %23 to i64 ; <i64> [#uses=1]
- %25 = getelementptr i8* %in, i64 %24 ; <i8*> [#uses=1]
+ %25 = getelementptr i8, i8* %in, i64 %24 ; <i8*> [#uses=1]
%26 = load i8* %25, align 1 ; <i8> [#uses=1]
%27 = add i32 %3, 481 ; <i32> [#uses=1]
%28 = zext i32 %27 to i64 ; <i64> [#uses=1]
- %29 = getelementptr i8* %in, i64 %28 ; <i8*> [#uses=1]
+ %29 = getelementptr i8, i8* %in, i64 %28 ; <i8*> [#uses=1]
%30 = load i8* %29, align 1 ; <i8> [#uses=1]
%31 = add i32 %3, 480 ; <i32> [#uses=1]
%32 = zext i32 %31 to i64 ; <i64> [#uses=1]
- %33 = getelementptr i8* %in, i64 %32 ; <i8*> [#uses=1]
+ %33 = getelementptr i8, i8* %in, i64 %32 ; <i8*> [#uses=1]
%34 = load i8* %33, align 1 ; <i8> [#uses=1]
%35 = add i32 %3, 479 ; <i32> [#uses=1]
%36 = zext i32 %35 to i64 ; <i64> [#uses=1]
- %37 = getelementptr i8* %in, i64 %36 ; <i8*> [#uses=1]
+ %37 = getelementptr i8, i8* %in, i64 %36 ; <i8*> [#uses=1]
%38 = load i8* %37, align 1 ; <i8> [#uses=1]
%39 = add i8 %11, %7 ; <i8> [#uses=1]
%40 = add i8 %39, %15 ; <i8> [#uses=1]
define i32 @test6() nounwind align 2 {
%A = alloca {i64, i64}, align 8
- %B = getelementptr inbounds {i64, i64}* %A, i64 0, i32 1
+ %B = getelementptr inbounds {i64, i64}, {i64, i64}* %A, i64 0, i32 1
%C = load i64* %B
%D = icmp eq i64 %C, 0
br i1 %D, label %T, label %F
bb4: ; preds = %bb7.backedge, %entry
%indvar = phi i32 [ %indvar.next, %bb7.backedge ], [ 0, %entry ] ; <i32> [#uses=2]
%scevgep24.sum = sub i32 undef, %indvar ; <i32> [#uses=2]
- %scevgep25 = getelementptr i32* %0, i32 %scevgep24.sum ; <i32*> [#uses=1]
- %scevgep27 = getelementptr i32* undef, i32 %scevgep24.sum ; <i32*> [#uses=1]
+ %scevgep25 = getelementptr i32, i32* %0, i32 %scevgep24.sum ; <i32*> [#uses=1]
+ %scevgep27 = getelementptr i32, i32* undef, i32 %scevgep24.sum ; <i32*> [#uses=1]
%1 = load i32* %scevgep27, align 4 ; <i32> [#uses=0]
br i1 undef, label %bb7.backedge, label %bb5
bb: ; preds = %bb, %entry
%neuron.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%thesum.0 = phi float [ 0.000000e+00, %entry ], [ %tmp6, %bb ] ; <float> [#uses=1]
- %tmp2 = getelementptr i32* %source, i32 %neuron.0 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr i32, i32* %source, i32 %neuron.0 ; <i32*> [#uses=1]
%tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
%tmp34 = sitofp i32 %tmp3 to float ; <float> [#uses=1]
%tmp6 = fadd float %tmp34, %thesum.0 ; <float> [#uses=2]
bb: ; preds = %bb, %bb.preheader
%i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%res.0.reg2mem.0 = phi float [ 0.000000e+00, %bb.preheader ], [ %tmp14, %bb ] ; <float> [#uses=1]
- %tmp3 = getelementptr i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr i32, i32* %x, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
%tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
%tmp45 = sitofp i32 %tmp4 to float ; <float> [#uses=1]
- %tmp8 = getelementptr float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
+ %tmp8 = getelementptr float, float* %y, i32 %i.0.reg2mem.0 ; <float*> [#uses=1]
%tmp9 = load float* %tmp8, align 4 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %tmp45 ; <float> [#uses=1]
%tmp14 = fadd float %tmp11, %res.0.reg2mem.0 ; <float> [#uses=2]
%0 = tail call i32 @"\01_clock$UNIX2003"() nounwind ; <i32> [#uses=1]
%1 = uitofp i32 %0 to double ; <double> [#uses=1]
%2 = fdiv double %1, 1.000000e+06 ; <double> [#uses=1]
- %3 = getelementptr %struct.lua_State* %L, i32 0, i32 4 ; <%struct.TValue**> [#uses=3]
+ %3 = getelementptr %struct.lua_State, %struct.lua_State* %L, i32 0, i32 4 ; <%struct.TValue**> [#uses=3]
%4 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=2]
- %5 = getelementptr %struct.TValue* %4, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %5 = getelementptr %struct.TValue, %struct.TValue* %4, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
store double %2, double* %5, align 4
- %6 = getelementptr %struct.TValue* %4, i32 0, i32 1 ; <i32*> [#uses=1]
+ %6 = getelementptr %struct.TValue, %struct.TValue* %4, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 3, i32* %6, align 4
%7 = load %struct.TValue** %3, align 4 ; <%struct.TValue*> [#uses=1]
- %8 = getelementptr %struct.TValue* %7, i32 1 ; <%struct.TValue*> [#uses=1]
+ %8 = getelementptr %struct.TValue, %struct.TValue* %7, i32 1 ; <%struct.TValue*> [#uses=1]
store %struct.TValue* %8, %struct.TValue** %3, align 4
ret i32 1
}
define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind ssp {
entry:
%0 = load i32* %rk, align 4 ; <i32> [#uses=1]
- %1 = getelementptr i32* %rk, i64 1 ; <i32*> [#uses=1]
+ %1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
%2 = load i32* %1, align 4 ; <i32> [#uses=1]
%tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
%tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
%rk26 = bitcast i32* %rk to i8* ; <i8*> [#uses=6]
%3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %5 = getelementptr [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
+ %5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
%6 = load i32* %5, align 4 ; <i32> [#uses=1]
%7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
%8 = and i32 %7, 255 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
+ %10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
%11 = load i32* %10, align 4 ; <i32> [#uses=1]
%ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
- %12 = getelementptr i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
+ %12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
%13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
%14 = load i32* %13, align 4 ; <i32> [#uses=1]
%15 = xor i32 %11, %6 ; <i32> [#uses=1]
%16 = xor i32 %15, %14 ; <i32> [#uses=3]
%17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
%18 = zext i32 %17 to i64 ; <i64> [#uses=1]
- %19 = getelementptr [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
+ %19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
%20 = load i32* %19, align 4 ; <i32> [#uses=1]
%21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
%22 = zext i32 %21 to i64 ; <i64> [#uses=1]
- %23 = getelementptr [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
+ %23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
%24 = load i32* %23, align 4 ; <i32> [#uses=1]
%ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
- %25 = getelementptr i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
+ %25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
%26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
%27 = load i32* %26, align 4 ; <i32> [#uses=1]
%28 = xor i32 %24, %20 ; <i32> [#uses=1]
%29 = xor i32 %28, %27 ; <i32> [#uses=4]
%30 = lshr i32 %16, 24 ; <i32> [#uses=1]
%31 = zext i32 %30 to i64 ; <i64> [#uses=1]
- %32 = getelementptr [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
+ %32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
%33 = load i32* %32, align 4 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
bb1: ; preds = %bb
%ctg2.sum31 = add i64 %tmp18, 16 ; <i64> [#uses=1]
- %34 = getelementptr i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
+ %34 = getelementptr i8, i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
%35 = bitcast i8* %34 to i32* ; <i32*> [#uses=1]
%36 = lshr i32 %29, 16 ; <i32> [#uses=1]
%37 = and i32 %36, 255 ; <i32> [#uses=1]
%38 = zext i32 %37 to i64 ; <i64> [#uses=1]
- %39 = getelementptr [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
+ %39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
%40 = load i32* %39, align 4 ; <i32> [#uses=1]
%41 = load i32* %35, align 4 ; <i32> [#uses=1]
%42 = xor i32 %40, %33 ; <i32> [#uses=1]
%43 = xor i32 %42, %41 ; <i32> [#uses=1]
%44 = lshr i32 %29, 24 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
+ %46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
%47 = load i32* %46, align 4 ; <i32> [#uses=1]
%48 = and i32 %16, 255 ; <i32> [#uses=1]
%49 = zext i32 %48 to i64 ; <i64> [#uses=1]
- %50 = getelementptr [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
+ %50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
%51 = load i32* %50, align 4 ; <i32> [#uses=1]
%ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
- %52 = getelementptr i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
+ %52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
%53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
%54 = load i32* %53, align 4 ; <i32> [#uses=1]
%55 = xor i32 %51, %47 ; <i32> [#uses=1]
bb2: ; preds = %bb
%tmp10 = shl i64 %tmp.16, 4 ; <i64> [#uses=2]
%ctg2.sum = add i64 %tmp10, 16 ; <i64> [#uses=1]
- %tmp1213 = getelementptr i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
+ %tmp1213 = getelementptr i8, i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
%57 = bitcast i8* %tmp1213 to i32* ; <i32*> [#uses=1]
%58 = and i32 %33, -16777216 ; <i32> [#uses=1]
%59 = lshr i32 %29, 16 ; <i32> [#uses=1]
%60 = and i32 %59, 255 ; <i32> [#uses=1]
%61 = zext i32 %60 to i64 ; <i64> [#uses=1]
- %62 = getelementptr [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
+ %62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
%63 = load i32* %62, align 4 ; <i32> [#uses=1]
%64 = and i32 %63, 16711680 ; <i32> [#uses=1]
%65 = or i32 %64, %58 ; <i32> [#uses=1]
%67 = xor i32 %65, %66 ; <i32> [#uses=2]
%68 = lshr i32 %29, 8 ; <i32> [#uses=1]
%69 = zext i32 %68 to i64 ; <i64> [#uses=1]
- %70 = getelementptr [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
+ %70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
%71 = load i32* %70, align 4 ; <i32> [#uses=1]
%72 = and i32 %71, -16777216 ; <i32> [#uses=1]
%73 = and i32 %16, 255 ; <i32> [#uses=1]
%74 = zext i32 %73 to i64 ; <i64> [#uses=1]
- %75 = getelementptr [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
+ %75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
%76 = load i32* %75, align 4 ; <i32> [#uses=1]
%77 = and i32 %76, 16711680 ; <i32> [#uses=1]
%78 = or i32 %77, %72 ; <i32> [#uses=1]
%ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
- %79 = getelementptr i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
+ %79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
%80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
%81 = load i32* %80, align 4 ; <i32> [#uses=1]
%82 = xor i32 %78, %81 ; <i32> [#uses=2]
store i8 %84, i8* %out, align 1
%85 = lshr i32 %67, 16 ; <i32> [#uses=1]
%86 = trunc i32 %85 to i8 ; <i8> [#uses=1]
- %87 = getelementptr i8* %out, i64 1 ; <i8*> [#uses=1]
+ %87 = getelementptr i8, i8* %out, i64 1 ; <i8*> [#uses=1]
store i8 %86, i8* %87, align 1
- %88 = getelementptr i8* %out, i64 4 ; <i8*> [#uses=1]
+ %88 = getelementptr i8, i8* %out, i64 4 ; <i8*> [#uses=1]
%89 = lshr i32 %82, 24 ; <i32> [#uses=1]
%90 = trunc i32 %89 to i8 ; <i8> [#uses=1]
store i8 %90, i8* %88, align 1
%91 = lshr i32 %82, 16 ; <i32> [#uses=1]
%92 = trunc i32 %91 to i8 ; <i8> [#uses=1]
- %93 = getelementptr i8* %out, i64 5 ; <i8*> [#uses=1]
+ %93 = getelementptr i8, i8* %out, i64 5 ; <i8*> [#uses=1]
store i8 %92, i8* %93, align 1
ret void
}
define i8 @twoArgsNoPromotion(i32 %arg1, i32 %arg2, i8* %base) {
%add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
define i8 @noPromotion(i32 %arg1, i32 %arg2, i8* %base) {
%add = add i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK-LABEL: @oneArgPromotion
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotion(i32 %arg1, i8* %base) {
%add = add nsw i32 %arg1, 1
%sextadd = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK-LABEL: @oneArgPromotionZExt
; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionZExt(i8 %arg1, i8* %base) {
%zext = zext i8 %arg1 to i32
%add = add nsw i32 %zext, 1
%sextadd = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[ZEXT:%[a-zA-Z_0-9-]+]] = zext i16 undef to i32
; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i32 [[ZEXT]] to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[SEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionCstZExt(i8* %base) {
%cst = zext i16 undef to i32
%add = add nsw i32 %cst, 1
%sextadd = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 %arg1 to i8
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionBlockTrunc1(i32 %arg1, i8* %base) {
%trunc = trunc i32 %arg1 to i8
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[ARG1SEXT]] to i8
; CHECK: [[ARG1SEXT64:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT64]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionBlockTrunc2(i16 %arg1, i8* %base) {
%sextarg1 = sext i16 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK-LABEL: @oneArgPromotionPassTruncKeepSExt
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionPassTruncKeepSExt(i1 %arg1, i8* %base) {
%sextarg1 = sext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
-; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
; CHECK: add i8 [[LOAD]], [[TRUNC]]
; CHECK: ret
define i8 @oneArgPromotionTruncInsert(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
%finalres = add i8 %res, %add
ret i8 %finalres
; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i128 %arg1 to i8
; CHECK: [[ARG1SEXT64:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT64]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionLargerType(i128 %arg1, i8* %base) {
%trunc = trunc i128 %arg1 to i8
%add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
%finalres = add i8 %res, %add
ret i8 %finalres
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
-; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = add i8 [[LOAD]], [[TRUNC]]
; CHECK: add i8 [[ADDRES]], [[TRUNC]]
define i8 @oneArgPromotionTruncInsertSeveralUse(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
%almostfinalres = add i8 %res, %add
%finalres = add i8 %almostfinalres, %add
; CHECK-LABEL: @oneArgPromotionSExtSeveralUse
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8* [[GEP]]
; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = zext i8 [[LOAD]] to i64
; CHECK: add i64 [[ADDRES]], [[PROMOTED]]
define i64 @oneArgPromotionSExtSeveralUse(i8 %arg1, i8* %base) {
%add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
%almostfinalres = zext i8 %res to i64
%finalres = add i64 %almostfinalres, %sextadd
; CHECK: [[ORIG:%[a-zA-Z_0-9-]+]] = add nsw i32 %arg1, %arg2
; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ORIG]], [[ORIG]]
; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
-; CHECK: getelementptr inbounds i8* %base, i64 [[SEXT]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[SEXT]]
; CHECK: ret
define i8 @twoArgsPromotionNest(i32 %arg1, i32 %arg2, i8* %base) {
%promotableadd1 = add nsw i32 %arg1, %arg2
%promotableadd2 = add nsw i32 %promotableadd1, %promotableadd1
%sextadd = sext i32 %promotableadd2 to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[SEXTARG1]] to i8
; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[TRUNC]], %arg2
; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i64
-; CHECK: getelementptr inbounds i8* %base, i64 [[SEXT]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[SEXT]]
; CHECK: ret
define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, i8* %base) {
%sextarg1 = sext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
%add = add nsw i8 %trunc, %arg2
%sextadd = sext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %sextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %sextadd
%res = load i8* %arrayidx
ret i8 %res
}
; BB then
; CHECK-GEP: [[BASE1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
; CHECK-GEP: [[BCC1:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE1]] to i8*
-; CHECK-GEP: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8* [[BCC1]], i64 48
+; CHECK-GEP: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC1]], i64 48
; CHECK-GEP: [[ADDR1:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL1]] to i32*
; CHECK-GEP: load i32* [[ADDR1]]
; BB else
; CHECK-GEP: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to i32*
; CHECK-GEP: [[BCC2:%[a-zA-Z_0-9-]+]] = bitcast i32* [[BASE2]] to i8*
-; CHECK-GEP: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8* [[BCC2]], i64 48
+; CHECK-GEP: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8, i8* [[BCC2]], i64 48
; CHECK-GEP: [[ADDR2:%[a-zA-Z_0-9-]+]] = bitcast i8* [[FULL2]] to i32*
; CHECK-GEP: load i32* [[ADDR2]]
; CHECK-GEP: ret
%add1 = add nsw i32 %shl, %arg2
%sextidx1 = sext i32 %add1 to i64
%tmpptr = inttoptr i64 %sextidx1 to i32*
- %arrayidx1 = getelementptr i32* %tmpptr, i64 12
+ %arrayidx1 = getelementptr i32, i32* %tmpptr, i64 12
br i1 %test, label %then, label %else
then:
%res1 = load i32* %arrayidx1
; CHECK-NEXT: load i8* [[CAST]], align 1
define signext i16 @fn3(%struct.dns_packet* nocapture readonly %P) {
entry:
- %tmp = getelementptr inbounds %struct.dns_packet* %P, i64 0, i32 2
+ %tmp = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 2
%data.i.i = bitcast %union.anon* %tmp to [0 x i8]*
br label %while.body.i.i
%src.addr.0.i.i = phi i16 [ 0, %entry ], [ %inc.i.i, %while.body.i.i ]
%inc.i.i = add i16 %src.addr.0.i.i, 1
%idxprom.i.i = sext i16 %src.addr.0.i.i to i64
- %arrayidx.i.i = getelementptr inbounds [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i.i
+ %arrayidx.i.i = getelementptr inbounds [0 x i8], [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i.i
%tmp1 = load i8* %arrayidx.i.i, align 1
%conv2.i.i = zext i8 %tmp1 to i32
%and.i.i = and i32 %conv2.i.i, 15
%conv.i = zext i16 %inc.i.i.lcssa to i32
%sub.i = add nsw i32 %conv.i, -1
%idxprom.i = sext i32 %sub.i to i64
- %arrayidx.i = getelementptr inbounds [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i
+ %arrayidx.i = getelementptr inbounds [0 x i8], [0 x i8]* %data.i.i, i64 0, i64 %idxprom.i
%tmp2 = load i8* %arrayidx.i, align 1
%conv2.i = sext i8 %tmp2 to i16
store i16 %conv2.i, i16* @b, align 2
br i1 %cmp.i, label %if.then.i, label %fn2.exit
if.then.i: ; preds = %fn1.exit.i
- %end.i = getelementptr inbounds %struct.dns_packet* %P, i64 0, i32 1
+ %end.i = getelementptr inbounds %struct.dns_packet, %struct.dns_packet* %P, i64 0, i32 1
%tmp3 = load i32* %end.i, align 4
%sub7.i = add i32 %tmp3, 65535
%conv8.i = trunc i32 %sub7.i to i16
; CHECK-LABEL: @oneArgPromotionNegativeCstZExt
; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 255
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionNegativeCstZExt(i8 %arg1, i8* %base) {
%add = add nuw i8 %arg1, -1
%zextadd = zext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %zextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK-LABEL: @oneArgPromotionZExtZExt
; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionZExtZExt(i8 %arg1, i8* %base) {
%zext = zext i8 %arg1 to i32
%add = add nuw i32 %zext, 1
%zextadd = zext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %zextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[ARG1SEXT]] to i8
; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[ARG1TRUNC]] to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionBlockTruncZExt(i1 %arg1, i8* %base) {
%sextarg1 = sext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
%add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %zextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK-LABEL: @oneArgPromotionPassTruncZExt
; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i1 %arg1 to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionPassTruncZExt(i1 %arg1, i8* %base) {
%sextarg1 = zext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
%add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %zextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i8
; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[ARG1SEXT]] to i64
; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8* %base, i64 [[PROMOTED]]
+; CHECK: getelementptr inbounds i8, i8* %base, i64 [[PROMOTED]]
; CHECK: ret
define i8 @oneArgPromotionBlockSExtZExt(i1 %arg1, i8* %base) {
%sextarg1 = sext i1 %arg1 to i8
%add = add nuw i8 %sextarg1, 1
%zextadd = zext i8 %add to i64
- %arrayidx = getelementptr inbounds i8* %base, i64 %zextadd
+ %arrayidx = getelementptr inbounds i8, i8* %base, i64 %zextadd
%res = load i8* %arrayidx
ret i8 %res
}
; CHECK ret
define fastcc i32 @_Dmain(%"char[][]" %unnamed) {
entry:
- %tmp = getelementptr [7 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp = getelementptr [7 x i8], [7 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
br i1 undef, label %foreachbody, label %foreachend
foreachbody: ; preds = %entry
- %tmp4 = getelementptr i8* %tmp, i32 undef ; <i8*> [#uses=1]
+ %tmp4 = getelementptr i8, i8* %tmp, i32 undef ; <i8*> [#uses=1]
%tmp5 = load i8* %tmp4 ; <i8> [#uses=0]
unreachable
br label %P.Proc8.exit
P.Proc8.exit:
- %valueindex35.i = getelementptr [10 x i32]* @g, i32 0, i32 %1
+ %valueindex35.i = getelementptr [10 x i32], [10 x i32]* @g, i32 0, i32 %1
store i32 %u, i32* %valueindex35.i
ret void
}
define linkonce_odr void @foo(%class.A* nocapture %this, i32 %BoolValue) nounwind uwtable {
entry:
%cmp = icmp eq i32 %BoolValue, 0
- %address1 = getelementptr inbounds %class.A* %this, i64 0, i32 0, i32 3
- %address2 = getelementptr inbounds %class.A* %this, i64 0, i32 0, i32 1
+ %address1 = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 0, i32 3
+ %address2 = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 0, i32 1
br i1 %cmp, label %if.else, label %if.then
if.then: ; preds = %entry
- %0 = getelementptr inbounds %class.D* %address2, i64 0, i32 0, i64 0, i32 0
+ %0 = getelementptr inbounds %class.D, %class.D* %address2, i64 0, i32 0, i64 0, i32 0
%1 = load float* %0, align 4
- %2 = getelementptr inbounds float* %0, i64 3
+ %2 = getelementptr inbounds float, float* %0, i64 3
%3 = load float* %2, align 4
- %4 = getelementptr inbounds %class.D* %address1, i64 0, i32 0, i64 0, i32 0
+ %4 = getelementptr inbounds %class.D, %class.D* %address1, i64 0, i32 0, i64 0, i32 0
store float %1, float* %4, align 4
br label %if.end
%t3 = shl i32 1, %t2 ; <i32> [#uses=1]
%t4 = xor i32 %t3, %t1 ; <i32> [#uses=1]
store i32 %t4, i32* null, align 4
- %t5 = getelementptr %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
+ %t5 = getelementptr %struct.Hash_Key, %struct.Hash_Key* @g_flipV_hashkey, i64 0, i32 0, i64 0 ; <i32*> [#uses=2]
%t6 = load i32* %t5, align 4 ; <i32> [#uses=1]
%t7 = shl i32 1, undef ; <i32> [#uses=1]
%t8 = xor i32 %t7, %t6 ; <i32> [#uses=1]
comb_entry:
%.SV59 = alloca %struct.node* ; <%struct.node**> [#uses=1]
%0 = load i32 (...)*** null, align 4 ; <i32 (...)**> [#uses=1]
- %1 = getelementptr inbounds i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
+ %1 = getelementptr inbounds i32 (...)*, i32 (...)** %0, i32 3 ; <i32 (...)**> [#uses=1]
%2 = load i32 (...)** %1, align 4 ; <i32 (...)*> [#uses=1]
store %struct.node* undef, %struct.node** %.SV59
%3 = bitcast i32 (...)* %2 to i32 (%struct.node*)* ; <i32 (%struct.node*)*> [#uses=1]
%tmp21 = call i32 @bar()
%tmp25 = load i64* %addr, align 8
%tmp26 = inttoptr i64 %tmp25 to %ty*
- %tmp29 = getelementptr inbounds %ty* %tmp26, i64 0, i32 0
+ %tmp29 = getelementptr inbounds %ty, %ty* %tmp26, i64 0, i32 0
%tmp34 = load i8** %tmp29, align 8
- %tmp35 = getelementptr inbounds i8* %tmp34, i64 %size
+ %tmp35 = getelementptr inbounds i8, i8* %tmp34, i64 %size
store i8* %tmp35, i8** %tmp29, align 8
ret i8* null
}
for.body3: ; preds = %for.inc, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.inc ]
- %image4 = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
+ %image4 = getelementptr inbounds %"struct.dyld::MappedRanges", %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 0, i64 %indvars.iv, i32 0
%0 = load %class.ImageLoader** %image4, align 8
%cmp5 = icmp eq %class.ImageLoader* %0, %image
br i1 %cmp5, label %if.then, label %for.inc
br i1 %exitcond, label %for.inc10, label %for.body3
for.inc10: ; preds = %for.inc
- %next = getelementptr inbounds %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
+ %next = getelementptr inbounds %"struct.dyld::MappedRanges", %"struct.dyld::MappedRanges"* %p.019, i64 0, i32 1
%1 = load %"struct.dyld::MappedRanges"** %next, align 8
%cmp = icmp eq %"struct.dyld::MappedRanges"* %1, null
br i1 %cmp, label %for.end11, label %for.cond1.preheader
entry:
%v = alloca %0, align 8
call void asm sideeffect "", "=*r,r,r,0,~{dirflag},~{fpsr},~{flags}"(%0* %v, i32 0, i32 1, i128 undef) nounwind
- %0 = getelementptr inbounds %0* %v, i64 0, i32 0
+ %0 = getelementptr inbounds %0, %0* %v, i64 0, i32 0
%1 = load i64* %0, align 8
- %2 = getelementptr inbounds %0* %v, i64 0, i32 1
+ %2 = getelementptr inbounds %0, %0* %v, i64 0, i32 1
%3 = load i64* %2, align 8
%mrv4 = insertvalue %0 undef, i64 %1, 0
%mrv5 = insertvalue %0 %mrv4, i64 %3, 1
; CHECK-NEXT: movl 4(%rcx), %eax
; CHECK-NEXT: addl 8(%rcx), %eax
; CHECK-NEXT: addl 12(%rcx), %eax
- %addr1 = getelementptr %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 1
+ %addr1 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 1
%tmp1 = load i32* %addr1
- %addr2 = getelementptr %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 2
+ %addr2 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 2
%tmp2 = load i32* %addr2
- %addr3 = getelementptr %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 3
+ %addr3 = getelementptr %T, %T* inttoptr (i64 123456789012345678 to %T*), i32 0, i32 3
%tmp3 = load i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
;
; CHECK-LABEL: PR22524:
entry:
- %0 = getelementptr inbounds { float, float }* %arg, i32 0, i32 1
+ %0 = getelementptr inbounds { float, float }, { float, float }* %arg, i32 0, i32 1
store float 0.000000e+00, float* %0, align 4
; CHECK: movl $0, 4(%rdi)
- %1 = getelementptr inbounds { float, float }* %arg, i64 0, i32 0
+ %1 = getelementptr inbounds { float, float }, { float, float }* %arg, i64 0, i32 0
%2 = bitcast float* %1 to i64*
%3 = load i64* %2, align 8
%4 = trunc i64 %3 to i32
entry:
%0 = add i32 %i2, 1 ; <i32> [#uses=1]
%1 = sext i32 %0 to i64 ; <i64> [#uses=1]
- %2 = getelementptr i8* %ptr, i64 %1 ; <i8*> [#uses=1]
+ %2 = getelementptr i8, i8* %ptr, i64 %1 ; <i8*> [#uses=1]
%3 = load i8* %2, align 1 ; <i8> [#uses=1]
%4 = icmp eq i8 0, %3 ; <i1> [#uses=1]
br i1 %4, label %bb3, label %bb34
; CHECK: catch.entry:
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
-; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
+; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
; CHECK: %eh.obj = load i8** %eh.obj.ptr
; CHECK: call void @_Z16handle_exceptionv()
; CHECK: ret i8* blockaddress(@_Z4testv, %try.cont)
; CHECK: %exn.slot = alloca i8*
; CHECK: %ehselector.slot = alloca i32
; CHECK-NOT: %i = alloca i32, align 4
-; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
+; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
; Function Attrs: uwtable
define void @_Z4testv() #0 {
; CHECK: catch.entry:
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @_Z4testv to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %struct._Z4testv.ehdata*
-; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
+; CHECK: %eh.obj.ptr = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 1
; CHECK: %eh.obj = load i8** %eh.obj.ptr
-; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
+; CHECK: %i = getelementptr inbounds %struct._Z4testv.ehdata, %struct._Z4testv.ehdata* %eh.data, i32 0, i32 2
; CHECK: %2 = bitcast i8* %eh.obj to i32*
; CHECK: %3 = load i32* %2, align 4
; CHECK: store i32 %3, i32* %i, align 4
; CHECK: %frame.alloc = call i8* @llvm.frameallocate(i32 80)
; CHECK: %eh.data = bitcast i8* %frame.alloc to %"struct.\01?test@@YAXXZ.ehdata"*
; CHECK-NOT: %NumExceptions = alloca i32, align 4
-; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
+; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
; CHECK-NOT: %ExceptionVal = alloca [10 x i32], align 16
-; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
+; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
; CHECK-NOT: %Data = alloca %struct.SomeData, align 4
-; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
-; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
+; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
+; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
; CHECK: %exn.slot = alloca i8*
; CHECK: %ehselector.slot = alloca i32
; CHECK-NOT: %e = alloca i32, align 4
-; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
+; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
; Function Attrs: uwtable
define void @"\01?test@@YAXXZ"() #0 {
invoke.cont: ; preds = %for.body
%2 = load i32* %i, align 4
- %a = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 0
+ %a = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
%3 = load i32* %a, align 4
%add = add nsw i32 %3, %2
store i32 %add, i32* %a, align 4
%11 = load i32* %e, align 4
%12 = load i32* %NumExceptions, align 4
%idxprom = sext i32 %12 to i64
- %arrayidx = getelementptr inbounds [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
store i32 %11, i32* %arrayidx, align 4
%13 = load i32* %NumExceptions, align 4
%inc = add nsw i32 %13, 1
if.then: ; preds = %catch
%16 = load i32* %e, align 4
- %b = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 1
+ %b = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 1
%17 = load i32* %b, align 4
%add2 = add nsw i32 %17, %16
store i32 %add2, i32* %b, align 4
if.else: ; preds = %catch
%18 = load i32* %e, align 4
- %a3 = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 0
+ %a3 = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
%19 = load i32* %a3, align 4
%add4 = add nsw i32 %19, %18
store i32 %add4, i32* %a3, align 4
for.end: ; preds = %for.cond
%22 = load i32* %NumExceptions, align 4
- %arraydecay = getelementptr inbounds [10 x i32]* %ExceptionVal, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i32 0
call void @"\01?dump@@YAXPEAHHAEAUSomeData@@@Z"(i32* %arraydecay, i32 %22, %struct.SomeData* dereferenceable(8) %Data)
ret void
; CHECK: catch.entry:
; CHECK: %eh.alloc = call i8* @llvm.framerecover(i8* bitcast (void ()* @"\01?test@@YAXXZ" to i8*), i8* %1)
; CHECK: %eh.data = bitcast i8* %eh.alloc to %"struct.\01?test@@YAXXZ.ehdata"*
-; CHECK: %eh.obj.ptr = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 1
+; CHECK: %eh.obj.ptr = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 1
; CHECK: %eh.obj = load i8** %eh.obj.ptr
-; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
-; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
-; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
-; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
-; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
+; CHECK: %e = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 2
+; CHECK: %NumExceptions = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 3
+; CHECK: %ExceptionVal = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 4
+; CHECK: %i = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 5
+; CHECK: %Data = getelementptr inbounds %"struct.\01?test@@YAXXZ.ehdata", %"struct.\01?test@@YAXXZ.ehdata"* %eh.data, i32 0, i32 6
; CHECK: %2 = bitcast i8* %eh.obj to i32*
; CHECK: %3 = load i32* %2, align 4
; CHECK: store i32 %3, i32* %e, align 4
; CHECK: %4 = load i32* %e, align 4
; CHECK: %5 = load i32* %NumExceptions, align 4
; CHECK: %idxprom = sext i32 %5 to i64
-; CHECK: %arrayidx = getelementptr inbounds [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
+; CHECK: %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %ExceptionVal, i32 0, i64 %idxprom
; CHECK: store i32 %4, i32* %arrayidx, align 4
; CHECK: %6 = load i32* %NumExceptions, align 4
; CHECK: %inc = add nsw i32 %6, 1
;
; CHECK: if.then: ; preds = %catch.entry
; CHECK: %9 = load i32* %e, align 4
-; CHECK: %b = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 1
+; CHECK: %b = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 1
; CHECK: %10 = load i32* %b, align 4
; CHECK: %add2 = add nsw i32 %10, %9
; CHECK: store i32 %add2, i32* %b, align 4
;
; CHECK: if.else: ; preds = %catch.entry
; CHECK: %11 = load i32* %e, align 4
-; CHECK: %a3 = getelementptr inbounds %struct.SomeData* %Data, i32 0, i32 0
+; CHECK: %a3 = getelementptr inbounds %struct.SomeData, %struct.SomeData* %Data, i32 0, i32 0
; CHECK: %12 = load i32* %a3, align 4
; CHECK: %add4 = add nsw i32 %12, %11
; CHECK: store i32 %add4, i32* %a3, align 4
; CHECK-NEXT: idivq
; CHECK: retq
define i64 @addressModeWith32bitIndex(i32 %V) {
- %gep = getelementptr i64* null, i32 %V
+ %gep = getelementptr i64, i64* null, i32 %V
%load = load i64* %gep
%sdiv = sdiv i64 0, %load
ret i64 %sdiv
%l_75.077 = phi i64 [ %ins, %for.body22 ], [ undef, %bb.nph81 ]
%tmp110 = trunc i64 %l_75.077 to i32
%tmp111 = and i32 %tmp110, 65535
- %arrayidx32.0 = getelementptr [9 x [5 x [2 x %struct.S0]]]* undef, i32 0, i32 %l_74.0, i32 %tmp98, i32 %tmp111, i32 0
+ %arrayidx32.0 = getelementptr [9 x [5 x [2 x %struct.S0]]], [9 x [5 x [2 x %struct.S0]]]* undef, i32 0, i32 %l_74.0, i32 %tmp98, i32 %tmp111, i32 0
store i8 1, i8* %arrayidx32.0, align 4
%tmp106 = shl i32 %tmp110, 2
%tmp107 = and i32 %tmp106, 262140
%scevgep99.sum114 = or i32 %tmp107, 1
- %arrayidx32.1.1 = getelementptr [9 x [5 x [2 x %struct.S0]]]* undef, i32 0, i32 %l_74.0, i32 %tmp98, i32 0, i32 1, i32 %scevgep99.sum114
+ %arrayidx32.1.1 = getelementptr [9 x [5 x [2 x %struct.S0]]], [9 x [5 x [2 x %struct.S0]]]* undef, i32 0, i32 %l_74.0, i32 %tmp98, i32 0, i32 1, i32 %scevgep99.sum114
store i8 0, i8* %arrayidx32.1.1, align 1
%ins = or i64 undef, undef
br label %for.body22
define void @_ZNK4llvm17MipsFrameLowering12emitPrologueERNS_15MachineFunctionE() ssp align 2 {
bb:
%tmp = load %t9** undef, align 4
- %tmp2 = getelementptr inbounds %t9* %tmp, i32 0, i32 0
- %tmp3 = getelementptr inbounds %t9* %tmp, i32 0, i32 0, i32 0, i32 0, i32 1
+ %tmp2 = getelementptr inbounds %t9, %t9* %tmp, i32 0, i32 0
+ %tmp3 = getelementptr inbounds %t9, %t9* %tmp, i32 0, i32 0, i32 0, i32 0, i32 1
br label %bb4
bb4: ; preds = %bb37, %bb
br i1 false, label %bb33, label %bb31
bb31: ; preds = %bb29
- %tmp32 = getelementptr inbounds %t13* %tmp30, i32 1
+ %tmp32 = getelementptr inbounds %t13, %t13* %tmp30, i32 1
store %t13* %tmp32, %t13** %tmp3, align 4
br label %bb37
entry:
%tmp7 = mul i32 %idxY, %ref_frame_stride ; <i32> [#uses=2]
%tmp9 = add i32 %tmp7, %idxX ; <i32> [#uses=1]
- %tmp11 = getelementptr i8* %ref_frame_ptr, i32 %tmp9 ; <i8*> [#uses=1]
+ %tmp11 = getelementptr i8, i8* %ref_frame_ptr, i32 %tmp9 ; <i8*> [#uses=1]
%tmp1112 = bitcast i8* %tmp11 to i32* ; <i32*> [#uses=1]
%tmp13 = load i32* %tmp1112, align 4 ; <i32> [#uses=1]
%tmp18 = add i32 %idxX, 4 ; <i32> [#uses=1]
%tmp20.sum = add i32 %tmp18, %tmp7 ; <i32> [#uses=1]
- %tmp21 = getelementptr i8* %ref_frame_ptr, i32 %tmp20.sum ; <i8*> [#uses=1]
+ %tmp21 = getelementptr i8, i8* %ref_frame_ptr, i32 %tmp20.sum ; <i8*> [#uses=1]
%tmp2122 = bitcast i8* %tmp21 to i16* ; <i16*> [#uses=1]
%tmp23 = load i16* %tmp2122, align 2 ; <i16> [#uses=1]
%tmp2425 = zext i16 %tmp23 to i64 ; <i64> [#uses=1]
tail call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !33, metadata !{!"0x102"}), !dbg !55
tail call void @llvm.dbg.value(metadata %struct.AAA3* %var1, i64 0, metadata !56, metadata !{!"0x102"}), !dbg !57
tail call void @llvm.dbg.value(metadata !58, i64 0, metadata !59, metadata !{!"0x102"}), !dbg !60
- %arraydecay.i = getelementptr inbounds %struct.AAA3* %var1, i64 0, i32 0, i64 0, !dbg !61
+ %arraydecay.i = getelementptr inbounds %struct.AAA3, %struct.AAA3* %var1, i64 0, i32 0, i64 0, !dbg !61
call void @_Z3fooPcjPKc(i8* %arraydecay.i, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !61
call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !34, metadata !{!"0x102"}), !dbg !63
call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !64, metadata !{!"0x102"}), !dbg !65
call void @llvm.dbg.value(metadata !58, i64 0, metadata !66, metadata !{!"0x102"}), !dbg !67
- %arraydecay.i5 = getelementptr inbounds %struct.AAA3* %var2, i64 0, i32 0, i64 0, !dbg !68
+ %arraydecay.i5 = getelementptr inbounds %struct.AAA3, %struct.AAA3* %var2, i64 0, i32 0, i64 0, !dbg !68
call void @_Z3fooPcjPKc(i8* %arraydecay.i5, i32 4, i8* getelementptr inbounds ([1 x i8]* @.str, i64 0, i64 0)), !dbg !68
%tobool1 = icmp eq i32 %param1, 0, !dbg !69
call void @llvm.dbg.value(metadata %struct.AAA3* %var2, i64 0, metadata !34, metadata !{!"0x102"}), !dbg !63
br label %_ZN7Flibble3barEP6Wibble.exit
_ZN7Flibble3barEP6Wibble.exit: ; preds = %entry, %if.then.i
- %x.i = getelementptr inbounds %struct.Wibble* %0, i64 0, i32 0
+ %x.i = getelementptr inbounds %struct.Wibble, %struct.Wibble* %0, i64 0, i32 0
store i32 0, i32* %x.i, align 4
ret void
}
store i8* %2, i8** %saved_stack, !dbg !16
%vla = alloca i32, i64 %1, align 16, !dbg !16
call void @llvm.dbg.declare(metadata i32* %vla, metadata !17, metadata !21), !dbg !22
- %arrayidx = getelementptr inbounds i32* %vla, i64 0, !dbg !23
+ %arrayidx = getelementptr inbounds i32, i32* %vla, i64 0, !dbg !23
store i32 0, i32* %arrayidx, align 4, !dbg !24
- %arrayidx1 = getelementptr inbounds i32* %vla, i64 1, !dbg !25
+ %arrayidx1 = getelementptr inbounds i32, i32* %vla, i64 1, !dbg !25
store i32 1, i32* %arrayidx1, align 4, !dbg !26
- %arrayidx2 = getelementptr inbounds i32* %vla, i64 2, !dbg !27
+ %arrayidx2 = getelementptr inbounds i32, i32* %vla, i64 2, !dbg !27
store i32 2, i32* %arrayidx2, align 4, !dbg !28
%3 = load i32* %elems, align 4, !dbg !29
%4 = zext i32 %3 to i64, !dbg !30
%vla3 = alloca i32, i64 %4, align 16, !dbg !30
call void @llvm.dbg.declare(metadata i32* %vla3, metadata !31, metadata !21), !dbg !32
- %arrayidx4 = getelementptr inbounds i32* %vla3, i64 0, !dbg !33
+ %arrayidx4 = getelementptr inbounds i32, i32* %vla3, i64 0, !dbg !33
store i32 1, i32* %arrayidx4, align 4, !dbg !34
- %arrayidx5 = getelementptr inbounds i32* %vla3, i64 0, !dbg !35
+ %arrayidx5 = getelementptr inbounds i32, i32* %vla3, i64 0, !dbg !35
%5 = load i32* %arrayidx5, align 4, !dbg !35
store i32 1, i32* %cleanup.dest.slot
%6 = load i8** %saved_stack, !dbg !36
ret void
if.else130: ; preds = %bb1
- %tmp = getelementptr inbounds [8192 x i8]* %bitmapBuffer, i32 0, i32 0
+ %tmp = getelementptr inbounds [8192 x i8], [8192 x i8]* %bitmapBuffer, i32 0, i32 0
call void @llvm.lifetime.start(i64 8192, i8* %tmp) #0
call void @llvm.lifetime.end(i64 8192, i8* %tmp) #0
- %tmp25 = getelementptr inbounds [8192 x i8]* %bitmapBuffer229, i32 0, i32 0
+ %tmp25 = getelementptr inbounds [8192 x i8], [8192 x i8]* %bitmapBuffer229, i32 0, i32 0
call void @llvm.lifetime.start(i64 8192, i8* %tmp25) #0
call void @llvm.lifetime.end(i64 8192, i8* %tmp25) #0
br label %end1
%min.0 = phi i32 [ 0, %entry ], [ %min.1, %do.cond ]
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
%p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
- %incdec.ptr = getelementptr inbounds i32* %p.addr.0, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
%0 = load i32* %p.addr.0, align 4
%cmp = icmp sgt i32 %0, %max.0
br i1 %cmp, label %do.cond, label %if.else
br i1 undef, label %if.end2048, label %while.end2104
if.end2048: ; preds = %if.end2042
- %bsLive2054.pre = getelementptr inbounds i8* %s, i32 8
+ %bsLive2054.pre = getelementptr inbounds i8, i8* %s, i32 8
br label %sw.bb2050
sw.bb2050: ; preds = %if.end2048, %if.end.sw.bb2050_crit_edge
define fastcc void @foo(%pp* nocapture byval %p_arg) {
entry:
- %tmp2 = getelementptr %pp* %p_arg, i64 0, i32 0 ; <%cc*> [#uses=
+ %tmp2 = getelementptr %pp, %pp* %p_arg, i64 0, i32 0 ; <%cc*> [#uses=
%tmp3 = load %cc* %tmp2 ; <%cc> [#uses=1]
%tmp34 = extractvalue %cc %tmp3, 0 ; <%crd> [#uses=1]
%tmp345 = extractvalue %crd %tmp34, 0 ; <i64> [#uses=1]
; should be sign-extended to 64 bits on 64-bit targets.
; PR3181
define i32 @test1(i32 %t3, i32* %t1) nounwind {
- %t9 = getelementptr i32* %t1, i32 %t3 ; <i32*> [#uses=1]
+ %t9 = getelementptr i32, i32* %t1, i32 %t3 ; <i32*> [#uses=1]
%t15 = load i32* %t9 ; <i32> [#uses=1]
ret i32 %t15
; X32-LABEL: test1:
}
define i32 @test2(i64 %t3, i32* %t1) nounwind {
- %t9 = getelementptr i32* %t1, i64 %t3 ; <i32*> [#uses=1]
+ %t9 = getelementptr i32, i32* %t1, i64 %t3 ; <i32*> [#uses=1]
%t15 = load i32* %t9 ; <i32> [#uses=1]
ret i32 %t15
; X32-LABEL: test2:
; PR4984
define i8 @test3(i8* %start) nounwind {
entry:
- %A = getelementptr i8* %start, i64 -2 ; <i8*> [#uses=1]
+ %A = getelementptr i8, i8* %start, i64 -2 ; <i8*> [#uses=1]
%B = load i8* %A, align 1 ; <i8> [#uses=1]
ret i8 %B
%tmp = load i64* %x.addr ; <i64> [#uses=1]
%add = add nsw i64 %tmp, 16 ; <i64> [#uses=1]
%tmp1 = load double** %p.addr ; <double*> [#uses=1]
- %arrayidx = getelementptr inbounds double* %tmp1, i64 %add ; <double*> [#uses=1]
+ %arrayidx = getelementptr inbounds double, double* %tmp1, i64 %add ; <double*> [#uses=1]
%tmp2 = load double* %arrayidx ; <double> [#uses=1]
ret double %tmp2
; PR8961 - Make sure the sext for the GEP addressing comes before the load that
; is folded.
define i64 @test5(i8* %A, i32 %I, i64 %B) nounwind {
- %v8 = getelementptr i8* %A, i32 %I
+ %v8 = getelementptr i8, i8* %A, i32 %I
%v9 = bitcast i8* %v8 to i64*
%v10 = load i64* %v9
%v11 = add i64 %B, %v10
to label %invoke.cont16 unwind label %lpad
invoke.cont16: ; preds = %if.then14
- %arrayidx18 = getelementptr inbounds i8* %call17, i64 %dec
+ %arrayidx18 = getelementptr inbounds i8, i8* %call17, i64 %dec
store i8 0, i8* %arrayidx18
unreachable
; X64: movl $4, 8({{%rdi|%rcx}})
- %tmp29 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+ %tmp29 = getelementptr inbounds {i32,i32,i32}, {i32,i32,i32}* %tmp1, i32 0, i32 2
%tmp30 = load i32* %tmp29, align 4
- %p2 = getelementptr inbounds {i32,i32,i32}* %tmp1, i32 0, i32 2
+ %p2 = getelementptr inbounds {i32,i32,i32}, {i32,i32,i32}* %tmp1, i32 0, i32 2
store i32 4, i32* %p2
%tmp72 = or i32 %tmp71, %tmp30
@rtx_length = external global [153 x i8]
define i32 @test4(i64 %idxprom9) nounwind {
- %arrayidx10 = getelementptr inbounds [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
+ %arrayidx10 = getelementptr inbounds [153 x i8], [153 x i8]* @rtx_length, i32 0, i64 %idxprom9
%tmp11 = load i8* %arrayidx10, align 1
%conv = zext i8 %tmp11 to i32
ret i32 %conv
%t4 = xor i32 %t3, 3
%t5 = xor i32 %t4, %s
%t6 = add i32 %t5, 2
- %t7 = getelementptr i32* %y, i32 1
- %t8 = getelementptr i32* %t7, i32 %t6
+ %t7 = getelementptr i32, i32* %y, i32 1
+ %t8 = getelementptr i32, i32* %t7, i32 %t6
call void asm sideeffect "hello world", ""()
br label %exit
define fastcc i32 @bar() nounwind {
%V = alloca %struct.MVT
- %a = getelementptr %struct.MVT* %V, i32 0, i32 0
+ %a = getelementptr %struct.MVT, %struct.MVT* %V, i32 0, i32 0
store i32 1, i32* %a
call fastcc void @foo(%struct.MVT* byval %V) nounwind
%t = load i32* %a
define fastcc void @bar(%struct.foo* noalias sret %agg.result) nounwind {
entry:
- %tmp1 = getelementptr %struct.foo* %agg.result, i32 0, i32 0
- %tmp3 = getelementptr [4 x i32]* %tmp1, i32 0, i32 0
+ %tmp1 = getelementptr %struct.foo, %struct.foo* %agg.result, i32 0, i32 0
+ %tmp3 = getelementptr [4 x i32], [4 x i32]* %tmp1, i32 0, i32 0
store i32 1, i32* %tmp3, align 8
ret void
}
define void @foo() nounwind {
%memtmp = alloca %struct.foo, align 4
call fastcc void @bar( %struct.foo* sret %memtmp ) nounwind
- %tmp4 = getelementptr %struct.foo* %memtmp, i32 0, i32 0
- %tmp5 = getelementptr [4 x i32]* %tmp4, i32 0, i32 0
+ %tmp4 = getelementptr %struct.foo, %struct.foo* %memtmp, i32 0, i32 0
+ %tmp5 = getelementptr [4 x i32], [4 x i32]* %tmp4, i32 0, i32 0
%tmp6 = load i32* %tmp5
store i32 %tmp6, i32* @dst
ret void
; CHECK-LABEL: _gep_promotion:
; CHECK: movzbl ({{.*}})
- %arrayidx = getelementptr inbounds i8* %0, i8 %add
+ %arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
%1 = load i8* %arrayidx, align 1
ret i8 %1
%add = add i8 %xor, -127 ; %xor + 0x81
%1 = load i8** %ptr.addr, align 8
- %arrayidx = getelementptr inbounds i8* %1, i8 %add
+ %arrayidx = getelementptr inbounds i8, i8* %1, i8 %add
%2 = load i8* %arrayidx, align 1
ret i8 %2
%1 = zext i32 %cur_match to i64 ; <i64> [#uses=1]
%2 = sext i32 %0 to i64 ; <i64> [#uses=1]
%.sum3 = add i64 %1, %2 ; <i64> [#uses=1]
- %3 = getelementptr [65536 x i8]* @window, i64 0, i64 %.sum3 ; <i8*> [#uses=1]
+ %3 = getelementptr [65536 x i8], [65536 x i8]* @window, i64 0, i64 %.sum3 ; <i8*> [#uses=1]
%4 = load i8* %3, align 1 ; <i8> [#uses=1]
%5 = icmp eq i8 %4, 0 ; <i1> [#uses=1]
br i1 %5, label %bb5, label %bb23
entry:
%tmp2 = shl i32 %i, 2
%tmp4 = and i32 %tmp2, 1020
- %tmp7 = getelementptr i8* %X, i32 %tmp4
+ %tmp7 = getelementptr i8, i8* %X, i32 %tmp4
%tmp78 = bitcast i8* %tmp7 to i32*
%tmp9 = load i32* %tmp78
ret i32 %tmp9
entry:
%tmp2 = shl i32 %i, 1
%tmp4 = and i32 %tmp2, 131070
- %tmp7 = getelementptr i16* %X, i32 %tmp4
+ %tmp7 = getelementptr i16, i16* %X, i32 %tmp4
%tmp78 = bitcast i16* %tmp7 to i32*
%tmp9 = load i32* %tmp78
ret i32 %tmp9
%i = load i16* %i.ptr
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
- %val.ptr = getelementptr inbounds i32* %arr, i32 %index
+ %val.ptr = getelementptr inbounds i32, i32* %arr, i32 %index
%val = load i32* %val.ptr
%sum = add i32 %val, %i.zext
ret i32 %sum
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
- %val.ptr = getelementptr inbounds i32* %arr, i64 %index.zext
+ %val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
%val = load i32* %val.ptr
%sum.1 = add i32 %val, %i.zext
%sum.2 = add i32 %sum.1, %index
br i1 %2, label %return, label %bb.nph
bb.nph: ; preds = %entry
- %3 = getelementptr %"struct.clang::Action"* %Actions, i64 0, i32 0, i32 0 ; <i32 (...)***> [#uses=1]
+ %3 = getelementptr %"struct.clang::Action", %"struct.clang::Action"* %Actions, i64 0, i32 0, i32 0 ; <i32 (...)***> [#uses=1]
%mrv_gep = bitcast %"struct.clang::ActionBase::ActionResult<0u>"* %0 to i64* ; <i64*> [#uses=1]
- %mrv_gep1 = getelementptr %"struct.clang::ActionBase::ActionResult<0u>"* %0, i64 0, i32 1 ; <i8*> [#uses=1]
+ %mrv_gep1 = getelementptr %"struct.clang::ActionBase::ActionResult<0u>", %"struct.clang::ActionBase::ActionResult<0u>"* %0, i64 0, i32 1 ; <i8*> [#uses=1]
%4 = bitcast i8* %mrv_gep1 to i64* ; <i64*> [#uses=1]
- %5 = getelementptr %"struct.clang::ActionBase::ActionResult<0u>"* %0, i64 0, i32 0 ; <i8**> [#uses=1]
+ %5 = getelementptr %"struct.clang::ActionBase::ActionResult<0u>", %"struct.clang::ActionBase::ActionResult<0u>"* %0, i64 0, i32 0 ; <i8**> [#uses=1]
br label %bb
bb: ; preds = %bb, %bb.nph
%Trial.01 = phi i32 [ 0, %bb.nph ], [ %12, %bb ] ; <i32> [#uses=1]
%Val_addr.02 = phi i8* [ %Val, %bb.nph ], [ %11, %bb ] ; <i8*> [#uses=1]
%6 = load i32 (...)*** %3, align 8 ; <i32 (...)**> [#uses=1]
- %7 = getelementptr i32 (...)** %6, i64 70 ; <i32 (...)**> [#uses=1]
+ %7 = getelementptr i32 (...)*, i32 (...)** %6, i64 70 ; <i32 (...)**> [#uses=1]
%8 = load i32 (...)** %7, align 8 ; <i32 (...)*> [#uses=1]
%9 = bitcast i32 (...)* %8 to { i64, i64 } (%"struct.clang::Action"*, i8*)* ; <{ i64, i64 } (%"struct.clang::Action"*, i8*)*> [#uses=1]
%10 = call { i64, i64 } %9(%"struct.clang::Action"* %Actions, i8* %Val_addr.02) nounwind ; <{ i64, i64 }> [#uses=2]
for.inc8.i: ; preds = %if.then.i, %for.body3.i
%lftr.wideiv.i = trunc i64 %indvars.iv.i to i32
- %arrayidx4.phi.trans.insert.i = getelementptr inbounds [0 x i32*]* undef, i64 0, i64 %indvars.iv.i
+ %arrayidx4.phi.trans.insert.i = getelementptr inbounds [0 x i32*], [0 x i32*]* undef, i64 0, i64 %indvars.iv.i
%.pre.i = load i32** %arrayidx4.phi.trans.insert.i, align 8
%phitmp.i = add i64 %indvars.iv.i, 1
br label %for.body3.i
%struct.X = type { void ()* }
define void @test2(%struct.X* nocapture %x) {
entry:
- %f = getelementptr inbounds %struct.X* %x, i64 0, i32 0
+ %f = getelementptr inbounds %struct.X, %struct.X* %x, i64 0, i32 0
%0 = load void ()** %f
store void ()* null, void ()** %f
tail call void %0()
store <2 x float>* %dest, <2 x float>** %dest.addr, align 8
store <2 x float> zeroinitializer, <2 x float>* %tmp, align 8
%0 = load <4 x float>** %source.addr, align 8
- %arrayidx = getelementptr inbounds <4 x float>* %0, i64 0
+ %arrayidx = getelementptr inbounds <4 x float>, <4 x float>* %0, i64 0
%1 = load <4 x float>* %arrayidx, align 16
%2 = extractelement <4 x float> %1, i32 0
%3 = load <2 x float>* %tmp, align 8
store <2 x float> %4, <2 x float>* %tmp, align 8
%5 = load <2 x float>* %tmp, align 8
%6 = load <2 x float>** %dest.addr, align 8
- %arrayidx1 = getelementptr inbounds <2 x float>* %6, i64 0
+ %arrayidx1 = getelementptr inbounds <2 x float>, <2 x float>* %6, i64 0
store <2 x float> %5, <2 x float>* %arrayidx1, align 8
%7 = load <2 x float>** %dest.addr, align 8
- %arrayidx2 = getelementptr inbounds <2 x float>* %7, i64 0
+ %arrayidx2 = getelementptr inbounds <2 x float>, <2 x float>* %7, i64 0
%8 = load <2 x float>* %arrayidx2, align 8
%vecext = extractelement <2 x float> %8, i32 0
%9 = load <2 x float>** %dest.addr, align 8
- %arrayidx3 = getelementptr inbounds <2 x float>* %9, i64 0
+ %arrayidx3 = getelementptr inbounds <2 x float>, <2 x float>* %9, i64 0
%10 = load <2 x float>* %arrayidx3, align 8
%vecext4 = extractelement <2 x float> %10, i32 1
call void @ext(float %vecext, float %vecext4)
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp2 = getelementptr [1000 x i8]* @B, i32 0, i32 %i.019.0
+ %tmp2 = getelementptr [1000 x i8], [1000 x i8]* @B, i32 0, i32 %i.019.0
%tmp3 = load i8* %tmp2, align 4
%tmp4 = mul i8 %tmp3, 2
- %tmp5 = getelementptr [1000 x i8]* @A, i32 0, i32 %i.019.0
+ %tmp5 = getelementptr [1000 x i8], [1000 x i8]* @A, i32 0, i32 %i.019.0
store i8 %tmp4, i8* %tmp5, align 4
%tmp8 = mul i32 %i.019.0, 9
- %tmp10 = getelementptr [1000 x i8]* @P, i32 0, i32 %tmp8
+ %tmp10 = getelementptr [1000 x i8], [1000 x i8]* @P, i32 0, i32 %tmp8
store i8 17, i8* %tmp10, align 4
%indvar.next = add i32 %i.019.0, 1
%exitcond = icmp eq i32 %indvar.next, %m
entry:
%0 = load i32* @a, align 4, !tbaa !1
%1 = inttoptr i32 %0 to %struct.XXH_state64_t*
- %total_len = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 0
+ %total_len = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 0
%2 = load i32* %total_len, align 4, !tbaa !5
%tobool = icmp eq i32 %2, 0
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %v3 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 3
+ %v3 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 3
%3 = load i64* %v3, align 4, !tbaa !8
- %v4 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 4
+ %v4 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 4
%4 = load i64* %v4, align 4, !tbaa !9
- %v2 = getelementptr inbounds %struct.XXH_state64_t* %1, i32 0, i32 2
+ %v2 = getelementptr inbounds %struct.XXH_state64_t, %struct.XXH_state64_t* %1, i32 0, i32 2
%5 = load i64* %v2, align 4, !tbaa !10
%shl = shl i64 %5, 1
%or = or i64 %shl, %5
bb: ; preds = %bb, %entry
%i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=5]
- %1 = getelementptr float* %A, i32 %i.03 ; <float*> [#uses=1]
+ %1 = getelementptr float, float* %A, i32 %i.03 ; <float*> [#uses=1]
%2 = load float* %1, align 4 ; <float> [#uses=1]
- %3 = getelementptr float* %B, i32 %i.03 ; <float*> [#uses=1]
+ %3 = getelementptr float, float* %B, i32 %i.03 ; <float*> [#uses=1]
%4 = load float* %3, align 4 ; <float> [#uses=1]
%5 = fadd float %2, %4 ; <float> [#uses=1]
- %6 = getelementptr float* %C, i32 %i.03 ; <float*> [#uses=1]
+ %6 = getelementptr float, float* %C, i32 %i.03 ; <float*> [#uses=1]
store float %5, float* %6, align 4
%7 = add i32 %i.03, 10 ; <i32> [#uses=3]
- %8 = getelementptr float* %A, i32 %7 ; <float*> [#uses=1]
+ %8 = getelementptr float, float* %A, i32 %7 ; <float*> [#uses=1]
%9 = load float* %8, align 4 ; <float> [#uses=1]
- %10 = getelementptr float* %B, i32 %7 ; <float*> [#uses=1]
+ %10 = getelementptr float, float* %B, i32 %7 ; <float*> [#uses=1]
%11 = load float* %10, align 4 ; <float> [#uses=1]
%12 = fadd float %9, %11 ; <float> [#uses=1]
- %13 = getelementptr float* %C, i32 %7 ; <float*> [#uses=1]
+ %13 = getelementptr float, float* %C, i32 %7 ; <float*> [#uses=1]
store float %12, float* %13, align 4
%indvar.next = add i32 %i.03, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
%d1 = extractelement <4 x i32> %j, i32 1
%d2 = extractelement <4 x i32> %j, i32 2
%d3 = extractelement <4 x i32> %j, i32 3
- %q0 = getelementptr double* %p, i32 %d0
- %q1 = getelementptr double* %p, i32 %d1
- %q2 = getelementptr double* %p, i32 %d2
- %q3 = getelementptr double* %p, i32 %d3
+ %q0 = getelementptr double, double* %p, i32 %d0
+ %q1 = getelementptr double, double* %p, i32 %d1
+ %q2 = getelementptr double, double* %p, i32 %d2
+ %q3 = getelementptr double, double* %p, i32 %d3
%r0 = load double* %q0
%r1 = load double* %q1
%r2 = load double* %q2
define i32 @test() nounwind uwtable {
entry:
%0 = load volatile %struct.thread* addrspace(256)* null
- %c = getelementptr inbounds %struct.thread* %0, i64 0, i32 2
+ %c = getelementptr inbounds %struct.thread, %struct.thread* %0, i64 0, i32 2
%1 = load i32* %c, align 4
ret i32 %1
}
define double @foo8(double* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
- %t2 = getelementptr double* %p, i32 %t1
+ %t2 = getelementptr double, double* %p, i32 %t1
%t3 = load double* %t2, align 8
ret double %t3
}
define float @foo4(float* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
- %t2 = getelementptr float* %p, i32 %t1
+ %t2 = getelementptr float, float* %p, i32 %t1
%t3 = load float* %t2, align 8
ret float %t3
}
define i16 @foo2(i16* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
- %t2 = getelementptr i16* %p, i32 %t1
+ %t2 = getelementptr i16, i16* %p, i32 %t1
%t3 = load i16* %t2, align 8
ret i16 %t3
}
define i8 @foo1(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 8
%t1 = and i32 %t0, 255
- %t2 = getelementptr i8* %p, i32 %t1
+ %t2 = getelementptr i8, i8* %p, i32 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define i8 @bar8(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 5
%t1 = and i32 %t0, 2040
- %t2 = getelementptr i8* %p, i32 %t1
+ %t2 = getelementptr i8, i8* %p, i32 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define i8 @bar4(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 6
%t1 = and i32 %t0, 1020
- %t2 = getelementptr i8* %p, i32 %t1
+ %t2 = getelementptr i8, i8* %p, i32 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define i8 @bar2(i8* nocapture inreg %p, i32 inreg %x) nounwind readonly {
%t0 = lshr i32 %x, 7
%t1 = and i32 %t0, 510
- %t2 = getelementptr i8* %p, i32 %t1
+ %t2 = getelementptr i8, i8* %p, i32 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define double @foo8(double* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
- %t2 = getelementptr double* %p, i64 %t1
+ %t2 = getelementptr double, double* %p, i64 %t1
%t3 = load double* %t2, align 8
ret double %t3
}
define float @foo4(float* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
- %t2 = getelementptr float* %p, i64 %t1
+ %t2 = getelementptr float, float* %p, i64 %t1
%t3 = load float* %t2, align 8
ret float %t3
}
define i16 @foo2(i16* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
- %t2 = getelementptr i16* %p, i64 %t1
+ %t2 = getelementptr i16, i16* %p, i64 %t1
%t3 = load i16* %t2, align 8
ret i16 %t3
}
define i8 @foo1(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 8
%t1 = and i64 %t0, 255
- %t2 = getelementptr i8* %p, i64 %t1
+ %t2 = getelementptr i8, i8* %p, i64 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define i8 @bar8(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 5
%t1 = and i64 %t0, 2040
- %t2 = getelementptr i8* %p, i64 %t1
+ %t2 = getelementptr i8, i8* %p, i64 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define i8 @bar4(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 6
%t1 = and i64 %t0, 1020
- %t2 = getelementptr i8* %p, i64 %t1
+ %t2 = getelementptr i8, i8* %p, i64 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
define i8 @bar2(i8* nocapture inreg %p, i64 inreg %x) nounwind readonly {
%t0 = lshr i64 %x, 7
%t1 = and i64 %t0, 510
- %t2 = getelementptr i8* %p, i64 %t1
+ %t2 = getelementptr i8, i8* %p, i64 %t1
%t3 = load i8* %t2, align 8
ret i8 %t3
}
%t0 = lshr i32 %y, 8 ; <i32> [#uses=1]
%t1 = and i32 %t0, 255 ; <i32> [#uses=2]
%t2 = shl i32 %t1, 3
- %t3 = getelementptr i8* %x, i32 %t2 ; <i8*> [#uses=1]
+ %t3 = getelementptr i8, i8* %x, i32 %t2 ; <i8*> [#uses=1]
store i8 77, i8* %t3, align 4
ret i32 %t2
}
; CHECK-32: movl $50000000{{..}}, [[EAX:%e..]]
; CHECK-32-NEXT: addl [[EAX]], %esp
%1 = alloca [5000000000 x i8], align 16
- %2 = getelementptr inbounds [5000000000 x i8]* %1, i32 0, i32 0
+ %2 = getelementptr inbounds [5000000000 x i8], [5000000000 x i8]* %1, i32 0, i32 0
call void @bar(i8* %2)
ret void
}
; CHECK-32: movl $10, %eax
; CHECK-32-NOT: movl ${{.*}}, %eax
%1 = alloca [5000000000 x i8], align 16
- %2 = getelementptr inbounds [5000000000 x i8]* %1, i32 0, i32 0
+ %2 = getelementptr inbounds [5000000000 x i8], [5000000000 x i8]* %1, i32 0, i32 0
call void @bar(i8* %2)
ret i32 10
}
; CHECK-32: subl $2147483647, %esp
; CHECK-32-NOT: movl ${{.*}}, %eax
%1 = alloca [5000000000 x i8], align 16
- %2 = getelementptr inbounds [5000000000 x i8]* %1, i32 0, i32 0
+ %2 = getelementptr inbounds [5000000000 x i8], [5000000000 x i8]* %1, i32 0, i32 0
call void @bar(i8* %2)
ret i32 %x
}
for.body: ; preds = %entry, %for.body
%carry.013 = phi i64 [ %conv6, %for.body ], [ 0, %entry ]
%i.012 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i64* %x, i64 %i.012
+ %arrayidx = getelementptr inbounds i64, i64* %x, i64 %i.012
%0 = load i64* %arrayidx, align 8
%conv2 = zext i64 %0 to i128
%mul = mul i128 %conv2, %conv
%conv3 = zext i64 %carry.013 to i128
%add = add i128 %mul, %conv3
%conv4 = trunc i128 %add to i64
- %arrayidx5 = getelementptr inbounds i64* %z, i64 %i.012
+ %arrayidx5 = getelementptr inbounds i64, i64* %z, i64 %i.012
store i64 %conv4, i64* %arrayidx5, align 8
%shr = lshr i128 %add, 64
%conv6 = trunc i128 %shr to i64
define void @g() {
entry:
%args = alloca inalloca %frame
- %c = getelementptr %frame* %args, i32 0, i32 2
+ %c = getelementptr %frame, %frame* %args, i32 0, i32 2
; CHECK: movl $20, %eax
; CHECK: calll __chkstk
; CHECK: movl %esp,
; CHECK-NEXT: pushl
; CHECK-NEXT: calll _Foo_ctor
; CHECK: addl $4, %esp
- %b = getelementptr %frame* %args, i32 0, i32 1
+ %b = getelementptr %frame, %frame* %args, i32 0, i32 1
store i32 42, i32* %b
; CHECK: movl $42,
- %a = getelementptr %frame* %args, i32 0, i32 0
+ %a = getelementptr %frame, %frame* %args, i32 0, i32 0
call void @Foo_ctor(%Foo* %a)
; CHECK-NEXT: pushl
; CHECK-NEXT: calll _Foo_ctor
blah:
%inalloca.save = call i8* @llvm.stacksave()
%rev_args = alloca inalloca %frame.reverse, align 4
- %beg = getelementptr %frame.reverse* %rev_args, i32 0, i32 0
- %end = getelementptr %frame.reverse* %rev_args, i32 0, i32 1
+ %beg = getelementptr %frame.reverse, %frame.reverse* %rev_args, i32 0, i32 0
+ %end = getelementptr %frame.reverse, %frame.reverse* %rev_args, i32 0, i32 1
; CHECK: calll __chkstk
; CHECK: movl %[[beg:[^,]*]], %esp
; CHECK: movl $8, %eax
; CHECK: calll __chkstk
; CHECK: movl %[[REG:[^,]*]], %esp
- %f1 = getelementptr %Foo* %b, i32 0, i32 0
- %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ %f1 = getelementptr %Foo, %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo, %Foo* %b, i32 0, i32 1
store i32 13, i32* %f1
store i32 42, i32* %f2
; CHECK: movl $13, (%[[REG]])
; CHECK: movl $8, %eax
; CHECK: calll __chkstk
; CHECK: movl %[[REG:[^,]*]], %esp
- %f1 = getelementptr %Foo* %b, i32 0, i32 0
- %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ %f1 = getelementptr %Foo, %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo, %Foo* %b, i32 0, i32 1
store i32 13, i32* %f1
store i32 42, i32* %f2
; CHECK: movl $13, (%[[REG]])
; CHECK: movl $8, %eax
; CHECK: calll __chkstk
; CHECK: movl %[[REG:[^,]*]], %esp
- %f1 = getelementptr %Foo* %b, i32 0, i32 0
- %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ %f1 = getelementptr %Foo, %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo, %Foo* %b, i32 0, i32 1
store i32 13, i32* %f1
store i32 42, i32* %f2
; CHECK: movl $13, (%[[REG]])
; CHECK: movl $8, %eax
; CHECK: calll __chkstk
; CHECK: movl %[[REG:[^,]*]], %esp
- %f1 = getelementptr %Foo* %b, i32 0, i32 0
- %f2 = getelementptr %Foo* %b, i32 0, i32 1
+ %f1 = getelementptr %Foo, %Foo* %b, i32 0, i32 0
+ %f2 = getelementptr %Foo, %Foo* %b, i32 0, i32 1
store i32 13, i32* %f1
store i32 42, i32* %f2
; CHECK-DAG: movl $13, (%[[REG]])
%tmp49746536 = trunc i32 %tmp4943 to i16 ; <i16> [#uses=1]
%tmp49764977 = and i16 %tmp49746536, 4095 ; <i16> [#uses=1]
%mask498049814982 = zext i16 %tmp49764977 to i64 ; <i64> [#uses=1]
- %tmp4984 = getelementptr %struct.FONT_INFO* null, i64 %mask498049814982, i32 5 ; <%struct.rec**> [#uses=1]
+ %tmp4984 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* null, i64 %mask498049814982, i32 5 ; <%struct.rec**> [#uses=1]
%tmp4985 = load %struct.rec** %tmp4984, align 8 ; <%struct.rec*> [#uses=1]
- %tmp4988 = getelementptr %struct.rec* %tmp4985, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
+ %tmp4988 = getelementptr %struct.rec, %struct.rec* %tmp4985, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
%tmp4991 = bitcast %struct.THIRD_UNION* %tmp4988 to i32* ; <i32*> [#uses=1]
%tmp4992 = load i32* %tmp4991, align 8 ; <i32> [#uses=1]
%tmp49924993 = trunc i32 %tmp4992 to i16 ; <i16> [#uses=1]
%tmp50066537 = trunc i32 %tmp4943 to i16 ; <i16> [#uses=1]
%tmp50085009 = and i16 %tmp50066537, 4095 ; <i16> [#uses=1]
%mask501250135014 = zext i16 %tmp50085009 to i64 ; <i64> [#uses=1]
- %tmp5016 = getelementptr %struct.FONT_INFO* null, i64 %mask501250135014, i32 5 ; <%struct.rec**> [#uses=1]
+ %tmp5016 = getelementptr %struct.FONT_INFO, %struct.FONT_INFO* null, i64 %mask501250135014, i32 5 ; <%struct.rec**> [#uses=1]
%tmp5017 = load %struct.rec** %tmp5016, align 8 ; <%struct.rec*> [#uses=1]
- %tmp5020 = getelementptr %struct.rec* %tmp5017, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
+ %tmp5020 = getelementptr %struct.rec, %struct.rec* %tmp5017, i64 0, i32 0, i32 3 ; <%struct.THIRD_UNION*> [#uses=1]
%tmp5023 = bitcast %struct.THIRD_UNION* %tmp5020 to i32* ; <i32*> [#uses=1]
%tmp5024 = load i32* %tmp5023, align 8 ; <i32> [#uses=1]
%tmp50245025 = trunc i32 %tmp5024 to i16 ; <i16> [#uses=1]
%idx.ext.i.i.i = sext i32 %i.0.i.i.i to i64 ; <i64> [#uses=1]
%sub.ptr72.sum.i.i.i = xor i64 %idx.ext.i.i.i, -1 ; <i64> [#uses=1]
%pos.addr.1.sum155.i.i.i = add i64 %tmp154.i.i.i, %sub.ptr72.sum.i.i.i ; <i64> [#uses=1]
- %arrayidx76.i.i.i = getelementptr inbounds i8* undef, i64 %pos.addr.1.sum155.i.i.i ; <i8*> [#uses=0]
+ %arrayidx76.i.i.i = getelementptr inbounds i8, i8* undef, i64 %pos.addr.1.sum155.i.i.i ; <i8*> [#uses=0]
br label %while.cond.backedge.i.i.i
}
%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
- %t2 = getelementptr i32* %y, i32 1
- %t3 = getelementptr i32* %t2, i32 %t1
+ %t2 = getelementptr i32, i32* %y, i32 1
+ %t3 = getelementptr i32, i32* %t2, i32 %t1
ret i32* %t3
%t0 = add i32 %r, %s
%t1 = add i32 %t0, 1
- %t2 = getelementptr i32* %y, i32 1
- %t3 = getelementptr i32* %t2, i32 %t1
+ %t2 = getelementptr i32, i32* %y, i32 1
+ %t3 = getelementptr i32, i32* %t2, i32 %t1
ret i32* %t3
; CHECK: ret
; This gep should be sunk out of this block into the load/store users.
- %P = getelementptr i32* %X, i32 %B
+ %P = getelementptr i32, i32* %X, i32 %B
%G = icmp ult i32 %B, 1234
br i1 %G, label %T, label %F
T:
; RUN: not grep leal %t
define i8 @test(i32 *%P) nounwind {
- %Q = getelementptr i32* %P, i32 1
+ %Q = getelementptr i32, i32* %P, i32 1
%R = bitcast i32* %Q to i8*
%S = load i8* %R
%T = icmp eq i8 %S, 0
TB:
ret i8 4
F:
- %U = getelementptr i8* %R, i32 3
+ %U = getelementptr i8, i8* %R, i32 3
%V = load i8* %U
ret i8 %V
}
define i32 @bar(i32** %P) nounwind {
entry:
%0 = load i32** %P, align 4 ; <i32*> [#uses=2]
- %1 = getelementptr i32* %0, i32 1 ; <i32*> [#uses=1]
+ %1 = getelementptr i32, i32* %0, i32 1 ; <i32*> [#uses=1]
%2 = icmp ugt i32* %1, inttoptr (i64 1233 to i32*) ; <i1> [#uses=1]
br i1 %2, label %bb1, label %bb
br label %bb1
bb1: ; preds = %entry, %bb
- %3 = getelementptr i32* %1, i32 1 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32* %1, i32 1 ; <i32*> [#uses=1]
%4 = load i32* %3, align 4 ; <i32> [#uses=1]
ret i32 %4
}
if.end:
%sub = sub nsw i32 %0, %size
store i32 %sub, i32* %offset, align 8
- %add.ptr = getelementptr inbounds i8* %base, i32 %sub
+ %add.ptr = getelementptr inbounds i8, i8* %base, i32 %sub
br label %return
return:
br label %bb25362
bb2: ; preds = %bb
- %tmp = getelementptr inbounds float* null, i64 1
- %tmp3 = getelementptr inbounds float* %tmp, i64 1
- %tmp4 = getelementptr inbounds float* %tmp3, i64 1
- %tmp5 = getelementptr inbounds float* %tmp4, i64 1
- %tmp6 = getelementptr inbounds float* %tmp5, i64 1
- %tmp7 = getelementptr inbounds float* %tmp6, i64 1
- %tmp8 = getelementptr inbounds float* %tmp7, i64 1
- %tmp9 = getelementptr inbounds float* %tmp8, i64 1
- %tmp10 = getelementptr inbounds float* %tmp9, i64 1
- %tmp11 = getelementptr inbounds float* %tmp10, i64 1
- %tmp12 = getelementptr inbounds float* %tmp11, i64 1
- %tmp13 = getelementptr inbounds float* %tmp12, i64 1
- %tmp14 = getelementptr inbounds float* %tmp13, i64 1
- %tmp15 = getelementptr inbounds float* %tmp14, i64 1
- %tmp16 = getelementptr inbounds float* %tmp15, i64 1
- %tmp17 = getelementptr inbounds float* %tmp16, i64 1
- %tmp18 = getelementptr inbounds float* %tmp17, i64 1
- %tmp19 = getelementptr inbounds float* %tmp18, i64 1
- %tmp20 = getelementptr inbounds float* %tmp19, i64 1
- %tmp21 = getelementptr inbounds float* %tmp20, i64 1
- %tmp22 = getelementptr inbounds float* %tmp21, i64 1
- %tmp23 = getelementptr inbounds float* %tmp22, i64 1
- %tmp24 = getelementptr inbounds float* %tmp23, i64 1
- %tmp25 = getelementptr inbounds float* %tmp24, i64 1
- %tmp26 = getelementptr inbounds float* %tmp25, i64 1
- %tmp27 = getelementptr inbounds float* %tmp26, i64 1
- %tmp28 = getelementptr inbounds float* %tmp27, i64 1
- %tmp29 = getelementptr inbounds float* %tmp28, i64 1
- %tmp30 = getelementptr inbounds float* %tmp29, i64 1
- %tmp31 = getelementptr inbounds float* %tmp30, i64 1
- %tmp32 = getelementptr inbounds float* %tmp31, i64 1
- %tmp33 = getelementptr inbounds float* %tmp32, i64 1
- %tmp34 = getelementptr inbounds float* %tmp33, i64 1
- %tmp35 = getelementptr inbounds float* %tmp34, i64 1
- %tmp36 = getelementptr inbounds float* %tmp35, i64 1
- %tmp37 = getelementptr inbounds float* %tmp36, i64 1
- %tmp38 = getelementptr inbounds float* %tmp37, i64 1
- %tmp39 = getelementptr inbounds float* %tmp38, i64 1
- %tmp40 = getelementptr inbounds float* %tmp39, i64 1
- %tmp41 = getelementptr inbounds float* %tmp40, i64 1
- %tmp42 = getelementptr inbounds float* %tmp41, i64 1
- %tmp43 = getelementptr inbounds float* %tmp42, i64 1
- %tmp44 = getelementptr inbounds float* %tmp43, i64 1
- %tmp45 = getelementptr inbounds float* %tmp44, i64 1
- %tmp46 = getelementptr inbounds float* %tmp45, i64 1
- %tmp47 = getelementptr inbounds float* %tmp46, i64 1
- %tmp48 = getelementptr inbounds float* %tmp47, i64 1
- %tmp49 = getelementptr inbounds float* %tmp48, i64 1
- %tmp50 = getelementptr inbounds float* %tmp49, i64 1
- %tmp51 = getelementptr inbounds float* %tmp50, i64 1
- %tmp52 = getelementptr inbounds float* %tmp51, i64 1
- %tmp53 = getelementptr inbounds float* %tmp52, i64 1
- %tmp54 = getelementptr inbounds float* %tmp53, i64 1
- %tmp55 = getelementptr inbounds float* %tmp54, i64 1
- %tmp56 = getelementptr inbounds float* %tmp55, i64 1
- %tmp57 = getelementptr inbounds float* %tmp56, i64 1
- %tmp58 = getelementptr inbounds float* %tmp57, i64 1
- %tmp59 = getelementptr inbounds float* %tmp58, i64 1
- %tmp60 = getelementptr inbounds float* %tmp59, i64 1
- %tmp61 = getelementptr inbounds float* %tmp60, i64 1
- %tmp62 = getelementptr inbounds float* %tmp61, i64 1
- %tmp63 = getelementptr inbounds float* %tmp62, i64 1
- %tmp64 = getelementptr inbounds float* %tmp63, i64 1
- %tmp65 = getelementptr inbounds float* %tmp64, i64 1
- %tmp66 = getelementptr inbounds float* %tmp65, i64 1
- %tmp67 = getelementptr inbounds float* %tmp66, i64 1
- %tmp68 = getelementptr inbounds float* %tmp67, i64 1
- %tmp69 = getelementptr inbounds float* %tmp68, i64 1
- %tmp70 = getelementptr inbounds float* %tmp69, i64 1
- %tmp71 = getelementptr inbounds float* %tmp70, i64 1
- %tmp72 = getelementptr inbounds float* %tmp71, i64 1
- %tmp73 = getelementptr inbounds float* %tmp72, i64 1
- %tmp74 = getelementptr inbounds float* %tmp73, i64 1
- %tmp75 = getelementptr inbounds float* %tmp74, i64 1
- %tmp76 = getelementptr inbounds float* %tmp75, i64 1
- %tmp77 = getelementptr inbounds float* %tmp76, i64 1
- %tmp78 = getelementptr inbounds float* %tmp77, i64 1
- %tmp79 = getelementptr inbounds float* %tmp78, i64 1
- %tmp80 = getelementptr inbounds float* %tmp79, i64 1
- %tmp81 = getelementptr inbounds float* %tmp80, i64 1
- %tmp82 = getelementptr inbounds float* %tmp81, i64 1
- %tmp83 = getelementptr inbounds float* %tmp82, i64 1
- %tmp84 = getelementptr inbounds float* %tmp83, i64 1
- %tmp85 = getelementptr inbounds float* %tmp84, i64 1
- %tmp86 = getelementptr inbounds float* %tmp85, i64 1
- %tmp87 = getelementptr inbounds float* %tmp86, i64 1
- %tmp88 = getelementptr inbounds float* %tmp87, i64 1
- %tmp89 = getelementptr inbounds float* %tmp88, i64 1
- %tmp90 = getelementptr inbounds float* %tmp89, i64 1
- %tmp91 = getelementptr inbounds float* %tmp90, i64 1
- %tmp92 = getelementptr inbounds float* %tmp91, i64 1
- %tmp93 = getelementptr inbounds float* %tmp92, i64 1
- %tmp94 = getelementptr inbounds float* %tmp93, i64 1
- %tmp95 = getelementptr inbounds float* %tmp94, i64 1
- %tmp96 = getelementptr inbounds float* %tmp95, i64 1
- %tmp97 = getelementptr inbounds float* %tmp96, i64 1
- %tmp98 = getelementptr inbounds float* %tmp97, i64 1
- %tmp99 = getelementptr inbounds float* %tmp98, i64 1
- %tmp100 = getelementptr inbounds float* %tmp99, i64 1
- %tmp101 = getelementptr inbounds float* %tmp100, i64 1
- %tmp102 = getelementptr inbounds float* %tmp101, i64 1
- %tmp103 = getelementptr inbounds float* %tmp102, i64 1
- %tmp104 = getelementptr inbounds float* %tmp103, i64 1
- %tmp105 = getelementptr inbounds float* %tmp104, i64 1
- %tmp106 = getelementptr inbounds float* %tmp105, i64 1
- %tmp107 = getelementptr inbounds float* %tmp106, i64 1
- %tmp108 = getelementptr inbounds float* %tmp107, i64 1
- %tmp109 = getelementptr inbounds float* %tmp108, i64 1
- %tmp110 = getelementptr inbounds float* %tmp109, i64 1
- %tmp111 = getelementptr inbounds float* %tmp110, i64 1
- %tmp112 = getelementptr inbounds float* %tmp111, i64 1
- %tmp113 = getelementptr inbounds float* %tmp112, i64 1
- %tmp114 = getelementptr inbounds float* %tmp113, i64 1
- %tmp115 = getelementptr inbounds float* %tmp114, i64 1
- %tmp116 = getelementptr inbounds float* %tmp115, i64 1
- %tmp117 = getelementptr inbounds float* %tmp116, i64 1
- %tmp118 = getelementptr inbounds float* %tmp117, i64 1
- %tmp119 = getelementptr inbounds float* %tmp118, i64 1
- %tmp120 = getelementptr inbounds float* %tmp119, i64 1
- %tmp121 = getelementptr inbounds float* %tmp120, i64 1
- %tmp122 = getelementptr inbounds float* %tmp121, i64 1
- %tmp123 = getelementptr inbounds float* %tmp122, i64 1
- %tmp124 = getelementptr inbounds float* %tmp123, i64 1
- %tmp125 = getelementptr inbounds float* %tmp124, i64 1
- %tmp126 = getelementptr inbounds float* %tmp125, i64 1
- %tmp127 = getelementptr inbounds float* %tmp126, i64 1
- %tmp128 = getelementptr inbounds float* %tmp127, i64 1
- %tmp129 = getelementptr inbounds float* %tmp128, i64 1
- %tmp130 = getelementptr inbounds float* %tmp129, i64 1
- %tmp131 = getelementptr inbounds float* %tmp130, i64 1
- %tmp132 = getelementptr inbounds float* %tmp131, i64 1
- %tmp133 = getelementptr inbounds float* %tmp132, i64 1
- %tmp134 = getelementptr inbounds float* %tmp133, i64 1
- %tmp135 = getelementptr inbounds float* %tmp134, i64 1
- %tmp136 = getelementptr inbounds float* %tmp135, i64 1
- %tmp137 = getelementptr inbounds float* %tmp136, i64 1
- %tmp138 = getelementptr inbounds float* %tmp137, i64 1
- %tmp139 = getelementptr inbounds float* %tmp138, i64 1
- %tmp140 = getelementptr inbounds float* %tmp139, i64 1
- %tmp141 = getelementptr inbounds float* %tmp140, i64 1
- %tmp142 = getelementptr inbounds float* %tmp141, i64 1
- %tmp143 = getelementptr inbounds float* %tmp142, i64 1
- %tmp144 = getelementptr inbounds float* %tmp143, i64 1
- %tmp145 = getelementptr inbounds float* %tmp144, i64 1
- %tmp146 = getelementptr inbounds float* %tmp145, i64 1
- %tmp147 = getelementptr inbounds float* %tmp146, i64 1
- %tmp148 = getelementptr inbounds float* %tmp147, i64 1
- %tmp149 = getelementptr inbounds float* %tmp148, i64 1
- %tmp150 = getelementptr inbounds float* %tmp149, i64 1
- %tmp151 = getelementptr inbounds float* %tmp150, i64 1
- %tmp152 = getelementptr inbounds float* %tmp151, i64 1
- %tmp153 = getelementptr inbounds float* %tmp152, i64 1
- %tmp154 = getelementptr inbounds float* %tmp153, i64 1
- %tmp155 = getelementptr inbounds float* %tmp154, i64 1
- %tmp156 = getelementptr inbounds float* %tmp155, i64 1
- %tmp157 = getelementptr inbounds float* %tmp156, i64 1
- %tmp158 = getelementptr inbounds float* %tmp157, i64 1
- %tmp159 = getelementptr inbounds float* %tmp158, i64 1
- %tmp160 = getelementptr inbounds float* %tmp159, i64 1
- %tmp161 = getelementptr inbounds float* %tmp160, i64 1
- %tmp162 = getelementptr inbounds float* %tmp161, i64 1
- %tmp163 = getelementptr inbounds float* %tmp162, i64 1
- %tmp164 = getelementptr inbounds float* %tmp163, i64 1
- %tmp165 = getelementptr inbounds float* %tmp164, i64 1
- %tmp166 = getelementptr inbounds float* %tmp165, i64 1
- %tmp167 = getelementptr inbounds float* %tmp166, i64 1
- %tmp168 = getelementptr inbounds float* %tmp167, i64 1
- %tmp169 = getelementptr inbounds float* %tmp168, i64 1
- %tmp170 = getelementptr inbounds float* %tmp169, i64 1
- %tmp171 = getelementptr inbounds float* %tmp170, i64 1
- %tmp172 = getelementptr inbounds float* %tmp171, i64 1
- %tmp173 = getelementptr inbounds float* %tmp172, i64 1
- %tmp174 = getelementptr inbounds float* %tmp173, i64 1
- %tmp175 = getelementptr inbounds float* %tmp174, i64 1
- %tmp176 = getelementptr inbounds float* %tmp175, i64 1
- %tmp177 = getelementptr inbounds float* %tmp176, i64 1
- %tmp178 = getelementptr inbounds float* %tmp177, i64 1
- %tmp179 = getelementptr inbounds float* %tmp178, i64 1
- %tmp180 = getelementptr inbounds float* %tmp179, i64 1
- %tmp181 = getelementptr inbounds float* %tmp180, i64 1
- %tmp182 = getelementptr inbounds float* %tmp181, i64 1
- %tmp183 = getelementptr inbounds float* %tmp182, i64 1
- %tmp184 = getelementptr inbounds float* %tmp183, i64 1
- %tmp185 = getelementptr inbounds float* %tmp184, i64 1
- %tmp186 = getelementptr inbounds float* %tmp185, i64 1
- %tmp187 = getelementptr inbounds float* %tmp186, i64 1
- %tmp188 = getelementptr inbounds float* %tmp187, i64 1
- %tmp189 = getelementptr inbounds float* %tmp188, i64 1
- %tmp190 = getelementptr inbounds float* %tmp189, i64 1
- %tmp191 = getelementptr inbounds float* %tmp190, i64 1
- %tmp192 = getelementptr inbounds float* %tmp191, i64 1
- %tmp193 = getelementptr inbounds float* %tmp192, i64 1
- %tmp194 = getelementptr inbounds float* %tmp193, i64 1
- %tmp195 = getelementptr inbounds float* %tmp194, i64 1
- %tmp196 = getelementptr inbounds float* %tmp195, i64 1
- %tmp197 = getelementptr inbounds float* %tmp196, i64 1
- %tmp198 = getelementptr inbounds float* %tmp197, i64 1
- %tmp199 = getelementptr inbounds float* %tmp198, i64 1
- %tmp200 = getelementptr inbounds float* %tmp199, i64 1
- %tmp201 = getelementptr inbounds float* %tmp200, i64 1
- %tmp202 = getelementptr inbounds float* %tmp201, i64 1
- %tmp203 = getelementptr inbounds float* %tmp202, i64 1
- %tmp204 = getelementptr inbounds float* %tmp203, i64 1
- %tmp205 = getelementptr inbounds float* %tmp204, i64 1
- %tmp206 = getelementptr inbounds float* %tmp205, i64 1
- %tmp207 = getelementptr inbounds float* %tmp206, i64 1
- %tmp208 = getelementptr inbounds float* %tmp207, i64 1
- %tmp209 = getelementptr inbounds float* %tmp208, i64 1
- %tmp210 = getelementptr inbounds float* %tmp209, i64 1
- %tmp211 = getelementptr inbounds float* %tmp210, i64 1
- %tmp212 = getelementptr inbounds float* %tmp211, i64 1
- %tmp213 = getelementptr inbounds float* %tmp212, i64 1
- %tmp214 = getelementptr inbounds float* %tmp213, i64 1
- %tmp215 = getelementptr inbounds float* %tmp214, i64 1
- %tmp216 = getelementptr inbounds float* %tmp215, i64 1
- %tmp217 = getelementptr inbounds float* %tmp216, i64 1
- %tmp218 = getelementptr inbounds float* %tmp217, i64 1
- %tmp219 = getelementptr inbounds float* %tmp218, i64 1
- %tmp220 = getelementptr inbounds float* %tmp219, i64 1
- %tmp221 = getelementptr inbounds float* %tmp220, i64 1
- %tmp222 = getelementptr inbounds float* %tmp221, i64 1
- %tmp223 = getelementptr inbounds float* %tmp222, i64 1
- %tmp224 = getelementptr inbounds float* %tmp223, i64 1
- %tmp225 = getelementptr inbounds float* %tmp224, i64 1
- %tmp226 = getelementptr inbounds float* %tmp225, i64 1
- %tmp227 = getelementptr inbounds float* %tmp226, i64 1
- %tmp228 = getelementptr inbounds float* %tmp227, i64 1
- %tmp229 = getelementptr inbounds float* %tmp228, i64 1
- %tmp230 = getelementptr inbounds float* %tmp229, i64 1
- %tmp231 = getelementptr inbounds float* %tmp230, i64 1
- %tmp232 = getelementptr inbounds float* %tmp231, i64 1
- %tmp233 = getelementptr inbounds float* %tmp232, i64 1
- %tmp234 = getelementptr inbounds float* %tmp233, i64 1
- %tmp235 = getelementptr inbounds float* %tmp234, i64 1
- %tmp236 = getelementptr inbounds float* %tmp235, i64 1
- %tmp237 = getelementptr inbounds float* %tmp236, i64 1
- %tmp238 = getelementptr inbounds float* %tmp237, i64 1
- %tmp239 = getelementptr inbounds float* %tmp238, i64 1
- %tmp240 = getelementptr inbounds float* %tmp239, i64 1
- %tmp241 = getelementptr inbounds float* %tmp240, i64 1
- %tmp242 = getelementptr inbounds float* %tmp241, i64 1
- %tmp243 = getelementptr inbounds float* %tmp242, i64 1
- %tmp244 = getelementptr inbounds float* %tmp243, i64 1
- %tmp245 = getelementptr inbounds float* %tmp244, i64 1
- %tmp246 = getelementptr inbounds float* %tmp245, i64 1
- %tmp247 = getelementptr inbounds float* %tmp246, i64 1
- %tmp248 = getelementptr inbounds float* %tmp247, i64 1
- %tmp249 = getelementptr inbounds float* %tmp248, i64 1
- %tmp250 = getelementptr inbounds float* %tmp249, i64 1
- %tmp251 = getelementptr inbounds float* %tmp250, i64 1
- %tmp252 = getelementptr inbounds float* %tmp251, i64 1
- %tmp253 = getelementptr inbounds float* %tmp252, i64 1
- %tmp254 = getelementptr inbounds float* %tmp253, i64 1
- %tmp255 = getelementptr inbounds float* %tmp254, i64 1
- %tmp256 = getelementptr inbounds float* %tmp255, i64 1
- %tmp257 = getelementptr inbounds float* %tmp256, i64 1
- %tmp258 = getelementptr inbounds float* %tmp257, i64 1
- %tmp259 = getelementptr inbounds float* %tmp258, i64 1
- %tmp260 = getelementptr inbounds float* %tmp259, i64 1
- %tmp261 = getelementptr inbounds float* %tmp260, i64 1
- %tmp262 = getelementptr inbounds float* %tmp261, i64 1
- %tmp263 = getelementptr inbounds float* %tmp262, i64 1
- %tmp264 = getelementptr inbounds float* %tmp263, i64 1
- %tmp265 = getelementptr inbounds float* %tmp264, i64 1
- %tmp266 = getelementptr inbounds float* %tmp265, i64 1
- %tmp267 = getelementptr inbounds float* %tmp266, i64 1
- %tmp268 = getelementptr inbounds float* %tmp267, i64 1
- %tmp269 = getelementptr inbounds float* %tmp268, i64 1
- %tmp270 = getelementptr inbounds float* %tmp269, i64 1
- %tmp271 = getelementptr inbounds float* %tmp270, i64 1
- %tmp272 = getelementptr inbounds float* %tmp271, i64 1
- %tmp273 = getelementptr inbounds float* %tmp272, i64 1
- %tmp274 = getelementptr inbounds float* %tmp273, i64 1
- %tmp275 = getelementptr inbounds float* %tmp274, i64 1
- %tmp276 = getelementptr inbounds float* %tmp275, i64 1
- %tmp277 = getelementptr inbounds float* %tmp276, i64 1
- %tmp278 = getelementptr inbounds float* %tmp277, i64 1
- %tmp279 = getelementptr inbounds float* %tmp278, i64 1
- %tmp280 = getelementptr inbounds float* %tmp279, i64 1
- %tmp281 = getelementptr inbounds float* %tmp280, i64 1
- %tmp282 = getelementptr inbounds float* %tmp281, i64 1
- %tmp283 = getelementptr inbounds float* %tmp282, i64 1
- %tmp284 = getelementptr inbounds float* %tmp283, i64 1
- %tmp285 = getelementptr inbounds float* %tmp284, i64 1
- %tmp286 = getelementptr inbounds float* %tmp285, i64 1
- %tmp287 = getelementptr inbounds float* %tmp286, i64 1
- %tmp288 = getelementptr inbounds float* %tmp287, i64 1
- %tmp289 = getelementptr inbounds float* %tmp288, i64 1
- %tmp290 = getelementptr inbounds float* %tmp289, i64 1
- %tmp291 = getelementptr inbounds float* %tmp290, i64 1
- %tmp292 = getelementptr inbounds float* %tmp291, i64 1
- %tmp293 = getelementptr inbounds float* %tmp292, i64 1
- %tmp294 = getelementptr inbounds float* %tmp293, i64 1
- %tmp295 = getelementptr inbounds float* %tmp294, i64 1
- %tmp296 = getelementptr inbounds float* %tmp295, i64 1
- %tmp297 = getelementptr inbounds float* %tmp296, i64 1
- %tmp298 = getelementptr inbounds float* %tmp297, i64 1
- %tmp299 = getelementptr inbounds float* %tmp298, i64 1
- %tmp300 = getelementptr inbounds float* %tmp299, i64 1
- %tmp301 = getelementptr inbounds float* %tmp300, i64 1
- %tmp302 = getelementptr inbounds float* %tmp301, i64 1
- %tmp303 = getelementptr inbounds float* %tmp302, i64 1
- %tmp304 = getelementptr inbounds float* %tmp303, i64 1
- %tmp305 = getelementptr inbounds float* %tmp304, i64 1
- %tmp306 = getelementptr inbounds float* %tmp305, i64 1
- %tmp307 = getelementptr inbounds float* %tmp306, i64 1
- %tmp308 = getelementptr inbounds float* %tmp307, i64 1
- %tmp309 = getelementptr inbounds float* %tmp308, i64 1
- %tmp310 = getelementptr inbounds float* %tmp309, i64 1
- %tmp311 = getelementptr inbounds float* %tmp310, i64 1
- %tmp312 = getelementptr inbounds float* %tmp311, i64 1
- %tmp313 = getelementptr inbounds float* %tmp312, i64 1
- %tmp314 = getelementptr inbounds float* %tmp313, i64 1
- %tmp315 = getelementptr inbounds float* %tmp314, i64 1
- %tmp316 = getelementptr inbounds float* %tmp315, i64 1
- %tmp317 = getelementptr inbounds float* %tmp316, i64 1
- %tmp318 = getelementptr inbounds float* %tmp317, i64 1
- %tmp319 = getelementptr inbounds float* %tmp318, i64 1
- %tmp320 = getelementptr inbounds float* %tmp319, i64 1
- %tmp321 = getelementptr inbounds float* %tmp320, i64 1
- %tmp322 = getelementptr inbounds float* %tmp321, i64 1
- %tmp323 = getelementptr inbounds float* %tmp322, i64 1
- %tmp324 = getelementptr inbounds float* %tmp323, i64 1
- %tmp325 = getelementptr inbounds float* %tmp324, i64 1
- %tmp326 = getelementptr inbounds float* %tmp325, i64 1
- %tmp327 = getelementptr inbounds float* %tmp326, i64 1
- %tmp328 = getelementptr inbounds float* %tmp327, i64 1
- %tmp329 = getelementptr inbounds float* %tmp328, i64 1
- %tmp330 = getelementptr inbounds float* %tmp329, i64 1
- %tmp331 = getelementptr inbounds float* %tmp330, i64 1
- %tmp332 = getelementptr inbounds float* %tmp331, i64 1
- %tmp333 = getelementptr inbounds float* %tmp332, i64 1
- %tmp334 = getelementptr inbounds float* %tmp333, i64 1
- %tmp335 = getelementptr inbounds float* %tmp334, i64 1
- %tmp336 = getelementptr inbounds float* %tmp335, i64 1
- %tmp337 = getelementptr inbounds float* %tmp336, i64 1
- %tmp338 = getelementptr inbounds float* %tmp337, i64 1
- %tmp339 = getelementptr inbounds float* %tmp338, i64 1
- %tmp340 = getelementptr inbounds float* %tmp339, i64 1
- %tmp341 = getelementptr inbounds float* %tmp340, i64 1
- %tmp342 = getelementptr inbounds float* %tmp341, i64 1
- %tmp343 = getelementptr inbounds float* %tmp342, i64 1
- %tmp344 = getelementptr inbounds float* %tmp343, i64 1
- %tmp345 = getelementptr inbounds float* %tmp344, i64 1
- %tmp346 = getelementptr inbounds float* %tmp345, i64 1
- %tmp347 = getelementptr inbounds float* %tmp346, i64 1
- %tmp348 = getelementptr inbounds float* %tmp347, i64 1
- %tmp349 = getelementptr inbounds float* %tmp348, i64 1
- %tmp350 = getelementptr inbounds float* %tmp349, i64 1
- %tmp351 = getelementptr inbounds float* %tmp350, i64 1
- %tmp352 = getelementptr inbounds float* %tmp351, i64 1
- %tmp353 = getelementptr inbounds float* %tmp352, i64 1
- %tmp354 = getelementptr inbounds float* %tmp353, i64 1
- %tmp355 = getelementptr inbounds float* %tmp354, i64 1
- %tmp356 = getelementptr inbounds float* %tmp355, i64 1
- %tmp357 = getelementptr inbounds float* %tmp356, i64 1
- %tmp358 = getelementptr inbounds float* %tmp357, i64 1
- %tmp359 = getelementptr inbounds float* %tmp358, i64 1
- %tmp360 = getelementptr inbounds float* %tmp359, i64 1
- %tmp361 = getelementptr inbounds float* %tmp360, i64 1
- %tmp362 = getelementptr inbounds float* %tmp361, i64 1
- %tmp363 = getelementptr inbounds float* %tmp362, i64 1
- %tmp364 = getelementptr inbounds float* %tmp363, i64 1
- %tmp365 = getelementptr inbounds float* %tmp364, i64 1
- %tmp366 = getelementptr inbounds float* %tmp365, i64 1
- %tmp367 = getelementptr inbounds float* %tmp366, i64 1
- %tmp368 = getelementptr inbounds float* %tmp367, i64 1
- %tmp369 = getelementptr inbounds float* %tmp368, i64 1
- %tmp370 = getelementptr inbounds float* %tmp369, i64 1
- %tmp371 = getelementptr inbounds float* %tmp370, i64 1
- %tmp372 = getelementptr inbounds float* %tmp371, i64 1
- %tmp373 = getelementptr inbounds float* %tmp372, i64 1
- %tmp374 = getelementptr inbounds float* %tmp373, i64 1
- %tmp375 = getelementptr inbounds float* %tmp374, i64 1
- %tmp376 = getelementptr inbounds float* %tmp375, i64 1
- %tmp377 = getelementptr inbounds float* %tmp376, i64 1
- %tmp378 = getelementptr inbounds float* %tmp377, i64 1
- %tmp379 = getelementptr inbounds float* %tmp378, i64 1
- %tmp380 = getelementptr inbounds float* %tmp379, i64 1
- %tmp381 = getelementptr inbounds float* %tmp380, i64 1
- %tmp382 = getelementptr inbounds float* %tmp381, i64 1
- %tmp383 = getelementptr inbounds float* %tmp382, i64 1
- %tmp384 = getelementptr inbounds float* %tmp383, i64 1
- %tmp385 = getelementptr inbounds float* %tmp384, i64 1
- %tmp386 = getelementptr inbounds float* %tmp385, i64 1
- %tmp387 = getelementptr inbounds float* %tmp386, i64 1
- %tmp388 = getelementptr inbounds float* %tmp387, i64 1
- %tmp389 = getelementptr inbounds float* %tmp388, i64 1
- %tmp390 = getelementptr inbounds float* %tmp389, i64 1
- %tmp391 = getelementptr inbounds float* %tmp390, i64 1
- %tmp392 = getelementptr inbounds float* %tmp391, i64 1
- %tmp393 = getelementptr inbounds float* %tmp392, i64 1
- %tmp394 = getelementptr inbounds float* %tmp393, i64 1
- %tmp395 = getelementptr inbounds float* %tmp394, i64 1
- %tmp396 = getelementptr inbounds float* %tmp395, i64 1
- %tmp397 = getelementptr inbounds float* %tmp396, i64 1
- %tmp398 = getelementptr inbounds float* %tmp397, i64 1
- %tmp399 = getelementptr inbounds float* %tmp398, i64 1
- %tmp400 = getelementptr inbounds float* %tmp399, i64 1
- %tmp401 = getelementptr inbounds float* %tmp400, i64 1
- %tmp402 = getelementptr inbounds float* %tmp401, i64 1
- %tmp403 = getelementptr inbounds float* %tmp402, i64 1
- %tmp404 = getelementptr inbounds float* %tmp403, i64 1
- %tmp405 = getelementptr inbounds float* %tmp404, i64 1
- %tmp406 = getelementptr inbounds float* %tmp405, i64 1
- %tmp407 = getelementptr inbounds float* %tmp406, i64 1
- %tmp408 = getelementptr inbounds float* %tmp407, i64 1
- %tmp409 = getelementptr inbounds float* %tmp408, i64 1
- %tmp410 = getelementptr inbounds float* %tmp409, i64 1
- %tmp411 = getelementptr inbounds float* %tmp410, i64 1
- %tmp412 = getelementptr inbounds float* %tmp411, i64 1
- %tmp413 = getelementptr inbounds float* %tmp412, i64 1
- %tmp414 = getelementptr inbounds float* %tmp413, i64 1
- %tmp415 = getelementptr inbounds float* %tmp414, i64 1
- %tmp416 = getelementptr inbounds float* %tmp415, i64 1
- %tmp417 = getelementptr inbounds float* %tmp416, i64 1
- %tmp418 = getelementptr inbounds float* %tmp417, i64 1
- %tmp419 = getelementptr inbounds float* %tmp418, i64 1
- %tmp420 = getelementptr inbounds float* %tmp419, i64 1
- %tmp421 = getelementptr inbounds float* %tmp420, i64 1
- %tmp422 = getelementptr inbounds float* %tmp421, i64 1
- %tmp423 = getelementptr inbounds float* %tmp422, i64 1
- %tmp424 = getelementptr inbounds float* %tmp423, i64 1
- %tmp425 = getelementptr inbounds float* %tmp424, i64 1
- %tmp426 = getelementptr inbounds float* %tmp425, i64 1
- %tmp427 = getelementptr inbounds float* %tmp426, i64 1
- %tmp428 = getelementptr inbounds float* %tmp427, i64 1
- %tmp429 = getelementptr inbounds float* %tmp428, i64 1
- %tmp430 = getelementptr inbounds float* %tmp429, i64 1
- %tmp431 = getelementptr inbounds float* %tmp430, i64 1
- %tmp432 = getelementptr inbounds float* %tmp431, i64 1
- %tmp433 = getelementptr inbounds float* %tmp432, i64 1
- %tmp434 = getelementptr inbounds float* %tmp433, i64 1
- %tmp435 = getelementptr inbounds float* %tmp434, i64 1
- %tmp436 = getelementptr inbounds float* %tmp435, i64 1
- %tmp437 = getelementptr inbounds float* %tmp436, i64 1
- %tmp438 = getelementptr inbounds float* %tmp437, i64 1
- %tmp439 = getelementptr inbounds float* %tmp438, i64 1
- %tmp440 = getelementptr inbounds float* %tmp439, i64 1
- %tmp441 = getelementptr inbounds float* %tmp440, i64 1
- %tmp442 = getelementptr inbounds float* %tmp441, i64 1
- %tmp443 = getelementptr inbounds float* %tmp442, i64 1
- %tmp444 = getelementptr inbounds float* %tmp443, i64 1
- %tmp445 = getelementptr inbounds float* %tmp444, i64 1
- %tmp446 = getelementptr inbounds float* %tmp445, i64 1
- %tmp447 = getelementptr inbounds float* %tmp446, i64 1
- %tmp448 = getelementptr inbounds float* %tmp447, i64 1
- %tmp449 = getelementptr inbounds float* %tmp448, i64 1
- %tmp450 = getelementptr inbounds float* %tmp449, i64 1
- %tmp451 = getelementptr inbounds float* %tmp450, i64 1
- %tmp452 = getelementptr inbounds float* %tmp451, i64 1
- %tmp453 = getelementptr inbounds float* %tmp452, i64 1
- %tmp454 = getelementptr inbounds float* %tmp453, i64 1
- %tmp455 = getelementptr inbounds float* %tmp454, i64 1
- %tmp456 = getelementptr inbounds float* %tmp455, i64 1
- %tmp457 = getelementptr inbounds float* %tmp456, i64 1
- %tmp458 = getelementptr inbounds float* %tmp457, i64 1
- %tmp459 = getelementptr inbounds float* %tmp458, i64 1
- %tmp460 = getelementptr inbounds float* %tmp459, i64 1
- %tmp461 = getelementptr inbounds float* %tmp460, i64 1
- %tmp462 = getelementptr inbounds float* %tmp461, i64 1
- %tmp463 = getelementptr inbounds float* %tmp462, i64 1
- %tmp464 = getelementptr inbounds float* %tmp463, i64 1
- %tmp465 = getelementptr inbounds float* %tmp464, i64 1
- %tmp466 = getelementptr inbounds float* %tmp465, i64 1
- %tmp467 = getelementptr inbounds float* %tmp466, i64 1
- %tmp468 = getelementptr inbounds float* %tmp467, i64 1
- %tmp469 = getelementptr inbounds float* %tmp468, i64 1
- %tmp470 = getelementptr inbounds float* %tmp469, i64 1
- %tmp471 = getelementptr inbounds float* %tmp470, i64 1
- %tmp472 = getelementptr inbounds float* %tmp471, i64 1
- %tmp473 = getelementptr inbounds float* %tmp472, i64 1
- %tmp474 = getelementptr inbounds float* %tmp473, i64 1
- %tmp475 = getelementptr inbounds float* %tmp474, i64 1
- %tmp476 = getelementptr inbounds float* %tmp475, i64 1
- %tmp477 = getelementptr inbounds float* %tmp476, i64 1
- %tmp478 = getelementptr inbounds float* %tmp477, i64 1
- %tmp479 = getelementptr inbounds float* %tmp478, i64 1
- %tmp480 = getelementptr inbounds float* %tmp479, i64 1
- %tmp481 = getelementptr inbounds float* %tmp480, i64 1
- %tmp482 = getelementptr inbounds float* %tmp481, i64 1
- %tmp483 = getelementptr inbounds float* %tmp482, i64 1
- %tmp484 = getelementptr inbounds float* %tmp483, i64 1
- %tmp485 = getelementptr inbounds float* %tmp484, i64 1
- %tmp486 = getelementptr inbounds float* %tmp485, i64 1
- %tmp487 = getelementptr inbounds float* %tmp486, i64 1
- %tmp488 = getelementptr inbounds float* %tmp487, i64 1
- %tmp489 = getelementptr inbounds float* %tmp488, i64 1
- %tmp490 = getelementptr inbounds float* %tmp489, i64 1
- %tmp491 = getelementptr inbounds float* %tmp490, i64 1
- %tmp492 = getelementptr inbounds float* %tmp491, i64 1
- %tmp493 = getelementptr inbounds float* %tmp492, i64 1
- %tmp494 = getelementptr inbounds float* %tmp493, i64 1
- %tmp495 = getelementptr inbounds float* %tmp494, i64 1
- %tmp496 = getelementptr inbounds float* %tmp495, i64 1
- %tmp497 = getelementptr inbounds float* %tmp496, i64 1
- %tmp498 = getelementptr inbounds float* %tmp497, i64 1
- %tmp499 = getelementptr inbounds float* %tmp498, i64 1
- %tmp500 = getelementptr inbounds float* %tmp499, i64 1
- %tmp501 = getelementptr inbounds float* %tmp500, i64 1
- %tmp502 = getelementptr inbounds float* %tmp501, i64 1
- %tmp503 = getelementptr inbounds float* %tmp502, i64 1
- %tmp504 = getelementptr inbounds float* %tmp503, i64 1
- %tmp505 = getelementptr inbounds float* %tmp504, i64 1
- %tmp506 = getelementptr inbounds float* %tmp505, i64 1
- %tmp507 = getelementptr inbounds float* %tmp506, i64 1
- %tmp508 = getelementptr inbounds float* %tmp507, i64 1
- %tmp509 = getelementptr inbounds float* %tmp508, i64 1
- %tmp510 = getelementptr inbounds float* %tmp509, i64 1
- %tmp511 = getelementptr inbounds float* %tmp510, i64 1
- %tmp512 = getelementptr inbounds float* %tmp511, i64 1
- %tmp513 = getelementptr inbounds float* %tmp512, i64 1
- %tmp514 = getelementptr inbounds float* %tmp513, i64 1
- %tmp515 = getelementptr inbounds float* %tmp514, i64 1
- %tmp516 = getelementptr inbounds float* %tmp515, i64 1
- %tmp517 = getelementptr inbounds float* %tmp516, i64 1
- %tmp518 = getelementptr inbounds float* %tmp517, i64 1
- %tmp519 = getelementptr inbounds float* %tmp518, i64 1
- %tmp520 = getelementptr inbounds float* %tmp519, i64 1
- %tmp521 = getelementptr inbounds float* %tmp520, i64 1
- %tmp522 = getelementptr inbounds float* %tmp521, i64 1
- %tmp523 = getelementptr inbounds float* %tmp522, i64 1
- %tmp524 = getelementptr inbounds float* %tmp523, i64 1
- %tmp525 = getelementptr inbounds float* %tmp524, i64 1
- %tmp526 = getelementptr inbounds float* %tmp525, i64 1
- %tmp527 = getelementptr inbounds float* %tmp526, i64 1
- %tmp528 = getelementptr inbounds float* %tmp527, i64 1
- %tmp529 = getelementptr inbounds float* %tmp528, i64 1
- %tmp530 = getelementptr inbounds float* %tmp529, i64 1
- %tmp531 = getelementptr inbounds float* %tmp530, i64 1
- %tmp532 = getelementptr inbounds float* %tmp531, i64 1
- %tmp533 = getelementptr inbounds float* %tmp532, i64 1
- %tmp534 = getelementptr inbounds float* %tmp533, i64 1
- %tmp535 = getelementptr inbounds float* %tmp534, i64 1
- %tmp536 = getelementptr inbounds float* %tmp535, i64 1
- %tmp537 = getelementptr inbounds float* %tmp536, i64 1
- %tmp538 = getelementptr inbounds float* %tmp537, i64 1
- %tmp539 = getelementptr inbounds float* %tmp538, i64 1
- %tmp540 = getelementptr inbounds float* %tmp539, i64 1
- %tmp541 = getelementptr inbounds float* %tmp540, i64 1
- %tmp542 = getelementptr inbounds float* %tmp541, i64 1
- %tmp543 = getelementptr inbounds float* %tmp542, i64 1
- %tmp544 = getelementptr inbounds float* %tmp543, i64 1
- %tmp545 = getelementptr inbounds float* %tmp544, i64 1
- %tmp546 = getelementptr inbounds float* %tmp545, i64 1
- %tmp547 = getelementptr inbounds float* %tmp546, i64 1
- %tmp548 = getelementptr inbounds float* %tmp547, i64 1
- %tmp549 = getelementptr inbounds float* %tmp548, i64 1
- %tmp550 = getelementptr inbounds float* %tmp549, i64 1
- %tmp551 = getelementptr inbounds float* %tmp550, i64 1
- %tmp552 = getelementptr inbounds float* %tmp551, i64 1
- %tmp553 = getelementptr inbounds float* %tmp552, i64 1
- %tmp554 = getelementptr inbounds float* %tmp553, i64 1
- %tmp555 = getelementptr inbounds float* %tmp554, i64 1
- %tmp556 = getelementptr inbounds float* %tmp555, i64 1
- %tmp557 = getelementptr inbounds float* %tmp556, i64 1
- %tmp558 = getelementptr inbounds float* %tmp557, i64 1
- %tmp559 = getelementptr inbounds float* %tmp558, i64 1
- %tmp560 = getelementptr inbounds float* %tmp559, i64 1
- %tmp561 = getelementptr inbounds float* %tmp560, i64 1
- %tmp562 = getelementptr inbounds float* %tmp561, i64 1
- %tmp563 = getelementptr inbounds float* %tmp562, i64 1
- %tmp564 = getelementptr inbounds float* %tmp563, i64 1
- %tmp565 = getelementptr inbounds float* %tmp564, i64 1
- %tmp566 = getelementptr inbounds float* %tmp565, i64 1
- %tmp567 = getelementptr inbounds float* %tmp566, i64 1
- %tmp568 = getelementptr inbounds float* %tmp567, i64 1
- %tmp569 = getelementptr inbounds float* %tmp568, i64 1
- %tmp570 = getelementptr inbounds float* %tmp569, i64 1
- %tmp571 = getelementptr inbounds float* %tmp570, i64 1
- %tmp572 = getelementptr inbounds float* %tmp571, i64 1
- %tmp573 = getelementptr inbounds float* %tmp572, i64 1
- %tmp574 = getelementptr inbounds float* %tmp573, i64 1
- %tmp575 = getelementptr inbounds float* %tmp574, i64 1
- %tmp576 = getelementptr inbounds float* %tmp575, i64 1
- %tmp577 = getelementptr inbounds float* %tmp576, i64 1
- %tmp578 = getelementptr inbounds float* %tmp577, i64 1
- %tmp579 = getelementptr inbounds float* %tmp578, i64 1
- %tmp580 = getelementptr inbounds float* %tmp579, i64 1
- %tmp581 = getelementptr inbounds float* %tmp580, i64 1
- %tmp582 = getelementptr inbounds float* %tmp581, i64 1
- %tmp583 = getelementptr inbounds float* %tmp582, i64 1
- %tmp584 = getelementptr inbounds float* %tmp583, i64 1
- %tmp585 = getelementptr inbounds float* %tmp584, i64 1
- %tmp586 = getelementptr inbounds float* %tmp585, i64 1
- %tmp587 = getelementptr inbounds float* %tmp586, i64 1
- %tmp588 = getelementptr inbounds float* %tmp587, i64 1
- %tmp589 = getelementptr inbounds float* %tmp588, i64 1
- %tmp590 = getelementptr inbounds float* %tmp589, i64 1
- %tmp591 = getelementptr inbounds float* %tmp590, i64 1
- %tmp592 = getelementptr inbounds float* %tmp591, i64 1
- %tmp593 = getelementptr inbounds float* %tmp592, i64 1
- %tmp594 = getelementptr inbounds float* %tmp593, i64 1
- %tmp595 = getelementptr inbounds float* %tmp594, i64 1
- %tmp596 = getelementptr inbounds float* %tmp595, i64 1
- %tmp597 = getelementptr inbounds float* %tmp596, i64 1
- %tmp598 = getelementptr inbounds float* %tmp597, i64 1
- %tmp599 = getelementptr inbounds float* %tmp598, i64 1
- %tmp600 = getelementptr inbounds float* %tmp599, i64 1
- %tmp601 = getelementptr inbounds float* %tmp600, i64 1
- %tmp602 = getelementptr inbounds float* %tmp601, i64 1
- %tmp603 = getelementptr inbounds float* %tmp602, i64 1
- %tmp604 = getelementptr inbounds float* %tmp603, i64 1
- %tmp605 = getelementptr inbounds float* %tmp604, i64 1
- %tmp606 = getelementptr inbounds float* %tmp605, i64 1
- %tmp607 = getelementptr inbounds float* %tmp606, i64 1
- %tmp608 = getelementptr inbounds float* %tmp607, i64 1
- %tmp609 = getelementptr inbounds float* %tmp608, i64 1
- %tmp610 = getelementptr inbounds float* %tmp609, i64 1
- %tmp611 = getelementptr inbounds float* %tmp610, i64 1
- %tmp612 = getelementptr inbounds float* %tmp611, i64 1
- %tmp613 = getelementptr inbounds float* %tmp612, i64 1
- %tmp614 = getelementptr inbounds float* %tmp613, i64 1
- %tmp615 = getelementptr inbounds float* %tmp614, i64 1
- %tmp616 = getelementptr inbounds float* %tmp615, i64 1
- %tmp617 = getelementptr inbounds float* %tmp616, i64 1
- %tmp618 = getelementptr inbounds float* %tmp617, i64 1
- %tmp619 = getelementptr inbounds float* %tmp618, i64 1
- %tmp620 = getelementptr inbounds float* %tmp619, i64 1
- %tmp621 = getelementptr inbounds float* %tmp620, i64 1
- %tmp622 = getelementptr inbounds float* %tmp621, i64 1
- %tmp623 = getelementptr inbounds float* %tmp622, i64 1
- %tmp624 = getelementptr inbounds float* %tmp623, i64 1
- %tmp625 = getelementptr inbounds float* %tmp624, i64 1
- %tmp626 = getelementptr inbounds float* %tmp625, i64 1
- %tmp627 = getelementptr inbounds float* %tmp626, i64 1
- %tmp628 = getelementptr inbounds float* %tmp627, i64 1
- %tmp629 = getelementptr inbounds float* %tmp628, i64 1
- %tmp630 = getelementptr inbounds float* %tmp629, i64 1
- %tmp631 = getelementptr inbounds float* %tmp630, i64 1
- %tmp632 = getelementptr inbounds float* %tmp631, i64 1
- %tmp633 = getelementptr inbounds float* %tmp632, i64 1
- %tmp634 = getelementptr inbounds float* %tmp633, i64 1
- %tmp635 = getelementptr inbounds float* %tmp634, i64 1
- %tmp636 = getelementptr inbounds float* %tmp635, i64 1
- %tmp637 = getelementptr inbounds float* %tmp636, i64 1
- %tmp638 = getelementptr inbounds float* %tmp637, i64 1
- %tmp639 = getelementptr inbounds float* %tmp638, i64 1
- %tmp640 = getelementptr inbounds float* %tmp639, i64 1
- %tmp641 = getelementptr inbounds float* %tmp640, i64 1
- %tmp642 = getelementptr inbounds float* %tmp641, i64 1
- %tmp643 = getelementptr inbounds float* %tmp642, i64 1
- %tmp644 = getelementptr inbounds float* %tmp643, i64 1
- %tmp645 = getelementptr inbounds float* %tmp644, i64 1
- %tmp646 = getelementptr inbounds float* %tmp645, i64 1
- %tmp647 = getelementptr inbounds float* %tmp646, i64 1
- %tmp648 = getelementptr inbounds float* %tmp647, i64 1
- %tmp649 = getelementptr inbounds float* %tmp648, i64 1
- %tmp650 = getelementptr inbounds float* %tmp649, i64 1
- %tmp651 = getelementptr inbounds float* %tmp650, i64 1
- %tmp652 = getelementptr inbounds float* %tmp651, i64 1
- %tmp653 = getelementptr inbounds float* %tmp652, i64 1
- %tmp654 = getelementptr inbounds float* %tmp653, i64 1
- %tmp655 = getelementptr inbounds float* %tmp654, i64 1
- %tmp656 = getelementptr inbounds float* %tmp655, i64 1
- %tmp657 = getelementptr inbounds float* %tmp656, i64 1
- %tmp658 = getelementptr inbounds float* %tmp657, i64 1
- %tmp659 = getelementptr inbounds float* %tmp658, i64 1
- %tmp660 = getelementptr inbounds float* %tmp659, i64 1
- %tmp661 = getelementptr inbounds float* %tmp660, i64 1
- %tmp662 = getelementptr inbounds float* %tmp661, i64 1
- %tmp663 = getelementptr inbounds float* %tmp662, i64 1
- %tmp664 = getelementptr inbounds float* %tmp663, i64 1
- %tmp665 = getelementptr inbounds float* %tmp664, i64 1
- %tmp666 = getelementptr inbounds float* %tmp665, i64 1
- %tmp667 = getelementptr inbounds float* %tmp666, i64 1
- %tmp668 = getelementptr inbounds float* %tmp667, i64 1
- %tmp669 = getelementptr inbounds float* %tmp668, i64 1
- %tmp670 = getelementptr inbounds float* %tmp669, i64 1
- %tmp671 = getelementptr inbounds float* %tmp670, i64 1
- %tmp672 = getelementptr inbounds float* %tmp671, i64 1
- %tmp673 = getelementptr inbounds float* %tmp672, i64 1
- %tmp674 = getelementptr inbounds float* %tmp673, i64 1
- %tmp675 = getelementptr inbounds float* %tmp674, i64 1
- %tmp676 = getelementptr inbounds float* %tmp675, i64 1
- %tmp677 = getelementptr inbounds float* %tmp676, i64 1
- %tmp678 = getelementptr inbounds float* %tmp677, i64 1
- %tmp679 = getelementptr inbounds float* %tmp678, i64 1
- %tmp680 = getelementptr inbounds float* %tmp679, i64 1
- %tmp681 = getelementptr inbounds float* %tmp680, i64 1
- %tmp682 = getelementptr inbounds float* %tmp681, i64 1
- %tmp683 = getelementptr inbounds float* %tmp682, i64 1
- %tmp684 = getelementptr inbounds float* %tmp683, i64 1
- %tmp685 = getelementptr inbounds float* %tmp684, i64 1
- %tmp686 = getelementptr inbounds float* %tmp685, i64 1
- %tmp687 = getelementptr inbounds float* %tmp686, i64 1
- %tmp688 = getelementptr inbounds float* %tmp687, i64 1
- %tmp689 = getelementptr inbounds float* %tmp688, i64 1
- %tmp690 = getelementptr inbounds float* %tmp689, i64 1
- %tmp691 = getelementptr inbounds float* %tmp690, i64 1
- %tmp692 = getelementptr inbounds float* %tmp691, i64 1
- %tmp693 = getelementptr inbounds float* %tmp692, i64 1
- %tmp694 = getelementptr inbounds float* %tmp693, i64 1
- %tmp695 = getelementptr inbounds float* %tmp694, i64 1
- %tmp696 = getelementptr inbounds float* %tmp695, i64 1
- %tmp697 = getelementptr inbounds float* %tmp696, i64 1
- %tmp698 = getelementptr inbounds float* %tmp697, i64 1
- %tmp699 = getelementptr inbounds float* %tmp698, i64 1
- %tmp700 = getelementptr inbounds float* %tmp699, i64 1
- %tmp701 = getelementptr inbounds float* %tmp700, i64 1
- %tmp702 = getelementptr inbounds float* %tmp701, i64 1
- %tmp703 = getelementptr inbounds float* %tmp702, i64 1
- %tmp704 = getelementptr inbounds float* %tmp703, i64 1
- %tmp705 = getelementptr inbounds float* %tmp704, i64 1
- %tmp706 = getelementptr inbounds float* %tmp705, i64 1
- %tmp707 = getelementptr inbounds float* %tmp706, i64 1
- %tmp708 = getelementptr inbounds float* %tmp707, i64 1
- %tmp709 = getelementptr inbounds float* %tmp708, i64 1
- %tmp710 = getelementptr inbounds float* %tmp709, i64 1
- %tmp711 = getelementptr inbounds float* %tmp710, i64 1
- %tmp712 = getelementptr inbounds float* %tmp711, i64 1
- %tmp713 = getelementptr inbounds float* %tmp712, i64 1
- %tmp714 = getelementptr inbounds float* %tmp713, i64 1
- %tmp715 = getelementptr inbounds float* %tmp714, i64 1
- %tmp716 = getelementptr inbounds float* %tmp715, i64 1
- %tmp717 = getelementptr inbounds float* %tmp716, i64 1
- %tmp718 = getelementptr inbounds float* %tmp717, i64 1
- %tmp719 = getelementptr inbounds float* %tmp718, i64 1
- %tmp720 = getelementptr inbounds float* %tmp719, i64 1
- %tmp721 = getelementptr inbounds float* %tmp720, i64 1
- %tmp722 = getelementptr inbounds float* %tmp721, i64 1
- %tmp723 = getelementptr inbounds float* %tmp722, i64 1
- %tmp724 = getelementptr inbounds float* %tmp723, i64 1
- %tmp725 = getelementptr inbounds float* %tmp724, i64 1
- %tmp726 = getelementptr inbounds float* %tmp725, i64 1
- %tmp727 = getelementptr inbounds float* %tmp726, i64 1
- %tmp728 = getelementptr inbounds float* %tmp727, i64 1
- %tmp729 = getelementptr inbounds float* %tmp728, i64 1
- %tmp730 = getelementptr inbounds float* %tmp729, i64 1
- %tmp731 = getelementptr inbounds float* %tmp730, i64 1
- %tmp732 = getelementptr inbounds float* %tmp731, i64 1
- %tmp733 = getelementptr inbounds float* %tmp732, i64 1
- %tmp734 = getelementptr inbounds float* %tmp733, i64 1
- %tmp735 = getelementptr inbounds float* %tmp734, i64 1
- %tmp736 = getelementptr inbounds float* %tmp735, i64 1
- %tmp737 = getelementptr inbounds float* %tmp736, i64 1
- %tmp738 = getelementptr inbounds float* %tmp737, i64 1
- %tmp739 = getelementptr inbounds float* %tmp738, i64 1
- %tmp740 = getelementptr inbounds float* %tmp739, i64 1
- %tmp741 = getelementptr inbounds float* %tmp740, i64 1
- %tmp742 = getelementptr inbounds float* %tmp741, i64 1
- %tmp743 = getelementptr inbounds float* %tmp742, i64 1
- %tmp744 = getelementptr inbounds float* %tmp743, i64 1
- %tmp745 = getelementptr inbounds float* %tmp744, i64 1
- %tmp746 = getelementptr inbounds float* %tmp745, i64 1
- %tmp747 = getelementptr inbounds float* %tmp746, i64 1
- %tmp748 = getelementptr inbounds float* %tmp747, i64 1
- %tmp749 = getelementptr inbounds float* %tmp748, i64 1
- %tmp750 = getelementptr inbounds float* %tmp749, i64 1
- %tmp751 = getelementptr inbounds float* %tmp750, i64 1
- %tmp752 = getelementptr inbounds float* %tmp751, i64 1
- %tmp753 = getelementptr inbounds float* %tmp752, i64 1
- %tmp754 = getelementptr inbounds float* %tmp753, i64 1
- %tmp755 = getelementptr inbounds float* %tmp754, i64 1
- %tmp756 = getelementptr inbounds float* %tmp755, i64 1
- %tmp757 = getelementptr inbounds float* %tmp756, i64 1
- %tmp758 = getelementptr inbounds float* %tmp757, i64 1
- %tmp759 = getelementptr inbounds float* %tmp758, i64 1
- %tmp760 = getelementptr inbounds float* %tmp759, i64 1
- %tmp761 = getelementptr inbounds float* %tmp760, i64 1
- %tmp762 = getelementptr inbounds float* %tmp761, i64 1
- %tmp763 = getelementptr inbounds float* %tmp762, i64 1
- %tmp764 = getelementptr inbounds float* %tmp763, i64 1
- %tmp765 = getelementptr inbounds float* %tmp764, i64 1
- %tmp766 = getelementptr inbounds float* %tmp765, i64 1
- %tmp767 = getelementptr inbounds float* %tmp766, i64 1
- %tmp768 = getelementptr inbounds float* %tmp767, i64 1
- %tmp769 = getelementptr inbounds float* %tmp768, i64 1
- %tmp770 = getelementptr inbounds float* %tmp769, i64 1
- %tmp771 = getelementptr inbounds float* %tmp770, i64 1
- %tmp772 = getelementptr inbounds float* %tmp771, i64 1
- %tmp773 = getelementptr inbounds float* %tmp772, i64 1
- %tmp774 = getelementptr inbounds float* %tmp773, i64 1
- %tmp775 = getelementptr inbounds float* %tmp774, i64 1
- %tmp776 = getelementptr inbounds float* %tmp775, i64 1
- %tmp777 = getelementptr inbounds float* %tmp776, i64 1
- %tmp778 = getelementptr inbounds float* %tmp777, i64 1
- %tmp779 = getelementptr inbounds float* %tmp778, i64 1
- %tmp780 = getelementptr inbounds float* %tmp779, i64 1
- %tmp781 = getelementptr inbounds float* %tmp780, i64 1
- %tmp782 = getelementptr inbounds float* %tmp781, i64 1
- %tmp783 = getelementptr inbounds float* %tmp782, i64 1
- %tmp784 = getelementptr inbounds float* %tmp783, i64 1
- %tmp785 = getelementptr inbounds float* %tmp784, i64 1
- %tmp786 = getelementptr inbounds float* %tmp785, i64 1
- %tmp787 = getelementptr inbounds float* %tmp786, i64 1
- %tmp788 = getelementptr inbounds float* %tmp787, i64 1
- %tmp789 = getelementptr inbounds float* %tmp788, i64 1
- %tmp790 = getelementptr inbounds float* %tmp789, i64 1
- %tmp791 = getelementptr inbounds float* %tmp790, i64 1
- %tmp792 = getelementptr inbounds float* %tmp791, i64 1
- %tmp793 = getelementptr inbounds float* %tmp792, i64 1
- %tmp794 = getelementptr inbounds float* %tmp793, i64 1
- %tmp795 = getelementptr inbounds float* %tmp794, i64 1
- %tmp796 = getelementptr inbounds float* %tmp795, i64 1
- %tmp797 = getelementptr inbounds float* %tmp796, i64 1
- %tmp798 = getelementptr inbounds float* %tmp797, i64 1
- %tmp799 = getelementptr inbounds float* %tmp798, i64 1
- %tmp800 = getelementptr inbounds float* %tmp799, i64 1
- %tmp801 = getelementptr inbounds float* %tmp800, i64 1
- %tmp802 = getelementptr inbounds float* %tmp801, i64 1
- %tmp803 = getelementptr inbounds float* %tmp802, i64 1
- %tmp804 = getelementptr inbounds float* %tmp803, i64 1
- %tmp805 = getelementptr inbounds float* %tmp804, i64 1
- %tmp806 = getelementptr inbounds float* %tmp805, i64 1
- %tmp807 = getelementptr inbounds float* %tmp806, i64 1
- %tmp808 = getelementptr inbounds float* %tmp807, i64 1
- %tmp809 = getelementptr inbounds float* %tmp808, i64 1
- %tmp810 = getelementptr inbounds float* %tmp809, i64 1
- %tmp811 = getelementptr inbounds float* %tmp810, i64 1
- %tmp812 = getelementptr inbounds float* %tmp811, i64 1
- %tmp813 = getelementptr inbounds float* %tmp812, i64 1
- %tmp814 = getelementptr inbounds float* %tmp813, i64 1
- %tmp815 = getelementptr inbounds float* %tmp814, i64 1
- %tmp816 = getelementptr inbounds float* %tmp815, i64 1
- %tmp817 = getelementptr inbounds float* %tmp816, i64 1
- %tmp818 = getelementptr inbounds float* %tmp817, i64 1
- %tmp819 = getelementptr inbounds float* %tmp818, i64 1
- %tmp820 = getelementptr inbounds float* %tmp819, i64 1
- %tmp821 = getelementptr inbounds float* %tmp820, i64 1
- %tmp822 = getelementptr inbounds float* %tmp821, i64 1
- %tmp823 = getelementptr inbounds float* %tmp822, i64 1
- %tmp824 = getelementptr inbounds float* %tmp823, i64 1
- %tmp825 = getelementptr inbounds float* %tmp824, i64 1
- %tmp826 = getelementptr inbounds float* %tmp825, i64 1
- %tmp827 = getelementptr inbounds float* %tmp826, i64 1
- %tmp828 = getelementptr inbounds float* %tmp827, i64 1
- %tmp829 = getelementptr inbounds float* %tmp828, i64 1
- %tmp830 = getelementptr inbounds float* %tmp829, i64 1
- %tmp831 = getelementptr inbounds float* %tmp830, i64 1
- %tmp832 = getelementptr inbounds float* %tmp831, i64 1
- %tmp833 = getelementptr inbounds float* %tmp832, i64 1
- %tmp834 = getelementptr inbounds float* %tmp833, i64 1
- %tmp835 = getelementptr inbounds float* %tmp834, i64 1
- %tmp836 = getelementptr inbounds float* %tmp835, i64 1
- %tmp837 = getelementptr inbounds float* %tmp836, i64 1
- %tmp838 = getelementptr inbounds float* %tmp837, i64 1
- %tmp839 = getelementptr inbounds float* %tmp838, i64 1
- %tmp840 = getelementptr inbounds float* %tmp839, i64 1
- %tmp841 = getelementptr inbounds float* %tmp840, i64 1
- %tmp842 = getelementptr inbounds float* %tmp841, i64 1
- %tmp843 = getelementptr inbounds float* %tmp842, i64 1
- %tmp844 = getelementptr inbounds float* %tmp843, i64 1
- %tmp845 = getelementptr inbounds float* %tmp844, i64 1
- %tmp846 = getelementptr inbounds float* %tmp845, i64 1
- %tmp847 = getelementptr inbounds float* %tmp846, i64 1
- %tmp848 = getelementptr inbounds float* %tmp847, i64 1
- %tmp849 = getelementptr inbounds float* %tmp848, i64 1
- %tmp850 = getelementptr inbounds float* %tmp849, i64 1
- %tmp851 = getelementptr inbounds float* %tmp850, i64 1
- %tmp852 = getelementptr inbounds float* %tmp851, i64 1
- %tmp853 = getelementptr inbounds float* %tmp852, i64 1
- %tmp854 = getelementptr inbounds float* %tmp853, i64 1
- %tmp855 = getelementptr inbounds float* %tmp854, i64 1
- %tmp856 = getelementptr inbounds float* %tmp855, i64 1
- %tmp857 = getelementptr inbounds float* %tmp856, i64 1
- %tmp858 = getelementptr inbounds float* %tmp857, i64 1
- %tmp859 = getelementptr inbounds float* %tmp858, i64 1
- %tmp860 = getelementptr inbounds float* %tmp859, i64 1
- %tmp861 = getelementptr inbounds float* %tmp860, i64 1
- %tmp862 = getelementptr inbounds float* %tmp861, i64 1
- %tmp863 = getelementptr inbounds float* %tmp862, i64 1
- %tmp864 = getelementptr inbounds float* %tmp863, i64 1
- %tmp865 = getelementptr inbounds float* %tmp864, i64 1
- %tmp866 = getelementptr inbounds float* %tmp865, i64 1
- %tmp867 = getelementptr inbounds float* %tmp866, i64 1
- %tmp868 = getelementptr inbounds float* %tmp867, i64 1
- %tmp869 = getelementptr inbounds float* %tmp868, i64 1
- %tmp870 = getelementptr inbounds float* %tmp869, i64 1
- %tmp871 = getelementptr inbounds float* %tmp870, i64 1
- %tmp872 = getelementptr inbounds float* %tmp871, i64 1
- %tmp873 = getelementptr inbounds float* %tmp872, i64 1
- %tmp874 = getelementptr inbounds float* %tmp873, i64 1
- %tmp875 = getelementptr inbounds float* %tmp874, i64 1
- %tmp876 = getelementptr inbounds float* %tmp875, i64 1
- %tmp877 = getelementptr inbounds float* %tmp876, i64 1
- %tmp878 = getelementptr inbounds float* %tmp877, i64 1
- %tmp879 = getelementptr inbounds float* %tmp878, i64 1
- %tmp880 = getelementptr inbounds float* %tmp879, i64 1
- %tmp881 = getelementptr inbounds float* %tmp880, i64 1
- %tmp882 = getelementptr inbounds float* %tmp881, i64 1
- %tmp883 = getelementptr inbounds float* %tmp882, i64 1
- %tmp884 = getelementptr inbounds float* %tmp883, i64 1
- %tmp885 = getelementptr inbounds float* %tmp884, i64 1
- %tmp886 = getelementptr inbounds float* %tmp885, i64 1
- %tmp887 = getelementptr inbounds float* %tmp886, i64 1
- %tmp888 = getelementptr inbounds float* %tmp887, i64 1
- %tmp889 = getelementptr inbounds float* %tmp888, i64 1
- %tmp890 = getelementptr inbounds float* %tmp889, i64 1
- %tmp891 = getelementptr inbounds float* %tmp890, i64 1
- %tmp892 = getelementptr inbounds float* %tmp891, i64 1
- %tmp893 = getelementptr inbounds float* %tmp892, i64 1
- %tmp894 = getelementptr inbounds float* %tmp893, i64 1
- %tmp895 = getelementptr inbounds float* %tmp894, i64 1
- %tmp896 = getelementptr inbounds float* %tmp895, i64 1
- %tmp897 = getelementptr inbounds float* %tmp896, i64 1
- %tmp898 = getelementptr inbounds float* %tmp897, i64 1
- %tmp899 = getelementptr inbounds float* %tmp898, i64 1
- %tmp900 = getelementptr inbounds float* %tmp899, i64 1
- %tmp901 = getelementptr inbounds float* %tmp900, i64 1
- %tmp902 = getelementptr inbounds float* %tmp901, i64 1
- %tmp903 = getelementptr inbounds float* %tmp902, i64 1
- %tmp904 = getelementptr inbounds float* %tmp903, i64 1
- %tmp905 = getelementptr inbounds float* %tmp904, i64 1
- %tmp906 = getelementptr inbounds float* %tmp905, i64 1
- %tmp907 = getelementptr inbounds float* %tmp906, i64 1
- %tmp908 = getelementptr inbounds float* %tmp907, i64 1
- %tmp909 = getelementptr inbounds float* %tmp908, i64 1
- %tmp910 = getelementptr inbounds float* %tmp909, i64 1
- %tmp911 = getelementptr inbounds float* %tmp910, i64 1
- %tmp912 = getelementptr inbounds float* %tmp911, i64 1
- %tmp913 = getelementptr inbounds float* %tmp912, i64 1
- %tmp914 = getelementptr inbounds float* %tmp913, i64 1
- %tmp915 = getelementptr inbounds float* %tmp914, i64 1
- %tmp916 = getelementptr inbounds float* %tmp915, i64 1
- %tmp917 = getelementptr inbounds float* %tmp916, i64 1
- %tmp918 = getelementptr inbounds float* %tmp917, i64 1
- %tmp919 = getelementptr inbounds float* %tmp918, i64 1
- %tmp920 = getelementptr inbounds float* %tmp919, i64 1
- %tmp921 = getelementptr inbounds float* %tmp920, i64 1
- %tmp922 = getelementptr inbounds float* %tmp921, i64 1
- %tmp923 = getelementptr inbounds float* %tmp922, i64 1
- %tmp924 = getelementptr inbounds float* %tmp923, i64 1
- %tmp925 = getelementptr inbounds float* %tmp924, i64 1
- %tmp926 = getelementptr inbounds float* %tmp925, i64 1
- %tmp927 = getelementptr inbounds float* %tmp926, i64 1
- %tmp928 = getelementptr inbounds float* %tmp927, i64 1
- %tmp929 = getelementptr inbounds float* %tmp928, i64 1
- %tmp930 = getelementptr inbounds float* %tmp929, i64 1
- %tmp931 = getelementptr inbounds float* %tmp930, i64 1
- %tmp932 = getelementptr inbounds float* %tmp931, i64 1
- %tmp933 = getelementptr inbounds float* %tmp932, i64 1
- %tmp934 = getelementptr inbounds float* %tmp933, i64 1
- %tmp935 = getelementptr inbounds float* %tmp934, i64 1
- %tmp936 = getelementptr inbounds float* %tmp935, i64 1
- %tmp937 = getelementptr inbounds float* %tmp936, i64 1
- %tmp938 = getelementptr inbounds float* %tmp937, i64 1
- %tmp939 = getelementptr inbounds float* %tmp938, i64 1
- %tmp940 = getelementptr inbounds float* %tmp939, i64 1
- %tmp941 = getelementptr inbounds float* %tmp940, i64 1
- %tmp942 = getelementptr inbounds float* %tmp941, i64 1
- %tmp943 = getelementptr inbounds float* %tmp942, i64 1
- %tmp944 = getelementptr inbounds float* %tmp943, i64 1
- %tmp945 = getelementptr inbounds float* %tmp944, i64 1
- %tmp946 = getelementptr inbounds float* %tmp945, i64 1
- %tmp947 = getelementptr inbounds float* %tmp946, i64 1
- %tmp948 = getelementptr inbounds float* %tmp947, i64 1
- %tmp949 = getelementptr inbounds float* %tmp948, i64 1
- %tmp950 = getelementptr inbounds float* %tmp949, i64 1
- %tmp951 = getelementptr inbounds float* %tmp950, i64 1
- %tmp952 = getelementptr inbounds float* %tmp951, i64 1
- %tmp953 = getelementptr inbounds float* %tmp952, i64 1
- %tmp954 = getelementptr inbounds float* %tmp953, i64 1
- %tmp955 = getelementptr inbounds float* %tmp954, i64 1
- %tmp956 = getelementptr inbounds float* %tmp955, i64 1
- %tmp957 = getelementptr inbounds float* %tmp956, i64 1
- %tmp958 = getelementptr inbounds float* %tmp957, i64 1
- %tmp959 = getelementptr inbounds float* %tmp958, i64 1
- %tmp960 = getelementptr inbounds float* %tmp959, i64 1
- %tmp961 = getelementptr inbounds float* %tmp960, i64 1
- %tmp962 = getelementptr inbounds float* %tmp961, i64 1
- %tmp963 = getelementptr inbounds float* %tmp962, i64 1
- %tmp964 = getelementptr inbounds float* %tmp963, i64 1
- %tmp965 = getelementptr inbounds float* %tmp964, i64 1
- %tmp966 = getelementptr inbounds float* %tmp965, i64 1
- %tmp967 = getelementptr inbounds float* %tmp966, i64 1
- %tmp968 = getelementptr inbounds float* %tmp967, i64 1
- %tmp969 = getelementptr inbounds float* %tmp968, i64 1
- %tmp970 = getelementptr inbounds float* %tmp969, i64 1
- %tmp971 = getelementptr inbounds float* %tmp970, i64 1
- %tmp972 = getelementptr inbounds float* %tmp971, i64 1
- %tmp973 = getelementptr inbounds float* %tmp972, i64 1
- %tmp974 = getelementptr inbounds float* %tmp973, i64 1
- %tmp975 = getelementptr inbounds float* %tmp974, i64 1
- %tmp976 = getelementptr inbounds float* %tmp975, i64 1
- %tmp977 = getelementptr inbounds float* %tmp976, i64 1
- %tmp978 = getelementptr inbounds float* %tmp977, i64 1
- %tmp979 = getelementptr inbounds float* %tmp978, i64 1
- %tmp980 = getelementptr inbounds float* %tmp979, i64 1
- %tmp981 = getelementptr inbounds float* %tmp980, i64 1
- %tmp982 = getelementptr inbounds float* %tmp981, i64 1
- %tmp983 = getelementptr inbounds float* %tmp982, i64 1
- %tmp984 = getelementptr inbounds float* %tmp983, i64 1
- %tmp985 = getelementptr inbounds float* %tmp984, i64 1
- %tmp986 = getelementptr inbounds float* %tmp985, i64 1
- %tmp987 = getelementptr inbounds float* %tmp986, i64 1
- %tmp988 = getelementptr inbounds float* %tmp987, i64 1
- %tmp989 = getelementptr inbounds float* %tmp988, i64 1
- %tmp990 = getelementptr inbounds float* %tmp989, i64 1
- %tmp991 = getelementptr inbounds float* %tmp990, i64 1
- %tmp992 = getelementptr inbounds float* %tmp991, i64 1
- %tmp993 = getelementptr inbounds float* %tmp992, i64 1
- %tmp994 = getelementptr inbounds float* %tmp993, i64 1
- %tmp995 = getelementptr inbounds float* %tmp994, i64 1
- %tmp996 = getelementptr inbounds float* %tmp995, i64 1
- %tmp997 = getelementptr inbounds float* %tmp996, i64 1
- %tmp998 = getelementptr inbounds float* %tmp997, i64 1
- %tmp999 = getelementptr inbounds float* %tmp998, i64 1
- %tmp1000 = getelementptr inbounds float* %tmp999, i64 1
- %tmp1001 = getelementptr inbounds float* %tmp1000, i64 1
- %tmp1002 = getelementptr inbounds float* %tmp1001, i64 1
- %tmp1003 = getelementptr inbounds float* %tmp1002, i64 1
- %tmp1004 = getelementptr inbounds float* %tmp1003, i64 1
- %tmp1005 = getelementptr inbounds float* %tmp1004, i64 1
- %tmp1006 = getelementptr inbounds float* %tmp1005, i64 1
- %tmp1007 = getelementptr inbounds float* %tmp1006, i64 1
- %tmp1008 = getelementptr inbounds float* %tmp1007, i64 1
- %tmp1009 = getelementptr inbounds float* %tmp1008, i64 1
- %tmp1010 = getelementptr inbounds float* %tmp1009, i64 1
- %tmp1011 = getelementptr inbounds float* %tmp1010, i64 1
- %tmp1012 = getelementptr inbounds float* %tmp1011, i64 1
- %tmp1013 = getelementptr inbounds float* %tmp1012, i64 1
- %tmp1014 = getelementptr inbounds float* %tmp1013, i64 1
- %tmp1015 = getelementptr inbounds float* %tmp1014, i64 1
- %tmp1016 = getelementptr inbounds float* %tmp1015, i64 1
- %tmp1017 = getelementptr inbounds float* %tmp1016, i64 1
- %tmp1018 = getelementptr inbounds float* %tmp1017, i64 1
- %tmp1019 = getelementptr inbounds float* %tmp1018, i64 1
- %tmp1020 = getelementptr inbounds float* %tmp1019, i64 1
- %tmp1021 = getelementptr inbounds float* %tmp1020, i64 1
- %tmp1022 = getelementptr inbounds float* %tmp1021, i64 1
- %tmp1023 = getelementptr inbounds float* %tmp1022, i64 1
- %tmp1024 = getelementptr inbounds float* %tmp1023, i64 1
- %tmp1025 = getelementptr inbounds float* %tmp1024, i64 1
- %tmp1026 = getelementptr inbounds float* %tmp1025, i64 1
- %tmp1027 = getelementptr inbounds float* %tmp1026, i64 1
- %tmp1028 = getelementptr inbounds float* %tmp1027, i64 1
- %tmp1029 = getelementptr inbounds float* %tmp1028, i64 1
- %tmp1030 = getelementptr inbounds float* %tmp1029, i64 1
- %tmp1031 = getelementptr inbounds float* %tmp1030, i64 1
- %tmp1032 = getelementptr inbounds float* %tmp1031, i64 1
- %tmp1033 = getelementptr inbounds float* %tmp1032, i64 1
- %tmp1034 = getelementptr inbounds float* %tmp1033, i64 1
- %tmp1035 = getelementptr inbounds float* %tmp1034, i64 1
- %tmp1036 = getelementptr inbounds float* %tmp1035, i64 1
- %tmp1037 = getelementptr inbounds float* %tmp1036, i64 1
- %tmp1038 = getelementptr inbounds float* %tmp1037, i64 1
- %tmp1039 = getelementptr inbounds float* %tmp1038, i64 1
- %tmp1040 = getelementptr inbounds float* %tmp1039, i64 1
- %tmp1041 = getelementptr inbounds float* %tmp1040, i64 1
- %tmp1042 = getelementptr inbounds float* %tmp1041, i64 1
- %tmp1043 = getelementptr inbounds float* %tmp1042, i64 1
- %tmp1044 = getelementptr inbounds float* %tmp1043, i64 1
- %tmp1045 = getelementptr inbounds float* %tmp1044, i64 1
- %tmp1046 = getelementptr inbounds float* %tmp1045, i64 1
- %tmp1047 = getelementptr inbounds float* %tmp1046, i64 1
- %tmp1048 = getelementptr inbounds float* %tmp1047, i64 1
- %tmp1049 = getelementptr inbounds float* %tmp1048, i64 1
- %tmp1050 = getelementptr inbounds float* %tmp1049, i64 1
- %tmp1051 = getelementptr inbounds float* %tmp1050, i64 1
- %tmp1052 = getelementptr inbounds float* %tmp1051, i64 1
- %tmp1053 = getelementptr inbounds float* %tmp1052, i64 1
- %tmp1054 = getelementptr inbounds float* %tmp1053, i64 1
- %tmp1055 = getelementptr inbounds float* %tmp1054, i64 1
- %tmp1056 = getelementptr inbounds float* %tmp1055, i64 1
- %tmp1057 = getelementptr inbounds float* %tmp1056, i64 1
- %tmp1058 = getelementptr inbounds float* %tmp1057, i64 1
- %tmp1059 = getelementptr inbounds float* %tmp1058, i64 1
- %tmp1060 = getelementptr inbounds float* %tmp1059, i64 1
- %tmp1061 = getelementptr inbounds float* %tmp1060, i64 1
- %tmp1062 = getelementptr inbounds float* %tmp1061, i64 1
- %tmp1063 = getelementptr inbounds float* %tmp1062, i64 1
- %tmp1064 = getelementptr inbounds float* %tmp1063, i64 1
- %tmp1065 = getelementptr inbounds float* %tmp1064, i64 1
- %tmp1066 = getelementptr inbounds float* %tmp1065, i64 1
- %tmp1067 = getelementptr inbounds float* %tmp1066, i64 1
- %tmp1068 = getelementptr inbounds float* %tmp1067, i64 1
- %tmp1069 = getelementptr inbounds float* %tmp1068, i64 1
- %tmp1070 = getelementptr inbounds float* %tmp1069, i64 1
- %tmp1071 = getelementptr inbounds float* %tmp1070, i64 1
- %tmp1072 = getelementptr inbounds float* %tmp1071, i64 1
- %tmp1073 = getelementptr inbounds float* %tmp1072, i64 1
- %tmp1074 = getelementptr inbounds float* %tmp1073, i64 1
- %tmp1075 = getelementptr inbounds float* %tmp1074, i64 1
- %tmp1076 = getelementptr inbounds float* %tmp1075, i64 1
- %tmp1077 = getelementptr inbounds float* %tmp1076, i64 1
- %tmp1078 = getelementptr inbounds float* %tmp1077, i64 1
- %tmp1079 = getelementptr inbounds float* %tmp1078, i64 1
- %tmp1080 = getelementptr inbounds float* %tmp1079, i64 1
- %tmp1081 = getelementptr inbounds float* %tmp1080, i64 1
- %tmp1082 = getelementptr inbounds float* %tmp1081, i64 1
- %tmp1083 = getelementptr inbounds float* %tmp1082, i64 1
- %tmp1084 = getelementptr inbounds float* %tmp1083, i64 1
- %tmp1085 = getelementptr inbounds float* %tmp1084, i64 1
- %tmp1086 = getelementptr inbounds float* %tmp1085, i64 1
- %tmp1087 = getelementptr inbounds float* %tmp1086, i64 1
- %tmp1088 = getelementptr inbounds float* %tmp1087, i64 1
- %tmp1089 = getelementptr inbounds float* %tmp1088, i64 1
- %tmp1090 = getelementptr inbounds float* %tmp1089, i64 1
- %tmp1091 = getelementptr inbounds float* %tmp1090, i64 1
- %tmp1092 = getelementptr inbounds float* %tmp1091, i64 1
- %tmp1093 = getelementptr inbounds float* %tmp1092, i64 1
- %tmp1094 = getelementptr inbounds float* %tmp1093, i64 1
- %tmp1095 = getelementptr inbounds float* %tmp1094, i64 1
- %tmp1096 = getelementptr inbounds float* %tmp1095, i64 1
- %tmp1097 = getelementptr inbounds float* %tmp1096, i64 1
- %tmp1098 = getelementptr inbounds float* %tmp1097, i64 1
- %tmp1099 = getelementptr inbounds float* %tmp1098, i64 1
- %tmp1100 = getelementptr inbounds float* %tmp1099, i64 1
- %tmp1101 = getelementptr inbounds float* %tmp1100, i64 1
- %tmp1102 = getelementptr inbounds float* %tmp1101, i64 1
- %tmp1103 = getelementptr inbounds float* %tmp1102, i64 1
- %tmp1104 = getelementptr inbounds float* %tmp1103, i64 1
- %tmp1105 = getelementptr inbounds float* %tmp1104, i64 1
- %tmp1106 = getelementptr inbounds float* %tmp1105, i64 1
- %tmp1107 = getelementptr inbounds float* %tmp1106, i64 1
- %tmp1108 = getelementptr inbounds float* %tmp1107, i64 1
- %tmp1109 = getelementptr inbounds float* %tmp1108, i64 1
- %tmp1110 = getelementptr inbounds float* %tmp1109, i64 1
- %tmp1111 = getelementptr inbounds float* %tmp1110, i64 1
- %tmp1112 = getelementptr inbounds float* %tmp1111, i64 1
- %tmp1113 = getelementptr inbounds float* %tmp1112, i64 1
- %tmp1114 = getelementptr inbounds float* %tmp1113, i64 1
- %tmp1115 = getelementptr inbounds float* %tmp1114, i64 1
- %tmp1116 = getelementptr inbounds float* %tmp1115, i64 1
- %tmp1117 = getelementptr inbounds float* %tmp1116, i64 1
- %tmp1118 = getelementptr inbounds float* %tmp1117, i64 1
- %tmp1119 = getelementptr inbounds float* %tmp1118, i64 1
- %tmp1120 = getelementptr inbounds float* %tmp1119, i64 1
- %tmp1121 = getelementptr inbounds float* %tmp1120, i64 1
- %tmp1122 = getelementptr inbounds float* %tmp1121, i64 1
- %tmp1123 = getelementptr inbounds float* %tmp1122, i64 1
- %tmp1124 = getelementptr inbounds float* %tmp1123, i64 1
- %tmp1125 = getelementptr inbounds float* %tmp1124, i64 1
- %tmp1126 = getelementptr inbounds float* %tmp1125, i64 1
- %tmp1127 = getelementptr inbounds float* %tmp1126, i64 1
- %tmp1128 = getelementptr inbounds float* %tmp1127, i64 1
- %tmp1129 = getelementptr inbounds float* %tmp1128, i64 1
- %tmp1130 = getelementptr inbounds float* %tmp1129, i64 1
- %tmp1131 = getelementptr inbounds float* %tmp1130, i64 1
- %tmp1132 = getelementptr inbounds float* %tmp1131, i64 1
- %tmp1133 = getelementptr inbounds float* %tmp1132, i64 1
- %tmp1134 = getelementptr inbounds float* %tmp1133, i64 1
- %tmp1135 = getelementptr inbounds float* %tmp1134, i64 1
- %tmp1136 = getelementptr inbounds float* %tmp1135, i64 1
- %tmp1137 = getelementptr inbounds float* %tmp1136, i64 1
- %tmp1138 = getelementptr inbounds float* %tmp1137, i64 1
- %tmp1139 = getelementptr inbounds float* %tmp1138, i64 1
- %tmp1140 = getelementptr inbounds float* %tmp1139, i64 1
- %tmp1141 = getelementptr inbounds float* %tmp1140, i64 1
- %tmp1142 = getelementptr inbounds float* %tmp1141, i64 1
- %tmp1143 = getelementptr inbounds float* %tmp1142, i64 1
- %tmp1144 = getelementptr inbounds float* %tmp1143, i64 1
- %tmp1145 = getelementptr inbounds float* %tmp1144, i64 1
- %tmp1146 = getelementptr inbounds float* %tmp1145, i64 1
- %tmp1147 = getelementptr inbounds float* %tmp1146, i64 1
- %tmp1148 = getelementptr inbounds float* %tmp1147, i64 1
- %tmp1149 = getelementptr inbounds float* %tmp1148, i64 1
- %tmp1150 = getelementptr inbounds float* %tmp1149, i64 1
- %tmp1151 = getelementptr inbounds float* %tmp1150, i64 1
- %tmp1152 = getelementptr inbounds float* %tmp1151, i64 1
- %tmp1153 = getelementptr inbounds float* %tmp1152, i64 1
- %tmp1154 = getelementptr inbounds float* %tmp1153, i64 1
- %tmp1155 = getelementptr inbounds float* %tmp1154, i64 1
- %tmp1156 = getelementptr inbounds float* %tmp1155, i64 1
- %tmp1157 = getelementptr inbounds float* %tmp1156, i64 1
- %tmp1158 = getelementptr inbounds float* %tmp1157, i64 1
- %tmp1159 = getelementptr inbounds float* %tmp1158, i64 1
- %tmp1160 = getelementptr inbounds float* %tmp1159, i64 1
- %tmp1161 = getelementptr inbounds float* %tmp1160, i64 1
- %tmp1162 = getelementptr inbounds float* %tmp1161, i64 1
- %tmp1163 = getelementptr inbounds float* %tmp1162, i64 1
- %tmp1164 = getelementptr inbounds float* %tmp1163, i64 1
- %tmp1165 = getelementptr inbounds float* %tmp1164, i64 1
- %tmp1166 = getelementptr inbounds float* %tmp1165, i64 1
- %tmp1167 = getelementptr inbounds float* %tmp1166, i64 1
- %tmp1168 = getelementptr inbounds float* %tmp1167, i64 1
- %tmp1169 = getelementptr inbounds float* %tmp1168, i64 1
- %tmp1170 = getelementptr inbounds float* %tmp1169, i64 1
- %tmp1171 = getelementptr inbounds float* %tmp1170, i64 1
- %tmp1172 = getelementptr inbounds float* %tmp1171, i64 1
- %tmp1173 = getelementptr inbounds float* %tmp1172, i64 1
- %tmp1174 = getelementptr inbounds float* %tmp1173, i64 1
- %tmp1175 = getelementptr inbounds float* %tmp1174, i64 1
- %tmp1176 = getelementptr inbounds float* %tmp1175, i64 1
- %tmp1177 = getelementptr inbounds float* %tmp1176, i64 1
- %tmp1178 = getelementptr inbounds float* %tmp1177, i64 1
- %tmp1179 = getelementptr inbounds float* %tmp1178, i64 1
- %tmp1180 = getelementptr inbounds float* %tmp1179, i64 1
- %tmp1181 = getelementptr inbounds float* %tmp1180, i64 1
- %tmp1182 = getelementptr inbounds float* %tmp1181, i64 1
- %tmp1183 = getelementptr inbounds float* %tmp1182, i64 1
- %tmp1184 = getelementptr inbounds float* %tmp1183, i64 1
- %tmp1185 = getelementptr inbounds float* %tmp1184, i64 1
- %tmp1186 = getelementptr inbounds float* %tmp1185, i64 1
- %tmp1187 = getelementptr inbounds float* %tmp1186, i64 1
- %tmp1188 = getelementptr inbounds float* %tmp1187, i64 1
- %tmp1189 = getelementptr inbounds float* %tmp1188, i64 1
- %tmp1190 = getelementptr inbounds float* %tmp1189, i64 1
- %tmp1191 = getelementptr inbounds float* %tmp1190, i64 1
- %tmp1192 = getelementptr inbounds float* %tmp1191, i64 1
- %tmp1193 = getelementptr inbounds float* %tmp1192, i64 1
- %tmp1194 = getelementptr inbounds float* %tmp1193, i64 1
- %tmp1195 = getelementptr inbounds float* %tmp1194, i64 1
- %tmp1196 = getelementptr inbounds float* %tmp1195, i64 1
- %tmp1197 = getelementptr inbounds float* %tmp1196, i64 1
- %tmp1198 = getelementptr inbounds float* %tmp1197, i64 1
- %tmp1199 = getelementptr inbounds float* %tmp1198, i64 1
- %tmp1200 = getelementptr inbounds float* %tmp1199, i64 1
- %tmp1201 = getelementptr inbounds float* %tmp1200, i64 1
- %tmp1202 = getelementptr inbounds float* %tmp1201, i64 1
- %tmp1203 = getelementptr inbounds float* %tmp1202, i64 1
- %tmp1204 = getelementptr inbounds float* %tmp1203, i64 1
- %tmp1205 = getelementptr inbounds float* %tmp1204, i64 1
- %tmp1206 = getelementptr inbounds float* %tmp1205, i64 1
- %tmp1207 = getelementptr inbounds float* %tmp1206, i64 1
- %tmp1208 = getelementptr inbounds float* %tmp1207, i64 1
- %tmp1209 = getelementptr inbounds float* %tmp1208, i64 1
- %tmp1210 = getelementptr inbounds float* %tmp1209, i64 1
- %tmp1211 = getelementptr inbounds float* %tmp1210, i64 1
- %tmp1212 = getelementptr inbounds float* %tmp1211, i64 1
- %tmp1213 = getelementptr inbounds float* %tmp1212, i64 1
- %tmp1214 = getelementptr inbounds float* %tmp1213, i64 1
- %tmp1215 = getelementptr inbounds float* %tmp1214, i64 1
- %tmp1216 = getelementptr inbounds float* %tmp1215, i64 1
- %tmp1217 = getelementptr inbounds float* %tmp1216, i64 1
- %tmp1218 = getelementptr inbounds float* %tmp1217, i64 1
- %tmp1219 = getelementptr inbounds float* %tmp1218, i64 1
- %tmp1220 = getelementptr inbounds float* %tmp1219, i64 1
- %tmp1221 = getelementptr inbounds float* %tmp1220, i64 1
- %tmp1222 = getelementptr inbounds float* %tmp1221, i64 1
- %tmp1223 = getelementptr inbounds float* %tmp1222, i64 1
- %tmp1224 = getelementptr inbounds float* %tmp1223, i64 1
- %tmp1225 = getelementptr inbounds float* %tmp1224, i64 1
- %tmp1226 = getelementptr inbounds float* %tmp1225, i64 1
- %tmp1227 = getelementptr inbounds float* %tmp1226, i64 1
- %tmp1228 = getelementptr inbounds float* %tmp1227, i64 1
- %tmp1229 = getelementptr inbounds float* %tmp1228, i64 1
- %tmp1230 = getelementptr inbounds float* %tmp1229, i64 1
- %tmp1231 = getelementptr inbounds float* %tmp1230, i64 1
- %tmp1232 = getelementptr inbounds float* %tmp1231, i64 1
- %tmp1233 = getelementptr inbounds float* %tmp1232, i64 1
- %tmp1234 = getelementptr inbounds float* %tmp1233, i64 1
- %tmp1235 = getelementptr inbounds float* %tmp1234, i64 1
- %tmp1236 = getelementptr inbounds float* %tmp1235, i64 1
- %tmp1237 = getelementptr inbounds float* %tmp1236, i64 1
- %tmp1238 = getelementptr inbounds float* %tmp1237, i64 1
- %tmp1239 = getelementptr inbounds float* %tmp1238, i64 1
- %tmp1240 = getelementptr inbounds float* %tmp1239, i64 1
- %tmp1241 = getelementptr inbounds float* %tmp1240, i64 1
- %tmp1242 = getelementptr inbounds float* %tmp1241, i64 1
- %tmp1243 = getelementptr inbounds float* %tmp1242, i64 1
- %tmp1244 = getelementptr inbounds float* %tmp1243, i64 1
- %tmp1245 = getelementptr inbounds float* %tmp1244, i64 1
- %tmp1246 = getelementptr inbounds float* %tmp1245, i64 1
- %tmp1247 = getelementptr inbounds float* %tmp1246, i64 1
- %tmp1248 = getelementptr inbounds float* %tmp1247, i64 1
- %tmp1249 = getelementptr inbounds float* %tmp1248, i64 1
- %tmp1250 = getelementptr inbounds float* %tmp1249, i64 1
- %tmp1251 = getelementptr inbounds float* %tmp1250, i64 1
- %tmp1252 = getelementptr inbounds float* %tmp1251, i64 1
- %tmp1253 = getelementptr inbounds float* %tmp1252, i64 1
- %tmp1254 = getelementptr inbounds float* %tmp1253, i64 1
- %tmp1255 = getelementptr inbounds float* %tmp1254, i64 1
- %tmp1256 = getelementptr inbounds float* %tmp1255, i64 1
- %tmp1257 = getelementptr inbounds float* %tmp1256, i64 1
- %tmp1258 = getelementptr inbounds float* %tmp1257, i64 1
- %tmp1259 = getelementptr inbounds float* %tmp1258, i64 1
- %tmp1260 = getelementptr inbounds float* %tmp1259, i64 1
- %tmp1261 = getelementptr inbounds float* %tmp1260, i64 1
- %tmp1262 = getelementptr inbounds float* %tmp1261, i64 1
- %tmp1263 = getelementptr inbounds float* %tmp1262, i64 1
- %tmp1264 = getelementptr inbounds float* %tmp1263, i64 1
- %tmp1265 = getelementptr inbounds float* %tmp1264, i64 1
- %tmp1266 = getelementptr inbounds float* %tmp1265, i64 1
- %tmp1267 = getelementptr inbounds float* %tmp1266, i64 1
- %tmp1268 = getelementptr inbounds float* %tmp1267, i64 1
- %tmp1269 = getelementptr inbounds float* %tmp1268, i64 1
- %tmp1270 = getelementptr inbounds float* %tmp1269, i64 1
- %tmp1271 = getelementptr inbounds float* %tmp1270, i64 1
- %tmp1272 = getelementptr inbounds float* %tmp1271, i64 1
- %tmp1273 = getelementptr inbounds float* %tmp1272, i64 1
- %tmp1274 = getelementptr inbounds float* %tmp1273, i64 1
- %tmp1275 = getelementptr inbounds float* %tmp1274, i64 1
- %tmp1276 = getelementptr inbounds float* %tmp1275, i64 1
- %tmp1277 = getelementptr inbounds float* %tmp1276, i64 1
- %tmp1278 = getelementptr inbounds float* %tmp1277, i64 1
- %tmp1279 = getelementptr inbounds float* %tmp1278, i64 1
- %tmp1280 = getelementptr inbounds float* %tmp1279, i64 1
- %tmp1281 = getelementptr inbounds float* %tmp1280, i64 1
- %tmp1282 = getelementptr inbounds float* %tmp1281, i64 1
- %tmp1283 = getelementptr inbounds float* %tmp1282, i64 1
- %tmp1284 = getelementptr inbounds float* %tmp1283, i64 1
- %tmp1285 = getelementptr inbounds float* %tmp1284, i64 1
- %tmp1286 = getelementptr inbounds float* %tmp1285, i64 1
- %tmp1287 = getelementptr inbounds float* %tmp1286, i64 1
- %tmp1288 = getelementptr inbounds float* %tmp1287, i64 1
- %tmp1289 = getelementptr inbounds float* %tmp1288, i64 1
- %tmp1290 = getelementptr inbounds float* %tmp1289, i64 1
- %tmp1291 = getelementptr inbounds float* %tmp1290, i64 1
- %tmp1292 = getelementptr inbounds float* %tmp1291, i64 1
- %tmp1293 = getelementptr inbounds float* %tmp1292, i64 1
- %tmp1294 = getelementptr inbounds float* %tmp1293, i64 1
- %tmp1295 = getelementptr inbounds float* %tmp1294, i64 1
- %tmp1296 = getelementptr inbounds float* %tmp1295, i64 1
- %tmp1297 = getelementptr inbounds float* %tmp1296, i64 1
- %tmp1298 = getelementptr inbounds float* %tmp1297, i64 1
- %tmp1299 = getelementptr inbounds float* %tmp1298, i64 1
- %tmp1300 = getelementptr inbounds float* %tmp1299, i64 1
- %tmp1301 = getelementptr inbounds float* %tmp1300, i64 1
- %tmp1302 = getelementptr inbounds float* %tmp1301, i64 1
- %tmp1303 = getelementptr inbounds float* %tmp1302, i64 1
- %tmp1304 = getelementptr inbounds float* %tmp1303, i64 1
- %tmp1305 = getelementptr inbounds float* %tmp1304, i64 1
- %tmp1306 = getelementptr inbounds float* %tmp1305, i64 1
- %tmp1307 = getelementptr inbounds float* %tmp1306, i64 1
- %tmp1308 = getelementptr inbounds float* %tmp1307, i64 1
- %tmp1309 = getelementptr inbounds float* %tmp1308, i64 1
- %tmp1310 = getelementptr inbounds float* %tmp1309, i64 1
- %tmp1311 = getelementptr inbounds float* %tmp1310, i64 1
- %tmp1312 = getelementptr inbounds float* %tmp1311, i64 1
- %tmp1313 = getelementptr inbounds float* %tmp1312, i64 1
- %tmp1314 = getelementptr inbounds float* %tmp1313, i64 1
- %tmp1315 = getelementptr inbounds float* %tmp1314, i64 1
- %tmp1316 = getelementptr inbounds float* %tmp1315, i64 1
- %tmp1317 = getelementptr inbounds float* %tmp1316, i64 1
- %tmp1318 = getelementptr inbounds float* %tmp1317, i64 1
- %tmp1319 = getelementptr inbounds float* %tmp1318, i64 1
- %tmp1320 = getelementptr inbounds float* %tmp1319, i64 1
- %tmp1321 = getelementptr inbounds float* %tmp1320, i64 1
- %tmp1322 = getelementptr inbounds float* %tmp1321, i64 1
- %tmp1323 = getelementptr inbounds float* %tmp1322, i64 1
- %tmp1324 = getelementptr inbounds float* %tmp1323, i64 1
- %tmp1325 = getelementptr inbounds float* %tmp1324, i64 1
- %tmp1326 = getelementptr inbounds float* %tmp1325, i64 1
- %tmp1327 = getelementptr inbounds float* %tmp1326, i64 1
- %tmp1328 = getelementptr inbounds float* %tmp1327, i64 1
- %tmp1329 = getelementptr inbounds float* %tmp1328, i64 1
- %tmp1330 = getelementptr inbounds float* %tmp1329, i64 1
- %tmp1331 = getelementptr inbounds float* %tmp1330, i64 1
- %tmp1332 = getelementptr inbounds float* %tmp1331, i64 1
- %tmp1333 = getelementptr inbounds float* %tmp1332, i64 1
- %tmp1334 = getelementptr inbounds float* %tmp1333, i64 1
- %tmp1335 = getelementptr inbounds float* %tmp1334, i64 1
- %tmp1336 = getelementptr inbounds float* %tmp1335, i64 1
- %tmp1337 = getelementptr inbounds float* %tmp1336, i64 1
- %tmp1338 = getelementptr inbounds float* %tmp1337, i64 1
- %tmp1339 = getelementptr inbounds float* %tmp1338, i64 1
- %tmp1340 = getelementptr inbounds float* %tmp1339, i64 1
- %tmp1341 = getelementptr inbounds float* %tmp1340, i64 1
- %tmp1342 = getelementptr inbounds float* %tmp1341, i64 1
- %tmp1343 = getelementptr inbounds float* %tmp1342, i64 1
- %tmp1344 = getelementptr inbounds float* %tmp1343, i64 1
- %tmp1345 = getelementptr inbounds float* %tmp1344, i64 1
- %tmp1346 = getelementptr inbounds float* %tmp1345, i64 1
- %tmp1347 = getelementptr inbounds float* %tmp1346, i64 1
- %tmp1348 = getelementptr inbounds float* %tmp1347, i64 1
- %tmp1349 = getelementptr inbounds float* %tmp1348, i64 1
- %tmp1350 = getelementptr inbounds float* %tmp1349, i64 1
- %tmp1351 = getelementptr inbounds float* %tmp1350, i64 1
- %tmp1352 = getelementptr inbounds float* %tmp1351, i64 1
- %tmp1353 = getelementptr inbounds float* %tmp1352, i64 1
- %tmp1354 = getelementptr inbounds float* %tmp1353, i64 1
- %tmp1355 = getelementptr inbounds float* %tmp1354, i64 1
- %tmp1356 = getelementptr inbounds float* %tmp1355, i64 1
- %tmp1357 = getelementptr inbounds float* %tmp1356, i64 1
- %tmp1358 = getelementptr inbounds float* %tmp1357, i64 1
- %tmp1359 = getelementptr inbounds float* %tmp1358, i64 1
- %tmp1360 = getelementptr inbounds float* %tmp1359, i64 1
- %tmp1361 = getelementptr inbounds float* %tmp1360, i64 1
- %tmp1362 = getelementptr inbounds float* %tmp1361, i64 1
- %tmp1363 = getelementptr inbounds float* %tmp1362, i64 1
- %tmp1364 = getelementptr inbounds float* %tmp1363, i64 1
- %tmp1365 = getelementptr inbounds float* %tmp1364, i64 1
- %tmp1366 = getelementptr inbounds float* %tmp1365, i64 1
- %tmp1367 = getelementptr inbounds float* %tmp1366, i64 1
- %tmp1368 = getelementptr inbounds float* %tmp1367, i64 1
- %tmp1369 = getelementptr inbounds float* %tmp1368, i64 1
- %tmp1370 = getelementptr inbounds float* %tmp1369, i64 1
- %tmp1371 = getelementptr inbounds float* %tmp1370, i64 1
- %tmp1372 = getelementptr inbounds float* %tmp1371, i64 1
- %tmp1373 = getelementptr inbounds float* %tmp1372, i64 1
- %tmp1374 = getelementptr inbounds float* %tmp1373, i64 1
- %tmp1375 = getelementptr inbounds float* %tmp1374, i64 1
- %tmp1376 = getelementptr inbounds float* %tmp1375, i64 1
- %tmp1377 = getelementptr inbounds float* %tmp1376, i64 1
- %tmp1378 = getelementptr inbounds float* %tmp1377, i64 1
- %tmp1379 = getelementptr inbounds float* %tmp1378, i64 1
- %tmp1380 = getelementptr inbounds float* %tmp1379, i64 1
- %tmp1381 = getelementptr inbounds float* %tmp1380, i64 1
- %tmp1382 = getelementptr inbounds float* %tmp1381, i64 1
- %tmp1383 = getelementptr inbounds float* %tmp1382, i64 1
- %tmp1384 = getelementptr inbounds float* %tmp1383, i64 1
- %tmp1385 = getelementptr inbounds float* %tmp1384, i64 1
- %tmp1386 = getelementptr inbounds float* %tmp1385, i64 1
- %tmp1387 = getelementptr inbounds float* %tmp1386, i64 1
- %tmp1388 = getelementptr inbounds float* %tmp1387, i64 1
- %tmp1389 = getelementptr inbounds float* %tmp1388, i64 1
- %tmp1390 = getelementptr inbounds float* %tmp1389, i64 1
- %tmp1391 = getelementptr inbounds float* %tmp1390, i64 1
- %tmp1392 = getelementptr inbounds float* %tmp1391, i64 1
- %tmp1393 = getelementptr inbounds float* %tmp1392, i64 1
- %tmp1394 = getelementptr inbounds float* %tmp1393, i64 1
- %tmp1395 = getelementptr inbounds float* %tmp1394, i64 1
- %tmp1396 = getelementptr inbounds float* %tmp1395, i64 1
- %tmp1397 = getelementptr inbounds float* %tmp1396, i64 1
- %tmp1398 = getelementptr inbounds float* %tmp1397, i64 1
- %tmp1399 = getelementptr inbounds float* %tmp1398, i64 1
- %tmp1400 = getelementptr inbounds float* %tmp1399, i64 1
- %tmp1401 = getelementptr inbounds float* %tmp1400, i64 1
- %tmp1402 = getelementptr inbounds float* %tmp1401, i64 1
- %tmp1403 = getelementptr inbounds float* %tmp1402, i64 1
- %tmp1404 = getelementptr inbounds float* %tmp1403, i64 1
- %tmp1405 = getelementptr inbounds float* %tmp1404, i64 1
- %tmp1406 = getelementptr inbounds float* %tmp1405, i64 1
- %tmp1407 = getelementptr inbounds float* %tmp1406, i64 1
- %tmp1408 = getelementptr inbounds float* %tmp1407, i64 1
- %tmp1409 = getelementptr inbounds float* %tmp1408, i64 1
- %tmp1410 = getelementptr inbounds float* %tmp1409, i64 1
- %tmp1411 = getelementptr inbounds float* %tmp1410, i64 1
- %tmp1412 = getelementptr inbounds float* %tmp1411, i64 1
- %tmp1413 = getelementptr inbounds float* %tmp1412, i64 1
- %tmp1414 = getelementptr inbounds float* %tmp1413, i64 1
- %tmp1415 = getelementptr inbounds float* %tmp1414, i64 1
- %tmp1416 = getelementptr inbounds float* %tmp1415, i64 1
- %tmp1417 = getelementptr inbounds float* %tmp1416, i64 1
- %tmp1418 = getelementptr inbounds float* %tmp1417, i64 1
- %tmp1419 = getelementptr inbounds float* %tmp1418, i64 1
- %tmp1420 = getelementptr inbounds float* %tmp1419, i64 1
- %tmp1421 = getelementptr inbounds float* %tmp1420, i64 1
- %tmp1422 = getelementptr inbounds float* %tmp1421, i64 1
- %tmp1423 = getelementptr inbounds float* %tmp1422, i64 1
- %tmp1424 = getelementptr inbounds float* %tmp1423, i64 1
- %tmp1425 = getelementptr inbounds float* %tmp1424, i64 1
- %tmp1426 = getelementptr inbounds float* %tmp1425, i64 1
- %tmp1427 = getelementptr inbounds float* %tmp1426, i64 1
- %tmp1428 = getelementptr inbounds float* %tmp1427, i64 1
- %tmp1429 = getelementptr inbounds float* %tmp1428, i64 1
- %tmp1430 = getelementptr inbounds float* %tmp1429, i64 1
- %tmp1431 = getelementptr inbounds float* %tmp1430, i64 1
- %tmp1432 = getelementptr inbounds float* %tmp1431, i64 1
- %tmp1433 = getelementptr inbounds float* %tmp1432, i64 1
- %tmp1434 = getelementptr inbounds float* %tmp1433, i64 1
- %tmp1435 = getelementptr inbounds float* %tmp1434, i64 1
- %tmp1436 = getelementptr inbounds float* %tmp1435, i64 1
- %tmp1437 = getelementptr inbounds float* %tmp1436, i64 1
- %tmp1438 = getelementptr inbounds float* %tmp1437, i64 1
- %tmp1439 = getelementptr inbounds float* %tmp1438, i64 1
- %tmp1440 = getelementptr inbounds float* %tmp1439, i64 1
- %tmp1441 = getelementptr inbounds float* %tmp1440, i64 1
- %tmp1442 = getelementptr inbounds float* %tmp1441, i64 1
- %tmp1443 = getelementptr inbounds float* %tmp1442, i64 1
- %tmp1444 = getelementptr inbounds float* %tmp1443, i64 1
- %tmp1445 = getelementptr inbounds float* %tmp1444, i64 1
- %tmp1446 = getelementptr inbounds float* %tmp1445, i64 1
- %tmp1447 = getelementptr inbounds float* %tmp1446, i64 1
- %tmp1448 = getelementptr inbounds float* %tmp1447, i64 1
- %tmp1449 = getelementptr inbounds float* %tmp1448, i64 1
- %tmp1450 = getelementptr inbounds float* %tmp1449, i64 1
- %tmp1451 = getelementptr inbounds float* %tmp1450, i64 1
- %tmp1452 = getelementptr inbounds float* %tmp1451, i64 1
- %tmp1453 = getelementptr inbounds float* %tmp1452, i64 1
- %tmp1454 = getelementptr inbounds float* %tmp1453, i64 1
- %tmp1455 = getelementptr inbounds float* %tmp1454, i64 1
- %tmp1456 = getelementptr inbounds float* %tmp1455, i64 1
- %tmp1457 = getelementptr inbounds float* %tmp1456, i64 1
- %tmp1458 = getelementptr inbounds float* %tmp1457, i64 1
- %tmp1459 = getelementptr inbounds float* %tmp1458, i64 1
- %tmp1460 = getelementptr inbounds float* %tmp1459, i64 1
- %tmp1461 = getelementptr inbounds float* %tmp1460, i64 1
- %tmp1462 = getelementptr inbounds float* %tmp1461, i64 1
- %tmp1463 = getelementptr inbounds float* %tmp1462, i64 1
- %tmp1464 = getelementptr inbounds float* %tmp1463, i64 1
- %tmp1465 = getelementptr inbounds float* %tmp1464, i64 1
- %tmp1466 = getelementptr inbounds float* %tmp1465, i64 1
- %tmp1467 = getelementptr inbounds float* %tmp1466, i64 1
- %tmp1468 = getelementptr inbounds float* %tmp1467, i64 1
- %tmp1469 = getelementptr inbounds float* %tmp1468, i64 1
- %tmp1470 = getelementptr inbounds float* %tmp1469, i64 1
- %tmp1471 = getelementptr inbounds float* %tmp1470, i64 1
- %tmp1472 = getelementptr inbounds float* %tmp1471, i64 1
- %tmp1473 = getelementptr inbounds float* %tmp1472, i64 1
- %tmp1474 = getelementptr inbounds float* %tmp1473, i64 1
- %tmp1475 = getelementptr inbounds float* %tmp1474, i64 1
- %tmp1476 = getelementptr inbounds float* %tmp1475, i64 1
- %tmp1477 = getelementptr inbounds float* %tmp1476, i64 1
- %tmp1478 = getelementptr inbounds float* %tmp1477, i64 1
- %tmp1479 = getelementptr inbounds float* %tmp1478, i64 1
- %tmp1480 = getelementptr inbounds float* %tmp1479, i64 1
- %tmp1481 = getelementptr inbounds float* %tmp1480, i64 1
- %tmp1482 = getelementptr inbounds float* %tmp1481, i64 1
- %tmp1483 = getelementptr inbounds float* %tmp1482, i64 1
- %tmp1484 = getelementptr inbounds float* %tmp1483, i64 1
- %tmp1485 = getelementptr inbounds float* %tmp1484, i64 1
- %tmp1486 = getelementptr inbounds float* %tmp1485, i64 1
- %tmp1487 = getelementptr inbounds float* %tmp1486, i64 1
- %tmp1488 = getelementptr inbounds float* %tmp1487, i64 1
- %tmp1489 = getelementptr inbounds float* %tmp1488, i64 1
- %tmp1490 = getelementptr inbounds float* %tmp1489, i64 1
- %tmp1491 = getelementptr inbounds float* %tmp1490, i64 1
- %tmp1492 = getelementptr inbounds float* %tmp1491, i64 1
- %tmp1493 = getelementptr inbounds float* %tmp1492, i64 1
- %tmp1494 = getelementptr inbounds float* %tmp1493, i64 1
- %tmp1495 = getelementptr inbounds float* %tmp1494, i64 1
- %tmp1496 = getelementptr inbounds float* %tmp1495, i64 1
- %tmp1497 = getelementptr inbounds float* %tmp1496, i64 1
- %tmp1498 = getelementptr inbounds float* %tmp1497, i64 1
- %tmp1499 = getelementptr inbounds float* %tmp1498, i64 1
- %tmp1500 = getelementptr inbounds float* %tmp1499, i64 1
- %tmp1501 = getelementptr inbounds float* %tmp1500, i64 1
- %tmp1502 = getelementptr inbounds float* %tmp1501, i64 1
- %tmp1503 = getelementptr inbounds float* %tmp1502, i64 1
- %tmp1504 = getelementptr inbounds float* %tmp1503, i64 1
- %tmp1505 = getelementptr inbounds float* %tmp1504, i64 1
- %tmp1506 = getelementptr inbounds float* %tmp1505, i64 1
- %tmp1507 = getelementptr inbounds float* %tmp1506, i64 1
- %tmp1508 = getelementptr inbounds float* %tmp1507, i64 1
- %tmp1509 = getelementptr inbounds float* %tmp1508, i64 1
- %tmp1510 = getelementptr inbounds float* %tmp1509, i64 1
- %tmp1511 = getelementptr inbounds float* %tmp1510, i64 1
- %tmp1512 = getelementptr inbounds float* %tmp1511, i64 1
- %tmp1513 = getelementptr inbounds float* %tmp1512, i64 1
- %tmp1514 = getelementptr inbounds float* %tmp1513, i64 1
- %tmp1515 = getelementptr inbounds float* %tmp1514, i64 1
- %tmp1516 = getelementptr inbounds float* %tmp1515, i64 1
- %tmp1517 = getelementptr inbounds float* %tmp1516, i64 1
- %tmp1518 = getelementptr inbounds float* %tmp1517, i64 1
- %tmp1519 = getelementptr inbounds float* %tmp1518, i64 1
- %tmp1520 = getelementptr inbounds float* %tmp1519, i64 1
- %tmp1521 = getelementptr inbounds float* %tmp1520, i64 1
- %tmp1522 = getelementptr inbounds float* %tmp1521, i64 1
- %tmp1523 = getelementptr inbounds float* %tmp1522, i64 1
- %tmp1524 = getelementptr inbounds float* %tmp1523, i64 1
- %tmp1525 = getelementptr inbounds float* %tmp1524, i64 1
- %tmp1526 = getelementptr inbounds float* %tmp1525, i64 1
- %tmp1527 = getelementptr inbounds float* %tmp1526, i64 1
- %tmp1528 = getelementptr inbounds float* %tmp1527, i64 1
- %tmp1529 = getelementptr inbounds float* %tmp1528, i64 1
- %tmp1530 = getelementptr inbounds float* %tmp1529, i64 1
- %tmp1531 = getelementptr inbounds float* %tmp1530, i64 1
- %tmp1532 = getelementptr inbounds float* %tmp1531, i64 1
- %tmp1533 = getelementptr inbounds float* %tmp1532, i64 1
- %tmp1534 = getelementptr inbounds float* %tmp1533, i64 1
- %tmp1535 = getelementptr inbounds float* %tmp1534, i64 1
- %tmp1536 = getelementptr inbounds float* %tmp1535, i64 1
- %tmp1537 = getelementptr inbounds float* %tmp1536, i64 1
- %tmp1538 = getelementptr inbounds float* %tmp1537, i64 1
- %tmp1539 = getelementptr inbounds float* %tmp1538, i64 1
- %tmp1540 = getelementptr inbounds float* %tmp1539, i64 1
- %tmp1541 = getelementptr inbounds float* %tmp1540, i64 1
- %tmp1542 = getelementptr inbounds float* %tmp1541, i64 1
- %tmp1543 = getelementptr inbounds float* %tmp1542, i64 1
- %tmp1544 = getelementptr inbounds float* %tmp1543, i64 1
- %tmp1545 = getelementptr inbounds float* %tmp1544, i64 1
- %tmp1546 = getelementptr inbounds float* %tmp1545, i64 1
- %tmp1547 = getelementptr inbounds float* %tmp1546, i64 1
- %tmp1548 = getelementptr inbounds float* %tmp1547, i64 1
- %tmp1549 = getelementptr inbounds float* %tmp1548, i64 1
- %tmp1550 = getelementptr inbounds float* %tmp1549, i64 1
- %tmp1551 = getelementptr inbounds float* %tmp1550, i64 1
- %tmp1552 = getelementptr inbounds float* %tmp1551, i64 1
- %tmp1553 = getelementptr inbounds float* %tmp1552, i64 1
- %tmp1554 = getelementptr inbounds float* %tmp1553, i64 1
- %tmp1555 = getelementptr inbounds float* %tmp1554, i64 1
- %tmp1556 = getelementptr inbounds float* %tmp1555, i64 1
- %tmp1557 = getelementptr inbounds float* %tmp1556, i64 1
- %tmp1558 = getelementptr inbounds float* %tmp1557, i64 1
- %tmp1559 = getelementptr inbounds float* %tmp1558, i64 1
- %tmp1560 = getelementptr inbounds float* %tmp1559, i64 1
- %tmp1561 = getelementptr inbounds float* %tmp1560, i64 1
- %tmp1562 = getelementptr inbounds float* %tmp1561, i64 1
- %tmp1563 = getelementptr inbounds float* %tmp1562, i64 1
- %tmp1564 = getelementptr inbounds float* %tmp1563, i64 1
- %tmp1565 = getelementptr inbounds float* %tmp1564, i64 1
- %tmp1566 = getelementptr inbounds float* %tmp1565, i64 1
- %tmp1567 = getelementptr inbounds float* %tmp1566, i64 1
- %tmp1568 = getelementptr inbounds float* %tmp1567, i64 1
- %tmp1569 = getelementptr inbounds float* %tmp1568, i64 1
- %tmp1570 = getelementptr inbounds float* %tmp1569, i64 1
- %tmp1571 = getelementptr inbounds float* %tmp1570, i64 1
- %tmp1572 = getelementptr inbounds float* %tmp1571, i64 1
- %tmp1573 = getelementptr inbounds float* %tmp1572, i64 1
- %tmp1574 = getelementptr inbounds float* %tmp1573, i64 1
- %tmp1575 = getelementptr inbounds float* %tmp1574, i64 1
- %tmp1576 = getelementptr inbounds float* %tmp1575, i64 1
- %tmp1577 = getelementptr inbounds float* %tmp1576, i64 1
- %tmp1578 = getelementptr inbounds float* %tmp1577, i64 1
- %tmp1579 = getelementptr inbounds float* %tmp1578, i64 1
- %tmp1580 = getelementptr inbounds float* %tmp1579, i64 1
- %tmp1581 = getelementptr inbounds float* %tmp1580, i64 1
- %tmp1582 = getelementptr inbounds float* %tmp1581, i64 1
- %tmp1583 = getelementptr inbounds float* %tmp1582, i64 1
- %tmp1584 = getelementptr inbounds float* %tmp1583, i64 1
- %tmp1585 = getelementptr inbounds float* %tmp1584, i64 1
- %tmp1586 = getelementptr inbounds float* %tmp1585, i64 1
- %tmp1587 = getelementptr inbounds float* %tmp1586, i64 1
- %tmp1588 = getelementptr inbounds float* %tmp1587, i64 1
- %tmp1589 = getelementptr inbounds float* %tmp1588, i64 1
- %tmp1590 = getelementptr inbounds float* %tmp1589, i64 1
- %tmp1591 = getelementptr inbounds float* %tmp1590, i64 1
- %tmp1592 = getelementptr inbounds float* %tmp1591, i64 1
- %tmp1593 = getelementptr inbounds float* %tmp1592, i64 1
- %tmp1594 = getelementptr inbounds float* %tmp1593, i64 1
- %tmp1595 = getelementptr inbounds float* %tmp1594, i64 1
- %tmp1596 = getelementptr inbounds float* %tmp1595, i64 1
- %tmp1597 = getelementptr inbounds float* %tmp1596, i64 1
- %tmp1598 = getelementptr inbounds float* %tmp1597, i64 1
- %tmp1599 = getelementptr inbounds float* %tmp1598, i64 1
- %tmp1600 = getelementptr inbounds float* %tmp1599, i64 1
- %tmp1601 = getelementptr inbounds float* %tmp1600, i64 1
- %tmp1602 = getelementptr inbounds float* %tmp1601, i64 1
- %tmp1603 = getelementptr inbounds float* %tmp1602, i64 1
- %tmp1604 = getelementptr inbounds float* %tmp1603, i64 1
- %tmp1605 = getelementptr inbounds float* %tmp1604, i64 1
- %tmp1606 = getelementptr inbounds float* %tmp1605, i64 1
- %tmp1607 = getelementptr inbounds float* %tmp1606, i64 1
- %tmp1608 = getelementptr inbounds float* %tmp1607, i64 1
- %tmp1609 = getelementptr inbounds float* %tmp1608, i64 1
- %tmp1610 = getelementptr inbounds float* %tmp1609, i64 1
- %tmp1611 = getelementptr inbounds float* %tmp1610, i64 1
- %tmp1612 = getelementptr inbounds float* %tmp1611, i64 1
- %tmp1613 = getelementptr inbounds float* %tmp1612, i64 1
- %tmp1614 = getelementptr inbounds float* %tmp1613, i64 1
- %tmp1615 = getelementptr inbounds float* %tmp1614, i64 1
- %tmp1616 = getelementptr inbounds float* %tmp1615, i64 1
- %tmp1617 = getelementptr inbounds float* %tmp1616, i64 1
- %tmp1618 = getelementptr inbounds float* %tmp1617, i64 1
- %tmp1619 = getelementptr inbounds float* %tmp1618, i64 1
- %tmp1620 = getelementptr inbounds float* %tmp1619, i64 1
- %tmp1621 = getelementptr inbounds float* %tmp1620, i64 1
- %tmp1622 = getelementptr inbounds float* %tmp1621, i64 1
- %tmp1623 = getelementptr inbounds float* %tmp1622, i64 1
- %tmp1624 = getelementptr inbounds float* %tmp1623, i64 1
- %tmp1625 = getelementptr inbounds float* %tmp1624, i64 1
- %tmp1626 = getelementptr inbounds float* %tmp1625, i64 1
- %tmp1627 = getelementptr inbounds float* %tmp1626, i64 1
- %tmp1628 = getelementptr inbounds float* %tmp1627, i64 1
- %tmp1629 = getelementptr inbounds float* %tmp1628, i64 1
- %tmp1630 = getelementptr inbounds float* %tmp1629, i64 1
- %tmp1631 = getelementptr inbounds float* %tmp1630, i64 1
- %tmp1632 = getelementptr inbounds float* %tmp1631, i64 1
- %tmp1633 = getelementptr inbounds float* %tmp1632, i64 1
- %tmp1634 = getelementptr inbounds float* %tmp1633, i64 1
- %tmp1635 = getelementptr inbounds float* %tmp1634, i64 1
- %tmp1636 = getelementptr inbounds float* %tmp1635, i64 1
- %tmp1637 = getelementptr inbounds float* %tmp1636, i64 1
- %tmp1638 = getelementptr inbounds float* %tmp1637, i64 1
- %tmp1639 = getelementptr inbounds float* %tmp1638, i64 1
- %tmp1640 = getelementptr inbounds float* %tmp1639, i64 1
- %tmp1641 = getelementptr inbounds float* %tmp1640, i64 1
- %tmp1642 = getelementptr inbounds float* %tmp1641, i64 1
- %tmp1643 = getelementptr inbounds float* %tmp1642, i64 1
- %tmp1644 = getelementptr inbounds float* %tmp1643, i64 1
- %tmp1645 = getelementptr inbounds float* %tmp1644, i64 1
- %tmp1646 = getelementptr inbounds float* %tmp1645, i64 1
- %tmp1647 = getelementptr inbounds float* %tmp1646, i64 1
- %tmp1648 = getelementptr inbounds float* %tmp1647, i64 1
- %tmp1649 = getelementptr inbounds float* %tmp1648, i64 1
- %tmp1650 = getelementptr inbounds float* %tmp1649, i64 1
- %tmp1651 = getelementptr inbounds float* %tmp1650, i64 1
- %tmp1652 = getelementptr inbounds float* %tmp1651, i64 1
- %tmp1653 = getelementptr inbounds float* %tmp1652, i64 1
- %tmp1654 = getelementptr inbounds float* %tmp1653, i64 1
- %tmp1655 = getelementptr inbounds float* %tmp1654, i64 1
- %tmp1656 = getelementptr inbounds float* %tmp1655, i64 1
- %tmp1657 = getelementptr inbounds float* %tmp1656, i64 1
- %tmp1658 = getelementptr inbounds float* %tmp1657, i64 1
- %tmp1659 = getelementptr inbounds float* %tmp1658, i64 1
- %tmp1660 = getelementptr inbounds float* %tmp1659, i64 1
- %tmp1661 = getelementptr inbounds float* %tmp1660, i64 1
- %tmp1662 = getelementptr inbounds float* %tmp1661, i64 1
- %tmp1663 = getelementptr inbounds float* %tmp1662, i64 1
- %tmp1664 = getelementptr inbounds float* %tmp1663, i64 1
- %tmp1665 = getelementptr inbounds float* %tmp1664, i64 1
- %tmp1666 = getelementptr inbounds float* %tmp1665, i64 1
- %tmp1667 = getelementptr inbounds float* %tmp1666, i64 1
- %tmp1668 = getelementptr inbounds float* %tmp1667, i64 1
- %tmp1669 = getelementptr inbounds float* %tmp1668, i64 1
- %tmp1670 = getelementptr inbounds float* %tmp1669, i64 1
- %tmp1671 = getelementptr inbounds float* %tmp1670, i64 1
- %tmp1672 = getelementptr inbounds float* %tmp1671, i64 1
- %tmp1673 = getelementptr inbounds float* %tmp1672, i64 1
- %tmp1674 = getelementptr inbounds float* %tmp1673, i64 1
- %tmp1675 = getelementptr inbounds float* %tmp1674, i64 1
- %tmp1676 = getelementptr inbounds float* %tmp1675, i64 1
- %tmp1677 = getelementptr inbounds float* %tmp1676, i64 1
- %tmp1678 = getelementptr inbounds float* %tmp1677, i64 1
- %tmp1679 = getelementptr inbounds float* %tmp1678, i64 1
- %tmp1680 = getelementptr inbounds float* %tmp1679, i64 1
- %tmp1681 = getelementptr inbounds float* %tmp1680, i64 1
- %tmp1682 = getelementptr inbounds float* %tmp1681, i64 1
- %tmp1683 = getelementptr inbounds float* %tmp1682, i64 1
- %tmp1684 = getelementptr inbounds float* %tmp1683, i64 1
- %tmp1685 = getelementptr inbounds float* %tmp1684, i64 1
- %tmp1686 = getelementptr inbounds float* %tmp1685, i64 1
- %tmp1687 = getelementptr inbounds float* %tmp1686, i64 1
- %tmp1688 = getelementptr inbounds float* %tmp1687, i64 1
- %tmp1689 = getelementptr inbounds float* %tmp1688, i64 1
- %tmp1690 = getelementptr inbounds float* %tmp1689, i64 1
- %tmp1691 = getelementptr inbounds float* %tmp1690, i64 1
- %tmp1692 = getelementptr inbounds float* %tmp1691, i64 1
- %tmp1693 = getelementptr inbounds float* %tmp1692, i64 1
- %tmp1694 = getelementptr inbounds float* %tmp1693, i64 1
- %tmp1695 = getelementptr inbounds float* %tmp1694, i64 1
- %tmp1696 = getelementptr inbounds float* %tmp1695, i64 1
- %tmp1697 = getelementptr inbounds float* %tmp1696, i64 1
- %tmp1698 = getelementptr inbounds float* %tmp1697, i64 1
- %tmp1699 = getelementptr inbounds float* %tmp1698, i64 1
- %tmp1700 = getelementptr inbounds float* %tmp1699, i64 1
- %tmp1701 = getelementptr inbounds float* %tmp1700, i64 1
- %tmp1702 = getelementptr inbounds float* %tmp1701, i64 1
- %tmp1703 = getelementptr inbounds float* %tmp1702, i64 1
- %tmp1704 = getelementptr inbounds float* %tmp1703, i64 1
- %tmp1705 = getelementptr inbounds float* %tmp1704, i64 1
- %tmp1706 = getelementptr inbounds float* %tmp1705, i64 1
- %tmp1707 = getelementptr inbounds float* %tmp1706, i64 1
- %tmp1708 = getelementptr inbounds float* %tmp1707, i64 1
- %tmp1709 = getelementptr inbounds float* %tmp1708, i64 1
- %tmp1710 = getelementptr inbounds float* %tmp1709, i64 1
- %tmp1711 = getelementptr inbounds float* %tmp1710, i64 1
- %tmp1712 = getelementptr inbounds float* %tmp1711, i64 1
- %tmp1713 = getelementptr inbounds float* %tmp1712, i64 1
- %tmp1714 = getelementptr inbounds float* %tmp1713, i64 1
- %tmp1715 = getelementptr inbounds float* %tmp1714, i64 1
- %tmp1716 = getelementptr inbounds float* %tmp1715, i64 1
- %tmp1717 = getelementptr inbounds float* %tmp1716, i64 1
- %tmp1718 = getelementptr inbounds float* %tmp1717, i64 1
- %tmp1719 = getelementptr inbounds float* %tmp1718, i64 1
- %tmp1720 = getelementptr inbounds float* %tmp1719, i64 1
- %tmp1721 = getelementptr inbounds float* %tmp1720, i64 1
- %tmp1722 = getelementptr inbounds float* %tmp1721, i64 1
- %tmp1723 = getelementptr inbounds float* %tmp1722, i64 1
- %tmp1724 = getelementptr inbounds float* %tmp1723, i64 1
- %tmp1725 = getelementptr inbounds float* %tmp1724, i64 1
- %tmp1726 = getelementptr inbounds float* %tmp1725, i64 1
- %tmp1727 = getelementptr inbounds float* %tmp1726, i64 1
- %tmp1728 = getelementptr inbounds float* %tmp1727, i64 1
- %tmp1729 = getelementptr inbounds float* %tmp1728, i64 1
- %tmp1730 = getelementptr inbounds float* %tmp1729, i64 1
- %tmp1731 = getelementptr inbounds float* %tmp1730, i64 1
- %tmp1732 = getelementptr inbounds float* %tmp1731, i64 1
- %tmp1733 = getelementptr inbounds float* %tmp1732, i64 1
- %tmp1734 = getelementptr inbounds float* %tmp1733, i64 1
- %tmp1735 = getelementptr inbounds float* %tmp1734, i64 1
- %tmp1736 = getelementptr inbounds float* %tmp1735, i64 1
- %tmp1737 = getelementptr inbounds float* %tmp1736, i64 1
- %tmp1738 = getelementptr inbounds float* %tmp1737, i64 1
- %tmp1739 = getelementptr inbounds float* %tmp1738, i64 1
- %tmp1740 = getelementptr inbounds float* %tmp1739, i64 1
- %tmp1741 = getelementptr inbounds float* %tmp1740, i64 1
- %tmp1742 = getelementptr inbounds float* %tmp1741, i64 1
- %tmp1743 = getelementptr inbounds float* %tmp1742, i64 1
- %tmp1744 = getelementptr inbounds float* %tmp1743, i64 1
- %tmp1745 = getelementptr inbounds float* %tmp1744, i64 1
- %tmp1746 = getelementptr inbounds float* %tmp1745, i64 1
- %tmp1747 = getelementptr inbounds float* %tmp1746, i64 1
- %tmp1748 = getelementptr inbounds float* %tmp1747, i64 1
- %tmp1749 = getelementptr inbounds float* %tmp1748, i64 1
- %tmp1750 = getelementptr inbounds float* %tmp1749, i64 1
- %tmp1751 = getelementptr inbounds float* %tmp1750, i64 1
- %tmp1752 = getelementptr inbounds float* %tmp1751, i64 1
- %tmp1753 = getelementptr inbounds float* %tmp1752, i64 1
- %tmp1754 = getelementptr inbounds float* %tmp1753, i64 1
- %tmp1755 = getelementptr inbounds float* %tmp1754, i64 1
- %tmp1756 = getelementptr inbounds float* %tmp1755, i64 1
- %tmp1757 = getelementptr inbounds float* %tmp1756, i64 1
- %tmp1758 = getelementptr inbounds float* %tmp1757, i64 1
- %tmp1759 = getelementptr inbounds float* %tmp1758, i64 1
- %tmp1760 = getelementptr inbounds float* %tmp1759, i64 1
- %tmp1761 = getelementptr inbounds float* %tmp1760, i64 1
- %tmp1762 = getelementptr inbounds float* %tmp1761, i64 1
- %tmp1763 = getelementptr inbounds float* %tmp1762, i64 1
- %tmp1764 = getelementptr inbounds float* %tmp1763, i64 1
- %tmp1765 = getelementptr inbounds float* %tmp1764, i64 1
- %tmp1766 = getelementptr inbounds float* %tmp1765, i64 1
- %tmp1767 = getelementptr inbounds float* %tmp1766, i64 1
- %tmp1768 = getelementptr inbounds float* %tmp1767, i64 1
- %tmp1769 = getelementptr inbounds float* %tmp1768, i64 1
- %tmp1770 = getelementptr inbounds float* %tmp1769, i64 1
- %tmp1771 = getelementptr inbounds float* %tmp1770, i64 1
- %tmp1772 = getelementptr inbounds float* %tmp1771, i64 1
- %tmp1773 = getelementptr inbounds float* %tmp1772, i64 1
- %tmp1774 = getelementptr inbounds float* %tmp1773, i64 1
- %tmp1775 = getelementptr inbounds float* %tmp1774, i64 1
- %tmp1776 = getelementptr inbounds float* %tmp1775, i64 1
- %tmp1777 = getelementptr inbounds float* %tmp1776, i64 1
- %tmp1778 = getelementptr inbounds float* %tmp1777, i64 1
- %tmp1779 = getelementptr inbounds float* %tmp1778, i64 1
- %tmp1780 = getelementptr inbounds float* %tmp1779, i64 1
- %tmp1781 = getelementptr inbounds float* %tmp1780, i64 1
- %tmp1782 = getelementptr inbounds float* %tmp1781, i64 1
- %tmp1783 = getelementptr inbounds float* %tmp1782, i64 1
- %tmp1784 = getelementptr inbounds float* %tmp1783, i64 1
- %tmp1785 = getelementptr inbounds float* %tmp1784, i64 1
- %tmp1786 = getelementptr inbounds float* %tmp1785, i64 1
- %tmp1787 = getelementptr inbounds float* %tmp1786, i64 1
- %tmp1788 = getelementptr inbounds float* %tmp1787, i64 1
- %tmp1789 = getelementptr inbounds float* %tmp1788, i64 1
- %tmp1790 = getelementptr inbounds float* %tmp1789, i64 1
- %tmp1791 = getelementptr inbounds float* %tmp1790, i64 1
- %tmp1792 = getelementptr inbounds float* %tmp1791, i64 1
- %tmp1793 = getelementptr inbounds float* %tmp1792, i64 1
- %tmp1794 = getelementptr inbounds float* %tmp1793, i64 1
- %tmp1795 = getelementptr inbounds float* %tmp1794, i64 1
- %tmp1796 = getelementptr inbounds float* %tmp1795, i64 1
- %tmp1797 = getelementptr inbounds float* %tmp1796, i64 1
- %tmp1798 = getelementptr inbounds float* %tmp1797, i64 1
- %tmp1799 = getelementptr inbounds float* %tmp1798, i64 1
- %tmp1800 = getelementptr inbounds float* %tmp1799, i64 1
- %tmp1801 = getelementptr inbounds float* %tmp1800, i64 1
- %tmp1802 = getelementptr inbounds float* %tmp1801, i64 1
- %tmp1803 = getelementptr inbounds float* %tmp1802, i64 1
- %tmp1804 = getelementptr inbounds float* %tmp1803, i64 1
- %tmp1805 = getelementptr inbounds float* %tmp1804, i64 1
- %tmp1806 = getelementptr inbounds float* %tmp1805, i64 1
- %tmp1807 = getelementptr inbounds float* %tmp1806, i64 1
- %tmp1808 = getelementptr inbounds float* %tmp1807, i64 1
- %tmp1809 = getelementptr inbounds float* %tmp1808, i64 1
- %tmp1810 = getelementptr inbounds float* %tmp1809, i64 1
- %tmp1811 = getelementptr inbounds float* %tmp1810, i64 1
- %tmp1812 = getelementptr inbounds float* %tmp1811, i64 1
- %tmp1813 = getelementptr inbounds float* %tmp1812, i64 1
- %tmp1814 = getelementptr inbounds float* %tmp1813, i64 1
- %tmp1815 = getelementptr inbounds float* %tmp1814, i64 1
- %tmp1816 = getelementptr inbounds float* %tmp1815, i64 1
- %tmp1817 = getelementptr inbounds float* %tmp1816, i64 1
- %tmp1818 = getelementptr inbounds float* %tmp1817, i64 1
- %tmp1819 = getelementptr inbounds float* %tmp1818, i64 1
- %tmp1820 = getelementptr inbounds float* %tmp1819, i64 1
- %tmp1821 = getelementptr inbounds float* %tmp1820, i64 1
- %tmp1822 = getelementptr inbounds float* %tmp1821, i64 1
- %tmp1823 = getelementptr inbounds float* %tmp1822, i64 1
- %tmp1824 = getelementptr inbounds float* %tmp1823, i64 1
- %tmp1825 = getelementptr inbounds float* %tmp1824, i64 1
- %tmp1826 = getelementptr inbounds float* %tmp1825, i64 1
- %tmp1827 = getelementptr inbounds float* %tmp1826, i64 1
- %tmp1828 = getelementptr inbounds float* %tmp1827, i64 1
- %tmp1829 = getelementptr inbounds float* %tmp1828, i64 1
- %tmp1830 = getelementptr inbounds float* %tmp1829, i64 1
- %tmp1831 = getelementptr inbounds float* %tmp1830, i64 1
- %tmp1832 = getelementptr inbounds float* %tmp1831, i64 1
- %tmp1833 = getelementptr inbounds float* %tmp1832, i64 1
- %tmp1834 = getelementptr inbounds float* %tmp1833, i64 1
- %tmp1835 = getelementptr inbounds float* %tmp1834, i64 1
- %tmp1836 = getelementptr inbounds float* %tmp1835, i64 1
- %tmp1837 = getelementptr inbounds float* %tmp1836, i64 1
- %tmp1838 = getelementptr inbounds float* %tmp1837, i64 1
- %tmp1839 = getelementptr inbounds float* %tmp1838, i64 1
- %tmp1840 = getelementptr inbounds float* %tmp1839, i64 1
- %tmp1841 = getelementptr inbounds float* %tmp1840, i64 1
- %tmp1842 = getelementptr inbounds float* %tmp1841, i64 1
- %tmp1843 = getelementptr inbounds float* %tmp1842, i64 1
- %tmp1844 = getelementptr inbounds float* %tmp1843, i64 1
- %tmp1845 = getelementptr inbounds float* %tmp1844, i64 1
- %tmp1846 = getelementptr inbounds float* %tmp1845, i64 1
- %tmp1847 = getelementptr inbounds float* %tmp1846, i64 1
- %tmp1848 = getelementptr inbounds float* %tmp1847, i64 1
- %tmp1849 = getelementptr inbounds float* %tmp1848, i64 1
- %tmp1850 = getelementptr inbounds float* %tmp1849, i64 1
- %tmp1851 = getelementptr inbounds float* %tmp1850, i64 1
- %tmp1852 = getelementptr inbounds float* %tmp1851, i64 1
- %tmp1853 = getelementptr inbounds float* %tmp1852, i64 1
- %tmp1854 = getelementptr inbounds float* %tmp1853, i64 1
- %tmp1855 = getelementptr inbounds float* %tmp1854, i64 1
- %tmp1856 = getelementptr inbounds float* %tmp1855, i64 1
- %tmp1857 = getelementptr inbounds float* %tmp1856, i64 1
- %tmp1858 = getelementptr inbounds float* %tmp1857, i64 1
- %tmp1859 = getelementptr inbounds float* %tmp1858, i64 1
- %tmp1860 = getelementptr inbounds float* %tmp1859, i64 1
- %tmp1861 = getelementptr inbounds float* %tmp1860, i64 1
- %tmp1862 = getelementptr inbounds float* %tmp1861, i64 1
- %tmp1863 = getelementptr inbounds float* %tmp1862, i64 1
- %tmp1864 = getelementptr inbounds float* %tmp1863, i64 1
- %tmp1865 = getelementptr inbounds float* %tmp1864, i64 1
- %tmp1866 = getelementptr inbounds float* %tmp1865, i64 1
- %tmp1867 = getelementptr inbounds float* %tmp1866, i64 1
- %tmp1868 = getelementptr inbounds float* %tmp1867, i64 1
- %tmp1869 = getelementptr inbounds float* %tmp1868, i64 1
- %tmp1870 = getelementptr inbounds float* %tmp1869, i64 1
- %tmp1871 = getelementptr inbounds float* %tmp1870, i64 1
- %tmp1872 = getelementptr inbounds float* %tmp1871, i64 1
- %tmp1873 = getelementptr inbounds float* %tmp1872, i64 1
- %tmp1874 = getelementptr inbounds float* %tmp1873, i64 1
- %tmp1875 = getelementptr inbounds float* %tmp1874, i64 1
- %tmp1876 = getelementptr inbounds float* %tmp1875, i64 1
- %tmp1877 = getelementptr inbounds float* %tmp1876, i64 1
- %tmp1878 = getelementptr inbounds float* %tmp1877, i64 1
- %tmp1879 = getelementptr inbounds float* %tmp1878, i64 1
- %tmp1880 = getelementptr inbounds float* %tmp1879, i64 1
- %tmp1881 = getelementptr inbounds float* %tmp1880, i64 1
- %tmp1882 = getelementptr inbounds float* %tmp1881, i64 1
- %tmp1883 = getelementptr inbounds float* %tmp1882, i64 1
- %tmp1884 = getelementptr inbounds float* %tmp1883, i64 1
- %tmp1885 = getelementptr inbounds float* %tmp1884, i64 1
- %tmp1886 = getelementptr inbounds float* %tmp1885, i64 1
- %tmp1887 = getelementptr inbounds float* %tmp1886, i64 1
- %tmp1888 = getelementptr inbounds float* %tmp1887, i64 1
- %tmp1889 = getelementptr inbounds float* %tmp1888, i64 1
- %tmp1890 = getelementptr inbounds float* %tmp1889, i64 1
- %tmp1891 = getelementptr inbounds float* %tmp1890, i64 1
- %tmp1892 = getelementptr inbounds float* %tmp1891, i64 1
- %tmp1893 = getelementptr inbounds float* %tmp1892, i64 1
- %tmp1894 = getelementptr inbounds float* %tmp1893, i64 1
- %tmp1895 = getelementptr inbounds float* %tmp1894, i64 1
- %tmp1896 = getelementptr inbounds float* %tmp1895, i64 1
- %tmp1897 = getelementptr inbounds float* %tmp1896, i64 1
- %tmp1898 = getelementptr inbounds float* %tmp1897, i64 1
- %tmp1899 = getelementptr inbounds float* %tmp1898, i64 1
- %tmp1900 = getelementptr inbounds float* %tmp1899, i64 1
- %tmp1901 = getelementptr inbounds float* %tmp1900, i64 1
- %tmp1902 = getelementptr inbounds float* %tmp1901, i64 1
- %tmp1903 = getelementptr inbounds float* %tmp1902, i64 1
- %tmp1904 = getelementptr inbounds float* %tmp1903, i64 1
- %tmp1905 = getelementptr inbounds float* %tmp1904, i64 1
- %tmp1906 = getelementptr inbounds float* %tmp1905, i64 1
- %tmp1907 = getelementptr inbounds float* %tmp1906, i64 1
- %tmp1908 = getelementptr inbounds float* %tmp1907, i64 1
- %tmp1909 = getelementptr inbounds float* %tmp1908, i64 1
- %tmp1910 = getelementptr inbounds float* %tmp1909, i64 1
- %tmp1911 = getelementptr inbounds float* %tmp1910, i64 1
- %tmp1912 = getelementptr inbounds float* %tmp1911, i64 1
- %tmp1913 = getelementptr inbounds float* %tmp1912, i64 1
- %tmp1914 = getelementptr inbounds float* %tmp1913, i64 1
- %tmp1915 = getelementptr inbounds float* %tmp1914, i64 1
- %tmp1916 = getelementptr inbounds float* %tmp1915, i64 1
- %tmp1917 = getelementptr inbounds float* %tmp1916, i64 1
- %tmp1918 = getelementptr inbounds float* %tmp1917, i64 1
- %tmp1919 = getelementptr inbounds float* %tmp1918, i64 1
- %tmp1920 = getelementptr inbounds float* %tmp1919, i64 1
- %tmp1921 = getelementptr inbounds float* %tmp1920, i64 1
- %tmp1922 = getelementptr inbounds float* %tmp1921, i64 1
- %tmp1923 = getelementptr inbounds float* %tmp1922, i64 1
- %tmp1924 = getelementptr inbounds float* %tmp1923, i64 1
- %tmp1925 = getelementptr inbounds float* %tmp1924, i64 1
- %tmp1926 = getelementptr inbounds float* %tmp1925, i64 1
- %tmp1927 = getelementptr inbounds float* %tmp1926, i64 1
- %tmp1928 = getelementptr inbounds float* %tmp1927, i64 1
- %tmp1929 = getelementptr inbounds float* %tmp1928, i64 1
- %tmp1930 = getelementptr inbounds float* %tmp1929, i64 1
- %tmp1931 = getelementptr inbounds float* %tmp1930, i64 1
- %tmp1932 = getelementptr inbounds float* %tmp1931, i64 1
- %tmp1933 = getelementptr inbounds float* %tmp1932, i64 1
- %tmp1934 = getelementptr inbounds float* %tmp1933, i64 1
- %tmp1935 = getelementptr inbounds float* %tmp1934, i64 1
- %tmp1936 = getelementptr inbounds float* %tmp1935, i64 1
- %tmp1937 = getelementptr inbounds float* %tmp1936, i64 1
- %tmp1938 = getelementptr inbounds float* %tmp1937, i64 1
- %tmp1939 = getelementptr inbounds float* %tmp1938, i64 1
- %tmp1940 = getelementptr inbounds float* %tmp1939, i64 1
- %tmp1941 = getelementptr inbounds float* %tmp1940, i64 1
- %tmp1942 = getelementptr inbounds float* %tmp1941, i64 1
- %tmp1943 = getelementptr inbounds float* %tmp1942, i64 1
- %tmp1944 = getelementptr inbounds float* %tmp1943, i64 1
- %tmp1945 = getelementptr inbounds float* %tmp1944, i64 1
- %tmp1946 = getelementptr inbounds float* %tmp1945, i64 1
- %tmp1947 = getelementptr inbounds float* %tmp1946, i64 1
- %tmp1948 = getelementptr inbounds float* %tmp1947, i64 1
- %tmp1949 = getelementptr inbounds float* %tmp1948, i64 1
- %tmp1950 = getelementptr inbounds float* %tmp1949, i64 1
- %tmp1951 = getelementptr inbounds float* %tmp1950, i64 1
- %tmp1952 = getelementptr inbounds float* %tmp1951, i64 1
- %tmp1953 = getelementptr inbounds float* %tmp1952, i64 1
- %tmp1954 = getelementptr inbounds float* %tmp1953, i64 1
- %tmp1955 = getelementptr inbounds float* %tmp1954, i64 1
- %tmp1956 = getelementptr inbounds float* %tmp1955, i64 1
- %tmp1957 = getelementptr inbounds float* %tmp1956, i64 1
- %tmp1958 = getelementptr inbounds float* %tmp1957, i64 1
- %tmp1959 = getelementptr inbounds float* %tmp1958, i64 1
- %tmp1960 = getelementptr inbounds float* %tmp1959, i64 1
- %tmp1961 = getelementptr inbounds float* %tmp1960, i64 1
- %tmp1962 = getelementptr inbounds float* %tmp1961, i64 1
- %tmp1963 = getelementptr inbounds float* %tmp1962, i64 1
- %tmp1964 = getelementptr inbounds float* %tmp1963, i64 1
- %tmp1965 = getelementptr inbounds float* %tmp1964, i64 1
- %tmp1966 = getelementptr inbounds float* %tmp1965, i64 1
- %tmp1967 = getelementptr inbounds float* %tmp1966, i64 1
- %tmp1968 = getelementptr inbounds float* %tmp1967, i64 1
- %tmp1969 = getelementptr inbounds float* %tmp1968, i64 1
- %tmp1970 = getelementptr inbounds float* %tmp1969, i64 1
- %tmp1971 = getelementptr inbounds float* %tmp1970, i64 1
- %tmp1972 = getelementptr inbounds float* %tmp1971, i64 1
- %tmp1973 = getelementptr inbounds float* %tmp1972, i64 1
- %tmp1974 = getelementptr inbounds float* %tmp1973, i64 1
- %tmp1975 = getelementptr inbounds float* %tmp1974, i64 1
- %tmp1976 = getelementptr inbounds float* %tmp1975, i64 1
- %tmp1977 = getelementptr inbounds float* %tmp1976, i64 1
- %tmp1978 = getelementptr inbounds float* %tmp1977, i64 1
- %tmp1979 = getelementptr inbounds float* %tmp1978, i64 1
- %tmp1980 = getelementptr inbounds float* %tmp1979, i64 1
- %tmp1981 = getelementptr inbounds float* %tmp1980, i64 1
- %tmp1982 = getelementptr inbounds float* %tmp1981, i64 1
- %tmp1983 = getelementptr inbounds float* %tmp1982, i64 1
- %tmp1984 = getelementptr inbounds float* %tmp1983, i64 1
- %tmp1985 = getelementptr inbounds float* %tmp1984, i64 1
- %tmp1986 = getelementptr inbounds float* %tmp1985, i64 1
- %tmp1987 = getelementptr inbounds float* %tmp1986, i64 1
- %tmp1988 = getelementptr inbounds float* %tmp1987, i64 1
- %tmp1989 = getelementptr inbounds float* %tmp1988, i64 1
- %tmp1990 = getelementptr inbounds float* %tmp1989, i64 1
- %tmp1991 = getelementptr inbounds float* %tmp1990, i64 1
- %tmp1992 = getelementptr inbounds float* %tmp1991, i64 1
- %tmp1993 = getelementptr inbounds float* %tmp1992, i64 1
- %tmp1994 = getelementptr inbounds float* %tmp1993, i64 1
- %tmp1995 = getelementptr inbounds float* %tmp1994, i64 1
- %tmp1996 = getelementptr inbounds float* %tmp1995, i64 1
- %tmp1997 = getelementptr inbounds float* %tmp1996, i64 1
- %tmp1998 = getelementptr inbounds float* %tmp1997, i64 1
- %tmp1999 = getelementptr inbounds float* %tmp1998, i64 1
- %tmp2000 = getelementptr inbounds float* %tmp1999, i64 1
- %tmp2001 = getelementptr inbounds float* %tmp2000, i64 1
- %tmp2002 = getelementptr inbounds float* %tmp2001, i64 1
- %tmp2003 = getelementptr inbounds float* %tmp2002, i64 1
- %tmp2004 = getelementptr inbounds float* %tmp2003, i64 1
- %tmp2005 = getelementptr inbounds float* %tmp2004, i64 1
- %tmp2006 = getelementptr inbounds float* %tmp2005, i64 1
- %tmp2007 = getelementptr inbounds float* %tmp2006, i64 1
- %tmp2008 = getelementptr inbounds float* %tmp2007, i64 1
- %tmp2009 = getelementptr inbounds float* %tmp2008, i64 1
- %tmp2010 = getelementptr inbounds float* %tmp2009, i64 1
- %tmp2011 = getelementptr inbounds float* %tmp2010, i64 1
- %tmp2012 = getelementptr inbounds float* %tmp2011, i64 1
- %tmp2013 = getelementptr inbounds float* %tmp2012, i64 1
- %tmp2014 = getelementptr inbounds float* %tmp2013, i64 1
- %tmp2015 = getelementptr inbounds float* %tmp2014, i64 1
- %tmp2016 = getelementptr inbounds float* %tmp2015, i64 1
- %tmp2017 = getelementptr inbounds float* %tmp2016, i64 1
- %tmp2018 = getelementptr inbounds float* %tmp2017, i64 1
- %tmp2019 = getelementptr inbounds float* %tmp2018, i64 1
- %tmp2020 = getelementptr inbounds float* %tmp2019, i64 1
- %tmp2021 = getelementptr inbounds float* %tmp2020, i64 1
- %tmp2022 = getelementptr inbounds float* %tmp2021, i64 1
- %tmp2023 = getelementptr inbounds float* %tmp2022, i64 1
- %tmp2024 = getelementptr inbounds float* %tmp2023, i64 1
- %tmp2025 = getelementptr inbounds float* %tmp2024, i64 1
- %tmp2026 = getelementptr inbounds float* %tmp2025, i64 1
- %tmp2027 = getelementptr inbounds float* %tmp2026, i64 1
- %tmp2028 = getelementptr inbounds float* %tmp2027, i64 1
- %tmp2029 = getelementptr inbounds float* %tmp2028, i64 1
- %tmp2030 = getelementptr inbounds float* %tmp2029, i64 1
- %tmp2031 = getelementptr inbounds float* %tmp2030, i64 1
- %tmp2032 = getelementptr inbounds float* %tmp2031, i64 1
- %tmp2033 = getelementptr inbounds float* %tmp2032, i64 1
- %tmp2034 = getelementptr inbounds float* %tmp2033, i64 1
- %tmp2035 = getelementptr inbounds float* %tmp2034, i64 1
- %tmp2036 = getelementptr inbounds float* %tmp2035, i64 1
- %tmp2037 = getelementptr inbounds float* %tmp2036, i64 1
- %tmp2038 = getelementptr inbounds float* %tmp2037, i64 1
- %tmp2039 = getelementptr inbounds float* %tmp2038, i64 1
- %tmp2040 = getelementptr inbounds float* %tmp2039, i64 1
- %tmp2041 = getelementptr inbounds float* %tmp2040, i64 1
- %tmp2042 = getelementptr inbounds float* %tmp2041, i64 1
- %tmp2043 = getelementptr inbounds float* %tmp2042, i64 1
- %tmp2044 = getelementptr inbounds float* %tmp2043, i64 1
- %tmp2045 = getelementptr inbounds float* %tmp2044, i64 1
- %tmp2046 = getelementptr inbounds float* %tmp2045, i64 1
- %tmp2047 = getelementptr inbounds float* %tmp2046, i64 1
- %tmp2048 = getelementptr inbounds float* %tmp2047, i64 1
- %tmp2049 = getelementptr inbounds float* %tmp2048, i64 1
- %tmp2050 = getelementptr inbounds float* %tmp2049, i64 1
- %tmp2051 = getelementptr inbounds float* %tmp2050, i64 1
- %tmp2052 = getelementptr inbounds float* %tmp2051, i64 1
- %tmp2053 = getelementptr inbounds float* %tmp2052, i64 1
- %tmp2054 = getelementptr inbounds float* %tmp2053, i64 1
- %tmp2055 = getelementptr inbounds float* %tmp2054, i64 1
- %tmp2056 = getelementptr inbounds float* %tmp2055, i64 1
- %tmp2057 = getelementptr inbounds float* %tmp2056, i64 1
- %tmp2058 = getelementptr inbounds float* %tmp2057, i64 1
- %tmp2059 = getelementptr inbounds float* %tmp2058, i64 1
- %tmp2060 = getelementptr inbounds float* %tmp2059, i64 1
- %tmp2061 = getelementptr inbounds float* %tmp2060, i64 1
- %tmp2062 = getelementptr inbounds float* %tmp2061, i64 1
- %tmp2063 = getelementptr inbounds float* %tmp2062, i64 1
- %tmp2064 = getelementptr inbounds float* %tmp2063, i64 1
- %tmp2065 = getelementptr inbounds float* %tmp2064, i64 1
- %tmp2066 = getelementptr inbounds float* %tmp2065, i64 1
- %tmp2067 = getelementptr inbounds float* %tmp2066, i64 1
- %tmp2068 = getelementptr inbounds float* %tmp2067, i64 1
- %tmp2069 = getelementptr inbounds float* %tmp2068, i64 1
- %tmp2070 = getelementptr inbounds float* %tmp2069, i64 1
- %tmp2071 = getelementptr inbounds float* %tmp2070, i64 1
- %tmp2072 = getelementptr inbounds float* %tmp2071, i64 1
- %tmp2073 = getelementptr inbounds float* %tmp2072, i64 1
- %tmp2074 = getelementptr inbounds float* %tmp2073, i64 1
- %tmp2075 = getelementptr inbounds float* %tmp2074, i64 1
- %tmp2076 = getelementptr inbounds float* %tmp2075, i64 1
- %tmp2077 = getelementptr inbounds float* %tmp2076, i64 1
- %tmp2078 = getelementptr inbounds float* %tmp2077, i64 1
- %tmp2079 = getelementptr inbounds float* %tmp2078, i64 1
- %tmp2080 = getelementptr inbounds float* %tmp2079, i64 1
- %tmp2081 = getelementptr inbounds float* %tmp2080, i64 1
- %tmp2082 = getelementptr inbounds float* %tmp2081, i64 1
- %tmp2083 = getelementptr inbounds float* %tmp2082, i64 1
- %tmp2084 = getelementptr inbounds float* %tmp2083, i64 1
- %tmp2085 = getelementptr inbounds float* %tmp2084, i64 1
- %tmp2086 = getelementptr inbounds float* %tmp2085, i64 1
- %tmp2087 = getelementptr inbounds float* %tmp2086, i64 1
- %tmp2088 = getelementptr inbounds float* %tmp2087, i64 1
- %tmp2089 = getelementptr inbounds float* %tmp2088, i64 1
- %tmp2090 = getelementptr inbounds float* %tmp2089, i64 1
- %tmp2091 = getelementptr inbounds float* %tmp2090, i64 1
- %tmp2092 = getelementptr inbounds float* %tmp2091, i64 1
- %tmp2093 = getelementptr inbounds float* %tmp2092, i64 1
- %tmp2094 = getelementptr inbounds float* %tmp2093, i64 1
- %tmp2095 = getelementptr inbounds float* %tmp2094, i64 1
- %tmp2096 = getelementptr inbounds float* %tmp2095, i64 1
- %tmp2097 = getelementptr inbounds float* %tmp2096, i64 1
- %tmp2098 = getelementptr inbounds float* %tmp2097, i64 1
- %tmp2099 = getelementptr inbounds float* %tmp2098, i64 1
- %tmp2100 = getelementptr inbounds float* %tmp2099, i64 1
- %tmp2101 = getelementptr inbounds float* %tmp2100, i64 1
- %tmp2102 = getelementptr inbounds float* %tmp2101, i64 1
- %tmp2103 = getelementptr inbounds float* %tmp2102, i64 1
- %tmp2104 = getelementptr inbounds float* %tmp2103, i64 1
- %tmp2105 = getelementptr inbounds float* %tmp2104, i64 1
- %tmp2106 = getelementptr inbounds float* %tmp2105, i64 1
- %tmp2107 = getelementptr inbounds float* %tmp2106, i64 1
- %tmp2108 = getelementptr inbounds float* %tmp2107, i64 1
- %tmp2109 = getelementptr inbounds float* %tmp2108, i64 1
- %tmp2110 = getelementptr inbounds float* %tmp2109, i64 1
- %tmp2111 = getelementptr inbounds float* %tmp2110, i64 1
- %tmp2112 = getelementptr inbounds float* %tmp2111, i64 1
- %tmp2113 = getelementptr inbounds float* %tmp2112, i64 1
- %tmp2114 = getelementptr inbounds float* %tmp2113, i64 1
- %tmp2115 = getelementptr inbounds float* %tmp2114, i64 1
- %tmp2116 = getelementptr inbounds float* %tmp2115, i64 1
- %tmp2117 = getelementptr inbounds float* %tmp2116, i64 1
- %tmp2118 = getelementptr inbounds float* %tmp2117, i64 1
- %tmp2119 = getelementptr inbounds float* %tmp2118, i64 1
- %tmp2120 = getelementptr inbounds float* %tmp2119, i64 1
- %tmp2121 = getelementptr inbounds float* %tmp2120, i64 1
- %tmp2122 = getelementptr inbounds float* %tmp2121, i64 1
- %tmp2123 = getelementptr inbounds float* %tmp2122, i64 1
- %tmp2124 = getelementptr inbounds float* %tmp2123, i64 1
- %tmp2125 = getelementptr inbounds float* %tmp2124, i64 1
- %tmp2126 = getelementptr inbounds float* %tmp2125, i64 1
- %tmp2127 = getelementptr inbounds float* %tmp2126, i64 1
- %tmp2128 = getelementptr inbounds float* %tmp2127, i64 1
- %tmp2129 = getelementptr inbounds float* %tmp2128, i64 1
- %tmp2130 = getelementptr inbounds float* %tmp2129, i64 1
- %tmp2131 = getelementptr inbounds float* %tmp2130, i64 1
- %tmp2132 = getelementptr inbounds float* %tmp2131, i64 1
- %tmp2133 = getelementptr inbounds float* %tmp2132, i64 1
- %tmp2134 = getelementptr inbounds float* %tmp2133, i64 1
- %tmp2135 = getelementptr inbounds float* %tmp2134, i64 1
- %tmp2136 = getelementptr inbounds float* %tmp2135, i64 1
- %tmp2137 = getelementptr inbounds float* %tmp2136, i64 1
- %tmp2138 = getelementptr inbounds float* %tmp2137, i64 1
- %tmp2139 = getelementptr inbounds float* %tmp2138, i64 1
- %tmp2140 = getelementptr inbounds float* %tmp2139, i64 1
- %tmp2141 = getelementptr inbounds float* %tmp2140, i64 1
- %tmp2142 = getelementptr inbounds float* %tmp2141, i64 1
- %tmp2143 = getelementptr inbounds float* %tmp2142, i64 1
- %tmp2144 = getelementptr inbounds float* %tmp2143, i64 1
- %tmp2145 = getelementptr inbounds float* %tmp2144, i64 1
- %tmp2146 = getelementptr inbounds float* %tmp2145, i64 1
- %tmp2147 = getelementptr inbounds float* %tmp2146, i64 1
- %tmp2148 = getelementptr inbounds float* %tmp2147, i64 1
- %tmp2149 = getelementptr inbounds float* %tmp2148, i64 1
- %tmp2150 = getelementptr inbounds float* %tmp2149, i64 1
- %tmp2151 = getelementptr inbounds float* %tmp2150, i64 1
- %tmp2152 = getelementptr inbounds float* %tmp2151, i64 1
- %tmp2153 = getelementptr inbounds float* %tmp2152, i64 1
- %tmp2154 = getelementptr inbounds float* %tmp2153, i64 1
- %tmp2155 = getelementptr inbounds float* %tmp2154, i64 1
- %tmp2156 = getelementptr inbounds float* %tmp2155, i64 1
- %tmp2157 = getelementptr inbounds float* %tmp2156, i64 1
- %tmp2158 = getelementptr inbounds float* %tmp2157, i64 1
- %tmp2159 = getelementptr inbounds float* %tmp2158, i64 1
- %tmp2160 = getelementptr inbounds float* %tmp2159, i64 1
- %tmp2161 = getelementptr inbounds float* %tmp2160, i64 1
- %tmp2162 = getelementptr inbounds float* %tmp2161, i64 1
- %tmp2163 = getelementptr inbounds float* %tmp2162, i64 1
- %tmp2164 = getelementptr inbounds float* %tmp2163, i64 1
- %tmp2165 = getelementptr inbounds float* %tmp2164, i64 1
- %tmp2166 = getelementptr inbounds float* %tmp2165, i64 1
- %tmp2167 = getelementptr inbounds float* %tmp2166, i64 1
- %tmp2168 = getelementptr inbounds float* %tmp2167, i64 1
- %tmp2169 = getelementptr inbounds float* %tmp2168, i64 1
- %tmp2170 = getelementptr inbounds float* %tmp2169, i64 1
- %tmp2171 = getelementptr inbounds float* %tmp2170, i64 1
- %tmp2172 = getelementptr inbounds float* %tmp2171, i64 1
- %tmp2173 = getelementptr inbounds float* %tmp2172, i64 1
- %tmp2174 = getelementptr inbounds float* %tmp2173, i64 1
- %tmp2175 = getelementptr inbounds float* %tmp2174, i64 1
- %tmp2176 = getelementptr inbounds float* %tmp2175, i64 1
- %tmp2177 = getelementptr inbounds float* %tmp2176, i64 1
- %tmp2178 = getelementptr inbounds float* %tmp2177, i64 1
- %tmp2179 = getelementptr inbounds float* %tmp2178, i64 1
- %tmp2180 = getelementptr inbounds float* %tmp2179, i64 1
- %tmp2181 = getelementptr inbounds float* %tmp2180, i64 1
- %tmp2182 = getelementptr inbounds float* %tmp2181, i64 1
- %tmp2183 = getelementptr inbounds float* %tmp2182, i64 1
- %tmp2184 = getelementptr inbounds float* %tmp2183, i64 1
- %tmp2185 = getelementptr inbounds float* %tmp2184, i64 1
- %tmp2186 = getelementptr inbounds float* %tmp2185, i64 1
- %tmp2187 = getelementptr inbounds float* %tmp2186, i64 1
- %tmp2188 = getelementptr inbounds float* %tmp2187, i64 1
- %tmp2189 = getelementptr inbounds float* %tmp2188, i64 1
- %tmp2190 = getelementptr inbounds float* %tmp2189, i64 1
- %tmp2191 = getelementptr inbounds float* %tmp2190, i64 1
- %tmp2192 = getelementptr inbounds float* %tmp2191, i64 1
- %tmp2193 = getelementptr inbounds float* %tmp2192, i64 1
- %tmp2194 = getelementptr inbounds float* %tmp2193, i64 1
- %tmp2195 = getelementptr inbounds float* %tmp2194, i64 1
- %tmp2196 = getelementptr inbounds float* %tmp2195, i64 1
- %tmp2197 = getelementptr inbounds float* %tmp2196, i64 1
- %tmp2198 = getelementptr inbounds float* %tmp2197, i64 1
- %tmp2199 = getelementptr inbounds float* %tmp2198, i64 1
- %tmp2200 = getelementptr inbounds float* %tmp2199, i64 1
- %tmp2201 = getelementptr inbounds float* %tmp2200, i64 1
- %tmp2202 = getelementptr inbounds float* %tmp2201, i64 1
- %tmp2203 = getelementptr inbounds float* %tmp2202, i64 1
- %tmp2204 = getelementptr inbounds float* %tmp2203, i64 1
- %tmp2205 = getelementptr inbounds float* %tmp2204, i64 1
- %tmp2206 = getelementptr inbounds float* %tmp2205, i64 1
- %tmp2207 = getelementptr inbounds float* %tmp2206, i64 1
- %tmp2208 = getelementptr inbounds float* %tmp2207, i64 1
- %tmp2209 = getelementptr inbounds float* %tmp2208, i64 1
- %tmp2210 = getelementptr inbounds float* %tmp2209, i64 1
- %tmp2211 = getelementptr inbounds float* %tmp2210, i64 1
- %tmp2212 = getelementptr inbounds float* %tmp2211, i64 1
- %tmp2213 = getelementptr inbounds float* %tmp2212, i64 1
- %tmp2214 = getelementptr inbounds float* %tmp2213, i64 1
- %tmp2215 = getelementptr inbounds float* %tmp2214, i64 1
- %tmp2216 = getelementptr inbounds float* %tmp2215, i64 1
- %tmp2217 = getelementptr inbounds float* %tmp2216, i64 1
- %tmp2218 = getelementptr inbounds float* %tmp2217, i64 1
- %tmp2219 = getelementptr inbounds float* %tmp2218, i64 1
- %tmp2220 = getelementptr inbounds float* %tmp2219, i64 1
- %tmp2221 = getelementptr inbounds float* %tmp2220, i64 1
- %tmp2222 = getelementptr inbounds float* %tmp2221, i64 1
- %tmp2223 = getelementptr inbounds float* %tmp2222, i64 1
- %tmp2224 = getelementptr inbounds float* %tmp2223, i64 1
- %tmp2225 = getelementptr inbounds float* %tmp2224, i64 1
- %tmp2226 = getelementptr inbounds float* %tmp2225, i64 1
- %tmp2227 = getelementptr inbounds float* %tmp2226, i64 1
- %tmp2228 = getelementptr inbounds float* %tmp2227, i64 1
- %tmp2229 = getelementptr inbounds float* %tmp2228, i64 1
- %tmp2230 = getelementptr inbounds float* %tmp2229, i64 1
- %tmp2231 = getelementptr inbounds float* %tmp2230, i64 1
- %tmp2232 = getelementptr inbounds float* %tmp2231, i64 1
- %tmp2233 = getelementptr inbounds float* %tmp2232, i64 1
- %tmp2234 = getelementptr inbounds float* %tmp2233, i64 1
- %tmp2235 = getelementptr inbounds float* %tmp2234, i64 1
- %tmp2236 = getelementptr inbounds float* %tmp2235, i64 1
- %tmp2237 = getelementptr inbounds float* %tmp2236, i64 1
- %tmp2238 = getelementptr inbounds float* %tmp2237, i64 1
- %tmp2239 = getelementptr inbounds float* %tmp2238, i64 1
- %tmp2240 = getelementptr inbounds float* %tmp2239, i64 1
- %tmp2241 = getelementptr inbounds float* %tmp2240, i64 1
- %tmp2242 = getelementptr inbounds float* %tmp2241, i64 1
- %tmp2243 = getelementptr inbounds float* %tmp2242, i64 1
- %tmp2244 = getelementptr inbounds float* %tmp2243, i64 1
- %tmp2245 = getelementptr inbounds float* %tmp2244, i64 1
- %tmp2246 = getelementptr inbounds float* %tmp2245, i64 1
- %tmp2247 = getelementptr inbounds float* %tmp2246, i64 1
- %tmp2248 = getelementptr inbounds float* %tmp2247, i64 1
- %tmp2249 = getelementptr inbounds float* %tmp2248, i64 1
- %tmp2250 = getelementptr inbounds float* %tmp2249, i64 1
- %tmp2251 = getelementptr inbounds float* %tmp2250, i64 1
- %tmp2252 = getelementptr inbounds float* %tmp2251, i64 1
- %tmp2253 = getelementptr inbounds float* %tmp2252, i64 1
- %tmp2254 = getelementptr inbounds float* %tmp2253, i64 1
- %tmp2255 = getelementptr inbounds float* %tmp2254, i64 1
- %tmp2256 = getelementptr inbounds float* %tmp2255, i64 1
- %tmp2257 = getelementptr inbounds float* %tmp2256, i64 1
- %tmp2258 = getelementptr inbounds float* %tmp2257, i64 1
- %tmp2259 = getelementptr inbounds float* %tmp2258, i64 1
- %tmp2260 = getelementptr inbounds float* %tmp2259, i64 1
- %tmp2261 = getelementptr inbounds float* %tmp2260, i64 1
- %tmp2262 = getelementptr inbounds float* %tmp2261, i64 1
- %tmp2263 = getelementptr inbounds float* %tmp2262, i64 1
- %tmp2264 = getelementptr inbounds float* %tmp2263, i64 1
- %tmp2265 = getelementptr inbounds float* %tmp2264, i64 1
- %tmp2266 = getelementptr inbounds float* %tmp2265, i64 1
- %tmp2267 = getelementptr inbounds float* %tmp2266, i64 1
- %tmp2268 = getelementptr inbounds float* %tmp2267, i64 1
- %tmp2269 = getelementptr inbounds float* %tmp2268, i64 1
- %tmp2270 = getelementptr inbounds float* %tmp2269, i64 1
- %tmp2271 = getelementptr inbounds float* %tmp2270, i64 1
- %tmp2272 = getelementptr inbounds float* %tmp2271, i64 1
- %tmp2273 = getelementptr inbounds float* %tmp2272, i64 1
- %tmp2274 = getelementptr inbounds float* %tmp2273, i64 1
- %tmp2275 = getelementptr inbounds float* %tmp2274, i64 1
- %tmp2276 = getelementptr inbounds float* %tmp2275, i64 1
- %tmp2277 = getelementptr inbounds float* %tmp2276, i64 1
- %tmp2278 = getelementptr inbounds float* %tmp2277, i64 1
- %tmp2279 = getelementptr inbounds float* %tmp2278, i64 1
- %tmp2280 = getelementptr inbounds float* %tmp2279, i64 1
- %tmp2281 = getelementptr inbounds float* %tmp2280, i64 1
- %tmp2282 = getelementptr inbounds float* %tmp2281, i64 1
- %tmp2283 = getelementptr inbounds float* %tmp2282, i64 1
- %tmp2284 = getelementptr inbounds float* %tmp2283, i64 1
- %tmp2285 = getelementptr inbounds float* %tmp2284, i64 1
- %tmp2286 = getelementptr inbounds float* %tmp2285, i64 1
- %tmp2287 = getelementptr inbounds float* %tmp2286, i64 1
- %tmp2288 = getelementptr inbounds float* %tmp2287, i64 1
- %tmp2289 = getelementptr inbounds float* %tmp2288, i64 1
- %tmp2290 = getelementptr inbounds float* %tmp2289, i64 1
- %tmp2291 = getelementptr inbounds float* %tmp2290, i64 1
- %tmp2292 = getelementptr inbounds float* %tmp2291, i64 1
- %tmp2293 = getelementptr inbounds float* %tmp2292, i64 1
- %tmp2294 = getelementptr inbounds float* %tmp2293, i64 1
- %tmp2295 = getelementptr inbounds float* %tmp2294, i64 1
- %tmp2296 = getelementptr inbounds float* %tmp2295, i64 1
- %tmp2297 = getelementptr inbounds float* %tmp2296, i64 1
- %tmp2298 = getelementptr inbounds float* %tmp2297, i64 1
- %tmp2299 = getelementptr inbounds float* %tmp2298, i64 1
- %tmp2300 = getelementptr inbounds float* %tmp2299, i64 1
- %tmp2301 = getelementptr inbounds float* %tmp2300, i64 1
- %tmp2302 = getelementptr inbounds float* %tmp2301, i64 1
- %tmp2303 = getelementptr inbounds float* %tmp2302, i64 1
- %tmp2304 = getelementptr inbounds float* %tmp2303, i64 1
- %tmp2305 = getelementptr inbounds float* %tmp2304, i64 1
- %tmp2306 = getelementptr inbounds float* %tmp2305, i64 1
- %tmp2307 = getelementptr inbounds float* %tmp2306, i64 1
- %tmp2308 = getelementptr inbounds float* %tmp2307, i64 1
- %tmp2309 = getelementptr inbounds float* %tmp2308, i64 1
- %tmp2310 = getelementptr inbounds float* %tmp2309, i64 1
- %tmp2311 = getelementptr inbounds float* %tmp2310, i64 1
- %tmp2312 = getelementptr inbounds float* %tmp2311, i64 1
- %tmp2313 = getelementptr inbounds float* %tmp2312, i64 1
- %tmp2314 = getelementptr inbounds float* %tmp2313, i64 1
- %tmp2315 = getelementptr inbounds float* %tmp2314, i64 1
- %tmp2316 = getelementptr inbounds float* %tmp2315, i64 1
- %tmp2317 = getelementptr inbounds float* %tmp2316, i64 1
- %tmp2318 = getelementptr inbounds float* %tmp2317, i64 1
- %tmp2319 = getelementptr inbounds float* %tmp2318, i64 1
- %tmp2320 = getelementptr inbounds float* %tmp2319, i64 1
- %tmp2321 = getelementptr inbounds float* %tmp2320, i64 1
- %tmp2322 = getelementptr inbounds float* %tmp2321, i64 1
- %tmp2323 = getelementptr inbounds float* %tmp2322, i64 1
- %tmp2324 = getelementptr inbounds float* %tmp2323, i64 1
- %tmp2325 = getelementptr inbounds float* %tmp2324, i64 1
- %tmp2326 = getelementptr inbounds float* %tmp2325, i64 1
- %tmp2327 = getelementptr inbounds float* %tmp2326, i64 1
- %tmp2328 = getelementptr inbounds float* %tmp2327, i64 1
- %tmp2329 = getelementptr inbounds float* %tmp2328, i64 1
- %tmp2330 = getelementptr inbounds float* %tmp2329, i64 1
- %tmp2331 = getelementptr inbounds float* %tmp2330, i64 1
- %tmp2332 = getelementptr inbounds float* %tmp2331, i64 1
- %tmp2333 = getelementptr inbounds float* %tmp2332, i64 1
- %tmp2334 = getelementptr inbounds float* %tmp2333, i64 1
- %tmp2335 = getelementptr inbounds float* %tmp2334, i64 1
- %tmp2336 = getelementptr inbounds float* %tmp2335, i64 1
- %tmp2337 = getelementptr inbounds float* %tmp2336, i64 1
- %tmp2338 = getelementptr inbounds float* %tmp2337, i64 1
- %tmp2339 = getelementptr inbounds float* %tmp2338, i64 1
- %tmp2340 = getelementptr inbounds float* %tmp2339, i64 1
- %tmp2341 = getelementptr inbounds float* %tmp2340, i64 1
- %tmp2342 = getelementptr inbounds float* %tmp2341, i64 1
- %tmp2343 = getelementptr inbounds float* %tmp2342, i64 1
- %tmp2344 = getelementptr inbounds float* %tmp2343, i64 1
- %tmp2345 = getelementptr inbounds float* %tmp2344, i64 1
- %tmp2346 = getelementptr inbounds float* %tmp2345, i64 1
- %tmp2347 = getelementptr inbounds float* %tmp2346, i64 1
- %tmp2348 = getelementptr inbounds float* %tmp2347, i64 1
- %tmp2349 = getelementptr inbounds float* %tmp2348, i64 1
- %tmp2350 = getelementptr inbounds float* %tmp2349, i64 1
- %tmp2351 = getelementptr inbounds float* %tmp2350, i64 1
- %tmp2352 = getelementptr inbounds float* %tmp2351, i64 1
- %tmp2353 = getelementptr inbounds float* %tmp2352, i64 1
- %tmp2354 = getelementptr inbounds float* %tmp2353, i64 1
- %tmp2355 = getelementptr inbounds float* %tmp2354, i64 1
- %tmp2356 = getelementptr inbounds float* %tmp2355, i64 1
- %tmp2357 = getelementptr inbounds float* %tmp2356, i64 1
- %tmp2358 = getelementptr inbounds float* %tmp2357, i64 1
- %tmp2359 = getelementptr inbounds float* %tmp2358, i64 1
- %tmp2360 = getelementptr inbounds float* %tmp2359, i64 1
- %tmp2361 = getelementptr inbounds float* %tmp2360, i64 1
- %tmp2362 = getelementptr inbounds float* %tmp2361, i64 1
- %tmp2363 = getelementptr inbounds float* %tmp2362, i64 1
- %tmp2364 = getelementptr inbounds float* %tmp2363, i64 1
- %tmp2365 = getelementptr inbounds float* %tmp2364, i64 1
- %tmp2366 = getelementptr inbounds float* %tmp2365, i64 1
- %tmp2367 = getelementptr inbounds float* %tmp2366, i64 1
- %tmp2368 = getelementptr inbounds float* %tmp2367, i64 1
- %tmp2369 = getelementptr inbounds float* %tmp2368, i64 1
- %tmp2370 = getelementptr inbounds float* %tmp2369, i64 1
- %tmp2371 = getelementptr inbounds float* %tmp2370, i64 1
- %tmp2372 = getelementptr inbounds float* %tmp2371, i64 1
- %tmp2373 = getelementptr inbounds float* %tmp2372, i64 1
- %tmp2374 = getelementptr inbounds float* %tmp2373, i64 1
- %tmp2375 = getelementptr inbounds float* %tmp2374, i64 1
- %tmp2376 = getelementptr inbounds float* %tmp2375, i64 1
- %tmp2377 = getelementptr inbounds float* %tmp2376, i64 1
- %tmp2378 = getelementptr inbounds float* %tmp2377, i64 1
- %tmp2379 = getelementptr inbounds float* %tmp2378, i64 1
- %tmp2380 = getelementptr inbounds float* %tmp2379, i64 1
- %tmp2381 = getelementptr inbounds float* %tmp2380, i64 1
- %tmp2382 = getelementptr inbounds float* %tmp2381, i64 1
- %tmp2383 = getelementptr inbounds float* %tmp2382, i64 1
- %tmp2384 = getelementptr inbounds float* %tmp2383, i64 1
- %tmp2385 = getelementptr inbounds float* %tmp2384, i64 1
- %tmp2386 = getelementptr inbounds float* %tmp2385, i64 1
- %tmp2387 = getelementptr inbounds float* %tmp2386, i64 1
- %tmp2388 = getelementptr inbounds float* %tmp2387, i64 1
- %tmp2389 = getelementptr inbounds float* %tmp2388, i64 1
- %tmp2390 = getelementptr inbounds float* %tmp2389, i64 1
- %tmp2391 = getelementptr inbounds float* %tmp2390, i64 1
- %tmp2392 = getelementptr inbounds float* %tmp2391, i64 1
- %tmp2393 = getelementptr inbounds float* %tmp2392, i64 1
- %tmp2394 = getelementptr inbounds float* %tmp2393, i64 1
- %tmp2395 = getelementptr inbounds float* %tmp2394, i64 1
- %tmp2396 = getelementptr inbounds float* %tmp2395, i64 1
- %tmp2397 = getelementptr inbounds float* %tmp2396, i64 1
- %tmp2398 = getelementptr inbounds float* %tmp2397, i64 1
- %tmp2399 = getelementptr inbounds float* %tmp2398, i64 1
- %tmp2400 = getelementptr inbounds float* %tmp2399, i64 1
- %tmp2401 = getelementptr inbounds float* %tmp2400, i64 1
- %tmp2402 = getelementptr inbounds float* %tmp2401, i64 1
- %tmp2403 = getelementptr inbounds float* %tmp2402, i64 1
- %tmp2404 = getelementptr inbounds float* %tmp2403, i64 1
- %tmp2405 = getelementptr inbounds float* %tmp2404, i64 1
- %tmp2406 = getelementptr inbounds float* %tmp2405, i64 1
- %tmp2407 = getelementptr inbounds float* %tmp2406, i64 1
- %tmp2408 = getelementptr inbounds float* %tmp2407, i64 1
- %tmp2409 = getelementptr inbounds float* %tmp2408, i64 1
- %tmp2410 = getelementptr inbounds float* %tmp2409, i64 1
- %tmp2411 = getelementptr inbounds float* %tmp2410, i64 1
- %tmp2412 = getelementptr inbounds float* %tmp2411, i64 1
- %tmp2413 = getelementptr inbounds float* %tmp2412, i64 1
- %tmp2414 = getelementptr inbounds float* %tmp2413, i64 1
- %tmp2415 = getelementptr inbounds float* %tmp2414, i64 1
- %tmp2416 = getelementptr inbounds float* %tmp2415, i64 1
- %tmp2417 = getelementptr inbounds float* %tmp2416, i64 1
- %tmp2418 = getelementptr inbounds float* %tmp2417, i64 1
- %tmp2419 = getelementptr inbounds float* %tmp2418, i64 1
- %tmp2420 = getelementptr inbounds float* %tmp2419, i64 1
- %tmp2421 = getelementptr inbounds float* %tmp2420, i64 1
- %tmp2422 = getelementptr inbounds float* %tmp2421, i64 1
- %tmp2423 = getelementptr inbounds float* %tmp2422, i64 1
- %tmp2424 = getelementptr inbounds float* %tmp2423, i64 1
- %tmp2425 = getelementptr inbounds float* %tmp2424, i64 1
- %tmp2426 = getelementptr inbounds float* %tmp2425, i64 1
- %tmp2427 = getelementptr inbounds float* %tmp2426, i64 1
- %tmp2428 = getelementptr inbounds float* %tmp2427, i64 1
- %tmp2429 = getelementptr inbounds float* %tmp2428, i64 1
- %tmp2430 = getelementptr inbounds float* %tmp2429, i64 1
- %tmp2431 = getelementptr inbounds float* %tmp2430, i64 1
- %tmp2432 = getelementptr inbounds float* %tmp2431, i64 1
- %tmp2433 = getelementptr inbounds float* %tmp2432, i64 1
- %tmp2434 = getelementptr inbounds float* %tmp2433, i64 1
- %tmp2435 = getelementptr inbounds float* %tmp2434, i64 1
- %tmp2436 = getelementptr inbounds float* %tmp2435, i64 1
- %tmp2437 = getelementptr inbounds float* %tmp2436, i64 1
- %tmp2438 = getelementptr inbounds float* %tmp2437, i64 1
- %tmp2439 = getelementptr inbounds float* %tmp2438, i64 1
- %tmp2440 = getelementptr inbounds float* %tmp2439, i64 1
- %tmp2441 = getelementptr inbounds float* %tmp2440, i64 1
- %tmp2442 = getelementptr inbounds float* %tmp2441, i64 1
- %tmp2443 = getelementptr inbounds float* %tmp2442, i64 1
- %tmp2444 = getelementptr inbounds float* %tmp2443, i64 1
- %tmp2445 = getelementptr inbounds float* %tmp2444, i64 1
- %tmp2446 = getelementptr inbounds float* %tmp2445, i64 1
- %tmp2447 = getelementptr inbounds float* %tmp2446, i64 1
- %tmp2448 = getelementptr inbounds float* %tmp2447, i64 1
- %tmp2449 = getelementptr inbounds float* %tmp2448, i64 1
- %tmp2450 = getelementptr inbounds float* %tmp2449, i64 1
- %tmp2451 = getelementptr inbounds float* %tmp2450, i64 1
- %tmp2452 = getelementptr inbounds float* %tmp2451, i64 1
- %tmp2453 = getelementptr inbounds float* %tmp2452, i64 1
- %tmp2454 = getelementptr inbounds float* %tmp2453, i64 1
- %tmp2455 = getelementptr inbounds float* %tmp2454, i64 1
- %tmp2456 = getelementptr inbounds float* %tmp2455, i64 1
- %tmp2457 = getelementptr inbounds float* %tmp2456, i64 1
- %tmp2458 = getelementptr inbounds float* %tmp2457, i64 1
- %tmp2459 = getelementptr inbounds float* %tmp2458, i64 1
- %tmp2460 = getelementptr inbounds float* %tmp2459, i64 1
- %tmp2461 = getelementptr inbounds float* %tmp2460, i64 1
- %tmp2462 = getelementptr inbounds float* %tmp2461, i64 1
- %tmp2463 = getelementptr inbounds float* %tmp2462, i64 1
- %tmp2464 = getelementptr inbounds float* %tmp2463, i64 1
- %tmp2465 = getelementptr inbounds float* %tmp2464, i64 1
- %tmp2466 = getelementptr inbounds float* %tmp2465, i64 1
- %tmp2467 = getelementptr inbounds float* %tmp2466, i64 1
- %tmp2468 = getelementptr inbounds float* %tmp2467, i64 1
- %tmp2469 = getelementptr inbounds float* %tmp2468, i64 1
- %tmp2470 = getelementptr inbounds float* %tmp2469, i64 1
- %tmp2471 = getelementptr inbounds float* %tmp2470, i64 1
- %tmp2472 = getelementptr inbounds float* %tmp2471, i64 1
- %tmp2473 = getelementptr inbounds float* %tmp2472, i64 1
- %tmp2474 = getelementptr inbounds float* %tmp2473, i64 1
- %tmp2475 = getelementptr inbounds float* %tmp2474, i64 1
- %tmp2476 = getelementptr inbounds float* %tmp2475, i64 1
- %tmp2477 = getelementptr inbounds float* %tmp2476, i64 1
- %tmp2478 = getelementptr inbounds float* %tmp2477, i64 1
- %tmp2479 = getelementptr inbounds float* %tmp2478, i64 1
- %tmp2480 = getelementptr inbounds float* %tmp2479, i64 1
- %tmp2481 = getelementptr inbounds float* %tmp2480, i64 1
- %tmp2482 = getelementptr inbounds float* %tmp2481, i64 1
- %tmp2483 = getelementptr inbounds float* %tmp2482, i64 1
- %tmp2484 = getelementptr inbounds float* %tmp2483, i64 1
- %tmp2485 = getelementptr inbounds float* %tmp2484, i64 1
- %tmp2486 = getelementptr inbounds float* %tmp2485, i64 1
- %tmp2487 = getelementptr inbounds float* %tmp2486, i64 1
- %tmp2488 = getelementptr inbounds float* %tmp2487, i64 1
- %tmp2489 = getelementptr inbounds float* %tmp2488, i64 1
- %tmp2490 = getelementptr inbounds float* %tmp2489, i64 1
- %tmp2491 = getelementptr inbounds float* %tmp2490, i64 1
- %tmp2492 = getelementptr inbounds float* %tmp2491, i64 1
- %tmp2493 = getelementptr inbounds float* %tmp2492, i64 1
- %tmp2494 = getelementptr inbounds float* %tmp2493, i64 1
- %tmp2495 = getelementptr inbounds float* %tmp2494, i64 1
- %tmp2496 = getelementptr inbounds float* %tmp2495, i64 1
- %tmp2497 = getelementptr inbounds float* %tmp2496, i64 1
- %tmp2498 = getelementptr inbounds float* %tmp2497, i64 1
- %tmp2499 = getelementptr inbounds float* %tmp2498, i64 1
- %tmp2500 = getelementptr inbounds float* %tmp2499, i64 1
- %tmp2501 = getelementptr inbounds float* %tmp2500, i64 1
- %tmp2502 = getelementptr inbounds float* %tmp2501, i64 1
- %tmp2503 = getelementptr inbounds float* %tmp2502, i64 1
- %tmp2504 = getelementptr inbounds float* %tmp2503, i64 1
- %tmp2505 = getelementptr inbounds float* %tmp2504, i64 1
- %tmp2506 = getelementptr inbounds float* %tmp2505, i64 1
- %tmp2507 = getelementptr inbounds float* %tmp2506, i64 1
- %tmp2508 = getelementptr inbounds float* %tmp2507, i64 1
- %tmp2509 = getelementptr inbounds float* %tmp2508, i64 1
- %tmp2510 = getelementptr inbounds float* %tmp2509, i64 1
- %tmp2511 = getelementptr inbounds float* %tmp2510, i64 1
- %tmp2512 = getelementptr inbounds float* %tmp2511, i64 1
- %tmp2513 = getelementptr inbounds float* %tmp2512, i64 1
- %tmp2514 = getelementptr inbounds float* %tmp2513, i64 1
- %tmp2515 = getelementptr inbounds float* %tmp2514, i64 1
- %tmp2516 = getelementptr inbounds float* %tmp2515, i64 1
- %tmp2517 = getelementptr inbounds float* %tmp2516, i64 1
- %tmp2518 = getelementptr inbounds float* %tmp2517, i64 1
- %tmp2519 = getelementptr inbounds float* %tmp2518, i64 1
- %tmp2520 = getelementptr inbounds float* %tmp2519, i64 1
- %tmp2521 = getelementptr inbounds float* %tmp2520, i64 1
- %tmp2522 = getelementptr inbounds float* %tmp2521, i64 1
- %tmp2523 = getelementptr inbounds float* %tmp2522, i64 1
- %tmp2524 = getelementptr inbounds float* %tmp2523, i64 1
- %tmp2525 = getelementptr inbounds float* %tmp2524, i64 1
- %tmp2526 = getelementptr inbounds float* %tmp2525, i64 1
- %tmp2527 = getelementptr inbounds float* %tmp2526, i64 1
- %tmp2528 = getelementptr inbounds float* %tmp2527, i64 1
- %tmp2529 = getelementptr inbounds float* %tmp2528, i64 1
- %tmp2530 = getelementptr inbounds float* %tmp2529, i64 1
- %tmp2531 = getelementptr inbounds float* %tmp2530, i64 1
- %tmp2532 = getelementptr inbounds float* %tmp2531, i64 1
- %tmp2533 = getelementptr inbounds float* %tmp2532, i64 1
- %tmp2534 = getelementptr inbounds float* %tmp2533, i64 1
- %tmp2535 = getelementptr inbounds float* %tmp2534, i64 1
- %tmp2536 = getelementptr inbounds float* %tmp2535, i64 1
- %tmp2537 = getelementptr inbounds float* %tmp2536, i64 1
- %tmp2538 = getelementptr inbounds float* %tmp2537, i64 1
- %tmp2539 = getelementptr inbounds float* %tmp2538, i64 1
- %tmp2540 = getelementptr inbounds float* %tmp2539, i64 1
- %tmp2541 = getelementptr inbounds float* %tmp2540, i64 1
- %tmp2542 = getelementptr inbounds float* %tmp2541, i64 1
- %tmp2543 = getelementptr inbounds float* %tmp2542, i64 1
- %tmp2544 = getelementptr inbounds float* %tmp2543, i64 1
- %tmp2545 = getelementptr inbounds float* %tmp2544, i64 1
- %tmp2546 = getelementptr inbounds float* %tmp2545, i64 1
- %tmp2547 = getelementptr inbounds float* %tmp2546, i64 1
- %tmp2548 = getelementptr inbounds float* %tmp2547, i64 1
- %tmp2549 = getelementptr inbounds float* %tmp2548, i64 1
- %tmp2550 = getelementptr inbounds float* %tmp2549, i64 1
- %tmp2551 = getelementptr inbounds float* %tmp2550, i64 1
- %tmp2552 = getelementptr inbounds float* %tmp2551, i64 1
- %tmp2553 = getelementptr inbounds float* %tmp2552, i64 1
- %tmp2554 = getelementptr inbounds float* %tmp2553, i64 1
- %tmp2555 = getelementptr inbounds float* %tmp2554, i64 1
- %tmp2556 = getelementptr inbounds float* %tmp2555, i64 1
- %tmp2557 = getelementptr inbounds float* %tmp2556, i64 1
- %tmp2558 = getelementptr inbounds float* %tmp2557, i64 1
- %tmp2559 = getelementptr inbounds float* %tmp2558, i64 1
- %tmp2560 = getelementptr inbounds float* %tmp2559, i64 1
- %tmp2561 = getelementptr inbounds float* %tmp2560, i64 1
- %tmp2562 = getelementptr inbounds float* %tmp2561, i64 1
- %tmp2563 = getelementptr inbounds float* %tmp2562, i64 1
- %tmp2564 = getelementptr inbounds float* %tmp2563, i64 1
- %tmp2565 = getelementptr inbounds float* %tmp2564, i64 1
- %tmp2566 = getelementptr inbounds float* %tmp2565, i64 1
- %tmp2567 = getelementptr inbounds float* %tmp2566, i64 1
- %tmp2568 = getelementptr inbounds float* %tmp2567, i64 1
- %tmp2569 = getelementptr inbounds float* %tmp2568, i64 1
- %tmp2570 = getelementptr inbounds float* %tmp2569, i64 1
- %tmp2571 = getelementptr inbounds float* %tmp2570, i64 1
- %tmp2572 = getelementptr inbounds float* %tmp2571, i64 1
- %tmp2573 = getelementptr inbounds float* %tmp2572, i64 1
- %tmp2574 = getelementptr inbounds float* %tmp2573, i64 1
- %tmp2575 = getelementptr inbounds float* %tmp2574, i64 1
- %tmp2576 = getelementptr inbounds float* %tmp2575, i64 1
- %tmp2577 = getelementptr inbounds float* %tmp2576, i64 1
- %tmp2578 = getelementptr inbounds float* %tmp2577, i64 1
- %tmp2579 = getelementptr inbounds float* %tmp2578, i64 1
- %tmp2580 = getelementptr inbounds float* %tmp2579, i64 1
- %tmp2581 = getelementptr inbounds float* %tmp2580, i64 1
- %tmp2582 = getelementptr inbounds float* %tmp2581, i64 1
- %tmp2583 = getelementptr inbounds float* %tmp2582, i64 1
- %tmp2584 = getelementptr inbounds float* %tmp2583, i64 1
- %tmp2585 = getelementptr inbounds float* %tmp2584, i64 1
- %tmp2586 = getelementptr inbounds float* %tmp2585, i64 1
- %tmp2587 = getelementptr inbounds float* %tmp2586, i64 1
- %tmp2588 = getelementptr inbounds float* %tmp2587, i64 1
- %tmp2589 = getelementptr inbounds float* %tmp2588, i64 1
- %tmp2590 = getelementptr inbounds float* %tmp2589, i64 1
- %tmp2591 = getelementptr inbounds float* %tmp2590, i64 1
- %tmp2592 = getelementptr inbounds float* %tmp2591, i64 1
- %tmp2593 = getelementptr inbounds float* %tmp2592, i64 1
- %tmp2594 = getelementptr inbounds float* %tmp2593, i64 1
- %tmp2595 = getelementptr inbounds float* %tmp2594, i64 1
- %tmp2596 = getelementptr inbounds float* %tmp2595, i64 1
- %tmp2597 = getelementptr inbounds float* %tmp2596, i64 1
- %tmp2598 = getelementptr inbounds float* %tmp2597, i64 1
- %tmp2599 = getelementptr inbounds float* %tmp2598, i64 1
- %tmp2600 = getelementptr inbounds float* %tmp2599, i64 1
- %tmp2601 = getelementptr inbounds float* %tmp2600, i64 1
- %tmp2602 = getelementptr inbounds float* %tmp2601, i64 1
- %tmp2603 = getelementptr inbounds float* %tmp2602, i64 1
- %tmp2604 = getelementptr inbounds float* %tmp2603, i64 1
- %tmp2605 = getelementptr inbounds float* %tmp2604, i64 1
- %tmp2606 = getelementptr inbounds float* %tmp2605, i64 1
- %tmp2607 = getelementptr inbounds float* %tmp2606, i64 1
- %tmp2608 = getelementptr inbounds float* %tmp2607, i64 1
- %tmp2609 = getelementptr inbounds float* %tmp2608, i64 1
- %tmp2610 = getelementptr inbounds float* %tmp2609, i64 1
- %tmp2611 = getelementptr inbounds float* %tmp2610, i64 1
- %tmp2612 = getelementptr inbounds float* %tmp2611, i64 1
- %tmp2613 = getelementptr inbounds float* %tmp2612, i64 1
- %tmp2614 = getelementptr inbounds float* %tmp2613, i64 1
- %tmp2615 = getelementptr inbounds float* %tmp2614, i64 1
- %tmp2616 = getelementptr inbounds float* %tmp2615, i64 1
- %tmp2617 = getelementptr inbounds float* %tmp2616, i64 1
- %tmp2618 = getelementptr inbounds float* %tmp2617, i64 1
- %tmp2619 = getelementptr inbounds float* %tmp2618, i64 1
- %tmp2620 = getelementptr inbounds float* %tmp2619, i64 1
- %tmp2621 = getelementptr inbounds float* %tmp2620, i64 1
- %tmp2622 = getelementptr inbounds float* %tmp2621, i64 1
- %tmp2623 = getelementptr inbounds float* %tmp2622, i64 1
- %tmp2624 = getelementptr inbounds float* %tmp2623, i64 1
- %tmp2625 = getelementptr inbounds float* %tmp2624, i64 1
- %tmp2626 = getelementptr inbounds float* %tmp2625, i64 1
- %tmp2627 = getelementptr inbounds float* %tmp2626, i64 1
- %tmp2628 = getelementptr inbounds float* %tmp2627, i64 1
- %tmp2629 = getelementptr inbounds float* %tmp2628, i64 1
- %tmp2630 = getelementptr inbounds float* %tmp2629, i64 1
- %tmp2631 = getelementptr inbounds float* %tmp2630, i64 1
- %tmp2632 = getelementptr inbounds float* %tmp2631, i64 1
- %tmp2633 = getelementptr inbounds float* %tmp2632, i64 1
- %tmp2634 = getelementptr inbounds float* %tmp2633, i64 1
- %tmp2635 = getelementptr inbounds float* %tmp2634, i64 1
- %tmp2636 = getelementptr inbounds float* %tmp2635, i64 1
- %tmp2637 = getelementptr inbounds float* %tmp2636, i64 1
- %tmp2638 = getelementptr inbounds float* %tmp2637, i64 1
- %tmp2639 = getelementptr inbounds float* %tmp2638, i64 1
- %tmp2640 = getelementptr inbounds float* %tmp2639, i64 1
- %tmp2641 = getelementptr inbounds float* %tmp2640, i64 1
- %tmp2642 = getelementptr inbounds float* %tmp2641, i64 1
- %tmp2643 = getelementptr inbounds float* %tmp2642, i64 1
- %tmp2644 = getelementptr inbounds float* %tmp2643, i64 1
- %tmp2645 = getelementptr inbounds float* %tmp2644, i64 1
- %tmp2646 = getelementptr inbounds float* %tmp2645, i64 1
- %tmp2647 = getelementptr inbounds float* %tmp2646, i64 1
- %tmp2648 = getelementptr inbounds float* %tmp2647, i64 1
- %tmp2649 = getelementptr inbounds float* %tmp2648, i64 1
- %tmp2650 = getelementptr inbounds float* %tmp2649, i64 1
- %tmp2651 = getelementptr inbounds float* %tmp2650, i64 1
- %tmp2652 = getelementptr inbounds float* %tmp2651, i64 1
- %tmp2653 = getelementptr inbounds float* %tmp2652, i64 1
- %tmp2654 = getelementptr inbounds float* %tmp2653, i64 1
- %tmp2655 = getelementptr inbounds float* %tmp2654, i64 1
- %tmp2656 = getelementptr inbounds float* %tmp2655, i64 1
- %tmp2657 = getelementptr inbounds float* %tmp2656, i64 1
- %tmp2658 = getelementptr inbounds float* %tmp2657, i64 1
- %tmp2659 = getelementptr inbounds float* %tmp2658, i64 1
- %tmp2660 = getelementptr inbounds float* %tmp2659, i64 1
- %tmp2661 = getelementptr inbounds float* %tmp2660, i64 1
- %tmp2662 = getelementptr inbounds float* %tmp2661, i64 1
- %tmp2663 = getelementptr inbounds float* %tmp2662, i64 1
- %tmp2664 = getelementptr inbounds float* %tmp2663, i64 1
- %tmp2665 = getelementptr inbounds float* %tmp2664, i64 1
- %tmp2666 = getelementptr inbounds float* %tmp2665, i64 1
- %tmp2667 = getelementptr inbounds float* %tmp2666, i64 1
- %tmp2668 = getelementptr inbounds float* %tmp2667, i64 1
- %tmp2669 = getelementptr inbounds float* %tmp2668, i64 1
- %tmp2670 = getelementptr inbounds float* %tmp2669, i64 1
- %tmp2671 = getelementptr inbounds float* %tmp2670, i64 1
- %tmp2672 = getelementptr inbounds float* %tmp2671, i64 1
- %tmp2673 = getelementptr inbounds float* %tmp2672, i64 1
- %tmp2674 = getelementptr inbounds float* %tmp2673, i64 1
- %tmp2675 = getelementptr inbounds float* %tmp2674, i64 1
- %tmp2676 = getelementptr inbounds float* %tmp2675, i64 1
- %tmp2677 = getelementptr inbounds float* %tmp2676, i64 1
- %tmp2678 = getelementptr inbounds float* %tmp2677, i64 1
- %tmp2679 = getelementptr inbounds float* %tmp2678, i64 1
- %tmp2680 = getelementptr inbounds float* %tmp2679, i64 1
- %tmp2681 = getelementptr inbounds float* %tmp2680, i64 1
- %tmp2682 = getelementptr inbounds float* %tmp2681, i64 1
- %tmp2683 = getelementptr inbounds float* %tmp2682, i64 1
- %tmp2684 = getelementptr inbounds float* %tmp2683, i64 1
- %tmp2685 = getelementptr inbounds float* %tmp2684, i64 1
- %tmp2686 = getelementptr inbounds float* %tmp2685, i64 1
- %tmp2687 = getelementptr inbounds float* %tmp2686, i64 1
- %tmp2688 = getelementptr inbounds float* %tmp2687, i64 1
- %tmp2689 = getelementptr inbounds float* %tmp2688, i64 1
- %tmp2690 = getelementptr inbounds float* %tmp2689, i64 1
- %tmp2691 = getelementptr inbounds float* %tmp2690, i64 1
- %tmp2692 = getelementptr inbounds float* %tmp2691, i64 1
- %tmp2693 = getelementptr inbounds float* %tmp2692, i64 1
- %tmp2694 = getelementptr inbounds float* %tmp2693, i64 1
- %tmp2695 = getelementptr inbounds float* %tmp2694, i64 1
- %tmp2696 = getelementptr inbounds float* %tmp2695, i64 1
- %tmp2697 = getelementptr inbounds float* %tmp2696, i64 1
- %tmp2698 = getelementptr inbounds float* %tmp2697, i64 1
- %tmp2699 = getelementptr inbounds float* %tmp2698, i64 1
- %tmp2700 = getelementptr inbounds float* %tmp2699, i64 1
- %tmp2701 = getelementptr inbounds float* %tmp2700, i64 1
- %tmp2702 = getelementptr inbounds float* %tmp2701, i64 1
- %tmp2703 = getelementptr inbounds float* %tmp2702, i64 1
- %tmp2704 = getelementptr inbounds float* %tmp2703, i64 1
- %tmp2705 = getelementptr inbounds float* %tmp2704, i64 1
- %tmp2706 = getelementptr inbounds float* %tmp2705, i64 1
- %tmp2707 = getelementptr inbounds float* %tmp2706, i64 1
- %tmp2708 = getelementptr inbounds float* %tmp2707, i64 1
- %tmp2709 = getelementptr inbounds float* %tmp2708, i64 1
- %tmp2710 = getelementptr inbounds float* %tmp2709, i64 1
- %tmp2711 = getelementptr inbounds float* %tmp2710, i64 1
- %tmp2712 = getelementptr inbounds float* %tmp2711, i64 1
- %tmp2713 = getelementptr inbounds float* %tmp2712, i64 1
- %tmp2714 = getelementptr inbounds float* %tmp2713, i64 1
- %tmp2715 = getelementptr inbounds float* %tmp2714, i64 1
- %tmp2716 = getelementptr inbounds float* %tmp2715, i64 1
- %tmp2717 = getelementptr inbounds float* %tmp2716, i64 1
- %tmp2718 = getelementptr inbounds float* %tmp2717, i64 1
- %tmp2719 = getelementptr inbounds float* %tmp2718, i64 1
- %tmp2720 = getelementptr inbounds float* %tmp2719, i64 1
- %tmp2721 = getelementptr inbounds float* %tmp2720, i64 1
- %tmp2722 = getelementptr inbounds float* %tmp2721, i64 1
- %tmp2723 = getelementptr inbounds float* %tmp2722, i64 1
- %tmp2724 = getelementptr inbounds float* %tmp2723, i64 1
- %tmp2725 = getelementptr inbounds float* %tmp2724, i64 1
- %tmp2726 = getelementptr inbounds float* %tmp2725, i64 1
- %tmp2727 = getelementptr inbounds float* %tmp2726, i64 1
- %tmp2728 = getelementptr inbounds float* %tmp2727, i64 1
- %tmp2729 = getelementptr inbounds float* %tmp2728, i64 1
- %tmp2730 = getelementptr inbounds float* %tmp2729, i64 1
- %tmp2731 = getelementptr inbounds float* %tmp2730, i64 1
- %tmp2732 = getelementptr inbounds float* %tmp2731, i64 1
- %tmp2733 = getelementptr inbounds float* %tmp2732, i64 1
- %tmp2734 = getelementptr inbounds float* %tmp2733, i64 1
- %tmp2735 = getelementptr inbounds float* %tmp2734, i64 1
- %tmp2736 = getelementptr inbounds float* %tmp2735, i64 1
- %tmp2737 = getelementptr inbounds float* %tmp2736, i64 1
- %tmp2738 = getelementptr inbounds float* %tmp2737, i64 1
- %tmp2739 = getelementptr inbounds float* %tmp2738, i64 1
- %tmp2740 = getelementptr inbounds float* %tmp2739, i64 1
- %tmp2741 = getelementptr inbounds float* %tmp2740, i64 1
- %tmp2742 = getelementptr inbounds float* %tmp2741, i64 1
- %tmp2743 = getelementptr inbounds float* %tmp2742, i64 1
- %tmp2744 = getelementptr inbounds float* %tmp2743, i64 1
- %tmp2745 = getelementptr inbounds float* %tmp2744, i64 1
- %tmp2746 = getelementptr inbounds float* %tmp2745, i64 1
- %tmp2747 = getelementptr inbounds float* %tmp2746, i64 1
- %tmp2748 = getelementptr inbounds float* %tmp2747, i64 1
- %tmp2749 = getelementptr inbounds float* %tmp2748, i64 1
- %tmp2750 = getelementptr inbounds float* %tmp2749, i64 1
- %tmp2751 = getelementptr inbounds float* %tmp2750, i64 1
- %tmp2752 = getelementptr inbounds float* %tmp2751, i64 1
- %tmp2753 = getelementptr inbounds float* %tmp2752, i64 1
- %tmp2754 = getelementptr inbounds float* %tmp2753, i64 1
- %tmp2755 = getelementptr inbounds float* %tmp2754, i64 1
- %tmp2756 = getelementptr inbounds float* %tmp2755, i64 1
- %tmp2757 = getelementptr inbounds float* %tmp2756, i64 1
- %tmp2758 = getelementptr inbounds float* %tmp2757, i64 1
- %tmp2759 = getelementptr inbounds float* %tmp2758, i64 1
- %tmp2760 = getelementptr inbounds float* %tmp2759, i64 1
- %tmp2761 = getelementptr inbounds float* %tmp2760, i64 1
- %tmp2762 = getelementptr inbounds float* %tmp2761, i64 1
- %tmp2763 = getelementptr inbounds float* %tmp2762, i64 1
- %tmp2764 = getelementptr inbounds float* %tmp2763, i64 1
- %tmp2765 = getelementptr inbounds float* %tmp2764, i64 1
- %tmp2766 = getelementptr inbounds float* %tmp2765, i64 1
- %tmp2767 = getelementptr inbounds float* %tmp2766, i64 1
- %tmp2768 = getelementptr inbounds float* %tmp2767, i64 1
- %tmp2769 = getelementptr inbounds float* %tmp2768, i64 1
- %tmp2770 = getelementptr inbounds float* %tmp2769, i64 1
- %tmp2771 = getelementptr inbounds float* %tmp2770, i64 1
- %tmp2772 = getelementptr inbounds float* %tmp2771, i64 1
- %tmp2773 = getelementptr inbounds float* %tmp2772, i64 1
- %tmp2774 = getelementptr inbounds float* %tmp2773, i64 1
- %tmp2775 = getelementptr inbounds float* %tmp2774, i64 1
- %tmp2776 = getelementptr inbounds float* %tmp2775, i64 1
- %tmp2777 = getelementptr inbounds float* %tmp2776, i64 1
- %tmp2778 = getelementptr inbounds float* %tmp2777, i64 1
- %tmp2779 = getelementptr inbounds float* %tmp2778, i64 1
- %tmp2780 = getelementptr inbounds float* %tmp2779, i64 1
- %tmp2781 = getelementptr inbounds float* %tmp2780, i64 1
- %tmp2782 = getelementptr inbounds float* %tmp2781, i64 1
- %tmp2783 = getelementptr inbounds float* %tmp2782, i64 1
- %tmp2784 = getelementptr inbounds float* %tmp2783, i64 1
- %tmp2785 = getelementptr inbounds float* %tmp2784, i64 1
- %tmp2786 = getelementptr inbounds float* %tmp2785, i64 1
- %tmp2787 = getelementptr inbounds float* %tmp2786, i64 1
- %tmp2788 = getelementptr inbounds float* %tmp2787, i64 1
- %tmp2789 = getelementptr inbounds float* %tmp2788, i64 1
- %tmp2790 = getelementptr inbounds float* %tmp2789, i64 1
- %tmp2791 = getelementptr inbounds float* %tmp2790, i64 1
- %tmp2792 = getelementptr inbounds float* %tmp2791, i64 1
- %tmp2793 = getelementptr inbounds float* %tmp2792, i64 1
- %tmp2794 = getelementptr inbounds float* %tmp2793, i64 1
- %tmp2795 = getelementptr inbounds float* %tmp2794, i64 1
- %tmp2796 = getelementptr inbounds float* %tmp2795, i64 1
- %tmp2797 = getelementptr inbounds float* %tmp2796, i64 1
- %tmp2798 = getelementptr inbounds float* %tmp2797, i64 1
- %tmp2799 = getelementptr inbounds float* %tmp2798, i64 1
- %tmp2800 = getelementptr inbounds float* %tmp2799, i64 1
- %tmp2801 = getelementptr inbounds float* %tmp2800, i64 1
- %tmp2802 = getelementptr inbounds float* %tmp2801, i64 1
- %tmp2803 = getelementptr inbounds float* %tmp2802, i64 1
- %tmp2804 = getelementptr inbounds float* %tmp2803, i64 1
- %tmp2805 = getelementptr inbounds float* %tmp2804, i64 1
- %tmp2806 = getelementptr inbounds float* %tmp2805, i64 1
- %tmp2807 = getelementptr inbounds float* %tmp2806, i64 1
- %tmp2808 = getelementptr inbounds float* %tmp2807, i64 1
- %tmp2809 = getelementptr inbounds float* %tmp2808, i64 1
- %tmp2810 = getelementptr inbounds float* %tmp2809, i64 1
- %tmp2811 = getelementptr inbounds float* %tmp2810, i64 1
- %tmp2812 = getelementptr inbounds float* %tmp2811, i64 1
- %tmp2813 = getelementptr inbounds float* %tmp2812, i64 1
- %tmp2814 = getelementptr inbounds float* %tmp2813, i64 1
- %tmp2815 = getelementptr inbounds float* %tmp2814, i64 1
- %tmp2816 = getelementptr inbounds float* %tmp2815, i64 1
- %tmp2817 = getelementptr inbounds float* %tmp2816, i64 1
- %tmp2818 = getelementptr inbounds float* %tmp2817, i64 1
- %tmp2819 = getelementptr inbounds float* %tmp2818, i64 1
- %tmp2820 = getelementptr inbounds float* %tmp2819, i64 1
- %tmp2821 = getelementptr inbounds float* %tmp2820, i64 1
- %tmp2822 = getelementptr inbounds float* %tmp2821, i64 1
- %tmp2823 = getelementptr inbounds float* %tmp2822, i64 1
- %tmp2824 = getelementptr inbounds float* %tmp2823, i64 1
- %tmp2825 = getelementptr inbounds float* %tmp2824, i64 1
- %tmp2826 = getelementptr inbounds float* %tmp2825, i64 1
- %tmp2827 = getelementptr inbounds float* %tmp2826, i64 1
- %tmp2828 = getelementptr inbounds float* %tmp2827, i64 1
- %tmp2829 = getelementptr inbounds float* %tmp2828, i64 1
- %tmp2830 = getelementptr inbounds float* %tmp2829, i64 1
- %tmp2831 = getelementptr inbounds float* %tmp2830, i64 1
- %tmp2832 = getelementptr inbounds float* %tmp2831, i64 1
- %tmp2833 = getelementptr inbounds float* %tmp2832, i64 1
- %tmp2834 = getelementptr inbounds float* %tmp2833, i64 1
- %tmp2835 = getelementptr inbounds float* %tmp2834, i64 1
- %tmp2836 = getelementptr inbounds float* %tmp2835, i64 1
- %tmp2837 = getelementptr inbounds float* %tmp2836, i64 1
- %tmp2838 = getelementptr inbounds float* %tmp2837, i64 1
- %tmp2839 = getelementptr inbounds float* %tmp2838, i64 1
- %tmp2840 = getelementptr inbounds float* %tmp2839, i64 1
- %tmp2841 = getelementptr inbounds float* %tmp2840, i64 1
- %tmp2842 = getelementptr inbounds float* %tmp2841, i64 1
- %tmp2843 = getelementptr inbounds float* %tmp2842, i64 1
- %tmp2844 = getelementptr inbounds float* %tmp2843, i64 1
- %tmp2845 = getelementptr inbounds float* %tmp2844, i64 1
- %tmp2846 = getelementptr inbounds float* %tmp2845, i64 1
- %tmp2847 = getelementptr inbounds float* %tmp2846, i64 1
- %tmp2848 = getelementptr inbounds float* %tmp2847, i64 1
- %tmp2849 = getelementptr inbounds float* %tmp2848, i64 1
- %tmp2850 = getelementptr inbounds float* %tmp2849, i64 1
- %tmp2851 = getelementptr inbounds float* %tmp2850, i64 1
- %tmp2852 = getelementptr inbounds float* %tmp2851, i64 1
- %tmp2853 = getelementptr inbounds float* %tmp2852, i64 1
- %tmp2854 = getelementptr inbounds float* %tmp2853, i64 1
- %tmp2855 = getelementptr inbounds float* %tmp2854, i64 1
- %tmp2856 = getelementptr inbounds float* %tmp2855, i64 1
- %tmp2857 = getelementptr inbounds float* %tmp2856, i64 1
- %tmp2858 = getelementptr inbounds float* %tmp2857, i64 1
- %tmp2859 = getelementptr inbounds float* %tmp2858, i64 1
- %tmp2860 = getelementptr inbounds float* %tmp2859, i64 1
- %tmp2861 = getelementptr inbounds float* %tmp2860, i64 1
- %tmp2862 = getelementptr inbounds float* %tmp2861, i64 1
- %tmp2863 = getelementptr inbounds float* %tmp2862, i64 1
- %tmp2864 = getelementptr inbounds float* %tmp2863, i64 1
- %tmp2865 = getelementptr inbounds float* %tmp2864, i64 1
- %tmp2866 = getelementptr inbounds float* %tmp2865, i64 1
- %tmp2867 = getelementptr inbounds float* %tmp2866, i64 1
- %tmp2868 = getelementptr inbounds float* %tmp2867, i64 1
- %tmp2869 = getelementptr inbounds float* %tmp2868, i64 1
- %tmp2870 = getelementptr inbounds float* %tmp2869, i64 1
- %tmp2871 = getelementptr inbounds float* %tmp2870, i64 1
- %tmp2872 = getelementptr inbounds float* %tmp2871, i64 1
- %tmp2873 = getelementptr inbounds float* %tmp2872, i64 1
- %tmp2874 = getelementptr inbounds float* %tmp2873, i64 1
- %tmp2875 = getelementptr inbounds float* %tmp2874, i64 1
- %tmp2876 = getelementptr inbounds float* %tmp2875, i64 1
- %tmp2877 = getelementptr inbounds float* %tmp2876, i64 1
- %tmp2878 = getelementptr inbounds float* %tmp2877, i64 1
- %tmp2879 = getelementptr inbounds float* %tmp2878, i64 1
- %tmp2880 = getelementptr inbounds float* %tmp2879, i64 1
- %tmp2881 = getelementptr inbounds float* %tmp2880, i64 1
- %tmp2882 = getelementptr inbounds float* %tmp2881, i64 1
- %tmp2883 = getelementptr inbounds float* %tmp2882, i64 1
- %tmp2884 = getelementptr inbounds float* %tmp2883, i64 1
- %tmp2885 = getelementptr inbounds float* %tmp2884, i64 1
- %tmp2886 = getelementptr inbounds float* %tmp2885, i64 1
- %tmp2887 = getelementptr inbounds float* %tmp2886, i64 1
- %tmp2888 = getelementptr inbounds float* %tmp2887, i64 1
- %tmp2889 = getelementptr inbounds float* %tmp2888, i64 1
- %tmp2890 = getelementptr inbounds float* %tmp2889, i64 1
- %tmp2891 = getelementptr inbounds float* %tmp2890, i64 1
- %tmp2892 = getelementptr inbounds float* %tmp2891, i64 1
- %tmp2893 = getelementptr inbounds float* %tmp2892, i64 1
- %tmp2894 = getelementptr inbounds float* %tmp2893, i64 1
- %tmp2895 = getelementptr inbounds float* %tmp2894, i64 1
- %tmp2896 = getelementptr inbounds float* %tmp2895, i64 1
- %tmp2897 = getelementptr inbounds float* %tmp2896, i64 1
- %tmp2898 = getelementptr inbounds float* %tmp2897, i64 1
- %tmp2899 = getelementptr inbounds float* %tmp2898, i64 1
- %tmp2900 = getelementptr inbounds float* %tmp2899, i64 1
- %tmp2901 = getelementptr inbounds float* %tmp2900, i64 1
- %tmp2902 = getelementptr inbounds float* %tmp2901, i64 1
- %tmp2903 = getelementptr inbounds float* %tmp2902, i64 1
- %tmp2904 = getelementptr inbounds float* %tmp2903, i64 1
- %tmp2905 = getelementptr inbounds float* %tmp2904, i64 1
- %tmp2906 = getelementptr inbounds float* %tmp2905, i64 1
- %tmp2907 = getelementptr inbounds float* %tmp2906, i64 1
- %tmp2908 = getelementptr inbounds float* %tmp2907, i64 1
- %tmp2909 = getelementptr inbounds float* %tmp2908, i64 1
- %tmp2910 = getelementptr inbounds float* %tmp2909, i64 1
- %tmp2911 = getelementptr inbounds float* %tmp2910, i64 1
- %tmp2912 = getelementptr inbounds float* %tmp2911, i64 1
- %tmp2913 = getelementptr inbounds float* %tmp2912, i64 1
- %tmp2914 = getelementptr inbounds float* %tmp2913, i64 1
- %tmp2915 = getelementptr inbounds float* %tmp2914, i64 1
- %tmp2916 = getelementptr inbounds float* %tmp2915, i64 1
- %tmp2917 = getelementptr inbounds float* %tmp2916, i64 1
- %tmp2918 = getelementptr inbounds float* %tmp2917, i64 1
- %tmp2919 = getelementptr inbounds float* %tmp2918, i64 1
- %tmp2920 = getelementptr inbounds float* %tmp2919, i64 1
- %tmp2921 = getelementptr inbounds float* %tmp2920, i64 1
- %tmp2922 = getelementptr inbounds float* %tmp2921, i64 1
- %tmp2923 = getelementptr inbounds float* %tmp2922, i64 1
- %tmp2924 = getelementptr inbounds float* %tmp2923, i64 1
- %tmp2925 = getelementptr inbounds float* %tmp2924, i64 1
- %tmp2926 = getelementptr inbounds float* %tmp2925, i64 1
- %tmp2927 = getelementptr inbounds float* %tmp2926, i64 1
- %tmp2928 = getelementptr inbounds float* %tmp2927, i64 1
- %tmp2929 = getelementptr inbounds float* %tmp2928, i64 1
- %tmp2930 = getelementptr inbounds float* %tmp2929, i64 1
- %tmp2931 = getelementptr inbounds float* %tmp2930, i64 1
- %tmp2932 = getelementptr inbounds float* %tmp2931, i64 1
- %tmp2933 = getelementptr inbounds float* %tmp2932, i64 1
- %tmp2934 = getelementptr inbounds float* %tmp2933, i64 1
- %tmp2935 = getelementptr inbounds float* %tmp2934, i64 1
- %tmp2936 = getelementptr inbounds float* %tmp2935, i64 1
- %tmp2937 = getelementptr inbounds float* %tmp2936, i64 1
- %tmp2938 = getelementptr inbounds float* %tmp2937, i64 1
- %tmp2939 = getelementptr inbounds float* %tmp2938, i64 1
- %tmp2940 = getelementptr inbounds float* %tmp2939, i64 1
- %tmp2941 = getelementptr inbounds float* %tmp2940, i64 1
- %tmp2942 = getelementptr inbounds float* %tmp2941, i64 1
- %tmp2943 = getelementptr inbounds float* %tmp2942, i64 1
- %tmp2944 = getelementptr inbounds float* %tmp2943, i64 1
- %tmp2945 = getelementptr inbounds float* %tmp2944, i64 1
- %tmp2946 = getelementptr inbounds float* %tmp2945, i64 1
- %tmp2947 = getelementptr inbounds float* %tmp2946, i64 1
- %tmp2948 = getelementptr inbounds float* %tmp2947, i64 1
- %tmp2949 = getelementptr inbounds float* %tmp2948, i64 1
- %tmp2950 = getelementptr inbounds float* %tmp2949, i64 1
- %tmp2951 = getelementptr inbounds float* %tmp2950, i64 1
- %tmp2952 = getelementptr inbounds float* %tmp2951, i64 1
- %tmp2953 = getelementptr inbounds float* %tmp2952, i64 1
- %tmp2954 = getelementptr inbounds float* %tmp2953, i64 1
- %tmp2955 = getelementptr inbounds float* %tmp2954, i64 1
- %tmp2956 = getelementptr inbounds float* %tmp2955, i64 1
- %tmp2957 = getelementptr inbounds float* %tmp2956, i64 1
- %tmp2958 = getelementptr inbounds float* %tmp2957, i64 1
- %tmp2959 = getelementptr inbounds float* %tmp2958, i64 1
- %tmp2960 = getelementptr inbounds float* %tmp2959, i64 1
- %tmp2961 = getelementptr inbounds float* %tmp2960, i64 1
- %tmp2962 = getelementptr inbounds float* %tmp2961, i64 1
- %tmp2963 = getelementptr inbounds float* %tmp2962, i64 1
- %tmp2964 = getelementptr inbounds float* %tmp2963, i64 1
- %tmp2965 = getelementptr inbounds float* %tmp2964, i64 1
- %tmp2966 = getelementptr inbounds float* %tmp2965, i64 1
- %tmp2967 = getelementptr inbounds float* %tmp2966, i64 1
- %tmp2968 = getelementptr inbounds float* %tmp2967, i64 1
- %tmp2969 = getelementptr inbounds float* %tmp2968, i64 1
- %tmp2970 = getelementptr inbounds float* %tmp2969, i64 1
- %tmp2971 = getelementptr inbounds float* %tmp2970, i64 1
- %tmp2972 = getelementptr inbounds float* %tmp2971, i64 1
- %tmp2973 = getelementptr inbounds float* %tmp2972, i64 1
- %tmp2974 = getelementptr inbounds float* %tmp2973, i64 1
- %tmp2975 = getelementptr inbounds float* %tmp2974, i64 1
- %tmp2976 = getelementptr inbounds float* %tmp2975, i64 1
- %tmp2977 = getelementptr inbounds float* %tmp2976, i64 1
- %tmp2978 = getelementptr inbounds float* %tmp2977, i64 1
- %tmp2979 = getelementptr inbounds float* %tmp2978, i64 1
- %tmp2980 = getelementptr inbounds float* %tmp2979, i64 1
- %tmp2981 = getelementptr inbounds float* %tmp2980, i64 1
- %tmp2982 = getelementptr inbounds float* %tmp2981, i64 1
- %tmp2983 = getelementptr inbounds float* %tmp2982, i64 1
- %tmp2984 = getelementptr inbounds float* %tmp2983, i64 1
- %tmp2985 = getelementptr inbounds float* %tmp2984, i64 1
- %tmp2986 = getelementptr inbounds float* %tmp2985, i64 1
- %tmp2987 = getelementptr inbounds float* %tmp2986, i64 1
- %tmp2988 = getelementptr inbounds float* %tmp2987, i64 1
- %tmp2989 = getelementptr inbounds float* %tmp2988, i64 1
- %tmp2990 = getelementptr inbounds float* %tmp2989, i64 1
- %tmp2991 = getelementptr inbounds float* %tmp2990, i64 1
- %tmp2992 = getelementptr inbounds float* %tmp2991, i64 1
- %tmp2993 = getelementptr inbounds float* %tmp2992, i64 1
- %tmp2994 = getelementptr inbounds float* %tmp2993, i64 1
- %tmp2995 = getelementptr inbounds float* %tmp2994, i64 1
- %tmp2996 = getelementptr inbounds float* %tmp2995, i64 1
- %tmp2997 = getelementptr inbounds float* %tmp2996, i64 1
- %tmp2998 = getelementptr inbounds float* %tmp2997, i64 1
- %tmp2999 = getelementptr inbounds float* %tmp2998, i64 1
- %tmp3000 = getelementptr inbounds float* %tmp2999, i64 1
- %tmp3001 = getelementptr inbounds float* %tmp3000, i64 1
- %tmp3002 = getelementptr inbounds float* %tmp3001, i64 1
- %tmp3003 = getelementptr inbounds float* %tmp3002, i64 1
- %tmp3004 = getelementptr inbounds float* %tmp3003, i64 1
- %tmp3005 = getelementptr inbounds float* %tmp3004, i64 1
- %tmp3006 = getelementptr inbounds float* %tmp3005, i64 1
- %tmp3007 = getelementptr inbounds float* %tmp3006, i64 1
- %tmp3008 = getelementptr inbounds float* %tmp3007, i64 1
- %tmp3009 = getelementptr inbounds float* %tmp3008, i64 1
- %tmp3010 = getelementptr inbounds float* %tmp3009, i64 1
- %tmp3011 = getelementptr inbounds float* %tmp3010, i64 1
- %tmp3012 = getelementptr inbounds float* %tmp3011, i64 1
- %tmp3013 = getelementptr inbounds float* %tmp3012, i64 1
- %tmp3014 = getelementptr inbounds float* %tmp3013, i64 1
- %tmp3015 = getelementptr inbounds float* %tmp3014, i64 1
- %tmp3016 = getelementptr inbounds float* %tmp3015, i64 1
- %tmp3017 = getelementptr inbounds float* %tmp3016, i64 1
- %tmp3018 = getelementptr inbounds float* %tmp3017, i64 1
- %tmp3019 = getelementptr inbounds float* %tmp3018, i64 1
- %tmp3020 = getelementptr inbounds float* %tmp3019, i64 1
- %tmp3021 = getelementptr inbounds float* %tmp3020, i64 1
- %tmp3022 = getelementptr inbounds float* %tmp3021, i64 1
- %tmp3023 = getelementptr inbounds float* %tmp3022, i64 1
- %tmp3024 = getelementptr inbounds float* %tmp3023, i64 1
- %tmp3025 = getelementptr inbounds float* %tmp3024, i64 1
- %tmp3026 = getelementptr inbounds float* %tmp3025, i64 1
- %tmp3027 = getelementptr inbounds float* %tmp3026, i64 1
- %tmp3028 = getelementptr inbounds float* %tmp3027, i64 1
- %tmp3029 = getelementptr inbounds float* %tmp3028, i64 1
- %tmp3030 = getelementptr inbounds float* %tmp3029, i64 1
- %tmp3031 = getelementptr inbounds float* %tmp3030, i64 1
- %tmp3032 = getelementptr inbounds float* %tmp3031, i64 1
- %tmp3033 = getelementptr inbounds float* %tmp3032, i64 1
- %tmp3034 = getelementptr inbounds float* %tmp3033, i64 1
- %tmp3035 = getelementptr inbounds float* %tmp3034, i64 1
- %tmp3036 = getelementptr inbounds float* %tmp3035, i64 1
- %tmp3037 = getelementptr inbounds float* %tmp3036, i64 1
- %tmp3038 = getelementptr inbounds float* %tmp3037, i64 1
- %tmp3039 = getelementptr inbounds float* %tmp3038, i64 1
- %tmp3040 = getelementptr inbounds float* %tmp3039, i64 1
- %tmp3041 = getelementptr inbounds float* %tmp3040, i64 1
- %tmp3042 = getelementptr inbounds float* %tmp3041, i64 1
- %tmp3043 = getelementptr inbounds float* %tmp3042, i64 1
- %tmp3044 = getelementptr inbounds float* %tmp3043, i64 1
- %tmp3045 = getelementptr inbounds float* %tmp3044, i64 1
- %tmp3046 = getelementptr inbounds float* %tmp3045, i64 1
- %tmp3047 = getelementptr inbounds float* %tmp3046, i64 1
- %tmp3048 = getelementptr inbounds float* %tmp3047, i64 1
- %tmp3049 = getelementptr inbounds float* %tmp3048, i64 1
- %tmp3050 = getelementptr inbounds float* %tmp3049, i64 1
- %tmp3051 = getelementptr inbounds float* %tmp3050, i64 1
- %tmp3052 = getelementptr inbounds float* %tmp3051, i64 1
- %tmp3053 = getelementptr inbounds float* %tmp3052, i64 1
- %tmp3054 = getelementptr inbounds float* %tmp3053, i64 1
- %tmp3055 = getelementptr inbounds float* %tmp3054, i64 1
- %tmp3056 = getelementptr inbounds float* %tmp3055, i64 1
- %tmp3057 = getelementptr inbounds float* %tmp3056, i64 1
- %tmp3058 = getelementptr inbounds float* %tmp3057, i64 1
- %tmp3059 = getelementptr inbounds float* %tmp3058, i64 1
- %tmp3060 = getelementptr inbounds float* %tmp3059, i64 1
- %tmp3061 = getelementptr inbounds float* %tmp3060, i64 1
- %tmp3062 = getelementptr inbounds float* %tmp3061, i64 1
- %tmp3063 = getelementptr inbounds float* %tmp3062, i64 1
- %tmp3064 = getelementptr inbounds float* %tmp3063, i64 1
- %tmp3065 = getelementptr inbounds float* %tmp3064, i64 1
- %tmp3066 = getelementptr inbounds float* %tmp3065, i64 1
- %tmp3067 = getelementptr inbounds float* %tmp3066, i64 1
- %tmp3068 = getelementptr inbounds float* %tmp3067, i64 1
- %tmp3069 = getelementptr inbounds float* %tmp3068, i64 1
- %tmp3070 = getelementptr inbounds float* %tmp3069, i64 1
- %tmp3071 = getelementptr inbounds float* %tmp3070, i64 1
- %tmp3072 = getelementptr inbounds float* %tmp3071, i64 1
- %tmp3073 = getelementptr inbounds float* %tmp3072, i64 1
- %tmp3074 = getelementptr inbounds float* %tmp3073, i64 1
- %tmp3075 = getelementptr inbounds float* %tmp3074, i64 1
- %tmp3076 = getelementptr inbounds float* %tmp3075, i64 1
- %tmp3077 = getelementptr inbounds float* %tmp3076, i64 1
- %tmp3078 = getelementptr inbounds float* %tmp3077, i64 1
- %tmp3079 = getelementptr inbounds float* %tmp3078, i64 1
- %tmp3080 = getelementptr inbounds float* %tmp3079, i64 1
- %tmp3081 = getelementptr inbounds float* %tmp3080, i64 1
- %tmp3082 = getelementptr inbounds float* %tmp3081, i64 1
- %tmp3083 = getelementptr inbounds float* %tmp3082, i64 1
- %tmp3084 = getelementptr inbounds float* %tmp3083, i64 1
- %tmp3085 = getelementptr inbounds float* %tmp3084, i64 1
- %tmp3086 = getelementptr inbounds float* %tmp3085, i64 1
- %tmp3087 = getelementptr inbounds float* %tmp3086, i64 1
- %tmp3088 = getelementptr inbounds float* %tmp3087, i64 1
- %tmp3089 = getelementptr inbounds float* %tmp3088, i64 1
- %tmp3090 = getelementptr inbounds float* %tmp3089, i64 1
- %tmp3091 = getelementptr inbounds float* %tmp3090, i64 1
- %tmp3092 = getelementptr inbounds float* %tmp3091, i64 1
- %tmp3093 = getelementptr inbounds float* %tmp3092, i64 1
- %tmp3094 = getelementptr inbounds float* %tmp3093, i64 1
- %tmp3095 = getelementptr inbounds float* %tmp3094, i64 1
- %tmp3096 = getelementptr inbounds float* %tmp3095, i64 1
- %tmp3097 = getelementptr inbounds float* %tmp3096, i64 1
- %tmp3098 = getelementptr inbounds float* %tmp3097, i64 1
- %tmp3099 = getelementptr inbounds float* %tmp3098, i64 1
- %tmp3100 = getelementptr inbounds float* %tmp3099, i64 1
- %tmp3101 = getelementptr inbounds float* %tmp3100, i64 1
- %tmp3102 = getelementptr inbounds float* %tmp3101, i64 1
- %tmp3103 = getelementptr inbounds float* %tmp3102, i64 1
- %tmp3104 = getelementptr inbounds float* %tmp3103, i64 1
- %tmp3105 = getelementptr inbounds float* %tmp3104, i64 1
- %tmp3106 = getelementptr inbounds float* %tmp3105, i64 1
- %tmp3107 = getelementptr inbounds float* %tmp3106, i64 1
- %tmp3108 = getelementptr inbounds float* %tmp3107, i64 1
- %tmp3109 = getelementptr inbounds float* %tmp3108, i64 1
- %tmp3110 = getelementptr inbounds float* %tmp3109, i64 1
- %tmp3111 = getelementptr inbounds float* %tmp3110, i64 1
- %tmp3112 = getelementptr inbounds float* %tmp3111, i64 1
- %tmp3113 = getelementptr inbounds float* %tmp3112, i64 1
- %tmp3114 = getelementptr inbounds float* %tmp3113, i64 1
- %tmp3115 = getelementptr inbounds float* %tmp3114, i64 1
- %tmp3116 = getelementptr inbounds float* %tmp3115, i64 1
- %tmp3117 = getelementptr inbounds float* %tmp3116, i64 1
- %tmp3118 = getelementptr inbounds float* %tmp3117, i64 1
- %tmp3119 = getelementptr inbounds float* %tmp3118, i64 1
- %tmp3120 = getelementptr inbounds float* %tmp3119, i64 1
- %tmp3121 = getelementptr inbounds float* %tmp3120, i64 1
- %tmp3122 = getelementptr inbounds float* %tmp3121, i64 1
- %tmp3123 = getelementptr inbounds float* %tmp3122, i64 1
- %tmp3124 = getelementptr inbounds float* %tmp3123, i64 1
- %tmp3125 = getelementptr inbounds float* %tmp3124, i64 1
- %tmp3126 = getelementptr inbounds float* %tmp3125, i64 1
- %tmp3127 = getelementptr inbounds float* %tmp3126, i64 1
- %tmp3128 = getelementptr inbounds float* %tmp3127, i64 1
- %tmp3129 = getelementptr inbounds float* %tmp3128, i64 1
- %tmp3130 = getelementptr inbounds float* %tmp3129, i64 1
- %tmp3131 = getelementptr inbounds float* %tmp3130, i64 1
- %tmp3132 = getelementptr inbounds float* %tmp3131, i64 1
- %tmp3133 = getelementptr inbounds float* %tmp3132, i64 1
- %tmp3134 = getelementptr inbounds float* %tmp3133, i64 1
- %tmp3135 = getelementptr inbounds float* %tmp3134, i64 1
- %tmp3136 = getelementptr inbounds float* %tmp3135, i64 1
- %tmp3137 = getelementptr inbounds float* %tmp3136, i64 1
- %tmp3138 = getelementptr inbounds float* %tmp3137, i64 1
- %tmp3139 = getelementptr inbounds float* %tmp3138, i64 1
- %tmp3140 = getelementptr inbounds float* %tmp3139, i64 1
- %tmp3141 = getelementptr inbounds float* %tmp3140, i64 1
- %tmp3142 = getelementptr inbounds float* %tmp3141, i64 1
- %tmp3143 = getelementptr inbounds float* %tmp3142, i64 1
- %tmp3144 = getelementptr inbounds float* %tmp3143, i64 1
- %tmp3145 = getelementptr inbounds float* %tmp3144, i64 1
- %tmp3146 = getelementptr inbounds float* %tmp3145, i64 1
- %tmp3147 = getelementptr inbounds float* %tmp3146, i64 1
- %tmp3148 = getelementptr inbounds float* %tmp3147, i64 1
- %tmp3149 = getelementptr inbounds float* %tmp3148, i64 1
- %tmp3150 = getelementptr inbounds float* %tmp3149, i64 1
- %tmp3151 = getelementptr inbounds float* %tmp3150, i64 1
- %tmp3152 = getelementptr inbounds float* %tmp3151, i64 1
- %tmp3153 = getelementptr inbounds float* %tmp3152, i64 1
- %tmp3154 = getelementptr inbounds float* %tmp3153, i64 1
- %tmp3155 = getelementptr inbounds float* %tmp3154, i64 1
- %tmp3156 = getelementptr inbounds float* %tmp3155, i64 1
- %tmp3157 = getelementptr inbounds float* %tmp3156, i64 1
- %tmp3158 = getelementptr inbounds float* %tmp3157, i64 1
- %tmp3159 = getelementptr inbounds float* %tmp3158, i64 1
- %tmp3160 = getelementptr inbounds float* %tmp3159, i64 1
- %tmp3161 = getelementptr inbounds float* %tmp3160, i64 1
- %tmp3162 = getelementptr inbounds float* %tmp3161, i64 1
- %tmp3163 = getelementptr inbounds float* %tmp3162, i64 1
- %tmp3164 = getelementptr inbounds float* %tmp3163, i64 1
- %tmp3165 = getelementptr inbounds float* %tmp3164, i64 1
- %tmp3166 = getelementptr inbounds float* %tmp3165, i64 1
- %tmp3167 = getelementptr inbounds float* %tmp3166, i64 1
- %tmp3168 = getelementptr inbounds float* %tmp3167, i64 1
- %tmp3169 = getelementptr inbounds float* %tmp3168, i64 1
- %tmp3170 = getelementptr inbounds float* %tmp3169, i64 1
- %tmp3171 = getelementptr inbounds float* %tmp3170, i64 1
- %tmp3172 = getelementptr inbounds float* %tmp3171, i64 1
- %tmp3173 = getelementptr inbounds float* %tmp3172, i64 1
- %tmp3174 = getelementptr inbounds float* %tmp3173, i64 1
- %tmp3175 = getelementptr inbounds float* %tmp3174, i64 1
- %tmp3176 = getelementptr inbounds float* %tmp3175, i64 1
- %tmp3177 = getelementptr inbounds float* %tmp3176, i64 1
- %tmp3178 = getelementptr inbounds float* %tmp3177, i64 1
- %tmp3179 = getelementptr inbounds float* %tmp3178, i64 1
- %tmp3180 = getelementptr inbounds float* %tmp3179, i64 1
- %tmp3181 = getelementptr inbounds float* %tmp3180, i64 1
- %tmp3182 = getelementptr inbounds float* %tmp3181, i64 1
- %tmp3183 = getelementptr inbounds float* %tmp3182, i64 1
- %tmp3184 = getelementptr inbounds float* %tmp3183, i64 1
- %tmp3185 = getelementptr inbounds float* %tmp3184, i64 1
- %tmp3186 = getelementptr inbounds float* %tmp3185, i64 1
- %tmp3187 = getelementptr inbounds float* %tmp3186, i64 1
- %tmp3188 = getelementptr inbounds float* %tmp3187, i64 1
- %tmp3189 = getelementptr inbounds float* %tmp3188, i64 1
- %tmp3190 = getelementptr inbounds float* %tmp3189, i64 1
- %tmp3191 = getelementptr inbounds float* %tmp3190, i64 1
- %tmp3192 = getelementptr inbounds float* %tmp3191, i64 1
- %tmp3193 = getelementptr inbounds float* %tmp3192, i64 1
- %tmp3194 = getelementptr inbounds float* %tmp3193, i64 1
- %tmp3195 = getelementptr inbounds float* %tmp3194, i64 1
- %tmp3196 = getelementptr inbounds float* %tmp3195, i64 1
- %tmp3197 = getelementptr inbounds float* %tmp3196, i64 1
- %tmp3198 = getelementptr inbounds float* %tmp3197, i64 1
- %tmp3199 = getelementptr inbounds float* %tmp3198, i64 1
- %tmp3200 = getelementptr inbounds float* %tmp3199, i64 1
- %tmp3201 = getelementptr inbounds float* %tmp3200, i64 1
- %tmp3202 = getelementptr inbounds float* %tmp3201, i64 1
- %tmp3203 = getelementptr inbounds float* %tmp3202, i64 1
- %tmp3204 = getelementptr inbounds float* %tmp3203, i64 1
- %tmp3205 = getelementptr inbounds float* %tmp3204, i64 1
- %tmp3206 = getelementptr inbounds float* %tmp3205, i64 1
- %tmp3207 = getelementptr inbounds float* %tmp3206, i64 1
- %tmp3208 = getelementptr inbounds float* %tmp3207, i64 1
- %tmp3209 = getelementptr inbounds float* %tmp3208, i64 1
- %tmp3210 = getelementptr inbounds float* %tmp3209, i64 1
- %tmp3211 = getelementptr inbounds float* %tmp3210, i64 1
- %tmp3212 = getelementptr inbounds float* %tmp3211, i64 1
- %tmp3213 = getelementptr inbounds float* %tmp3212, i64 1
- %tmp3214 = getelementptr inbounds float* %tmp3213, i64 1
- %tmp3215 = getelementptr inbounds float* %tmp3214, i64 1
- %tmp3216 = getelementptr inbounds float* %tmp3215, i64 1
- %tmp3217 = getelementptr inbounds float* %tmp3216, i64 1
- %tmp3218 = getelementptr inbounds float* %tmp3217, i64 1
- %tmp3219 = getelementptr inbounds float* %tmp3218, i64 1
- %tmp3220 = getelementptr inbounds float* %tmp3219, i64 1
- %tmp3221 = getelementptr inbounds float* %tmp3220, i64 1
- %tmp3222 = getelementptr inbounds float* %tmp3221, i64 1
- %tmp3223 = getelementptr inbounds float* %tmp3222, i64 1
- %tmp3224 = getelementptr inbounds float* %tmp3223, i64 1
- %tmp3225 = getelementptr inbounds float* %tmp3224, i64 1
- %tmp3226 = getelementptr inbounds float* %tmp3225, i64 1
- %tmp3227 = getelementptr inbounds float* %tmp3226, i64 1
- %tmp3228 = getelementptr inbounds float* %tmp3227, i64 1
- %tmp3229 = getelementptr inbounds float* %tmp3228, i64 1
- %tmp3230 = getelementptr inbounds float* %tmp3229, i64 1
- %tmp3231 = getelementptr inbounds float* %tmp3230, i64 1
- %tmp3232 = getelementptr inbounds float* %tmp3231, i64 1
- %tmp3233 = getelementptr inbounds float* %tmp3232, i64 1
- %tmp3234 = getelementptr inbounds float* %tmp3233, i64 1
- %tmp3235 = getelementptr inbounds float* %tmp3234, i64 1
- %tmp3236 = getelementptr inbounds float* %tmp3235, i64 1
- %tmp3237 = getelementptr inbounds float* %tmp3236, i64 1
- %tmp3238 = getelementptr inbounds float* %tmp3237, i64 1
- %tmp3239 = getelementptr inbounds float* %tmp3238, i64 1
- %tmp3240 = getelementptr inbounds float* %tmp3239, i64 1
- %tmp3241 = getelementptr inbounds float* %tmp3240, i64 1
- %tmp3242 = getelementptr inbounds float* %tmp3241, i64 1
- %tmp3243 = getelementptr inbounds float* %tmp3242, i64 1
- %tmp3244 = getelementptr inbounds float* %tmp3243, i64 1
- %tmp3245 = getelementptr inbounds float* %tmp3244, i64 1
- %tmp3246 = getelementptr inbounds float* %tmp3245, i64 1
- %tmp3247 = getelementptr inbounds float* %tmp3246, i64 1
- %tmp3248 = getelementptr inbounds float* %tmp3247, i64 1
- %tmp3249 = getelementptr inbounds float* %tmp3248, i64 1
- %tmp3250 = getelementptr inbounds float* %tmp3249, i64 1
- %tmp3251 = getelementptr inbounds float* %tmp3250, i64 1
- %tmp3252 = getelementptr inbounds float* %tmp3251, i64 1
- %tmp3253 = getelementptr inbounds float* %tmp3252, i64 1
- %tmp3254 = getelementptr inbounds float* %tmp3253, i64 1
- %tmp3255 = getelementptr inbounds float* %tmp3254, i64 1
- %tmp3256 = getelementptr inbounds float* %tmp3255, i64 1
- %tmp3257 = getelementptr inbounds float* %tmp3256, i64 1
- %tmp3258 = getelementptr inbounds float* %tmp3257, i64 1
- %tmp3259 = getelementptr inbounds float* %tmp3258, i64 1
- %tmp3260 = getelementptr inbounds float* %tmp3259, i64 1
- %tmp3261 = getelementptr inbounds float* %tmp3260, i64 1
- %tmp3262 = getelementptr inbounds float* %tmp3261, i64 1
- %tmp3263 = getelementptr inbounds float* %tmp3262, i64 1
- %tmp3264 = getelementptr inbounds float* %tmp3263, i64 1
- %tmp3265 = getelementptr inbounds float* %tmp3264, i64 1
- %tmp3266 = getelementptr inbounds float* %tmp3265, i64 1
- %tmp3267 = getelementptr inbounds float* %tmp3266, i64 1
- %tmp3268 = getelementptr inbounds float* %tmp3267, i64 1
- %tmp3269 = getelementptr inbounds float* %tmp3268, i64 1
- %tmp3270 = getelementptr inbounds float* %tmp3269, i64 1
- %tmp3271 = getelementptr inbounds float* %tmp3270, i64 1
- %tmp3272 = getelementptr inbounds float* %tmp3271, i64 1
- %tmp3273 = getelementptr inbounds float* %tmp3272, i64 1
- %tmp3274 = getelementptr inbounds float* %tmp3273, i64 1
- %tmp3275 = getelementptr inbounds float* %tmp3274, i64 1
- %tmp3276 = getelementptr inbounds float* %tmp3275, i64 1
- %tmp3277 = getelementptr inbounds float* %tmp3276, i64 1
- %tmp3278 = getelementptr inbounds float* %tmp3277, i64 1
- %tmp3279 = getelementptr inbounds float* %tmp3278, i64 1
- %tmp3280 = getelementptr inbounds float* %tmp3279, i64 1
- %tmp3281 = getelementptr inbounds float* %tmp3280, i64 1
- %tmp3282 = getelementptr inbounds float* %tmp3281, i64 1
- %tmp3283 = getelementptr inbounds float* %tmp3282, i64 1
- %tmp3284 = getelementptr inbounds float* %tmp3283, i64 1
- %tmp3285 = getelementptr inbounds float* %tmp3284, i64 1
- %tmp3286 = getelementptr inbounds float* %tmp3285, i64 1
- %tmp3287 = getelementptr inbounds float* %tmp3286, i64 1
- %tmp3288 = getelementptr inbounds float* %tmp3287, i64 1
- %tmp3289 = getelementptr inbounds float* %tmp3288, i64 1
- %tmp3290 = getelementptr inbounds float* %tmp3289, i64 1
- %tmp3291 = getelementptr inbounds float* %tmp3290, i64 1
- %tmp3292 = getelementptr inbounds float* %tmp3291, i64 1
- %tmp3293 = getelementptr inbounds float* %tmp3292, i64 1
- %tmp3294 = getelementptr inbounds float* %tmp3293, i64 1
- %tmp3295 = getelementptr inbounds float* %tmp3294, i64 1
- %tmp3296 = getelementptr inbounds float* %tmp3295, i64 1
- %tmp3297 = getelementptr inbounds float* %tmp3296, i64 1
- %tmp3298 = getelementptr inbounds float* %tmp3297, i64 1
- %tmp3299 = getelementptr inbounds float* %tmp3298, i64 1
- %tmp3300 = getelementptr inbounds float* %tmp3299, i64 1
- %tmp3301 = getelementptr inbounds float* %tmp3300, i64 1
- %tmp3302 = getelementptr inbounds float* %tmp3301, i64 1
- %tmp3303 = getelementptr inbounds float* %tmp3302, i64 1
- %tmp3304 = getelementptr inbounds float* %tmp3303, i64 1
- %tmp3305 = getelementptr inbounds float* %tmp3304, i64 1
- %tmp3306 = getelementptr inbounds float* %tmp3305, i64 1
- %tmp3307 = getelementptr inbounds float* %tmp3306, i64 1
- %tmp3308 = getelementptr inbounds float* %tmp3307, i64 1
- %tmp3309 = getelementptr inbounds float* %tmp3308, i64 1
- %tmp3310 = getelementptr inbounds float* %tmp3309, i64 1
- %tmp3311 = getelementptr inbounds float* %tmp3310, i64 1
- %tmp3312 = getelementptr inbounds float* %tmp3311, i64 1
- %tmp3313 = getelementptr inbounds float* %tmp3312, i64 1
- %tmp3314 = getelementptr inbounds float* %tmp3313, i64 1
- %tmp3315 = getelementptr inbounds float* %tmp3314, i64 1
- %tmp3316 = getelementptr inbounds float* %tmp3315, i64 1
- %tmp3317 = getelementptr inbounds float* %tmp3316, i64 1
- %tmp3318 = getelementptr inbounds float* %tmp3317, i64 1
- %tmp3319 = getelementptr inbounds float* %tmp3318, i64 1
- %tmp3320 = getelementptr inbounds float* %tmp3319, i64 1
- %tmp3321 = getelementptr inbounds float* %tmp3320, i64 1
- %tmp3322 = getelementptr inbounds float* %tmp3321, i64 1
- %tmp3323 = getelementptr inbounds float* %tmp3322, i64 1
- %tmp3324 = getelementptr inbounds float* %tmp3323, i64 1
- %tmp3325 = getelementptr inbounds float* %tmp3324, i64 1
- %tmp3326 = getelementptr inbounds float* %tmp3325, i64 1
- %tmp3327 = getelementptr inbounds float* %tmp3326, i64 1
- %tmp3328 = getelementptr inbounds float* %tmp3327, i64 1
- %tmp3329 = getelementptr inbounds float* %tmp3328, i64 1
- %tmp3330 = getelementptr inbounds float* %tmp3329, i64 1
- %tmp3331 = getelementptr inbounds float* %tmp3330, i64 1
- %tmp3332 = getelementptr inbounds float* %tmp3331, i64 1
- %tmp3333 = getelementptr inbounds float* %tmp3332, i64 1
- %tmp3334 = getelementptr inbounds float* %tmp3333, i64 1
- %tmp3335 = getelementptr inbounds float* %tmp3334, i64 1
- %tmp3336 = getelementptr inbounds float* %tmp3335, i64 1
- %tmp3337 = getelementptr inbounds float* %tmp3336, i64 1
- %tmp3338 = getelementptr inbounds float* %tmp3337, i64 1
- %tmp3339 = getelementptr inbounds float* %tmp3338, i64 1
- %tmp3340 = getelementptr inbounds float* %tmp3339, i64 1
- %tmp3341 = getelementptr inbounds float* %tmp3340, i64 1
- %tmp3342 = getelementptr inbounds float* %tmp3341, i64 1
- %tmp3343 = getelementptr inbounds float* %tmp3342, i64 1
- %tmp3344 = getelementptr inbounds float* %tmp3343, i64 1
- %tmp3345 = getelementptr inbounds float* %tmp3344, i64 1
- %tmp3346 = getelementptr inbounds float* %tmp3345, i64 1
- %tmp3347 = getelementptr inbounds float* %tmp3346, i64 1
- %tmp3348 = getelementptr inbounds float* %tmp3347, i64 1
- %tmp3349 = getelementptr inbounds float* %tmp3348, i64 1
- %tmp3350 = getelementptr inbounds float* %tmp3349, i64 1
- %tmp3351 = getelementptr inbounds float* %tmp3350, i64 1
- %tmp3352 = getelementptr inbounds float* %tmp3351, i64 1
- %tmp3353 = getelementptr inbounds float* %tmp3352, i64 1
- %tmp3354 = getelementptr inbounds float* %tmp3353, i64 1
- %tmp3355 = getelementptr inbounds float* %tmp3354, i64 1
- %tmp3356 = getelementptr inbounds float* %tmp3355, i64 1
- %tmp3357 = getelementptr inbounds float* %tmp3356, i64 1
- %tmp3358 = getelementptr inbounds float* %tmp3357, i64 1
- %tmp3359 = getelementptr inbounds float* %tmp3358, i64 1
- %tmp3360 = getelementptr inbounds float* %tmp3359, i64 1
- %tmp3361 = getelementptr inbounds float* %tmp3360, i64 1
- %tmp3362 = getelementptr inbounds float* %tmp3361, i64 1
- %tmp3363 = getelementptr inbounds float* %tmp3362, i64 1
- %tmp3364 = getelementptr inbounds float* %tmp3363, i64 1
- %tmp3365 = getelementptr inbounds float* %tmp3364, i64 1
- %tmp3366 = getelementptr inbounds float* %tmp3365, i64 1
- %tmp3367 = getelementptr inbounds float* %tmp3366, i64 1
- %tmp3368 = getelementptr inbounds float* %tmp3367, i64 1
- %tmp3369 = getelementptr inbounds float* %tmp3368, i64 1
- %tmp3370 = getelementptr inbounds float* %tmp3369, i64 1
- %tmp3371 = getelementptr inbounds float* %tmp3370, i64 1
- %tmp3372 = getelementptr inbounds float* %tmp3371, i64 1
- %tmp3373 = getelementptr inbounds float* %tmp3372, i64 1
- %tmp3374 = getelementptr inbounds float* %tmp3373, i64 1
- %tmp3375 = getelementptr inbounds float* %tmp3374, i64 1
- %tmp3376 = getelementptr inbounds float* %tmp3375, i64 1
- %tmp3377 = getelementptr inbounds float* %tmp3376, i64 1
- %tmp3378 = getelementptr inbounds float* %tmp3377, i64 1
- %tmp3379 = getelementptr inbounds float* %tmp3378, i64 1
- %tmp3380 = getelementptr inbounds float* %tmp3379, i64 1
- %tmp3381 = getelementptr inbounds float* %tmp3380, i64 1
- %tmp3382 = getelementptr inbounds float* %tmp3381, i64 1
- %tmp3383 = getelementptr inbounds float* %tmp3382, i64 1
- %tmp3384 = getelementptr inbounds float* %tmp3383, i64 1
- %tmp3385 = getelementptr inbounds float* %tmp3384, i64 1
- %tmp3386 = getelementptr inbounds float* %tmp3385, i64 1
- %tmp3387 = getelementptr inbounds float* %tmp3386, i64 1
- %tmp3388 = getelementptr inbounds float* %tmp3387, i64 1
- %tmp3389 = getelementptr inbounds float* %tmp3388, i64 1
- %tmp3390 = getelementptr inbounds float* %tmp3389, i64 1
- %tmp3391 = getelementptr inbounds float* %tmp3390, i64 1
- %tmp3392 = getelementptr inbounds float* %tmp3391, i64 1
- %tmp3393 = getelementptr inbounds float* %tmp3392, i64 1
- %tmp3394 = getelementptr inbounds float* %tmp3393, i64 1
- %tmp3395 = getelementptr inbounds float* %tmp3394, i64 1
- %tmp3396 = getelementptr inbounds float* %tmp3395, i64 1
- %tmp3397 = getelementptr inbounds float* %tmp3396, i64 1
- %tmp3398 = getelementptr inbounds float* %tmp3397, i64 1
- %tmp3399 = getelementptr inbounds float* %tmp3398, i64 1
- %tmp3400 = getelementptr inbounds float* %tmp3399, i64 1
- %tmp3401 = getelementptr inbounds float* %tmp3400, i64 1
- %tmp3402 = getelementptr inbounds float* %tmp3401, i64 1
- %tmp3403 = getelementptr inbounds float* %tmp3402, i64 1
- %tmp3404 = getelementptr inbounds float* %tmp3403, i64 1
- %tmp3405 = getelementptr inbounds float* %tmp3404, i64 1
- %tmp3406 = getelementptr inbounds float* %tmp3405, i64 1
- %tmp3407 = getelementptr inbounds float* %tmp3406, i64 1
- %tmp3408 = getelementptr inbounds float* %tmp3407, i64 1
- %tmp3409 = getelementptr inbounds float* %tmp3408, i64 1
- %tmp3410 = getelementptr inbounds float* %tmp3409, i64 1
- %tmp3411 = getelementptr inbounds float* %tmp3410, i64 1
- %tmp3412 = getelementptr inbounds float* %tmp3411, i64 1
- %tmp3413 = getelementptr inbounds float* %tmp3412, i64 1
- %tmp3414 = getelementptr inbounds float* %tmp3413, i64 1
- %tmp3415 = getelementptr inbounds float* %tmp3414, i64 1
- %tmp3416 = getelementptr inbounds float* %tmp3415, i64 1
- %tmp3417 = getelementptr inbounds float* %tmp3416, i64 1
- %tmp3418 = getelementptr inbounds float* %tmp3417, i64 1
- %tmp3419 = getelementptr inbounds float* %tmp3418, i64 1
- %tmp3420 = getelementptr inbounds float* %tmp3419, i64 1
- %tmp3421 = getelementptr inbounds float* %tmp3420, i64 1
- %tmp3422 = getelementptr inbounds float* %tmp3421, i64 1
- %tmp3423 = getelementptr inbounds float* %tmp3422, i64 1
- %tmp3424 = getelementptr inbounds float* %tmp3423, i64 1
- %tmp3425 = getelementptr inbounds float* %tmp3424, i64 1
- %tmp3426 = getelementptr inbounds float* %tmp3425, i64 1
- %tmp3427 = getelementptr inbounds float* %tmp3426, i64 1
- %tmp3428 = getelementptr inbounds float* %tmp3427, i64 1
- %tmp3429 = getelementptr inbounds float* %tmp3428, i64 1
- %tmp3430 = getelementptr inbounds float* %tmp3429, i64 1
- %tmp3431 = getelementptr inbounds float* %tmp3430, i64 1
- %tmp3432 = getelementptr inbounds float* %tmp3431, i64 1
- %tmp3433 = getelementptr inbounds float* %tmp3432, i64 1
- %tmp3434 = getelementptr inbounds float* %tmp3433, i64 1
- %tmp3435 = getelementptr inbounds float* %tmp3434, i64 1
- %tmp3436 = getelementptr inbounds float* %tmp3435, i64 1
- %tmp3437 = getelementptr inbounds float* %tmp3436, i64 1
- %tmp3438 = getelementptr inbounds float* %tmp3437, i64 1
- %tmp3439 = getelementptr inbounds float* %tmp3438, i64 1
- %tmp3440 = getelementptr inbounds float* %tmp3439, i64 1
- %tmp3441 = getelementptr inbounds float* %tmp3440, i64 1
- %tmp3442 = getelementptr inbounds float* %tmp3441, i64 1
- %tmp3443 = getelementptr inbounds float* %tmp3442, i64 1
- %tmp3444 = getelementptr inbounds float* %tmp3443, i64 1
- %tmp3445 = getelementptr inbounds float* %tmp3444, i64 1
- %tmp3446 = getelementptr inbounds float* %tmp3445, i64 1
- %tmp3447 = getelementptr inbounds float* %tmp3446, i64 1
- %tmp3448 = getelementptr inbounds float* %tmp3447, i64 1
- %tmp3449 = getelementptr inbounds float* %tmp3448, i64 1
- %tmp3450 = getelementptr inbounds float* %tmp3449, i64 1
- %tmp3451 = getelementptr inbounds float* %tmp3450, i64 1
- %tmp3452 = getelementptr inbounds float* %tmp3451, i64 1
- %tmp3453 = getelementptr inbounds float* %tmp3452, i64 1
- %tmp3454 = getelementptr inbounds float* %tmp3453, i64 1
- %tmp3455 = getelementptr inbounds float* %tmp3454, i64 1
- %tmp3456 = getelementptr inbounds float* %tmp3455, i64 1
- %tmp3457 = getelementptr inbounds float* %tmp3456, i64 1
- %tmp3458 = getelementptr inbounds float* %tmp3457, i64 1
- %tmp3459 = getelementptr inbounds float* %tmp3458, i64 1
- %tmp3460 = getelementptr inbounds float* %tmp3459, i64 1
- %tmp3461 = getelementptr inbounds float* %tmp3460, i64 1
- %tmp3462 = getelementptr inbounds float* %tmp3461, i64 1
- %tmp3463 = getelementptr inbounds float* %tmp3462, i64 1
- %tmp3464 = getelementptr inbounds float* %tmp3463, i64 1
- %tmp3465 = getelementptr inbounds float* %tmp3464, i64 1
- %tmp3466 = getelementptr inbounds float* %tmp3465, i64 1
- %tmp3467 = getelementptr inbounds float* %tmp3466, i64 1
- %tmp3468 = getelementptr inbounds float* %tmp3467, i64 1
- %tmp3469 = getelementptr inbounds float* %tmp3468, i64 1
- %tmp3470 = getelementptr inbounds float* %tmp3469, i64 1
- %tmp3471 = getelementptr inbounds float* %tmp3470, i64 1
- %tmp3472 = getelementptr inbounds float* %tmp3471, i64 1
- %tmp3473 = getelementptr inbounds float* %tmp3472, i64 1
- %tmp3474 = getelementptr inbounds float* %tmp3473, i64 1
- %tmp3475 = getelementptr inbounds float* %tmp3474, i64 1
- %tmp3476 = getelementptr inbounds float* %tmp3475, i64 1
- %tmp3477 = getelementptr inbounds float* %tmp3476, i64 1
- %tmp3478 = getelementptr inbounds float* %tmp3477, i64 1
- %tmp3479 = getelementptr inbounds float* %tmp3478, i64 1
- %tmp3480 = getelementptr inbounds float* %tmp3479, i64 1
- %tmp3481 = getelementptr inbounds float* %tmp3480, i64 1
- %tmp3482 = getelementptr inbounds float* %tmp3481, i64 1
- %tmp3483 = getelementptr inbounds float* %tmp3482, i64 1
- %tmp3484 = getelementptr inbounds float* %tmp3483, i64 1
- %tmp3485 = getelementptr inbounds float* %tmp3484, i64 1
- %tmp3486 = getelementptr inbounds float* %tmp3485, i64 1
- %tmp3487 = getelementptr inbounds float* %tmp3486, i64 1
- %tmp3488 = getelementptr inbounds float* %tmp3487, i64 1
- %tmp3489 = getelementptr inbounds float* %tmp3488, i64 1
- %tmp3490 = getelementptr inbounds float* %tmp3489, i64 1
- %tmp3491 = getelementptr inbounds float* %tmp3490, i64 1
- %tmp3492 = getelementptr inbounds float* %tmp3491, i64 1
- %tmp3493 = getelementptr inbounds float* %tmp3492, i64 1
- %tmp3494 = getelementptr inbounds float* %tmp3493, i64 1
- %tmp3495 = getelementptr inbounds float* %tmp3494, i64 1
- %tmp3496 = getelementptr inbounds float* %tmp3495, i64 1
- %tmp3497 = getelementptr inbounds float* %tmp3496, i64 1
- %tmp3498 = getelementptr inbounds float* %tmp3497, i64 1
- %tmp3499 = getelementptr inbounds float* %tmp3498, i64 1
- %tmp3500 = getelementptr inbounds float* %tmp3499, i64 1
- %tmp3501 = getelementptr inbounds float* %tmp3500, i64 1
- %tmp3502 = getelementptr inbounds float* %tmp3501, i64 1
- %tmp3503 = getelementptr inbounds float* %tmp3502, i64 1
- %tmp3504 = getelementptr inbounds float* %tmp3503, i64 1
- %tmp3505 = getelementptr inbounds float* %tmp3504, i64 1
- %tmp3506 = getelementptr inbounds float* %tmp3505, i64 1
- %tmp3507 = getelementptr inbounds float* %tmp3506, i64 1
- %tmp3508 = getelementptr inbounds float* %tmp3507, i64 1
- %tmp3509 = getelementptr inbounds float* %tmp3508, i64 1
- %tmp3510 = getelementptr inbounds float* %tmp3509, i64 1
- %tmp3511 = getelementptr inbounds float* %tmp3510, i64 1
- %tmp3512 = getelementptr inbounds float* %tmp3511, i64 1
- %tmp3513 = getelementptr inbounds float* %tmp3512, i64 1
- %tmp3514 = getelementptr inbounds float* %tmp3513, i64 1
- %tmp3515 = getelementptr inbounds float* %tmp3514, i64 1
- %tmp3516 = getelementptr inbounds float* %tmp3515, i64 1
- %tmp3517 = getelementptr inbounds float* %tmp3516, i64 1
- %tmp3518 = getelementptr inbounds float* %tmp3517, i64 1
- %tmp3519 = getelementptr inbounds float* %tmp3518, i64 1
- %tmp3520 = getelementptr inbounds float* %tmp3519, i64 1
- %tmp3521 = getelementptr inbounds float* %tmp3520, i64 1
- %tmp3522 = getelementptr inbounds float* %tmp3521, i64 1
- %tmp3523 = getelementptr inbounds float* %tmp3522, i64 1
- %tmp3524 = getelementptr inbounds float* %tmp3523, i64 1
- %tmp3525 = getelementptr inbounds float* %tmp3524, i64 1
- %tmp3526 = getelementptr inbounds float* %tmp3525, i64 1
- %tmp3527 = getelementptr inbounds float* %tmp3526, i64 1
- %tmp3528 = getelementptr inbounds float* %tmp3527, i64 1
- %tmp3529 = getelementptr inbounds float* %tmp3528, i64 1
- %tmp3530 = getelementptr inbounds float* %tmp3529, i64 1
- %tmp3531 = getelementptr inbounds float* %tmp3530, i64 1
- %tmp3532 = getelementptr inbounds float* %tmp3531, i64 1
- %tmp3533 = getelementptr inbounds float* %tmp3532, i64 1
- %tmp3534 = getelementptr inbounds float* %tmp3533, i64 1
- %tmp3535 = getelementptr inbounds float* %tmp3534, i64 1
- %tmp3536 = getelementptr inbounds float* %tmp3535, i64 1
- %tmp3537 = getelementptr inbounds float* %tmp3536, i64 1
- %tmp3538 = getelementptr inbounds float* %tmp3537, i64 1
- %tmp3539 = getelementptr inbounds float* %tmp3538, i64 1
- %tmp3540 = getelementptr inbounds float* %tmp3539, i64 1
- %tmp3541 = getelementptr inbounds float* %tmp3540, i64 1
- %tmp3542 = getelementptr inbounds float* %tmp3541, i64 1
- %tmp3543 = getelementptr inbounds float* %tmp3542, i64 1
- %tmp3544 = getelementptr inbounds float* %tmp3543, i64 1
- %tmp3545 = getelementptr inbounds float* %tmp3544, i64 1
- %tmp3546 = getelementptr inbounds float* %tmp3545, i64 1
- %tmp3547 = getelementptr inbounds float* %tmp3546, i64 1
- %tmp3548 = getelementptr inbounds float* %tmp3547, i64 1
- %tmp3549 = getelementptr inbounds float* %tmp3548, i64 1
- %tmp3550 = getelementptr inbounds float* %tmp3549, i64 1
- %tmp3551 = getelementptr inbounds float* %tmp3550, i64 1
- %tmp3552 = getelementptr inbounds float* %tmp3551, i64 1
- %tmp3553 = getelementptr inbounds float* %tmp3552, i64 1
- %tmp3554 = getelementptr inbounds float* %tmp3553, i64 1
- %tmp3555 = getelementptr inbounds float* %tmp3554, i64 1
- %tmp3556 = getelementptr inbounds float* %tmp3555, i64 1
- %tmp3557 = getelementptr inbounds float* %tmp3556, i64 1
- %tmp3558 = getelementptr inbounds float* %tmp3557, i64 1
- %tmp3559 = getelementptr inbounds float* %tmp3558, i64 1
- %tmp3560 = getelementptr inbounds float* %tmp3559, i64 1
- %tmp3561 = getelementptr inbounds float* %tmp3560, i64 1
- %tmp3562 = getelementptr inbounds float* %tmp3561, i64 1
- %tmp3563 = getelementptr inbounds float* %tmp3562, i64 1
- %tmp3564 = getelementptr inbounds float* %tmp3563, i64 1
- %tmp3565 = getelementptr inbounds float* %tmp3564, i64 1
- %tmp3566 = getelementptr inbounds float* %tmp3565, i64 1
- %tmp3567 = getelementptr inbounds float* %tmp3566, i64 1
- %tmp3568 = getelementptr inbounds float* %tmp3567, i64 1
- %tmp3569 = getelementptr inbounds float* %tmp3568, i64 1
- %tmp3570 = getelementptr inbounds float* %tmp3569, i64 1
- %tmp3571 = getelementptr inbounds float* %tmp3570, i64 1
- %tmp3572 = getelementptr inbounds float* %tmp3571, i64 1
- %tmp3573 = getelementptr inbounds float* %tmp3572, i64 1
- %tmp3574 = getelementptr inbounds float* %tmp3573, i64 1
- %tmp3575 = getelementptr inbounds float* %tmp3574, i64 1
- %tmp3576 = getelementptr inbounds float* %tmp3575, i64 1
- %tmp3577 = getelementptr inbounds float* %tmp3576, i64 1
- %tmp3578 = getelementptr inbounds float* %tmp3577, i64 1
- %tmp3579 = getelementptr inbounds float* %tmp3578, i64 1
- %tmp3580 = getelementptr inbounds float* %tmp3579, i64 1
- %tmp3581 = getelementptr inbounds float* %tmp3580, i64 1
- %tmp3582 = getelementptr inbounds float* %tmp3581, i64 1
- %tmp3583 = getelementptr inbounds float* %tmp3582, i64 1
- %tmp3584 = getelementptr inbounds float* %tmp3583, i64 1
- %tmp3585 = getelementptr inbounds float* %tmp3584, i64 1
- %tmp3586 = getelementptr inbounds float* %tmp3585, i64 1
- %tmp3587 = getelementptr inbounds float* %tmp3586, i64 1
- %tmp3588 = getelementptr inbounds float* %tmp3587, i64 1
- %tmp3589 = getelementptr inbounds float* %tmp3588, i64 1
- %tmp3590 = getelementptr inbounds float* %tmp3589, i64 1
- %tmp3591 = getelementptr inbounds float* %tmp3590, i64 1
- %tmp3592 = getelementptr inbounds float* %tmp3591, i64 1
- %tmp3593 = getelementptr inbounds float* %tmp3592, i64 1
- %tmp3594 = getelementptr inbounds float* %tmp3593, i64 1
- %tmp3595 = getelementptr inbounds float* %tmp3594, i64 1
- %tmp3596 = getelementptr inbounds float* %tmp3595, i64 1
- %tmp3597 = getelementptr inbounds float* %tmp3596, i64 1
- %tmp3598 = getelementptr inbounds float* %tmp3597, i64 1
- %tmp3599 = getelementptr inbounds float* %tmp3598, i64 1
- %tmp3600 = getelementptr inbounds float* %tmp3599, i64 1
- %tmp3601 = getelementptr inbounds float* %tmp3600, i64 1
- %tmp3602 = getelementptr inbounds float* %tmp3601, i64 1
- %tmp3603 = getelementptr inbounds float* %tmp3602, i64 1
- %tmp3604 = getelementptr inbounds float* %tmp3603, i64 1
- %tmp3605 = getelementptr inbounds float* %tmp3604, i64 1
- %tmp3606 = getelementptr inbounds float* %tmp3605, i64 1
- %tmp3607 = getelementptr inbounds float* %tmp3606, i64 1
- %tmp3608 = getelementptr inbounds float* %tmp3607, i64 1
- %tmp3609 = getelementptr inbounds float* %tmp3608, i64 1
- %tmp3610 = getelementptr inbounds float* %tmp3609, i64 1
- %tmp3611 = getelementptr inbounds float* %tmp3610, i64 1
- %tmp3612 = getelementptr inbounds float* %tmp3611, i64 1
- %tmp3613 = getelementptr inbounds float* %tmp3612, i64 1
- %tmp3614 = getelementptr inbounds float* %tmp3613, i64 1
- %tmp3615 = getelementptr inbounds float* %tmp3614, i64 1
- %tmp3616 = getelementptr inbounds float* %tmp3615, i64 1
- %tmp3617 = getelementptr inbounds float* %tmp3616, i64 1
- %tmp3618 = getelementptr inbounds float* %tmp3617, i64 1
- %tmp3619 = getelementptr inbounds float* %tmp3618, i64 1
- %tmp3620 = getelementptr inbounds float* %tmp3619, i64 1
- %tmp3621 = getelementptr inbounds float* %tmp3620, i64 1
- %tmp3622 = getelementptr inbounds float* %tmp3621, i64 1
- %tmp3623 = getelementptr inbounds float* %tmp3622, i64 1
- %tmp3624 = getelementptr inbounds float* %tmp3623, i64 1
- %tmp3625 = getelementptr inbounds float* %tmp3624, i64 1
- %tmp3626 = getelementptr inbounds float* %tmp3625, i64 1
- %tmp3627 = getelementptr inbounds float* %tmp3626, i64 1
- %tmp3628 = getelementptr inbounds float* %tmp3627, i64 1
- %tmp3629 = getelementptr inbounds float* %tmp3628, i64 1
- %tmp3630 = getelementptr inbounds float* %tmp3629, i64 1
- %tmp3631 = getelementptr inbounds float* %tmp3630, i64 1
- %tmp3632 = getelementptr inbounds float* %tmp3631, i64 1
- %tmp3633 = getelementptr inbounds float* %tmp3632, i64 1
- %tmp3634 = getelementptr inbounds float* %tmp3633, i64 1
- %tmp3635 = getelementptr inbounds float* %tmp3634, i64 1
- %tmp3636 = getelementptr inbounds float* %tmp3635, i64 1
- %tmp3637 = getelementptr inbounds float* %tmp3636, i64 1
- %tmp3638 = getelementptr inbounds float* %tmp3637, i64 1
- %tmp3639 = getelementptr inbounds float* %tmp3638, i64 1
- %tmp3640 = getelementptr inbounds float* %tmp3639, i64 1
- %tmp3641 = getelementptr inbounds float* %tmp3640, i64 1
- %tmp3642 = getelementptr inbounds float* %tmp3641, i64 1
- %tmp3643 = getelementptr inbounds float* %tmp3642, i64 1
- %tmp3644 = getelementptr inbounds float* %tmp3643, i64 1
- %tmp3645 = getelementptr inbounds float* %tmp3644, i64 1
- %tmp3646 = getelementptr inbounds float* %tmp3645, i64 1
- %tmp3647 = getelementptr inbounds float* %tmp3646, i64 1
- %tmp3648 = getelementptr inbounds float* %tmp3647, i64 1
- %tmp3649 = getelementptr inbounds float* %tmp3648, i64 1
- %tmp3650 = getelementptr inbounds float* %tmp3649, i64 1
- %tmp3651 = getelementptr inbounds float* %tmp3650, i64 1
- %tmp3652 = getelementptr inbounds float* %tmp3651, i64 1
- %tmp3653 = getelementptr inbounds float* %tmp3652, i64 1
- %tmp3654 = getelementptr inbounds float* %tmp3653, i64 1
- %tmp3655 = getelementptr inbounds float* %tmp3654, i64 1
- %tmp3656 = getelementptr inbounds float* %tmp3655, i64 1
- %tmp3657 = getelementptr inbounds float* %tmp3656, i64 1
- %tmp3658 = getelementptr inbounds float* %tmp3657, i64 1
- %tmp3659 = getelementptr inbounds float* %tmp3658, i64 1
- %tmp3660 = getelementptr inbounds float* %tmp3659, i64 1
- %tmp3661 = getelementptr inbounds float* %tmp3660, i64 1
- %tmp3662 = getelementptr inbounds float* %tmp3661, i64 1
- %tmp3663 = getelementptr inbounds float* %tmp3662, i64 1
- %tmp3664 = getelementptr inbounds float* %tmp3663, i64 1
- %tmp3665 = getelementptr inbounds float* %tmp3664, i64 1
- %tmp3666 = getelementptr inbounds float* %tmp3665, i64 1
- %tmp3667 = getelementptr inbounds float* %tmp3666, i64 1
- %tmp3668 = getelementptr inbounds float* %tmp3667, i64 1
- %tmp3669 = getelementptr inbounds float* %tmp3668, i64 1
- %tmp3670 = getelementptr inbounds float* %tmp3669, i64 1
- %tmp3671 = getelementptr inbounds float* %tmp3670, i64 1
- %tmp3672 = getelementptr inbounds float* %tmp3671, i64 1
- %tmp3673 = getelementptr inbounds float* %tmp3672, i64 1
- %tmp3674 = getelementptr inbounds float* %tmp3673, i64 1
- %tmp3675 = getelementptr inbounds float* %tmp3674, i64 1
- %tmp3676 = getelementptr inbounds float* %tmp3675, i64 1
- %tmp3677 = getelementptr inbounds float* %tmp3676, i64 1
- %tmp3678 = getelementptr inbounds float* %tmp3677, i64 1
- %tmp3679 = getelementptr inbounds float* %tmp3678, i64 1
- %tmp3680 = getelementptr inbounds float* %tmp3679, i64 1
- %tmp3681 = getelementptr inbounds float* %tmp3680, i64 1
- %tmp3682 = getelementptr inbounds float* %tmp3681, i64 1
- %tmp3683 = getelementptr inbounds float* %tmp3682, i64 1
- %tmp3684 = getelementptr inbounds float* %tmp3683, i64 1
- %tmp3685 = getelementptr inbounds float* %tmp3684, i64 1
- %tmp3686 = getelementptr inbounds float* %tmp3685, i64 1
- %tmp3687 = getelementptr inbounds float* %tmp3686, i64 1
- %tmp3688 = getelementptr inbounds float* %tmp3687, i64 1
- %tmp3689 = getelementptr inbounds float* %tmp3688, i64 1
- %tmp3690 = getelementptr inbounds float* %tmp3689, i64 1
- %tmp3691 = getelementptr inbounds float* %tmp3690, i64 1
- %tmp3692 = getelementptr inbounds float* %tmp3691, i64 1
- %tmp3693 = getelementptr inbounds float* %tmp3692, i64 1
- %tmp3694 = getelementptr inbounds float* %tmp3693, i64 1
- %tmp3695 = getelementptr inbounds float* %tmp3694, i64 1
- %tmp3696 = getelementptr inbounds float* %tmp3695, i64 1
- %tmp3697 = getelementptr inbounds float* %tmp3696, i64 1
- %tmp3698 = getelementptr inbounds float* %tmp3697, i64 1
- %tmp3699 = getelementptr inbounds float* %tmp3698, i64 1
- %tmp3700 = getelementptr inbounds float* %tmp3699, i64 1
- %tmp3701 = getelementptr inbounds float* %tmp3700, i64 1
- %tmp3702 = getelementptr inbounds float* %tmp3701, i64 1
- %tmp3703 = getelementptr inbounds float* %tmp3702, i64 1
- %tmp3704 = getelementptr inbounds float* %tmp3703, i64 1
- %tmp3705 = getelementptr inbounds float* %tmp3704, i64 1
- %tmp3706 = getelementptr inbounds float* %tmp3705, i64 1
- %tmp3707 = getelementptr inbounds float* %tmp3706, i64 1
- %tmp3708 = getelementptr inbounds float* %tmp3707, i64 1
- %tmp3709 = getelementptr inbounds float* %tmp3708, i64 1
- %tmp3710 = getelementptr inbounds float* %tmp3709, i64 1
- %tmp3711 = getelementptr inbounds float* %tmp3710, i64 1
- %tmp3712 = getelementptr inbounds float* %tmp3711, i64 1
- %tmp3713 = getelementptr inbounds float* %tmp3712, i64 1
- %tmp3714 = getelementptr inbounds float* %tmp3713, i64 1
- %tmp3715 = getelementptr inbounds float* %tmp3714, i64 1
- %tmp3716 = getelementptr inbounds float* %tmp3715, i64 1
- %tmp3717 = getelementptr inbounds float* %tmp3716, i64 1
- %tmp3718 = getelementptr inbounds float* %tmp3717, i64 1
- %tmp3719 = getelementptr inbounds float* %tmp3718, i64 1
- %tmp3720 = getelementptr inbounds float* %tmp3719, i64 1
- %tmp3721 = getelementptr inbounds float* %tmp3720, i64 1
- %tmp3722 = getelementptr inbounds float* %tmp3721, i64 1
- %tmp3723 = getelementptr inbounds float* %tmp3722, i64 1
- %tmp3724 = getelementptr inbounds float* %tmp3723, i64 1
- %tmp3725 = getelementptr inbounds float* %tmp3724, i64 1
- %tmp3726 = getelementptr inbounds float* %tmp3725, i64 1
- %tmp3727 = getelementptr inbounds float* %tmp3726, i64 1
- %tmp3728 = getelementptr inbounds float* %tmp3727, i64 1
- %tmp3729 = getelementptr inbounds float* %tmp3728, i64 1
- %tmp3730 = getelementptr inbounds float* %tmp3729, i64 1
- %tmp3731 = getelementptr inbounds float* %tmp3730, i64 1
- %tmp3732 = getelementptr inbounds float* %tmp3731, i64 1
- %tmp3733 = getelementptr inbounds float* %tmp3732, i64 1
- %tmp3734 = getelementptr inbounds float* %tmp3733, i64 1
- %tmp3735 = getelementptr inbounds float* %tmp3734, i64 1
- %tmp3736 = getelementptr inbounds float* %tmp3735, i64 1
- %tmp3737 = getelementptr inbounds float* %tmp3736, i64 1
- %tmp3738 = getelementptr inbounds float* %tmp3737, i64 1
- %tmp3739 = getelementptr inbounds float* %tmp3738, i64 1
- %tmp3740 = getelementptr inbounds float* %tmp3739, i64 1
- %tmp3741 = getelementptr inbounds float* %tmp3740, i64 1
- %tmp3742 = getelementptr inbounds float* %tmp3741, i64 1
- %tmp3743 = getelementptr inbounds float* %tmp3742, i64 1
- %tmp3744 = getelementptr inbounds float* %tmp3743, i64 1
- %tmp3745 = getelementptr inbounds float* %tmp3744, i64 1
- %tmp3746 = getelementptr inbounds float* %tmp3745, i64 1
- %tmp3747 = getelementptr inbounds float* %tmp3746, i64 1
- %tmp3748 = getelementptr inbounds float* %tmp3747, i64 1
- %tmp3749 = getelementptr inbounds float* %tmp3748, i64 1
- %tmp3750 = getelementptr inbounds float* %tmp3749, i64 1
- %tmp3751 = getelementptr inbounds float* %tmp3750, i64 1
- %tmp3752 = getelementptr inbounds float* %tmp3751, i64 1
- %tmp3753 = getelementptr inbounds float* %tmp3752, i64 1
- %tmp3754 = getelementptr inbounds float* %tmp3753, i64 1
- %tmp3755 = getelementptr inbounds float* %tmp3754, i64 1
- %tmp3756 = getelementptr inbounds float* %tmp3755, i64 1
- %tmp3757 = getelementptr inbounds float* %tmp3756, i64 1
- %tmp3758 = getelementptr inbounds float* %tmp3757, i64 1
- %tmp3759 = getelementptr inbounds float* %tmp3758, i64 1
- %tmp3760 = getelementptr inbounds float* %tmp3759, i64 1
- %tmp3761 = getelementptr inbounds float* %tmp3760, i64 1
- %tmp3762 = getelementptr inbounds float* %tmp3761, i64 1
- %tmp3763 = getelementptr inbounds float* %tmp3762, i64 1
- %tmp3764 = getelementptr inbounds float* %tmp3763, i64 1
- %tmp3765 = getelementptr inbounds float* %tmp3764, i64 1
- %tmp3766 = getelementptr inbounds float* %tmp3765, i64 1
- %tmp3767 = getelementptr inbounds float* %tmp3766, i64 1
- %tmp3768 = getelementptr inbounds float* %tmp3767, i64 1
- %tmp3769 = getelementptr inbounds float* %tmp3768, i64 1
- %tmp3770 = getelementptr inbounds float* %tmp3769, i64 1
- %tmp3771 = getelementptr inbounds float* %tmp3770, i64 1
- %tmp3772 = getelementptr inbounds float* %tmp3771, i64 1
- %tmp3773 = getelementptr inbounds float* %tmp3772, i64 1
- %tmp3774 = getelementptr inbounds float* %tmp3773, i64 1
- %tmp3775 = getelementptr inbounds float* %tmp3774, i64 1
- %tmp3776 = getelementptr inbounds float* %tmp3775, i64 1
- %tmp3777 = getelementptr inbounds float* %tmp3776, i64 1
- %tmp3778 = getelementptr inbounds float* %tmp3777, i64 1
- %tmp3779 = getelementptr inbounds float* %tmp3778, i64 1
- %tmp3780 = getelementptr inbounds float* %tmp3779, i64 1
- %tmp3781 = getelementptr inbounds float* %tmp3780, i64 1
- %tmp3782 = getelementptr inbounds float* %tmp3781, i64 1
- %tmp3783 = getelementptr inbounds float* %tmp3782, i64 1
- %tmp3784 = getelementptr inbounds float* %tmp3783, i64 1
- %tmp3785 = getelementptr inbounds float* %tmp3784, i64 1
- %tmp3786 = getelementptr inbounds float* %tmp3785, i64 1
- %tmp3787 = getelementptr inbounds float* %tmp3786, i64 1
- %tmp3788 = getelementptr inbounds float* %tmp3787, i64 1
- %tmp3789 = getelementptr inbounds float* %tmp3788, i64 1
- %tmp3790 = getelementptr inbounds float* %tmp3789, i64 1
- %tmp3791 = getelementptr inbounds float* %tmp3790, i64 1
- %tmp3792 = getelementptr inbounds float* %tmp3791, i64 1
- %tmp3793 = getelementptr inbounds float* %tmp3792, i64 1
- %tmp3794 = getelementptr inbounds float* %tmp3793, i64 1
- %tmp3795 = getelementptr inbounds float* %tmp3794, i64 1
- %tmp3796 = getelementptr inbounds float* %tmp3795, i64 1
- %tmp3797 = getelementptr inbounds float* %tmp3796, i64 1
- %tmp3798 = getelementptr inbounds float* %tmp3797, i64 1
- %tmp3799 = getelementptr inbounds float* %tmp3798, i64 1
- %tmp3800 = getelementptr inbounds float* %tmp3799, i64 1
- %tmp3801 = getelementptr inbounds float* %tmp3800, i64 1
- %tmp3802 = getelementptr inbounds float* %tmp3801, i64 1
- %tmp3803 = getelementptr inbounds float* %tmp3802, i64 1
- %tmp3804 = getelementptr inbounds float* %tmp3803, i64 1
- %tmp3805 = getelementptr inbounds float* %tmp3804, i64 1
- %tmp3806 = getelementptr inbounds float* %tmp3805, i64 1
- %tmp3807 = getelementptr inbounds float* %tmp3806, i64 1
- %tmp3808 = getelementptr inbounds float* %tmp3807, i64 1
- %tmp3809 = getelementptr inbounds float* %tmp3808, i64 1
- %tmp3810 = getelementptr inbounds float* %tmp3809, i64 1
- %tmp3811 = getelementptr inbounds float* %tmp3810, i64 1
- %tmp3812 = getelementptr inbounds float* %tmp3811, i64 1
- %tmp3813 = getelementptr inbounds float* %tmp3812, i64 1
- %tmp3814 = getelementptr inbounds float* %tmp3813, i64 1
- %tmp3815 = getelementptr inbounds float* %tmp3814, i64 1
- %tmp3816 = getelementptr inbounds float* %tmp3815, i64 1
- %tmp3817 = getelementptr inbounds float* %tmp3816, i64 1
- %tmp3818 = getelementptr inbounds float* %tmp3817, i64 1
- %tmp3819 = getelementptr inbounds float* %tmp3818, i64 1
- %tmp3820 = getelementptr inbounds float* %tmp3819, i64 1
- %tmp3821 = getelementptr inbounds float* %tmp3820, i64 1
- %tmp3822 = getelementptr inbounds float* %tmp3821, i64 1
- %tmp3823 = getelementptr inbounds float* %tmp3822, i64 1
- %tmp3824 = getelementptr inbounds float* %tmp3823, i64 1
- %tmp3825 = getelementptr inbounds float* %tmp3824, i64 1
- %tmp3826 = getelementptr inbounds float* %tmp3825, i64 1
- %tmp3827 = getelementptr inbounds float* %tmp3826, i64 1
- %tmp3828 = getelementptr inbounds float* %tmp3827, i64 1
- %tmp3829 = getelementptr inbounds float* %tmp3828, i64 1
- %tmp3830 = getelementptr inbounds float* %tmp3829, i64 1
- %tmp3831 = getelementptr inbounds float* %tmp3830, i64 1
- %tmp3832 = getelementptr inbounds float* %tmp3831, i64 1
- %tmp3833 = getelementptr inbounds float* %tmp3832, i64 1
- %tmp3834 = getelementptr inbounds float* %tmp3833, i64 1
- %tmp3835 = getelementptr inbounds float* %tmp3834, i64 1
- %tmp3836 = getelementptr inbounds float* %tmp3835, i64 1
- %tmp3837 = getelementptr inbounds float* %tmp3836, i64 1
- %tmp3838 = getelementptr inbounds float* %tmp3837, i64 1
- %tmp3839 = getelementptr inbounds float* %tmp3838, i64 1
- %tmp3840 = getelementptr inbounds float* %tmp3839, i64 1
- %tmp3841 = getelementptr inbounds float* %tmp3840, i64 1
- %tmp3842 = getelementptr inbounds float* %tmp3841, i64 1
- %tmp3843 = getelementptr inbounds float* %tmp3842, i64 1
- %tmp3844 = getelementptr inbounds float* %tmp3843, i64 1
- %tmp3845 = getelementptr inbounds float* %tmp3844, i64 1
- %tmp3846 = getelementptr inbounds float* %tmp3845, i64 1
- %tmp3847 = getelementptr inbounds float* %tmp3846, i64 1
- %tmp3848 = getelementptr inbounds float* %tmp3847, i64 1
- %tmp3849 = getelementptr inbounds float* %tmp3848, i64 1
- %tmp3850 = getelementptr inbounds float* %tmp3849, i64 1
- %tmp3851 = getelementptr inbounds float* %tmp3850, i64 1
- %tmp3852 = getelementptr inbounds float* %tmp3851, i64 1
- %tmp3853 = getelementptr inbounds float* %tmp3852, i64 1
- %tmp3854 = getelementptr inbounds float* %tmp3853, i64 1
- %tmp3855 = getelementptr inbounds float* %tmp3854, i64 1
- %tmp3856 = getelementptr inbounds float* %tmp3855, i64 1
- %tmp3857 = getelementptr inbounds float* %tmp3856, i64 1
- %tmp3858 = getelementptr inbounds float* %tmp3857, i64 1
- %tmp3859 = getelementptr inbounds float* %tmp3858, i64 1
- %tmp3860 = getelementptr inbounds float* %tmp3859, i64 1
- %tmp3861 = getelementptr inbounds float* %tmp3860, i64 1
- %tmp3862 = getelementptr inbounds float* %tmp3861, i64 1
- %tmp3863 = getelementptr inbounds float* %tmp3862, i64 1
- %tmp3864 = getelementptr inbounds float* %tmp3863, i64 1
- %tmp3865 = getelementptr inbounds float* %tmp3864, i64 1
- %tmp3866 = getelementptr inbounds float* %tmp3865, i64 1
- %tmp3867 = getelementptr inbounds float* %tmp3866, i64 1
- %tmp3868 = getelementptr inbounds float* %tmp3867, i64 1
- %tmp3869 = getelementptr inbounds float* %tmp3868, i64 1
- %tmp3870 = getelementptr inbounds float* %tmp3869, i64 1
- %tmp3871 = getelementptr inbounds float* %tmp3870, i64 1
- %tmp3872 = getelementptr inbounds float* %tmp3871, i64 1
- %tmp3873 = getelementptr inbounds float* %tmp3872, i64 1
- %tmp3874 = getelementptr inbounds float* %tmp3873, i64 1
- %tmp3875 = getelementptr inbounds float* %tmp3874, i64 1
- %tmp3876 = getelementptr inbounds float* %tmp3875, i64 1
- %tmp3877 = getelementptr inbounds float* %tmp3876, i64 1
- %tmp3878 = getelementptr inbounds float* %tmp3877, i64 1
- %tmp3879 = getelementptr inbounds float* %tmp3878, i64 1
- %tmp3880 = getelementptr inbounds float* %tmp3879, i64 1
- %tmp3881 = getelementptr inbounds float* %tmp3880, i64 1
- %tmp3882 = getelementptr inbounds float* %tmp3881, i64 1
- %tmp3883 = getelementptr inbounds float* %tmp3882, i64 1
- %tmp3884 = getelementptr inbounds float* %tmp3883, i64 1
- %tmp3885 = getelementptr inbounds float* %tmp3884, i64 1
- %tmp3886 = getelementptr inbounds float* %tmp3885, i64 1
- %tmp3887 = getelementptr inbounds float* %tmp3886, i64 1
- %tmp3888 = getelementptr inbounds float* %tmp3887, i64 1
- %tmp3889 = getelementptr inbounds float* %tmp3888, i64 1
- %tmp3890 = getelementptr inbounds float* %tmp3889, i64 1
- %tmp3891 = getelementptr inbounds float* %tmp3890, i64 1
- %tmp3892 = getelementptr inbounds float* %tmp3891, i64 1
- %tmp3893 = getelementptr inbounds float* %tmp3892, i64 1
- %tmp3894 = getelementptr inbounds float* %tmp3893, i64 1
- %tmp3895 = getelementptr inbounds float* %tmp3894, i64 1
- %tmp3896 = getelementptr inbounds float* %tmp3895, i64 1
- %tmp3897 = getelementptr inbounds float* %tmp3896, i64 1
- %tmp3898 = getelementptr inbounds float* %tmp3897, i64 1
- %tmp3899 = getelementptr inbounds float* %tmp3898, i64 1
- %tmp3900 = getelementptr inbounds float* %tmp3899, i64 1
- %tmp3901 = getelementptr inbounds float* %tmp3900, i64 1
- %tmp3902 = getelementptr inbounds float* %tmp3901, i64 1
- %tmp3903 = getelementptr inbounds float* %tmp3902, i64 1
- %tmp3904 = getelementptr inbounds float* %tmp3903, i64 1
- %tmp3905 = getelementptr inbounds float* %tmp3904, i64 1
- %tmp3906 = getelementptr inbounds float* %tmp3905, i64 1
- %tmp3907 = getelementptr inbounds float* %tmp3906, i64 1
- %tmp3908 = getelementptr inbounds float* %tmp3907, i64 1
- %tmp3909 = getelementptr inbounds float* %tmp3908, i64 1
- %tmp3910 = getelementptr inbounds float* %tmp3909, i64 1
- %tmp3911 = getelementptr inbounds float* %tmp3910, i64 1
- %tmp3912 = getelementptr inbounds float* %tmp3911, i64 1
- %tmp3913 = getelementptr inbounds float* %tmp3912, i64 1
- %tmp3914 = getelementptr inbounds float* %tmp3913, i64 1
- %tmp3915 = getelementptr inbounds float* %tmp3914, i64 1
- %tmp3916 = getelementptr inbounds float* %tmp3915, i64 1
- %tmp3917 = getelementptr inbounds float* %tmp3916, i64 1
- %tmp3918 = getelementptr inbounds float* %tmp3917, i64 1
- %tmp3919 = getelementptr inbounds float* %tmp3918, i64 1
- %tmp3920 = getelementptr inbounds float* %tmp3919, i64 1
- %tmp3921 = getelementptr inbounds float* %tmp3920, i64 1
- %tmp3922 = getelementptr inbounds float* %tmp3921, i64 1
- %tmp3923 = getelementptr inbounds float* %tmp3922, i64 1
- %tmp3924 = getelementptr inbounds float* %tmp3923, i64 1
- %tmp3925 = getelementptr inbounds float* %tmp3924, i64 1
- %tmp3926 = getelementptr inbounds float* %tmp3925, i64 1
- %tmp3927 = getelementptr inbounds float* %tmp3926, i64 1
- %tmp3928 = getelementptr inbounds float* %tmp3927, i64 1
- %tmp3929 = getelementptr inbounds float* %tmp3928, i64 1
- %tmp3930 = getelementptr inbounds float* %tmp3929, i64 1
- %tmp3931 = getelementptr inbounds float* %tmp3930, i64 1
- %tmp3932 = getelementptr inbounds float* %tmp3931, i64 1
- %tmp3933 = getelementptr inbounds float* %tmp3932, i64 1
- %tmp3934 = getelementptr inbounds float* %tmp3933, i64 1
- %tmp3935 = getelementptr inbounds float* %tmp3934, i64 1
- %tmp3936 = getelementptr inbounds float* %tmp3935, i64 1
- %tmp3937 = getelementptr inbounds float* %tmp3936, i64 1
- %tmp3938 = getelementptr inbounds float* %tmp3937, i64 1
- %tmp3939 = getelementptr inbounds float* %tmp3938, i64 1
- %tmp3940 = getelementptr inbounds float* %tmp3939, i64 1
- %tmp3941 = getelementptr inbounds float* %tmp3940, i64 1
- %tmp3942 = getelementptr inbounds float* %tmp3941, i64 1
- %tmp3943 = getelementptr inbounds float* %tmp3942, i64 1
- %tmp3944 = getelementptr inbounds float* %tmp3943, i64 1
- %tmp3945 = getelementptr inbounds float* %tmp3944, i64 1
- %tmp3946 = getelementptr inbounds float* %tmp3945, i64 1
- %tmp3947 = getelementptr inbounds float* %tmp3946, i64 1
- %tmp3948 = getelementptr inbounds float* %tmp3947, i64 1
- %tmp3949 = getelementptr inbounds float* %tmp3948, i64 1
- %tmp3950 = getelementptr inbounds float* %tmp3949, i64 1
- %tmp3951 = getelementptr inbounds float* %tmp3950, i64 1
- %tmp3952 = getelementptr inbounds float* %tmp3951, i64 1
- %tmp3953 = getelementptr inbounds float* %tmp3952, i64 1
- %tmp3954 = getelementptr inbounds float* %tmp3953, i64 1
- %tmp3955 = getelementptr inbounds float* %tmp3954, i64 1
- %tmp3956 = getelementptr inbounds float* %tmp3955, i64 1
- %tmp3957 = getelementptr inbounds float* %tmp3956, i64 1
- %tmp3958 = getelementptr inbounds float* %tmp3957, i64 1
- %tmp3959 = getelementptr inbounds float* %tmp3958, i64 1
- %tmp3960 = getelementptr inbounds float* %tmp3959, i64 1
- %tmp3961 = getelementptr inbounds float* %tmp3960, i64 1
- %tmp3962 = getelementptr inbounds float* %tmp3961, i64 1
- %tmp3963 = getelementptr inbounds float* %tmp3962, i64 1
- %tmp3964 = getelementptr inbounds float* %tmp3963, i64 1
- %tmp3965 = getelementptr inbounds float* %tmp3964, i64 1
- %tmp3966 = getelementptr inbounds float* %tmp3965, i64 1
- %tmp3967 = getelementptr inbounds float* %tmp3966, i64 1
- %tmp3968 = getelementptr inbounds float* %tmp3967, i64 1
- %tmp3969 = getelementptr inbounds float* %tmp3968, i64 1
- %tmp3970 = getelementptr inbounds float* %tmp3969, i64 1
- %tmp3971 = getelementptr inbounds float* %tmp3970, i64 1
- %tmp3972 = getelementptr inbounds float* %tmp3971, i64 1
- %tmp3973 = getelementptr inbounds float* %tmp3972, i64 1
- %tmp3974 = getelementptr inbounds float* %tmp3973, i64 1
- %tmp3975 = getelementptr inbounds float* %tmp3974, i64 1
- %tmp3976 = getelementptr inbounds float* %tmp3975, i64 1
- %tmp3977 = getelementptr inbounds float* %tmp3976, i64 1
- %tmp3978 = getelementptr inbounds float* %tmp3977, i64 1
- %tmp3979 = getelementptr inbounds float* %tmp3978, i64 1
- %tmp3980 = getelementptr inbounds float* %tmp3979, i64 1
- %tmp3981 = getelementptr inbounds float* %tmp3980, i64 1
- %tmp3982 = getelementptr inbounds float* %tmp3981, i64 1
- %tmp3983 = getelementptr inbounds float* %tmp3982, i64 1
- %tmp3984 = getelementptr inbounds float* %tmp3983, i64 1
- %tmp3985 = getelementptr inbounds float* %tmp3984, i64 1
- %tmp3986 = getelementptr inbounds float* %tmp3985, i64 1
- %tmp3987 = getelementptr inbounds float* %tmp3986, i64 1
- %tmp3988 = getelementptr inbounds float* %tmp3987, i64 1
- %tmp3989 = getelementptr inbounds float* %tmp3988, i64 1
- %tmp3990 = getelementptr inbounds float* %tmp3989, i64 1
- %tmp3991 = getelementptr inbounds float* %tmp3990, i64 1
- %tmp3992 = getelementptr inbounds float* %tmp3991, i64 1
- %tmp3993 = getelementptr inbounds float* %tmp3992, i64 1
- %tmp3994 = getelementptr inbounds float* %tmp3993, i64 1
- %tmp3995 = getelementptr inbounds float* %tmp3994, i64 1
- %tmp3996 = getelementptr inbounds float* %tmp3995, i64 1
- %tmp3997 = getelementptr inbounds float* %tmp3996, i64 1
- %tmp3998 = getelementptr inbounds float* %tmp3997, i64 1
- %tmp3999 = getelementptr inbounds float* %tmp3998, i64 1
- %tmp4000 = getelementptr inbounds float* %tmp3999, i64 1
- %tmp4001 = getelementptr inbounds float* %tmp4000, i64 1
- %tmp4002 = getelementptr inbounds float* %tmp4001, i64 1
- %tmp4003 = getelementptr inbounds float* %tmp4002, i64 1
- %tmp4004 = getelementptr inbounds float* %tmp4003, i64 1
- %tmp4005 = getelementptr inbounds float* %tmp4004, i64 1
- %tmp4006 = getelementptr inbounds float* %tmp4005, i64 1
- %tmp4007 = getelementptr inbounds float* %tmp4006, i64 1
- %tmp4008 = getelementptr inbounds float* %tmp4007, i64 1
- %tmp4009 = getelementptr inbounds float* %tmp4008, i64 1
- %tmp4010 = getelementptr inbounds float* %tmp4009, i64 1
- %tmp4011 = getelementptr inbounds float* %tmp4010, i64 1
- %tmp4012 = getelementptr inbounds float* %tmp4011, i64 1
- %tmp4013 = getelementptr inbounds float* %tmp4012, i64 1
- %tmp4014 = getelementptr inbounds float* %tmp4013, i64 1
- %tmp4015 = getelementptr inbounds float* %tmp4014, i64 1
- %tmp4016 = getelementptr inbounds float* %tmp4015, i64 1
- %tmp4017 = getelementptr inbounds float* %tmp4016, i64 1
- %tmp4018 = getelementptr inbounds float* %tmp4017, i64 1
- %tmp4019 = getelementptr inbounds float* %tmp4018, i64 1
- %tmp4020 = getelementptr inbounds float* %tmp4019, i64 1
- %tmp4021 = getelementptr inbounds float* %tmp4020, i64 1
- %tmp4022 = getelementptr inbounds float* %tmp4021, i64 1
- %tmp4023 = getelementptr inbounds float* %tmp4022, i64 1
- %tmp4024 = getelementptr inbounds float* %tmp4023, i64 1
- %tmp4025 = getelementptr inbounds float* %tmp4024, i64 1
- %tmp4026 = getelementptr inbounds float* %tmp4025, i64 1
- %tmp4027 = getelementptr inbounds float* %tmp4026, i64 1
- %tmp4028 = getelementptr inbounds float* %tmp4027, i64 1
- %tmp4029 = getelementptr inbounds float* %tmp4028, i64 1
- %tmp4030 = getelementptr inbounds float* %tmp4029, i64 1
- %tmp4031 = getelementptr inbounds float* %tmp4030, i64 1
- %tmp4032 = getelementptr inbounds float* %tmp4031, i64 1
- %tmp4033 = getelementptr inbounds float* %tmp4032, i64 1
- %tmp4034 = getelementptr inbounds float* %tmp4033, i64 1
- %tmp4035 = getelementptr inbounds float* %tmp4034, i64 1
- %tmp4036 = getelementptr inbounds float* %tmp4035, i64 1
- %tmp4037 = getelementptr inbounds float* %tmp4036, i64 1
- %tmp4038 = getelementptr inbounds float* %tmp4037, i64 1
- %tmp4039 = getelementptr inbounds float* %tmp4038, i64 1
- %tmp4040 = getelementptr inbounds float* %tmp4039, i64 1
- %tmp4041 = getelementptr inbounds float* %tmp4040, i64 1
- %tmp4042 = getelementptr inbounds float* %tmp4041, i64 1
- %tmp4043 = getelementptr inbounds float* %tmp4042, i64 1
- %tmp4044 = getelementptr inbounds float* %tmp4043, i64 1
- %tmp4045 = getelementptr inbounds float* %tmp4044, i64 1
- %tmp4046 = getelementptr inbounds float* %tmp4045, i64 1
- %tmp4047 = getelementptr inbounds float* %tmp4046, i64 1
- %tmp4048 = getelementptr inbounds float* %tmp4047, i64 1
- %tmp4049 = getelementptr inbounds float* %tmp4048, i64 1
- %tmp4050 = getelementptr inbounds float* %tmp4049, i64 1
- %tmp4051 = getelementptr inbounds float* %tmp4050, i64 1
- %tmp4052 = getelementptr inbounds float* %tmp4051, i64 1
- %tmp4053 = getelementptr inbounds float* %tmp4052, i64 1
- %tmp4054 = getelementptr inbounds float* %tmp4053, i64 1
- %tmp4055 = getelementptr inbounds float* %tmp4054, i64 1
- %tmp4056 = getelementptr inbounds float* %tmp4055, i64 1
- %tmp4057 = getelementptr inbounds float* %tmp4056, i64 1
- %tmp4058 = getelementptr inbounds float* %tmp4057, i64 1
- %tmp4059 = getelementptr inbounds float* %tmp4058, i64 1
- %tmp4060 = getelementptr inbounds float* %tmp4059, i64 1
- %tmp4061 = getelementptr inbounds float* %tmp4060, i64 1
- %tmp4062 = getelementptr inbounds float* %tmp4061, i64 1
- %tmp4063 = getelementptr inbounds float* %tmp4062, i64 1
- %tmp4064 = getelementptr inbounds float* %tmp4063, i64 1
- %tmp4065 = getelementptr inbounds float* %tmp4064, i64 1
- %tmp4066 = getelementptr inbounds float* %tmp4065, i64 1
- %tmp4067 = getelementptr inbounds float* %tmp4066, i64 1
- %tmp4068 = getelementptr inbounds float* %tmp4067, i64 1
- %tmp4069 = getelementptr inbounds float* %tmp4068, i64 1
- %tmp4070 = getelementptr inbounds float* %tmp4069, i64 1
- %tmp4071 = getelementptr inbounds float* %tmp4070, i64 1
- %tmp4072 = getelementptr inbounds float* %tmp4071, i64 1
- %tmp4073 = getelementptr inbounds float* %tmp4072, i64 1
- %tmp4074 = getelementptr inbounds float* %tmp4073, i64 1
- %tmp4075 = getelementptr inbounds float* %tmp4074, i64 1
- %tmp4076 = getelementptr inbounds float* %tmp4075, i64 1
- %tmp4077 = getelementptr inbounds float* %tmp4076, i64 1
- %tmp4078 = getelementptr inbounds float* %tmp4077, i64 1
- %tmp4079 = getelementptr inbounds float* %tmp4078, i64 1
- %tmp4080 = getelementptr inbounds float* %tmp4079, i64 1
- %tmp4081 = getelementptr inbounds float* %tmp4080, i64 1
- %tmp4082 = getelementptr inbounds float* %tmp4081, i64 1
- %tmp4083 = getelementptr inbounds float* %tmp4082, i64 1
- %tmp4084 = getelementptr inbounds float* %tmp4083, i64 1
- %tmp4085 = getelementptr inbounds float* %tmp4084, i64 1
- %tmp4086 = getelementptr inbounds float* %tmp4085, i64 1
- %tmp4087 = getelementptr inbounds float* %tmp4086, i64 1
- %tmp4088 = getelementptr inbounds float* %tmp4087, i64 1
- %tmp4089 = getelementptr inbounds float* %tmp4088, i64 1
- %tmp4090 = getelementptr inbounds float* %tmp4089, i64 1
- %tmp4091 = getelementptr inbounds float* %tmp4090, i64 1
- %tmp4092 = getelementptr inbounds float* %tmp4091, i64 1
- %tmp4093 = getelementptr inbounds float* %tmp4092, i64 1
- %tmp4094 = getelementptr inbounds float* %tmp4093, i64 1
- %tmp4095 = getelementptr inbounds float* %tmp4094, i64 1
- %tmp4096 = getelementptr inbounds float* %tmp4095, i64 1
- %tmp4097 = getelementptr inbounds float* %tmp4096, i64 1
- %tmp4098 = getelementptr inbounds float* %tmp4097, i64 1
- %tmp4099 = getelementptr inbounds float* %tmp4098, i64 1
- %tmp4100 = getelementptr inbounds float* %tmp4099, i64 1
- %tmp4101 = getelementptr inbounds float* %tmp4100, i64 1
- %tmp4102 = getelementptr inbounds float* %tmp4101, i64 1
- %tmp4103 = getelementptr inbounds float* %tmp4102, i64 1
- %tmp4104 = getelementptr inbounds float* %tmp4103, i64 1
- %tmp4105 = getelementptr inbounds float* %tmp4104, i64 1
- %tmp4106 = getelementptr inbounds float* %tmp4105, i64 1
- %tmp4107 = getelementptr inbounds float* %tmp4106, i64 1
- %tmp4108 = getelementptr inbounds float* %tmp4107, i64 1
- %tmp4109 = getelementptr inbounds float* %tmp4108, i64 1
- %tmp4110 = getelementptr inbounds float* %tmp4109, i64 1
- %tmp4111 = getelementptr inbounds float* %tmp4110, i64 1
- %tmp4112 = getelementptr inbounds float* %tmp4111, i64 1
- %tmp4113 = getelementptr inbounds float* %tmp4112, i64 1
- %tmp4114 = getelementptr inbounds float* %tmp4113, i64 1
- %tmp4115 = getelementptr inbounds float* %tmp4114, i64 1
- %tmp4116 = getelementptr inbounds float* %tmp4115, i64 1
- %tmp4117 = getelementptr inbounds float* %tmp4116, i64 1
- %tmp4118 = getelementptr inbounds float* %tmp4117, i64 1
- %tmp4119 = getelementptr inbounds float* %tmp4118, i64 1
- %tmp4120 = getelementptr inbounds float* %tmp4119, i64 1
- %tmp4121 = getelementptr inbounds float* %tmp4120, i64 1
- %tmp4122 = getelementptr inbounds float* %tmp4121, i64 1
- %tmp4123 = getelementptr inbounds float* %tmp4122, i64 1
- %tmp4124 = getelementptr inbounds float* %tmp4123, i64 1
- %tmp4125 = getelementptr inbounds float* %tmp4124, i64 1
- %tmp4126 = getelementptr inbounds float* %tmp4125, i64 1
- %tmp4127 = getelementptr inbounds float* %tmp4126, i64 1
- %tmp4128 = getelementptr inbounds float* %tmp4127, i64 1
- %tmp4129 = getelementptr inbounds float* %tmp4128, i64 1
- %tmp4130 = getelementptr inbounds float* %tmp4129, i64 1
- %tmp4131 = getelementptr inbounds float* %tmp4130, i64 1
- %tmp4132 = getelementptr inbounds float* %tmp4131, i64 1
- %tmp4133 = getelementptr inbounds float* %tmp4132, i64 1
- %tmp4134 = getelementptr inbounds float* %tmp4133, i64 1
- %tmp4135 = getelementptr inbounds float* %tmp4134, i64 1
- %tmp4136 = getelementptr inbounds float* %tmp4135, i64 1
- %tmp4137 = getelementptr inbounds float* %tmp4136, i64 1
- %tmp4138 = getelementptr inbounds float* %tmp4137, i64 1
- %tmp4139 = getelementptr inbounds float* %tmp4138, i64 1
- %tmp4140 = getelementptr inbounds float* %tmp4139, i64 1
- %tmp4141 = getelementptr inbounds float* %tmp4140, i64 1
- %tmp4142 = getelementptr inbounds float* %tmp4141, i64 1
- %tmp4143 = getelementptr inbounds float* %tmp4142, i64 1
- %tmp4144 = getelementptr inbounds float* %tmp4143, i64 1
- %tmp4145 = getelementptr inbounds float* %tmp4144, i64 1
- %tmp4146 = getelementptr inbounds float* %tmp4145, i64 1
- %tmp4147 = getelementptr inbounds float* %tmp4146, i64 1
- %tmp4148 = getelementptr inbounds float* %tmp4147, i64 1
- %tmp4149 = getelementptr inbounds float* %tmp4148, i64 1
- %tmp4150 = getelementptr inbounds float* %tmp4149, i64 1
- %tmp4151 = getelementptr inbounds float* %tmp4150, i64 1
- %tmp4152 = getelementptr inbounds float* %tmp4151, i64 1
- %tmp4153 = getelementptr inbounds float* %tmp4152, i64 1
- %tmp4154 = getelementptr inbounds float* %tmp4153, i64 1
- %tmp4155 = getelementptr inbounds float* %tmp4154, i64 1
- %tmp4156 = getelementptr inbounds float* %tmp4155, i64 1
- %tmp4157 = getelementptr inbounds float* %tmp4156, i64 1
- %tmp4158 = getelementptr inbounds float* %tmp4157, i64 1
- %tmp4159 = getelementptr inbounds float* %tmp4158, i64 1
- %tmp4160 = getelementptr inbounds float* %tmp4159, i64 1
- %tmp4161 = getelementptr inbounds float* %tmp4160, i64 1
- %tmp4162 = getelementptr inbounds float* %tmp4161, i64 1
- %tmp4163 = getelementptr inbounds float* %tmp4162, i64 1
- %tmp4164 = getelementptr inbounds float* %tmp4163, i64 1
- %tmp4165 = getelementptr inbounds float* %tmp4164, i64 1
- %tmp4166 = getelementptr inbounds float* %tmp4165, i64 1
- %tmp4167 = getelementptr inbounds float* %tmp4166, i64 1
- %tmp4168 = getelementptr inbounds float* %tmp4167, i64 1
- %tmp4169 = getelementptr inbounds float* %tmp4168, i64 1
- %tmp4170 = getelementptr inbounds float* %tmp4169, i64 1
- %tmp4171 = getelementptr inbounds float* %tmp4170, i64 1
- %tmp4172 = getelementptr inbounds float* %tmp4171, i64 1
- %tmp4173 = getelementptr inbounds float* %tmp4172, i64 1
- %tmp4174 = getelementptr inbounds float* %tmp4173, i64 1
- %tmp4175 = getelementptr inbounds float* %tmp4174, i64 1
- %tmp4176 = getelementptr inbounds float* %tmp4175, i64 1
- %tmp4177 = getelementptr inbounds float* %tmp4176, i64 1
- %tmp4178 = getelementptr inbounds float* %tmp4177, i64 1
- %tmp4179 = getelementptr inbounds float* %tmp4178, i64 1
- %tmp4180 = getelementptr inbounds float* %tmp4179, i64 1
- %tmp4181 = getelementptr inbounds float* %tmp4180, i64 1
- %tmp4182 = getelementptr inbounds float* %tmp4181, i64 1
- %tmp4183 = getelementptr inbounds float* %tmp4182, i64 1
- %tmp4184 = getelementptr inbounds float* %tmp4183, i64 1
- %tmp4185 = getelementptr inbounds float* %tmp4184, i64 1
- %tmp4186 = getelementptr inbounds float* %tmp4185, i64 1
- %tmp4187 = getelementptr inbounds float* %tmp4186, i64 1
- %tmp4188 = getelementptr inbounds float* %tmp4187, i64 1
- %tmp4189 = getelementptr inbounds float* %tmp4188, i64 1
- %tmp4190 = getelementptr inbounds float* %tmp4189, i64 1
- %tmp4191 = getelementptr inbounds float* %tmp4190, i64 1
- %tmp4192 = getelementptr inbounds float* %tmp4191, i64 1
- %tmp4193 = getelementptr inbounds float* %tmp4192, i64 1
- %tmp4194 = getelementptr inbounds float* %tmp4193, i64 1
- %tmp4195 = getelementptr inbounds float* %tmp4194, i64 1
- %tmp4196 = getelementptr inbounds float* %tmp4195, i64 1
- %tmp4197 = getelementptr inbounds float* %tmp4196, i64 1
- %tmp4198 = getelementptr inbounds float* %tmp4197, i64 1
- %tmp4199 = getelementptr inbounds float* %tmp4198, i64 1
- %tmp4200 = getelementptr inbounds float* %tmp4199, i64 1
- %tmp4201 = getelementptr inbounds float* %tmp4200, i64 1
- %tmp4202 = getelementptr inbounds float* %tmp4201, i64 1
- %tmp4203 = getelementptr inbounds float* %tmp4202, i64 1
- %tmp4204 = getelementptr inbounds float* %tmp4203, i64 1
- %tmp4205 = getelementptr inbounds float* %tmp4204, i64 1
- %tmp4206 = getelementptr inbounds float* %tmp4205, i64 1
- %tmp4207 = getelementptr inbounds float* %tmp4206, i64 1
- %tmp4208 = getelementptr inbounds float* %tmp4207, i64 1
- %tmp4209 = getelementptr inbounds float* %tmp4208, i64 1
- %tmp4210 = getelementptr inbounds float* %tmp4209, i64 1
- %tmp4211 = getelementptr inbounds float* %tmp4210, i64 1
- %tmp4212 = getelementptr inbounds float* %tmp4211, i64 1
- %tmp4213 = getelementptr inbounds float* %tmp4212, i64 1
- %tmp4214 = getelementptr inbounds float* %tmp4213, i64 1
- %tmp4215 = getelementptr inbounds float* %tmp4214, i64 1
- %tmp4216 = getelementptr inbounds float* %tmp4215, i64 1
- %tmp4217 = getelementptr inbounds float* %tmp4216, i64 1
- %tmp4218 = getelementptr inbounds float* %tmp4217, i64 1
- %tmp4219 = getelementptr inbounds float* %tmp4218, i64 1
- %tmp4220 = getelementptr inbounds float* %tmp4219, i64 1
- %tmp4221 = getelementptr inbounds float* %tmp4220, i64 1
- %tmp4222 = getelementptr inbounds float* %tmp4221, i64 1
- %tmp4223 = getelementptr inbounds float* %tmp4222, i64 1
- %tmp4224 = getelementptr inbounds float* %tmp4223, i64 1
- %tmp4225 = getelementptr inbounds float* %tmp4224, i64 1
- %tmp4226 = getelementptr inbounds float* %tmp4225, i64 1
- %tmp4227 = getelementptr inbounds float* %tmp4226, i64 1
- %tmp4228 = getelementptr inbounds float* %tmp4227, i64 1
- %tmp4229 = getelementptr inbounds float* %tmp4228, i64 1
- %tmp4230 = getelementptr inbounds float* %tmp4229, i64 1
- %tmp4231 = getelementptr inbounds float* %tmp4230, i64 1
- %tmp4232 = getelementptr inbounds float* %tmp4231, i64 1
- %tmp4233 = getelementptr inbounds float* %tmp4232, i64 1
- %tmp4234 = getelementptr inbounds float* %tmp4233, i64 1
- %tmp4235 = getelementptr inbounds float* %tmp4234, i64 1
- %tmp4236 = getelementptr inbounds float* %tmp4235, i64 1
- %tmp4237 = getelementptr inbounds float* %tmp4236, i64 1
- %tmp4238 = getelementptr inbounds float* %tmp4237, i64 1
- %tmp4239 = getelementptr inbounds float* %tmp4238, i64 1
- %tmp4240 = getelementptr inbounds float* %tmp4239, i64 1
- %tmp4241 = getelementptr inbounds float* %tmp4240, i64 1
- %tmp4242 = getelementptr inbounds float* %tmp4241, i64 1
- %tmp4243 = getelementptr inbounds float* %tmp4242, i64 1
- %tmp4244 = getelementptr inbounds float* %tmp4243, i64 1
- %tmp4245 = getelementptr inbounds float* %tmp4244, i64 1
- %tmp4246 = getelementptr inbounds float* %tmp4245, i64 1
- %tmp4247 = getelementptr inbounds float* %tmp4246, i64 1
- %tmp4248 = getelementptr inbounds float* %tmp4247, i64 1
- %tmp4249 = getelementptr inbounds float* %tmp4248, i64 1
- %tmp4250 = getelementptr inbounds float* %tmp4249, i64 1
- %tmp4251 = getelementptr inbounds float* %tmp4250, i64 1
- %tmp4252 = getelementptr inbounds float* %tmp4251, i64 1
- %tmp4253 = getelementptr inbounds float* %tmp4252, i64 1
- %tmp4254 = getelementptr inbounds float* %tmp4253, i64 1
- %tmp4255 = getelementptr inbounds float* %tmp4254, i64 1
- %tmp4256 = getelementptr inbounds float* %tmp4255, i64 1
- %tmp4257 = getelementptr inbounds float* %tmp4256, i64 1
- %tmp4258 = getelementptr inbounds float* %tmp4257, i64 1
- %tmp4259 = getelementptr inbounds float* %tmp4258, i64 1
- %tmp4260 = getelementptr inbounds float* %tmp4259, i64 1
- %tmp4261 = getelementptr inbounds float* %tmp4260, i64 1
- %tmp4262 = getelementptr inbounds float* %tmp4261, i64 1
- %tmp4263 = getelementptr inbounds float* %tmp4262, i64 1
- %tmp4264 = getelementptr inbounds float* %tmp4263, i64 1
- %tmp4265 = getelementptr inbounds float* %tmp4264, i64 1
- %tmp4266 = getelementptr inbounds float* %tmp4265, i64 1
- %tmp4267 = getelementptr inbounds float* %tmp4266, i64 1
- %tmp4268 = getelementptr inbounds float* %tmp4267, i64 1
- %tmp4269 = getelementptr inbounds float* %tmp4268, i64 1
- %tmp4270 = getelementptr inbounds float* %tmp4269, i64 1
- %tmp4271 = getelementptr inbounds float* %tmp4270, i64 1
- %tmp4272 = getelementptr inbounds float* %tmp4271, i64 1
- %tmp4273 = getelementptr inbounds float* %tmp4272, i64 1
- %tmp4274 = getelementptr inbounds float* %tmp4273, i64 1
- %tmp4275 = getelementptr inbounds float* %tmp4274, i64 1
- %tmp4276 = getelementptr inbounds float* %tmp4275, i64 1
- %tmp4277 = getelementptr inbounds float* %tmp4276, i64 1
- %tmp4278 = getelementptr inbounds float* %tmp4277, i64 1
- %tmp4279 = getelementptr inbounds float* %tmp4278, i64 1
- %tmp4280 = getelementptr inbounds float* %tmp4279, i64 1
- %tmp4281 = getelementptr inbounds float* %tmp4280, i64 1
- %tmp4282 = getelementptr inbounds float* %tmp4281, i64 1
- %tmp4283 = getelementptr inbounds float* %tmp4282, i64 1
- %tmp4284 = getelementptr inbounds float* %tmp4283, i64 1
- %tmp4285 = getelementptr inbounds float* %tmp4284, i64 1
- %tmp4286 = getelementptr inbounds float* %tmp4285, i64 1
- %tmp4287 = getelementptr inbounds float* %tmp4286, i64 1
- %tmp4288 = getelementptr inbounds float* %tmp4287, i64 1
- %tmp4289 = getelementptr inbounds float* %tmp4288, i64 1
- %tmp4290 = getelementptr inbounds float* %tmp4289, i64 1
- %tmp4291 = getelementptr inbounds float* %tmp4290, i64 1
- %tmp4292 = getelementptr inbounds float* %tmp4291, i64 1
- %tmp4293 = getelementptr inbounds float* %tmp4292, i64 1
- %tmp4294 = getelementptr inbounds float* %tmp4293, i64 1
- %tmp4295 = getelementptr inbounds float* %tmp4294, i64 1
- %tmp4296 = getelementptr inbounds float* %tmp4295, i64 1
- %tmp4297 = getelementptr inbounds float* %tmp4296, i64 1
- %tmp4298 = getelementptr inbounds float* %tmp4297, i64 1
- %tmp4299 = getelementptr inbounds float* %tmp4298, i64 1
- %tmp4300 = getelementptr inbounds float* %tmp4299, i64 1
- %tmp4301 = getelementptr inbounds float* %tmp4300, i64 1
- %tmp4302 = getelementptr inbounds float* %tmp4301, i64 1
- %tmp4303 = getelementptr inbounds float* %tmp4302, i64 1
- %tmp4304 = getelementptr inbounds float* %tmp4303, i64 1
- %tmp4305 = getelementptr inbounds float* %tmp4304, i64 1
- %tmp4306 = getelementptr inbounds float* %tmp4305, i64 1
- %tmp4307 = getelementptr inbounds float* %tmp4306, i64 1
- %tmp4308 = getelementptr inbounds float* %tmp4307, i64 1
- %tmp4309 = getelementptr inbounds float* %tmp4308, i64 1
- %tmp4310 = getelementptr inbounds float* %tmp4309, i64 1
- %tmp4311 = getelementptr inbounds float* %tmp4310, i64 1
- %tmp4312 = getelementptr inbounds float* %tmp4311, i64 1
- %tmp4313 = getelementptr inbounds float* %tmp4312, i64 1
- %tmp4314 = getelementptr inbounds float* %tmp4313, i64 1
- %tmp4315 = getelementptr inbounds float* %tmp4314, i64 1
- %tmp4316 = getelementptr inbounds float* %tmp4315, i64 1
- %tmp4317 = getelementptr inbounds float* %tmp4316, i64 1
- %tmp4318 = getelementptr inbounds float* %tmp4317, i64 1
- %tmp4319 = getelementptr inbounds float* %tmp4318, i64 1
- %tmp4320 = getelementptr inbounds float* %tmp4319, i64 1
- %tmp4321 = getelementptr inbounds float* %tmp4320, i64 1
- %tmp4322 = getelementptr inbounds float* %tmp4321, i64 1
- %tmp4323 = getelementptr inbounds float* %tmp4322, i64 1
- %tmp4324 = getelementptr inbounds float* %tmp4323, i64 1
- %tmp4325 = getelementptr inbounds float* %tmp4324, i64 1
- %tmp4326 = getelementptr inbounds float* %tmp4325, i64 1
- %tmp4327 = getelementptr inbounds float* %tmp4326, i64 1
- %tmp4328 = getelementptr inbounds float* %tmp4327, i64 1
- %tmp4329 = getelementptr inbounds float* %tmp4328, i64 1
- %tmp4330 = getelementptr inbounds float* %tmp4329, i64 1
- %tmp4331 = getelementptr inbounds float* %tmp4330, i64 1
- %tmp4332 = getelementptr inbounds float* %tmp4331, i64 1
- %tmp4333 = getelementptr inbounds float* %tmp4332, i64 1
- %tmp4334 = getelementptr inbounds float* %tmp4333, i64 1
- %tmp4335 = getelementptr inbounds float* %tmp4334, i64 1
- %tmp4336 = getelementptr inbounds float* %tmp4335, i64 1
- %tmp4337 = getelementptr inbounds float* %tmp4336, i64 1
- %tmp4338 = getelementptr inbounds float* %tmp4337, i64 1
- %tmp4339 = getelementptr inbounds float* %tmp4338, i64 1
- %tmp4340 = getelementptr inbounds float* %tmp4339, i64 1
- %tmp4341 = getelementptr inbounds float* %tmp4340, i64 1
- %tmp4342 = getelementptr inbounds float* %tmp4341, i64 1
- %tmp4343 = getelementptr inbounds float* %tmp4342, i64 1
- %tmp4344 = getelementptr inbounds float* %tmp4343, i64 1
- %tmp4345 = getelementptr inbounds float* %tmp4344, i64 1
- %tmp4346 = getelementptr inbounds float* %tmp4345, i64 1
- %tmp4347 = getelementptr inbounds float* %tmp4346, i64 1
- %tmp4348 = getelementptr inbounds float* %tmp4347, i64 1
- %tmp4349 = getelementptr inbounds float* %tmp4348, i64 1
- %tmp4350 = getelementptr inbounds float* %tmp4349, i64 1
- %tmp4351 = getelementptr inbounds float* %tmp4350, i64 1
- %tmp4352 = getelementptr inbounds float* %tmp4351, i64 1
- %tmp4353 = getelementptr inbounds float* %tmp4352, i64 1
- %tmp4354 = getelementptr inbounds float* %tmp4353, i64 1
- %tmp4355 = getelementptr inbounds float* %tmp4354, i64 1
- %tmp4356 = getelementptr inbounds float* %tmp4355, i64 1
- %tmp4357 = getelementptr inbounds float* %tmp4356, i64 1
- %tmp4358 = getelementptr inbounds float* %tmp4357, i64 1
- %tmp4359 = getelementptr inbounds float* %tmp4358, i64 1
- %tmp4360 = getelementptr inbounds float* %tmp4359, i64 1
- %tmp4361 = getelementptr inbounds float* %tmp4360, i64 1
- %tmp4362 = getelementptr inbounds float* %tmp4361, i64 1
- %tmp4363 = getelementptr inbounds float* %tmp4362, i64 1
- %tmp4364 = getelementptr inbounds float* %tmp4363, i64 1
- %tmp4365 = getelementptr inbounds float* %tmp4364, i64 1
- %tmp4366 = getelementptr inbounds float* %tmp4365, i64 1
- %tmp4367 = getelementptr inbounds float* %tmp4366, i64 1
- %tmp4368 = getelementptr inbounds float* %tmp4367, i64 1
- %tmp4369 = getelementptr inbounds float* %tmp4368, i64 1
- %tmp4370 = getelementptr inbounds float* %tmp4369, i64 1
- %tmp4371 = getelementptr inbounds float* %tmp4370, i64 1
- %tmp4372 = getelementptr inbounds float* %tmp4371, i64 1
- %tmp4373 = getelementptr inbounds float* %tmp4372, i64 1
- %tmp4374 = getelementptr inbounds float* %tmp4373, i64 1
- %tmp4375 = getelementptr inbounds float* %tmp4374, i64 1
- %tmp4376 = getelementptr inbounds float* %tmp4375, i64 1
- %tmp4377 = getelementptr inbounds float* %tmp4376, i64 1
- %tmp4378 = getelementptr inbounds float* %tmp4377, i64 1
- %tmp4379 = getelementptr inbounds float* %tmp4378, i64 1
- %tmp4380 = getelementptr inbounds float* %tmp4379, i64 1
- %tmp4381 = getelementptr inbounds float* %tmp4380, i64 1
- %tmp4382 = getelementptr inbounds float* %tmp4381, i64 1
- %tmp4383 = getelementptr inbounds float* %tmp4382, i64 1
- %tmp4384 = getelementptr inbounds float* %tmp4383, i64 1
- %tmp4385 = getelementptr inbounds float* %tmp4384, i64 1
- %tmp4386 = getelementptr inbounds float* %tmp4385, i64 1
- %tmp4387 = getelementptr inbounds float* %tmp4386, i64 1
- %tmp4388 = getelementptr inbounds float* %tmp4387, i64 1
- %tmp4389 = getelementptr inbounds float* %tmp4388, i64 1
- %tmp4390 = getelementptr inbounds float* %tmp4389, i64 1
- %tmp4391 = getelementptr inbounds float* %tmp4390, i64 1
- %tmp4392 = getelementptr inbounds float* %tmp4391, i64 1
- %tmp4393 = getelementptr inbounds float* %tmp4392, i64 1
- %tmp4394 = getelementptr inbounds float* %tmp4393, i64 1
- %tmp4395 = getelementptr inbounds float* %tmp4394, i64 1
- %tmp4396 = getelementptr inbounds float* %tmp4395, i64 1
- %tmp4397 = getelementptr inbounds float* %tmp4396, i64 1
- %tmp4398 = getelementptr inbounds float* %tmp4397, i64 1
- %tmp4399 = getelementptr inbounds float* %tmp4398, i64 1
- %tmp4400 = getelementptr inbounds float* %tmp4399, i64 1
- %tmp4401 = getelementptr inbounds float* %tmp4400, i64 1
- %tmp4402 = getelementptr inbounds float* %tmp4401, i64 1
- %tmp4403 = getelementptr inbounds float* %tmp4402, i64 1
- %tmp4404 = getelementptr inbounds float* %tmp4403, i64 1
- %tmp4405 = getelementptr inbounds float* %tmp4404, i64 1
- %tmp4406 = getelementptr inbounds float* %tmp4405, i64 1
- %tmp4407 = getelementptr inbounds float* %tmp4406, i64 1
- %tmp4408 = getelementptr inbounds float* %tmp4407, i64 1
- %tmp4409 = getelementptr inbounds float* %tmp4408, i64 1
- %tmp4410 = getelementptr inbounds float* %tmp4409, i64 1
- %tmp4411 = getelementptr inbounds float* %tmp4410, i64 1
- %tmp4412 = getelementptr inbounds float* %tmp4411, i64 1
- %tmp4413 = getelementptr inbounds float* %tmp4412, i64 1
- %tmp4414 = getelementptr inbounds float* %tmp4413, i64 1
- %tmp4415 = getelementptr inbounds float* %tmp4414, i64 1
- %tmp4416 = getelementptr inbounds float* %tmp4415, i64 1
- %tmp4417 = getelementptr inbounds float* %tmp4416, i64 1
- %tmp4418 = getelementptr inbounds float* %tmp4417, i64 1
- %tmp4419 = getelementptr inbounds float* %tmp4418, i64 1
- %tmp4420 = getelementptr inbounds float* %tmp4419, i64 1
- %tmp4421 = getelementptr inbounds float* %tmp4420, i64 1
- %tmp4422 = getelementptr inbounds float* %tmp4421, i64 1
- %tmp4423 = getelementptr inbounds float* %tmp4422, i64 1
- %tmp4424 = getelementptr inbounds float* %tmp4423, i64 1
- %tmp4425 = getelementptr inbounds float* %tmp4424, i64 1
- %tmp4426 = getelementptr inbounds float* %tmp4425, i64 1
- %tmp4427 = getelementptr inbounds float* %tmp4426, i64 1
- %tmp4428 = getelementptr inbounds float* %tmp4427, i64 1
- %tmp4429 = getelementptr inbounds float* %tmp4428, i64 1
- %tmp4430 = getelementptr inbounds float* %tmp4429, i64 1
- %tmp4431 = getelementptr inbounds float* %tmp4430, i64 1
- %tmp4432 = getelementptr inbounds float* %tmp4431, i64 1
- %tmp4433 = getelementptr inbounds float* %tmp4432, i64 1
- %tmp4434 = getelementptr inbounds float* %tmp4433, i64 1
- %tmp4435 = getelementptr inbounds float* %tmp4434, i64 1
- %tmp4436 = getelementptr inbounds float* %tmp4435, i64 1
- %tmp4437 = getelementptr inbounds float* %tmp4436, i64 1
- %tmp4438 = getelementptr inbounds float* %tmp4437, i64 1
- %tmp4439 = getelementptr inbounds float* %tmp4438, i64 1
- %tmp4440 = getelementptr inbounds float* %tmp4439, i64 1
- %tmp4441 = getelementptr inbounds float* %tmp4440, i64 1
- %tmp4442 = getelementptr inbounds float* %tmp4441, i64 1
- %tmp4443 = getelementptr inbounds float* %tmp4442, i64 1
- %tmp4444 = getelementptr inbounds float* %tmp4443, i64 1
- %tmp4445 = getelementptr inbounds float* %tmp4444, i64 1
- %tmp4446 = getelementptr inbounds float* %tmp4445, i64 1
- %tmp4447 = getelementptr inbounds float* %tmp4446, i64 1
- %tmp4448 = getelementptr inbounds float* %tmp4447, i64 1
- %tmp4449 = getelementptr inbounds float* %tmp4448, i64 1
- %tmp4450 = getelementptr inbounds float* %tmp4449, i64 1
- %tmp4451 = getelementptr inbounds float* %tmp4450, i64 1
- %tmp4452 = getelementptr inbounds float* %tmp4451, i64 1
- %tmp4453 = getelementptr inbounds float* %tmp4452, i64 1
- %tmp4454 = getelementptr inbounds float* %tmp4453, i64 1
- %tmp4455 = getelementptr inbounds float* %tmp4454, i64 1
- %tmp4456 = getelementptr inbounds float* %tmp4455, i64 1
- %tmp4457 = getelementptr inbounds float* %tmp4456, i64 1
- %tmp4458 = getelementptr inbounds float* %tmp4457, i64 1
- %tmp4459 = getelementptr inbounds float* %tmp4458, i64 1
- %tmp4460 = getelementptr inbounds float* %tmp4459, i64 1
- %tmp4461 = getelementptr inbounds float* %tmp4460, i64 1
- %tmp4462 = getelementptr inbounds float* %tmp4461, i64 1
- %tmp4463 = getelementptr inbounds float* %tmp4462, i64 1
- %tmp4464 = getelementptr inbounds float* %tmp4463, i64 1
- %tmp4465 = getelementptr inbounds float* %tmp4464, i64 1
- %tmp4466 = getelementptr inbounds float* %tmp4465, i64 1
- %tmp4467 = getelementptr inbounds float* %tmp4466, i64 1
- %tmp4468 = getelementptr inbounds float* %tmp4467, i64 1
- %tmp4469 = getelementptr inbounds float* %tmp4468, i64 1
- %tmp4470 = getelementptr inbounds float* %tmp4469, i64 1
- %tmp4471 = getelementptr inbounds float* %tmp4470, i64 1
- %tmp4472 = getelementptr inbounds float* %tmp4471, i64 1
- %tmp4473 = getelementptr inbounds float* %tmp4472, i64 1
- %tmp4474 = getelementptr inbounds float* %tmp4473, i64 1
- %tmp4475 = getelementptr inbounds float* %tmp4474, i64 1
- %tmp4476 = getelementptr inbounds float* %tmp4475, i64 1
- %tmp4477 = getelementptr inbounds float* %tmp4476, i64 1
- %tmp4478 = getelementptr inbounds float* %tmp4477, i64 1
- %tmp4479 = getelementptr inbounds float* %tmp4478, i64 1
- %tmp4480 = getelementptr inbounds float* %tmp4479, i64 1
- %tmp4481 = getelementptr inbounds float* %tmp4480, i64 1
- %tmp4482 = getelementptr inbounds float* %tmp4481, i64 1
- %tmp4483 = getelementptr inbounds float* %tmp4482, i64 1
- %tmp4484 = getelementptr inbounds float* %tmp4483, i64 1
- %tmp4485 = getelementptr inbounds float* %tmp4484, i64 1
- %tmp4486 = getelementptr inbounds float* %tmp4485, i64 1
- %tmp4487 = getelementptr inbounds float* %tmp4486, i64 1
- %tmp4488 = getelementptr inbounds float* %tmp4487, i64 1
- %tmp4489 = getelementptr inbounds float* %tmp4488, i64 1
- %tmp4490 = getelementptr inbounds float* %tmp4489, i64 1
- %tmp4491 = getelementptr inbounds float* %tmp4490, i64 1
- %tmp4492 = getelementptr inbounds float* %tmp4491, i64 1
- %tmp4493 = getelementptr inbounds float* %tmp4492, i64 1
- %tmp4494 = getelementptr inbounds float* %tmp4493, i64 1
- %tmp4495 = getelementptr inbounds float* %tmp4494, i64 1
- %tmp4496 = getelementptr inbounds float* %tmp4495, i64 1
- %tmp4497 = getelementptr inbounds float* %tmp4496, i64 1
- %tmp4498 = getelementptr inbounds float* %tmp4497, i64 1
- %tmp4499 = getelementptr inbounds float* %tmp4498, i64 1
- %tmp4500 = getelementptr inbounds float* %tmp4499, i64 1
- %tmp4501 = getelementptr inbounds float* %tmp4500, i64 1
- %tmp4502 = getelementptr inbounds float* %tmp4501, i64 1
- %tmp4503 = getelementptr inbounds float* %tmp4502, i64 1
- %tmp4504 = getelementptr inbounds float* %tmp4503, i64 1
- %tmp4505 = getelementptr inbounds float* %tmp4504, i64 1
- %tmp4506 = getelementptr inbounds float* %tmp4505, i64 1
- %tmp4507 = getelementptr inbounds float* %tmp4506, i64 1
- %tmp4508 = getelementptr inbounds float* %tmp4507, i64 1
- %tmp4509 = getelementptr inbounds float* %tmp4508, i64 1
- %tmp4510 = getelementptr inbounds float* %tmp4509, i64 1
- %tmp4511 = getelementptr inbounds float* %tmp4510, i64 1
- %tmp4512 = getelementptr inbounds float* %tmp4511, i64 1
- %tmp4513 = getelementptr inbounds float* %tmp4512, i64 1
- %tmp4514 = getelementptr inbounds float* %tmp4513, i64 1
- %tmp4515 = getelementptr inbounds float* %tmp4514, i64 1
- %tmp4516 = getelementptr inbounds float* %tmp4515, i64 1
- %tmp4517 = getelementptr inbounds float* %tmp4516, i64 1
- %tmp4518 = getelementptr inbounds float* %tmp4517, i64 1
- %tmp4519 = getelementptr inbounds float* %tmp4518, i64 1
- %tmp4520 = getelementptr inbounds float* %tmp4519, i64 1
- %tmp4521 = getelementptr inbounds float* %tmp4520, i64 1
- %tmp4522 = getelementptr inbounds float* %tmp4521, i64 1
- %tmp4523 = getelementptr inbounds float* %tmp4522, i64 1
- %tmp4524 = getelementptr inbounds float* %tmp4523, i64 1
- %tmp4525 = getelementptr inbounds float* %tmp4524, i64 1
- %tmp4526 = getelementptr inbounds float* %tmp4525, i64 1
- %tmp4527 = getelementptr inbounds float* %tmp4526, i64 1
- %tmp4528 = getelementptr inbounds float* %tmp4527, i64 1
- %tmp4529 = getelementptr inbounds float* %tmp4528, i64 1
- %tmp4530 = getelementptr inbounds float* %tmp4529, i64 1
- %tmp4531 = getelementptr inbounds float* %tmp4530, i64 1
- %tmp4532 = getelementptr inbounds float* %tmp4531, i64 1
- %tmp4533 = getelementptr inbounds float* %tmp4532, i64 1
- %tmp4534 = getelementptr inbounds float* %tmp4533, i64 1
- %tmp4535 = getelementptr inbounds float* %tmp4534, i64 1
- %tmp4536 = getelementptr inbounds float* %tmp4535, i64 1
- %tmp4537 = getelementptr inbounds float* %tmp4536, i64 1
- %tmp4538 = getelementptr inbounds float* %tmp4537, i64 1
- %tmp4539 = getelementptr inbounds float* %tmp4538, i64 1
- %tmp4540 = getelementptr inbounds float* %tmp4539, i64 1
- %tmp4541 = getelementptr inbounds float* %tmp4540, i64 1
- %tmp4542 = getelementptr inbounds float* %tmp4541, i64 1
- %tmp4543 = getelementptr inbounds float* %tmp4542, i64 1
- %tmp4544 = getelementptr inbounds float* %tmp4543, i64 1
- %tmp4545 = getelementptr inbounds float* %tmp4544, i64 1
- %tmp4546 = getelementptr inbounds float* %tmp4545, i64 1
- %tmp4547 = getelementptr inbounds float* %tmp4546, i64 1
- %tmp4548 = getelementptr inbounds float* %tmp4547, i64 1
- %tmp4549 = getelementptr inbounds float* %tmp4548, i64 1
- %tmp4550 = getelementptr inbounds float* %tmp4549, i64 1
- %tmp4551 = getelementptr inbounds float* %tmp4550, i64 1
- %tmp4552 = getelementptr inbounds float* %tmp4551, i64 1
- %tmp4553 = getelementptr inbounds float* %tmp4552, i64 1
- %tmp4554 = getelementptr inbounds float* %tmp4553, i64 1
- %tmp4555 = getelementptr inbounds float* %tmp4554, i64 1
- %tmp4556 = getelementptr inbounds float* %tmp4555, i64 1
- %tmp4557 = getelementptr inbounds float* %tmp4556, i64 1
- %tmp4558 = getelementptr inbounds float* %tmp4557, i64 1
- %tmp4559 = getelementptr inbounds float* %tmp4558, i64 1
- %tmp4560 = getelementptr inbounds float* %tmp4559, i64 1
- %tmp4561 = getelementptr inbounds float* %tmp4560, i64 1
- %tmp4562 = getelementptr inbounds float* %tmp4561, i64 1
- %tmp4563 = getelementptr inbounds float* %tmp4562, i64 1
- %tmp4564 = getelementptr inbounds float* %tmp4563, i64 1
- %tmp4565 = getelementptr inbounds float* %tmp4564, i64 1
- %tmp4566 = getelementptr inbounds float* %tmp4565, i64 1
- %tmp4567 = getelementptr inbounds float* %tmp4566, i64 1
- %tmp4568 = getelementptr inbounds float* %tmp4567, i64 1
- %tmp4569 = getelementptr inbounds float* %tmp4568, i64 1
- %tmp4570 = getelementptr inbounds float* %tmp4569, i64 1
- %tmp4571 = getelementptr inbounds float* %tmp4570, i64 1
- %tmp4572 = getelementptr inbounds float* %tmp4571, i64 1
- %tmp4573 = getelementptr inbounds float* %tmp4572, i64 1
- %tmp4574 = getelementptr inbounds float* %tmp4573, i64 1
- %tmp4575 = getelementptr inbounds float* %tmp4574, i64 1
- %tmp4576 = getelementptr inbounds float* %tmp4575, i64 1
- %tmp4577 = getelementptr inbounds float* %tmp4576, i64 1
- %tmp4578 = getelementptr inbounds float* %tmp4577, i64 1
- %tmp4579 = getelementptr inbounds float* %tmp4578, i64 1
- %tmp4580 = getelementptr inbounds float* %tmp4579, i64 1
- %tmp4581 = getelementptr inbounds float* %tmp4580, i64 1
- %tmp4582 = getelementptr inbounds float* %tmp4581, i64 1
- %tmp4583 = getelementptr inbounds float* %tmp4582, i64 1
- %tmp4584 = getelementptr inbounds float* %tmp4583, i64 1
- %tmp4585 = getelementptr inbounds float* %tmp4584, i64 1
- %tmp4586 = getelementptr inbounds float* %tmp4585, i64 1
- %tmp4587 = getelementptr inbounds float* %tmp4586, i64 1
- %tmp4588 = getelementptr inbounds float* %tmp4587, i64 1
- %tmp4589 = getelementptr inbounds float* %tmp4588, i64 1
- %tmp4590 = getelementptr inbounds float* %tmp4589, i64 1
- %tmp4591 = getelementptr inbounds float* %tmp4590, i64 1
- %tmp4592 = getelementptr inbounds float* %tmp4591, i64 1
- %tmp4593 = getelementptr inbounds float* %tmp4592, i64 1
- %tmp4594 = getelementptr inbounds float* %tmp4593, i64 1
- %tmp4595 = getelementptr inbounds float* %tmp4594, i64 1
- %tmp4596 = getelementptr inbounds float* %tmp4595, i64 1
- %tmp4597 = getelementptr inbounds float* %tmp4596, i64 1
- %tmp4598 = getelementptr inbounds float* %tmp4597, i64 1
- %tmp4599 = getelementptr inbounds float* %tmp4598, i64 1
- %tmp4600 = getelementptr inbounds float* %tmp4599, i64 1
- %tmp4601 = getelementptr inbounds float* %tmp4600, i64 1
- %tmp4602 = getelementptr inbounds float* %tmp4601, i64 1
- %tmp4603 = getelementptr inbounds float* %tmp4602, i64 1
- %tmp4604 = getelementptr inbounds float* %tmp4603, i64 1
- %tmp4605 = getelementptr inbounds float* %tmp4604, i64 1
- %tmp4606 = getelementptr inbounds float* %tmp4605, i64 1
- %tmp4607 = getelementptr inbounds float* %tmp4606, i64 1
- %tmp4608 = getelementptr inbounds float* %tmp4607, i64 1
- %tmp4609 = getelementptr inbounds float* %tmp4608, i64 1
- %tmp4610 = getelementptr inbounds float* %tmp4609, i64 1
- %tmp4611 = getelementptr inbounds float* %tmp4610, i64 1
- %tmp4612 = getelementptr inbounds float* %tmp4611, i64 1
- %tmp4613 = getelementptr inbounds float* %tmp4612, i64 1
- %tmp4614 = getelementptr inbounds float* %tmp4613, i64 1
- %tmp4615 = getelementptr inbounds float* %tmp4614, i64 1
- %tmp4616 = getelementptr inbounds float* %tmp4615, i64 1
- %tmp4617 = getelementptr inbounds float* %tmp4616, i64 1
- %tmp4618 = getelementptr inbounds float* %tmp4617, i64 1
- %tmp4619 = getelementptr inbounds float* %tmp4618, i64 1
- %tmp4620 = getelementptr inbounds float* %tmp4619, i64 1
- %tmp4621 = getelementptr inbounds float* %tmp4620, i64 1
- %tmp4622 = getelementptr inbounds float* %tmp4621, i64 1
- %tmp4623 = getelementptr inbounds float* %tmp4622, i64 1
- %tmp4624 = getelementptr inbounds float* %tmp4623, i64 1
- %tmp4625 = getelementptr inbounds float* %tmp4624, i64 1
- %tmp4626 = getelementptr inbounds float* %tmp4625, i64 1
- %tmp4627 = getelementptr inbounds float* %tmp4626, i64 1
- %tmp4628 = getelementptr inbounds float* %tmp4627, i64 1
- %tmp4629 = getelementptr inbounds float* %tmp4628, i64 1
- %tmp4630 = getelementptr inbounds float* %tmp4629, i64 1
- %tmp4631 = getelementptr inbounds float* %tmp4630, i64 1
- %tmp4632 = getelementptr inbounds float* %tmp4631, i64 1
- %tmp4633 = getelementptr inbounds float* %tmp4632, i64 1
- %tmp4634 = getelementptr inbounds float* %tmp4633, i64 1
- %tmp4635 = getelementptr inbounds float* %tmp4634, i64 1
- %tmp4636 = getelementptr inbounds float* %tmp4635, i64 1
- %tmp4637 = getelementptr inbounds float* %tmp4636, i64 1
- %tmp4638 = getelementptr inbounds float* %tmp4637, i64 1
- %tmp4639 = getelementptr inbounds float* %tmp4638, i64 1
- %tmp4640 = getelementptr inbounds float* %tmp4639, i64 1
- %tmp4641 = getelementptr inbounds float* %tmp4640, i64 1
- %tmp4642 = getelementptr inbounds float* %tmp4641, i64 1
- %tmp4643 = getelementptr inbounds float* %tmp4642, i64 1
- %tmp4644 = getelementptr inbounds float* %tmp4643, i64 1
- %tmp4645 = getelementptr inbounds float* %tmp4644, i64 1
- %tmp4646 = getelementptr inbounds float* %tmp4645, i64 1
- %tmp4647 = getelementptr inbounds float* %tmp4646, i64 1
- %tmp4648 = getelementptr inbounds float* %tmp4647, i64 1
- %tmp4649 = getelementptr inbounds float* %tmp4648, i64 1
- %tmp4650 = getelementptr inbounds float* %tmp4649, i64 1
- %tmp4651 = getelementptr inbounds float* %tmp4650, i64 1
- %tmp4652 = getelementptr inbounds float* %tmp4651, i64 1
- %tmp4653 = getelementptr inbounds float* %tmp4652, i64 1
- %tmp4654 = getelementptr inbounds float* %tmp4653, i64 1
- %tmp4655 = getelementptr inbounds float* %tmp4654, i64 1
- %tmp4656 = getelementptr inbounds float* %tmp4655, i64 1
- %tmp4657 = getelementptr inbounds float* %tmp4656, i64 1
- %tmp4658 = getelementptr inbounds float* %tmp4657, i64 1
- %tmp4659 = getelementptr inbounds float* %tmp4658, i64 1
- %tmp4660 = getelementptr inbounds float* %tmp4659, i64 1
- %tmp4661 = getelementptr inbounds float* %tmp4660, i64 1
- %tmp4662 = getelementptr inbounds float* %tmp4661, i64 1
- %tmp4663 = getelementptr inbounds float* %tmp4662, i64 1
- %tmp4664 = getelementptr inbounds float* %tmp4663, i64 1
- %tmp4665 = getelementptr inbounds float* %tmp4664, i64 1
- %tmp4666 = getelementptr inbounds float* %tmp4665, i64 1
- %tmp4667 = getelementptr inbounds float* %tmp4666, i64 1
- %tmp4668 = getelementptr inbounds float* %tmp4667, i64 1
- %tmp4669 = getelementptr inbounds float* %tmp4668, i64 1
- %tmp4670 = getelementptr inbounds float* %tmp4669, i64 1
- %tmp4671 = getelementptr inbounds float* %tmp4670, i64 1
- %tmp4672 = getelementptr inbounds float* %tmp4671, i64 1
- %tmp4673 = getelementptr inbounds float* %tmp4672, i64 1
- %tmp4674 = getelementptr inbounds float* %tmp4673, i64 1
- %tmp4675 = getelementptr inbounds float* %tmp4674, i64 1
- %tmp4676 = getelementptr inbounds float* %tmp4675, i64 1
- %tmp4677 = getelementptr inbounds float* %tmp4676, i64 1
- %tmp4678 = getelementptr inbounds float* %tmp4677, i64 1
- %tmp4679 = getelementptr inbounds float* %tmp4678, i64 1
- %tmp4680 = getelementptr inbounds float* %tmp4679, i64 1
- %tmp4681 = getelementptr inbounds float* %tmp4680, i64 1
- %tmp4682 = getelementptr inbounds float* %tmp4681, i64 1
- %tmp4683 = getelementptr inbounds float* %tmp4682, i64 1
- %tmp4684 = getelementptr inbounds float* %tmp4683, i64 1
- %tmp4685 = getelementptr inbounds float* %tmp4684, i64 1
- %tmp4686 = getelementptr inbounds float* %tmp4685, i64 1
- %tmp4687 = getelementptr inbounds float* %tmp4686, i64 1
- %tmp4688 = getelementptr inbounds float* %tmp4687, i64 1
- %tmp4689 = getelementptr inbounds float* %tmp4688, i64 1
- %tmp4690 = getelementptr inbounds float* %tmp4689, i64 1
- %tmp4691 = getelementptr inbounds float* %tmp4690, i64 1
- %tmp4692 = getelementptr inbounds float* %tmp4691, i64 1
- %tmp4693 = getelementptr inbounds float* %tmp4692, i64 1
- %tmp4694 = getelementptr inbounds float* %tmp4693, i64 1
- %tmp4695 = getelementptr inbounds float* %tmp4694, i64 1
- %tmp4696 = getelementptr inbounds float* %tmp4695, i64 1
- %tmp4697 = getelementptr inbounds float* %tmp4696, i64 1
- %tmp4698 = getelementptr inbounds float* %tmp4697, i64 1
- %tmp4699 = getelementptr inbounds float* %tmp4698, i64 1
- %tmp4700 = getelementptr inbounds float* %tmp4699, i64 1
- %tmp4701 = getelementptr inbounds float* %tmp4700, i64 1
- %tmp4702 = getelementptr inbounds float* %tmp4701, i64 1
- %tmp4703 = getelementptr inbounds float* %tmp4702, i64 1
- %tmp4704 = getelementptr inbounds float* %tmp4703, i64 1
- %tmp4705 = getelementptr inbounds float* %tmp4704, i64 1
- %tmp4706 = getelementptr inbounds float* %tmp4705, i64 1
- %tmp4707 = getelementptr inbounds float* %tmp4706, i64 1
- %tmp4708 = getelementptr inbounds float* %tmp4707, i64 1
- %tmp4709 = getelementptr inbounds float* %tmp4708, i64 1
- %tmp4710 = getelementptr inbounds float* %tmp4709, i64 1
- %tmp4711 = getelementptr inbounds float* %tmp4710, i64 1
- %tmp4712 = getelementptr inbounds float* %tmp4711, i64 1
- %tmp4713 = getelementptr inbounds float* %tmp4712, i64 1
- %tmp4714 = getelementptr inbounds float* %tmp4713, i64 1
- %tmp4715 = getelementptr inbounds float* %tmp4714, i64 1
- %tmp4716 = getelementptr inbounds float* %tmp4715, i64 1
- %tmp4717 = getelementptr inbounds float* %tmp4716, i64 1
- %tmp4718 = getelementptr inbounds float* %tmp4717, i64 1
- %tmp4719 = getelementptr inbounds float* %tmp4718, i64 1
- %tmp4720 = getelementptr inbounds float* %tmp4719, i64 1
- %tmp4721 = getelementptr inbounds float* %tmp4720, i64 1
- %tmp4722 = getelementptr inbounds float* %tmp4721, i64 1
- %tmp4723 = getelementptr inbounds float* %tmp4722, i64 1
- %tmp4724 = getelementptr inbounds float* %tmp4723, i64 1
- %tmp4725 = getelementptr inbounds float* %tmp4724, i64 1
- %tmp4726 = getelementptr inbounds float* %tmp4725, i64 1
- %tmp4727 = getelementptr inbounds float* %tmp4726, i64 1
- %tmp4728 = getelementptr inbounds float* %tmp4727, i64 1
- %tmp4729 = getelementptr inbounds float* %tmp4728, i64 1
- %tmp4730 = getelementptr inbounds float* %tmp4729, i64 1
- %tmp4731 = getelementptr inbounds float* %tmp4730, i64 1
- %tmp4732 = getelementptr inbounds float* %tmp4731, i64 1
- %tmp4733 = getelementptr inbounds float* %tmp4732, i64 1
- %tmp4734 = getelementptr inbounds float* %tmp4733, i64 1
- %tmp4735 = getelementptr inbounds float* %tmp4734, i64 1
- %tmp4736 = getelementptr inbounds float* %tmp4735, i64 1
- %tmp4737 = getelementptr inbounds float* %tmp4736, i64 1
- %tmp4738 = getelementptr inbounds float* %tmp4737, i64 1
- %tmp4739 = getelementptr inbounds float* %tmp4738, i64 1
- %tmp4740 = getelementptr inbounds float* %tmp4739, i64 1
- %tmp4741 = getelementptr inbounds float* %tmp4740, i64 1
- %tmp4742 = getelementptr inbounds float* %tmp4741, i64 1
- %tmp4743 = getelementptr inbounds float* %tmp4742, i64 1
- %tmp4744 = getelementptr inbounds float* %tmp4743, i64 1
- %tmp4745 = getelementptr inbounds float* %tmp4744, i64 1
- %tmp4746 = getelementptr inbounds float* %tmp4745, i64 1
- %tmp4747 = getelementptr inbounds float* %tmp4746, i64 1
- %tmp4748 = getelementptr inbounds float* %tmp4747, i64 1
- %tmp4749 = getelementptr inbounds float* %tmp4748, i64 1
- %tmp4750 = getelementptr inbounds float* %tmp4749, i64 1
- %tmp4751 = getelementptr inbounds float* %tmp4750, i64 1
- %tmp4752 = getelementptr inbounds float* %tmp4751, i64 1
- %tmp4753 = getelementptr inbounds float* %tmp4752, i64 1
- %tmp4754 = getelementptr inbounds float* %tmp4753, i64 1
- %tmp4755 = getelementptr inbounds float* %tmp4754, i64 1
- %tmp4756 = getelementptr inbounds float* %tmp4755, i64 1
- %tmp4757 = getelementptr inbounds float* %tmp4756, i64 1
- %tmp4758 = getelementptr inbounds float* %tmp4757, i64 1
- %tmp4759 = getelementptr inbounds float* %tmp4758, i64 1
- %tmp4760 = getelementptr inbounds float* %tmp4759, i64 1
- %tmp4761 = getelementptr inbounds float* %tmp4760, i64 1
- %tmp4762 = getelementptr inbounds float* %tmp4761, i64 1
- %tmp4763 = getelementptr inbounds float* %tmp4762, i64 1
- %tmp4764 = getelementptr inbounds float* %tmp4763, i64 1
- %tmp4765 = getelementptr inbounds float* %tmp4764, i64 1
- %tmp4766 = getelementptr inbounds float* %tmp4765, i64 1
- %tmp4767 = getelementptr inbounds float* %tmp4766, i64 1
- %tmp4768 = getelementptr inbounds float* %tmp4767, i64 1
- %tmp4769 = getelementptr inbounds float* %tmp4768, i64 1
- %tmp4770 = getelementptr inbounds float* %tmp4769, i64 1
- %tmp4771 = getelementptr inbounds float* %tmp4770, i64 1
- %tmp4772 = getelementptr inbounds float* %tmp4771, i64 1
- %tmp4773 = getelementptr inbounds float* %tmp4772, i64 1
- %tmp4774 = getelementptr inbounds float* %tmp4773, i64 1
- %tmp4775 = getelementptr inbounds float* %tmp4774, i64 1
- %tmp4776 = getelementptr inbounds float* %tmp4775, i64 1
- %tmp4777 = getelementptr inbounds float* %tmp4776, i64 1
- %tmp4778 = getelementptr inbounds float* %tmp4777, i64 1
- %tmp4779 = getelementptr inbounds float* %tmp4778, i64 1
- %tmp4780 = getelementptr inbounds float* %tmp4779, i64 1
- %tmp4781 = getelementptr inbounds float* %tmp4780, i64 1
- %tmp4782 = getelementptr inbounds float* %tmp4781, i64 1
- %tmp4783 = getelementptr inbounds float* %tmp4782, i64 1
- %tmp4784 = getelementptr inbounds float* %tmp4783, i64 1
- %tmp4785 = getelementptr inbounds float* %tmp4784, i64 1
- %tmp4786 = getelementptr inbounds float* %tmp4785, i64 1
- %tmp4787 = getelementptr inbounds float* %tmp4786, i64 1
- %tmp4788 = getelementptr inbounds float* %tmp4787, i64 1
- %tmp4789 = getelementptr inbounds float* %tmp4788, i64 1
- %tmp4790 = getelementptr inbounds float* %tmp4789, i64 1
- %tmp4791 = getelementptr inbounds float* %tmp4790, i64 1
- %tmp4792 = getelementptr inbounds float* %tmp4791, i64 1
- %tmp4793 = getelementptr inbounds float* %tmp4792, i64 1
- %tmp4794 = getelementptr inbounds float* %tmp4793, i64 1
- %tmp4795 = getelementptr inbounds float* %tmp4794, i64 1
- %tmp4796 = getelementptr inbounds float* %tmp4795, i64 1
- %tmp4797 = getelementptr inbounds float* %tmp4796, i64 1
- %tmp4798 = getelementptr inbounds float* %tmp4797, i64 1
- %tmp4799 = getelementptr inbounds float* %tmp4798, i64 1
- %tmp4800 = getelementptr inbounds float* %tmp4799, i64 1
- %tmp4801 = getelementptr inbounds float* %tmp4800, i64 1
- %tmp4802 = getelementptr inbounds float* %tmp4801, i64 1
- %tmp4803 = getelementptr inbounds float* %tmp4802, i64 1
- %tmp4804 = getelementptr inbounds float* %tmp4803, i64 1
- %tmp4805 = getelementptr inbounds float* %tmp4804, i64 1
- %tmp4806 = getelementptr inbounds float* %tmp4805, i64 1
- %tmp4807 = getelementptr inbounds float* %tmp4806, i64 1
- %tmp4808 = getelementptr inbounds float* %tmp4807, i64 1
- %tmp4809 = getelementptr inbounds float* %tmp4808, i64 1
- %tmp4810 = getelementptr inbounds float* %tmp4809, i64 1
- %tmp4811 = getelementptr inbounds float* %tmp4810, i64 1
- %tmp4812 = getelementptr inbounds float* %tmp4811, i64 1
- %tmp4813 = getelementptr inbounds float* %tmp4812, i64 1
- %tmp4814 = getelementptr inbounds float* %tmp4813, i64 1
- %tmp4815 = getelementptr inbounds float* %tmp4814, i64 1
- %tmp4816 = getelementptr inbounds float* %tmp4815, i64 1
- %tmp4817 = getelementptr inbounds float* %tmp4816, i64 1
- %tmp4818 = getelementptr inbounds float* %tmp4817, i64 1
- %tmp4819 = getelementptr inbounds float* %tmp4818, i64 1
- %tmp4820 = getelementptr inbounds float* %tmp4819, i64 1
- %tmp4821 = getelementptr inbounds float* %tmp4820, i64 1
- %tmp4822 = getelementptr inbounds float* %tmp4821, i64 1
- %tmp4823 = getelementptr inbounds float* %tmp4822, i64 1
- %tmp4824 = getelementptr inbounds float* %tmp4823, i64 1
- %tmp4825 = getelementptr inbounds float* %tmp4824, i64 1
- %tmp4826 = getelementptr inbounds float* %tmp4825, i64 1
- %tmp4827 = getelementptr inbounds float* %tmp4826, i64 1
- %tmp4828 = getelementptr inbounds float* %tmp4827, i64 1
- %tmp4829 = getelementptr inbounds float* %tmp4828, i64 1
- %tmp4830 = getelementptr inbounds float* %tmp4829, i64 1
- %tmp4831 = getelementptr inbounds float* %tmp4830, i64 1
- %tmp4832 = getelementptr inbounds float* %tmp4831, i64 1
- %tmp4833 = getelementptr inbounds float* %tmp4832, i64 1
- %tmp4834 = getelementptr inbounds float* %tmp4833, i64 1
- %tmp4835 = getelementptr inbounds float* %tmp4834, i64 1
- %tmp4836 = getelementptr inbounds float* %tmp4835, i64 1
- %tmp4837 = getelementptr inbounds float* %tmp4836, i64 1
- %tmp4838 = getelementptr inbounds float* %tmp4837, i64 1
- %tmp4839 = getelementptr inbounds float* %tmp4838, i64 1
- %tmp4840 = getelementptr inbounds float* %tmp4839, i64 1
- %tmp4841 = getelementptr inbounds float* %tmp4840, i64 1
- %tmp4842 = getelementptr inbounds float* %tmp4841, i64 1
- %tmp4843 = getelementptr inbounds float* %tmp4842, i64 1
- %tmp4844 = getelementptr inbounds float* %tmp4843, i64 1
- %tmp4845 = getelementptr inbounds float* %tmp4844, i64 1
- %tmp4846 = getelementptr inbounds float* %tmp4845, i64 1
- %tmp4847 = getelementptr inbounds float* %tmp4846, i64 1
- %tmp4848 = getelementptr inbounds float* %tmp4847, i64 1
- %tmp4849 = getelementptr inbounds float* %tmp4848, i64 1
- %tmp4850 = getelementptr inbounds float* %tmp4849, i64 1
- %tmp4851 = getelementptr inbounds float* %tmp4850, i64 1
- %tmp4852 = getelementptr inbounds float* %tmp4851, i64 1
- %tmp4853 = getelementptr inbounds float* %tmp4852, i64 1
- %tmp4854 = getelementptr inbounds float* %tmp4853, i64 1
- %tmp4855 = getelementptr inbounds float* %tmp4854, i64 1
- %tmp4856 = getelementptr inbounds float* %tmp4855, i64 1
- %tmp4857 = getelementptr inbounds float* %tmp4856, i64 1
- %tmp4858 = getelementptr inbounds float* %tmp4857, i64 1
- %tmp4859 = getelementptr inbounds float* %tmp4858, i64 1
- %tmp4860 = getelementptr inbounds float* %tmp4859, i64 1
- %tmp4861 = getelementptr inbounds float* %tmp4860, i64 1
- %tmp4862 = getelementptr inbounds float* %tmp4861, i64 1
- %tmp4863 = getelementptr inbounds float* %tmp4862, i64 1
- %tmp4864 = getelementptr inbounds float* %tmp4863, i64 1
- %tmp4865 = getelementptr inbounds float* %tmp4864, i64 1
- %tmp4866 = getelementptr inbounds float* %tmp4865, i64 1
- %tmp4867 = getelementptr inbounds float* %tmp4866, i64 1
- %tmp4868 = getelementptr inbounds float* %tmp4867, i64 1
- %tmp4869 = getelementptr inbounds float* %tmp4868, i64 1
- %tmp4870 = getelementptr inbounds float* %tmp4869, i64 1
- %tmp4871 = getelementptr inbounds float* %tmp4870, i64 1
- %tmp4872 = getelementptr inbounds float* %tmp4871, i64 1
- %tmp4873 = getelementptr inbounds float* %tmp4872, i64 1
- %tmp4874 = getelementptr inbounds float* %tmp4873, i64 1
- %tmp4875 = getelementptr inbounds float* %tmp4874, i64 1
- %tmp4876 = getelementptr inbounds float* %tmp4875, i64 1
- %tmp4877 = getelementptr inbounds float* %tmp4876, i64 1
- %tmp4878 = getelementptr inbounds float* %tmp4877, i64 1
- %tmp4879 = getelementptr inbounds float* %tmp4878, i64 1
- %tmp4880 = getelementptr inbounds float* %tmp4879, i64 1
- %tmp4881 = getelementptr inbounds float* %tmp4880, i64 1
- %tmp4882 = getelementptr inbounds float* %tmp4881, i64 1
- %tmp4883 = getelementptr inbounds float* %tmp4882, i64 1
- %tmp4884 = getelementptr inbounds float* %tmp4883, i64 1
- %tmp4885 = getelementptr inbounds float* %tmp4884, i64 1
- %tmp4886 = getelementptr inbounds float* %tmp4885, i64 1
- %tmp4887 = getelementptr inbounds float* %tmp4886, i64 1
- %tmp4888 = getelementptr inbounds float* %tmp4887, i64 1
- %tmp4889 = getelementptr inbounds float* %tmp4888, i64 1
- %tmp4890 = getelementptr inbounds float* %tmp4889, i64 1
- %tmp4891 = getelementptr inbounds float* %tmp4890, i64 1
- %tmp4892 = getelementptr inbounds float* %tmp4891, i64 1
- %tmp4893 = getelementptr inbounds float* %tmp4892, i64 1
- %tmp4894 = getelementptr inbounds float* %tmp4893, i64 1
- %tmp4895 = getelementptr inbounds float* %tmp4894, i64 1
- %tmp4896 = getelementptr inbounds float* %tmp4895, i64 1
- %tmp4897 = getelementptr inbounds float* %tmp4896, i64 1
- %tmp4898 = getelementptr inbounds float* %tmp4897, i64 1
- %tmp4899 = getelementptr inbounds float* %tmp4898, i64 1
- %tmp4900 = getelementptr inbounds float* %tmp4899, i64 1
- %tmp4901 = getelementptr inbounds float* %tmp4900, i64 1
- %tmp4902 = getelementptr inbounds float* %tmp4901, i64 1
- %tmp4903 = getelementptr inbounds float* %tmp4902, i64 1
- %tmp4904 = getelementptr inbounds float* %tmp4903, i64 1
- %tmp4905 = getelementptr inbounds float* %tmp4904, i64 1
- %tmp4906 = getelementptr inbounds float* %tmp4905, i64 1
- %tmp4907 = getelementptr inbounds float* %tmp4906, i64 1
- %tmp4908 = getelementptr inbounds float* %tmp4907, i64 1
- %tmp4909 = getelementptr inbounds float* %tmp4908, i64 1
- %tmp4910 = getelementptr inbounds float* %tmp4909, i64 1
- %tmp4911 = getelementptr inbounds float* %tmp4910, i64 1
- %tmp4912 = getelementptr inbounds float* %tmp4911, i64 1
- %tmp4913 = getelementptr inbounds float* %tmp4912, i64 1
- %tmp4914 = getelementptr inbounds float* %tmp4913, i64 1
- %tmp4915 = getelementptr inbounds float* %tmp4914, i64 1
- %tmp4916 = getelementptr inbounds float* %tmp4915, i64 1
- %tmp4917 = getelementptr inbounds float* %tmp4916, i64 1
- %tmp4918 = getelementptr inbounds float* %tmp4917, i64 1
- %tmp4919 = getelementptr inbounds float* %tmp4918, i64 1
- %tmp4920 = getelementptr inbounds float* %tmp4919, i64 1
- %tmp4921 = getelementptr inbounds float* %tmp4920, i64 1
- %tmp4922 = getelementptr inbounds float* %tmp4921, i64 1
- %tmp4923 = getelementptr inbounds float* %tmp4922, i64 1
- %tmp4924 = getelementptr inbounds float* %tmp4923, i64 1
- %tmp4925 = getelementptr inbounds float* %tmp4924, i64 1
- %tmp4926 = getelementptr inbounds float* %tmp4925, i64 1
- %tmp4927 = getelementptr inbounds float* %tmp4926, i64 1
- %tmp4928 = getelementptr inbounds float* %tmp4927, i64 1
- %tmp4929 = getelementptr inbounds float* %tmp4928, i64 1
- %tmp4930 = getelementptr inbounds float* %tmp4929, i64 1
- %tmp4931 = getelementptr inbounds float* %tmp4930, i64 1
- %tmp4932 = getelementptr inbounds float* %tmp4931, i64 1
- %tmp4933 = getelementptr inbounds float* %tmp4932, i64 1
- %tmp4934 = getelementptr inbounds float* %tmp4933, i64 1
- %tmp4935 = getelementptr inbounds float* %tmp4934, i64 1
- %tmp4936 = getelementptr inbounds float* %tmp4935, i64 1
- %tmp4937 = getelementptr inbounds float* %tmp4936, i64 1
- %tmp4938 = getelementptr inbounds float* %tmp4937, i64 1
- %tmp4939 = getelementptr inbounds float* %tmp4938, i64 1
- %tmp4940 = getelementptr inbounds float* %tmp4939, i64 1
- %tmp4941 = getelementptr inbounds float* %tmp4940, i64 1
- %tmp4942 = getelementptr inbounds float* %tmp4941, i64 1
- %tmp4943 = getelementptr inbounds float* %tmp4942, i64 1
- %tmp4944 = getelementptr inbounds float* %tmp4943, i64 1
- %tmp4945 = getelementptr inbounds float* %tmp4944, i64 1
- %tmp4946 = getelementptr inbounds float* %tmp4945, i64 1
- %tmp4947 = getelementptr inbounds float* %tmp4946, i64 1
- %tmp4948 = getelementptr inbounds float* %tmp4947, i64 1
- %tmp4949 = getelementptr inbounds float* %tmp4948, i64 1
- %tmp4950 = getelementptr inbounds float* %tmp4949, i64 1
- %tmp4951 = getelementptr inbounds float* %tmp4950, i64 1
- %tmp4952 = getelementptr inbounds float* %tmp4951, i64 1
- %tmp4953 = getelementptr inbounds float* %tmp4952, i64 1
- %tmp4954 = getelementptr inbounds float* %tmp4953, i64 1
- %tmp4955 = getelementptr inbounds float* %tmp4954, i64 1
- %tmp4956 = getelementptr inbounds float* %tmp4955, i64 1
- %tmp4957 = getelementptr inbounds float* %tmp4956, i64 1
- %tmp4958 = getelementptr inbounds float* %tmp4957, i64 1
- %tmp4959 = getelementptr inbounds float* %tmp4958, i64 1
- %tmp4960 = getelementptr inbounds float* %tmp4959, i64 1
- %tmp4961 = getelementptr inbounds float* %tmp4960, i64 1
- %tmp4962 = getelementptr inbounds float* %tmp4961, i64 1
- %tmp4963 = getelementptr inbounds float* %tmp4962, i64 1
- %tmp4964 = getelementptr inbounds float* %tmp4963, i64 1
- %tmp4965 = getelementptr inbounds float* %tmp4964, i64 1
- %tmp4966 = getelementptr inbounds float* %tmp4965, i64 1
- %tmp4967 = getelementptr inbounds float* %tmp4966, i64 1
- %tmp4968 = getelementptr inbounds float* %tmp4967, i64 1
- %tmp4969 = getelementptr inbounds float* %tmp4968, i64 1
- %tmp4970 = getelementptr inbounds float* %tmp4969, i64 1
- %tmp4971 = getelementptr inbounds float* %tmp4970, i64 1
- %tmp4972 = getelementptr inbounds float* %tmp4971, i64 1
- %tmp4973 = getelementptr inbounds float* %tmp4972, i64 1
- %tmp4974 = getelementptr inbounds float* %tmp4973, i64 1
- %tmp4975 = getelementptr inbounds float* %tmp4974, i64 1
- %tmp4976 = getelementptr inbounds float* %tmp4975, i64 1
- %tmp4977 = getelementptr inbounds float* %tmp4976, i64 1
- %tmp4978 = getelementptr inbounds float* %tmp4977, i64 1
- %tmp4979 = getelementptr inbounds float* %tmp4978, i64 1
- %tmp4980 = getelementptr inbounds float* %tmp4979, i64 1
- %tmp4981 = getelementptr inbounds float* %tmp4980, i64 1
- %tmp4982 = getelementptr inbounds float* %tmp4981, i64 1
- %tmp4983 = getelementptr inbounds float* %tmp4982, i64 1
- %tmp4984 = getelementptr inbounds float* %tmp4983, i64 1
- %tmp4985 = getelementptr inbounds float* %tmp4984, i64 1
- %tmp4986 = getelementptr inbounds float* %tmp4985, i64 1
- %tmp4987 = getelementptr inbounds float* %tmp4986, i64 1
- %tmp4988 = getelementptr inbounds float* %tmp4987, i64 1
- %tmp4989 = getelementptr inbounds float* %tmp4988, i64 1
- %tmp4990 = getelementptr inbounds float* %tmp4989, i64 1
- %tmp4991 = getelementptr inbounds float* %tmp4990, i64 1
- %tmp4992 = getelementptr inbounds float* %tmp4991, i64 1
- %tmp4993 = getelementptr inbounds float* %tmp4992, i64 1
- %tmp4994 = getelementptr inbounds float* %tmp4993, i64 1
- %tmp4995 = getelementptr inbounds float* %tmp4994, i64 1
- %tmp4996 = getelementptr inbounds float* %tmp4995, i64 1
- %tmp4997 = getelementptr inbounds float* %tmp4996, i64 1
- %tmp4998 = getelementptr inbounds float* %tmp4997, i64 1
- %tmp4999 = getelementptr inbounds float* %tmp4998, i64 1
- %tmp5000 = getelementptr inbounds float* %tmp4999, i64 1
- %tmp5001 = getelementptr inbounds float* %tmp5000, i64 1
- %tmp5002 = getelementptr inbounds float* %tmp5001, i64 1
- %tmp5003 = getelementptr inbounds float* %tmp5002, i64 1
- %tmp5004 = getelementptr inbounds float* %tmp5003, i64 1
- %tmp5005 = getelementptr inbounds float* %tmp5004, i64 1
- %tmp5006 = getelementptr inbounds float* %tmp5005, i64 1
- %tmp5007 = getelementptr inbounds float* %tmp5006, i64 1
- %tmp5008 = getelementptr inbounds float* %tmp5007, i64 1
- %tmp5009 = getelementptr inbounds float* %tmp5008, i64 1
- %tmp5010 = getelementptr inbounds float* %tmp5009, i64 1
- %tmp5011 = getelementptr inbounds float* %tmp5010, i64 1
- %tmp5012 = getelementptr inbounds float* %tmp5011, i64 1
- %tmp5013 = getelementptr inbounds float* %tmp5012, i64 1
- %tmp5014 = getelementptr inbounds float* %tmp5013, i64 1
- %tmp5015 = getelementptr inbounds float* %tmp5014, i64 1
- %tmp5016 = getelementptr inbounds float* %tmp5015, i64 1
- %tmp5017 = getelementptr inbounds float* %tmp5016, i64 1
- %tmp5018 = getelementptr inbounds float* %tmp5017, i64 1
- %tmp5019 = getelementptr inbounds float* %tmp5018, i64 1
- %tmp5020 = getelementptr inbounds float* %tmp5019, i64 1
- %tmp5021 = getelementptr inbounds float* %tmp5020, i64 1
- %tmp5022 = getelementptr inbounds float* %tmp5021, i64 1
- %tmp5023 = getelementptr inbounds float* %tmp5022, i64 1
- %tmp5024 = getelementptr inbounds float* %tmp5023, i64 1
- %tmp5025 = getelementptr inbounds float* %tmp5024, i64 1
- %tmp5026 = getelementptr inbounds float* %tmp5025, i64 1
- %tmp5027 = getelementptr inbounds float* %tmp5026, i64 1
- %tmp5028 = getelementptr inbounds float* %tmp5027, i64 1
- %tmp5029 = getelementptr inbounds float* %tmp5028, i64 1
- %tmp5030 = getelementptr inbounds float* %tmp5029, i64 1
- %tmp5031 = getelementptr inbounds float* %tmp5030, i64 1
- %tmp5032 = getelementptr inbounds float* %tmp5031, i64 1
- %tmp5033 = getelementptr inbounds float* %tmp5032, i64 1
- %tmp5034 = getelementptr inbounds float* %tmp5033, i64 1
- %tmp5035 = getelementptr inbounds float* %tmp5034, i64 1
- %tmp5036 = getelementptr inbounds float* %tmp5035, i64 1
- %tmp5037 = getelementptr inbounds float* %tmp5036, i64 1
- %tmp5038 = getelementptr inbounds float* %tmp5037, i64 1
- %tmp5039 = getelementptr inbounds float* %tmp5038, i64 1
- %tmp5040 = getelementptr inbounds float* %tmp5039, i64 1
- %tmp5041 = getelementptr inbounds float* %tmp5040, i64 1
- %tmp5042 = getelementptr inbounds float* %tmp5041, i64 1
- %tmp5043 = getelementptr inbounds float* %tmp5042, i64 1
- %tmp5044 = getelementptr inbounds float* %tmp5043, i64 1
- %tmp5045 = getelementptr inbounds float* %tmp5044, i64 1
- %tmp5046 = getelementptr inbounds float* %tmp5045, i64 1
- %tmp5047 = getelementptr inbounds float* %tmp5046, i64 1
- %tmp5048 = getelementptr inbounds float* %tmp5047, i64 1
- %tmp5049 = getelementptr inbounds float* %tmp5048, i64 1
- %tmp5050 = getelementptr inbounds float* %tmp5049, i64 1
- %tmp5051 = getelementptr inbounds float* %tmp5050, i64 1
- %tmp5052 = getelementptr inbounds float* %tmp5051, i64 1
- %tmp5053 = getelementptr inbounds float* %tmp5052, i64 1
- %tmp5054 = getelementptr inbounds float* %tmp5053, i64 1
- %tmp5055 = getelementptr inbounds float* %tmp5054, i64 1
- %tmp5056 = getelementptr inbounds float* %tmp5055, i64 1
- %tmp5057 = getelementptr inbounds float* %tmp5056, i64 1
- %tmp5058 = getelementptr inbounds float* %tmp5057, i64 1
- %tmp5059 = getelementptr inbounds float* %tmp5058, i64 1
- %tmp5060 = getelementptr inbounds float* %tmp5059, i64 1
- %tmp5061 = getelementptr inbounds float* %tmp5060, i64 1
- %tmp5062 = getelementptr inbounds float* %tmp5061, i64 1
- %tmp5063 = getelementptr inbounds float* %tmp5062, i64 1
- %tmp5064 = getelementptr inbounds float* %tmp5063, i64 1
- %tmp5065 = getelementptr inbounds float* %tmp5064, i64 1
- %tmp5066 = getelementptr inbounds float* %tmp5065, i64 1
- %tmp5067 = getelementptr inbounds float* %tmp5066, i64 1
- %tmp5068 = getelementptr inbounds float* %tmp5067, i64 1
- %tmp5069 = getelementptr inbounds float* %tmp5068, i64 1
- %tmp5070 = getelementptr inbounds float* %tmp5069, i64 1
- %tmp5071 = getelementptr inbounds float* %tmp5070, i64 1
- %tmp5072 = getelementptr inbounds float* %tmp5071, i64 1
- %tmp5073 = getelementptr inbounds float* %tmp5072, i64 1
- %tmp5074 = getelementptr inbounds float* %tmp5073, i64 1
- %tmp5075 = getelementptr inbounds float* %tmp5074, i64 1
- %tmp5076 = getelementptr inbounds float* %tmp5075, i64 1
- %tmp5077 = getelementptr inbounds float* %tmp5076, i64 1
- %tmp5078 = getelementptr inbounds float* %tmp5077, i64 1
- %tmp5079 = getelementptr inbounds float* %tmp5078, i64 1
- %tmp5080 = getelementptr inbounds float* %tmp5079, i64 1
- %tmp5081 = getelementptr inbounds float* %tmp5080, i64 1
- %tmp5082 = getelementptr inbounds float* %tmp5081, i64 1
- %tmp5083 = getelementptr inbounds float* %tmp5082, i64 1
- %tmp5084 = getelementptr inbounds float* %tmp5083, i64 1
- %tmp5085 = getelementptr inbounds float* %tmp5084, i64 1
- %tmp5086 = getelementptr inbounds float* %tmp5085, i64 1
- %tmp5087 = getelementptr inbounds float* %tmp5086, i64 1
- %tmp5088 = getelementptr inbounds float* %tmp5087, i64 1
- %tmp5089 = getelementptr inbounds float* %tmp5088, i64 1
- %tmp5090 = getelementptr inbounds float* %tmp5089, i64 1
- %tmp5091 = getelementptr inbounds float* %tmp5090, i64 1
- %tmp5092 = getelementptr inbounds float* %tmp5091, i64 1
- %tmp5093 = getelementptr inbounds float* %tmp5092, i64 1
- %tmp5094 = getelementptr inbounds float* %tmp5093, i64 1
- %tmp5095 = getelementptr inbounds float* %tmp5094, i64 1
- %tmp5096 = getelementptr inbounds float* %tmp5095, i64 1
- %tmp5097 = getelementptr inbounds float* %tmp5096, i64 1
- %tmp5098 = getelementptr inbounds float* %tmp5097, i64 1
- %tmp5099 = getelementptr inbounds float* %tmp5098, i64 1
- %tmp5100 = getelementptr inbounds float* %tmp5099, i64 1
- %tmp5101 = getelementptr inbounds float* %tmp5100, i64 1
- %tmp5102 = getelementptr inbounds float* %tmp5101, i64 1
- %tmp5103 = getelementptr inbounds float* %tmp5102, i64 1
- %tmp5104 = getelementptr inbounds float* %tmp5103, i64 1
- %tmp5105 = getelementptr inbounds float* %tmp5104, i64 1
- %tmp5106 = getelementptr inbounds float* %tmp5105, i64 1
- %tmp5107 = getelementptr inbounds float* %tmp5106, i64 1
- %tmp5108 = getelementptr inbounds float* %tmp5107, i64 1
- %tmp5109 = getelementptr inbounds float* %tmp5108, i64 1
- %tmp5110 = getelementptr inbounds float* %tmp5109, i64 1
- %tmp5111 = getelementptr inbounds float* %tmp5110, i64 1
- %tmp5112 = getelementptr inbounds float* %tmp5111, i64 1
- %tmp5113 = getelementptr inbounds float* %tmp5112, i64 1
- %tmp5114 = getelementptr inbounds float* %tmp5113, i64 1
- %tmp5115 = getelementptr inbounds float* %tmp5114, i64 1
- %tmp5116 = getelementptr inbounds float* %tmp5115, i64 1
- %tmp5117 = getelementptr inbounds float* %tmp5116, i64 1
- %tmp5118 = getelementptr inbounds float* %tmp5117, i64 1
- %tmp5119 = getelementptr inbounds float* %tmp5118, i64 1
- %tmp5120 = getelementptr inbounds float* %tmp5119, i64 1
- %tmp5121 = getelementptr inbounds float* %tmp5120, i64 1
- %tmp5122 = getelementptr inbounds float* %tmp5121, i64 1
- %tmp5123 = getelementptr inbounds float* %tmp5122, i64 1
- %tmp5124 = getelementptr inbounds float* %tmp5123, i64 1
- %tmp5125 = getelementptr inbounds float* %tmp5124, i64 1
- %tmp5126 = getelementptr inbounds float* %tmp5125, i64 1
- %tmp5127 = getelementptr inbounds float* %tmp5126, i64 1
- %tmp5128 = getelementptr inbounds float* %tmp5127, i64 1
- %tmp5129 = getelementptr inbounds float* %tmp5128, i64 1
- %tmp5130 = getelementptr inbounds float* %tmp5129, i64 1
- %tmp5131 = getelementptr inbounds float* %tmp5130, i64 1
- %tmp5132 = getelementptr inbounds float* %tmp5131, i64 1
- %tmp5133 = getelementptr inbounds float* %tmp5132, i64 1
- %tmp5134 = getelementptr inbounds float* %tmp5133, i64 1
- %tmp5135 = getelementptr inbounds float* %tmp5134, i64 1
- %tmp5136 = getelementptr inbounds float* %tmp5135, i64 1
- %tmp5137 = getelementptr inbounds float* %tmp5136, i64 1
- %tmp5138 = getelementptr inbounds float* %tmp5137, i64 1
- %tmp5139 = getelementptr inbounds float* %tmp5138, i64 1
- %tmp5140 = getelementptr inbounds float* %tmp5139, i64 1
- %tmp5141 = getelementptr inbounds float* %tmp5140, i64 1
- %tmp5142 = getelementptr inbounds float* %tmp5141, i64 1
- %tmp5143 = getelementptr inbounds float* %tmp5142, i64 1
- %tmp5144 = getelementptr inbounds float* %tmp5143, i64 1
- %tmp5145 = getelementptr inbounds float* %tmp5144, i64 1
- %tmp5146 = getelementptr inbounds float* %tmp5145, i64 1
- %tmp5147 = getelementptr inbounds float* %tmp5146, i64 1
- %tmp5148 = getelementptr inbounds float* %tmp5147, i64 1
- %tmp5149 = getelementptr inbounds float* %tmp5148, i64 1
- %tmp5150 = getelementptr inbounds float* %tmp5149, i64 1
- %tmp5151 = getelementptr inbounds float* %tmp5150, i64 1
- %tmp5152 = getelementptr inbounds float* %tmp5151, i64 1
- %tmp5153 = getelementptr inbounds float* %tmp5152, i64 1
- %tmp5154 = getelementptr inbounds float* %tmp5153, i64 1
- %tmp5155 = getelementptr inbounds float* %tmp5154, i64 1
- %tmp5156 = getelementptr inbounds float* %tmp5155, i64 1
- %tmp5157 = getelementptr inbounds float* %tmp5156, i64 1
- %tmp5158 = getelementptr inbounds float* %tmp5157, i64 1
- %tmp5159 = getelementptr inbounds float* %tmp5158, i64 1
- %tmp5160 = getelementptr inbounds float* %tmp5159, i64 1
- %tmp5161 = getelementptr inbounds float* %tmp5160, i64 1
- %tmp5162 = getelementptr inbounds float* %tmp5161, i64 1
- %tmp5163 = getelementptr inbounds float* %tmp5162, i64 1
- %tmp5164 = getelementptr inbounds float* %tmp5163, i64 1
- %tmp5165 = getelementptr inbounds float* %tmp5164, i64 1
- %tmp5166 = getelementptr inbounds float* %tmp5165, i64 1
- %tmp5167 = getelementptr inbounds float* %tmp5166, i64 1
- %tmp5168 = getelementptr inbounds float* %tmp5167, i64 1
- %tmp5169 = getelementptr inbounds float* %tmp5168, i64 1
- %tmp5170 = getelementptr inbounds float* %tmp5169, i64 1
- %tmp5171 = getelementptr inbounds float* %tmp5170, i64 1
- %tmp5172 = getelementptr inbounds float* %tmp5171, i64 1
- %tmp5173 = getelementptr inbounds float* %tmp5172, i64 1
- %tmp5174 = getelementptr inbounds float* %tmp5173, i64 1
- %tmp5175 = getelementptr inbounds float* %tmp5174, i64 1
- %tmp5176 = getelementptr inbounds float* %tmp5175, i64 1
- %tmp5177 = getelementptr inbounds float* %tmp5176, i64 1
- %tmp5178 = getelementptr inbounds float* %tmp5177, i64 1
- %tmp5179 = getelementptr inbounds float* %tmp5178, i64 1
- %tmp5180 = getelementptr inbounds float* %tmp5179, i64 1
- %tmp5181 = getelementptr inbounds float* %tmp5180, i64 1
- %tmp5182 = getelementptr inbounds float* %tmp5181, i64 1
- %tmp5183 = getelementptr inbounds float* %tmp5182, i64 1
- %tmp5184 = getelementptr inbounds float* %tmp5183, i64 1
- %tmp5185 = getelementptr inbounds float* %tmp5184, i64 1
- %tmp5186 = getelementptr inbounds float* %tmp5185, i64 1
- %tmp5187 = getelementptr inbounds float* %tmp5186, i64 1
- %tmp5188 = getelementptr inbounds float* %tmp5187, i64 1
- %tmp5189 = getelementptr inbounds float* %tmp5188, i64 1
- %tmp5190 = getelementptr inbounds float* %tmp5189, i64 1
- %tmp5191 = getelementptr inbounds float* %tmp5190, i64 1
- %tmp5192 = getelementptr inbounds float* %tmp5191, i64 1
- %tmp5193 = getelementptr inbounds float* %tmp5192, i64 1
- %tmp5194 = getelementptr inbounds float* %tmp5193, i64 1
- %tmp5195 = getelementptr inbounds float* %tmp5194, i64 1
- %tmp5196 = getelementptr inbounds float* %tmp5195, i64 1
- %tmp5197 = getelementptr inbounds float* %tmp5196, i64 1
- %tmp5198 = getelementptr inbounds float* %tmp5197, i64 1
- %tmp5199 = getelementptr inbounds float* %tmp5198, i64 1
- %tmp5200 = getelementptr inbounds float* %tmp5199, i64 1
- %tmp5201 = getelementptr inbounds float* %tmp5200, i64 1
- %tmp5202 = getelementptr inbounds float* %tmp5201, i64 1
- %tmp5203 = getelementptr inbounds float* %tmp5202, i64 1
- %tmp5204 = getelementptr inbounds float* %tmp5203, i64 1
- %tmp5205 = getelementptr inbounds float* %tmp5204, i64 1
- %tmp5206 = getelementptr inbounds float* %tmp5205, i64 1
- %tmp5207 = getelementptr inbounds float* %tmp5206, i64 1
- %tmp5208 = getelementptr inbounds float* %tmp5207, i64 1
- %tmp5209 = getelementptr inbounds float* %tmp5208, i64 1
- %tmp5210 = getelementptr inbounds float* %tmp5209, i64 1
- %tmp5211 = getelementptr inbounds float* %tmp5210, i64 1
- %tmp5212 = getelementptr inbounds float* %tmp5211, i64 1
- %tmp5213 = getelementptr inbounds float* %tmp5212, i64 1
- %tmp5214 = getelementptr inbounds float* %tmp5213, i64 1
- %tmp5215 = getelementptr inbounds float* %tmp5214, i64 1
- %tmp5216 = getelementptr inbounds float* %tmp5215, i64 1
- %tmp5217 = getelementptr inbounds float* %tmp5216, i64 1
- %tmp5218 = getelementptr inbounds float* %tmp5217, i64 1
- %tmp5219 = getelementptr inbounds float* %tmp5218, i64 1
- %tmp5220 = getelementptr inbounds float* %tmp5219, i64 1
- %tmp5221 = getelementptr inbounds float* %tmp5220, i64 1
- %tmp5222 = getelementptr inbounds float* %tmp5221, i64 1
- %tmp5223 = getelementptr inbounds float* %tmp5222, i64 1
- %tmp5224 = getelementptr inbounds float* %tmp5223, i64 1
- %tmp5225 = getelementptr inbounds float* %tmp5224, i64 1
- %tmp5226 = getelementptr inbounds float* %tmp5225, i64 1
- %tmp5227 = getelementptr inbounds float* %tmp5226, i64 1
- %tmp5228 = getelementptr inbounds float* %tmp5227, i64 1
- %tmp5229 = getelementptr inbounds float* %tmp5228, i64 1
- %tmp5230 = getelementptr inbounds float* %tmp5229, i64 1
- %tmp5231 = getelementptr inbounds float* %tmp5230, i64 1
- %tmp5232 = getelementptr inbounds float* %tmp5231, i64 1
- %tmp5233 = getelementptr inbounds float* %tmp5232, i64 1
- %tmp5234 = getelementptr inbounds float* %tmp5233, i64 1
- %tmp5235 = getelementptr inbounds float* %tmp5234, i64 1
- %tmp5236 = getelementptr inbounds float* %tmp5235, i64 1
- %tmp5237 = getelementptr inbounds float* %tmp5236, i64 1
- %tmp5238 = getelementptr inbounds float* %tmp5237, i64 1
- %tmp5239 = getelementptr inbounds float* %tmp5238, i64 1
- %tmp5240 = getelementptr inbounds float* %tmp5239, i64 1
- %tmp5241 = getelementptr inbounds float* %tmp5240, i64 1
- %tmp5242 = getelementptr inbounds float* %tmp5241, i64 1
- %tmp5243 = getelementptr inbounds float* %tmp5242, i64 1
- %tmp5244 = getelementptr inbounds float* %tmp5243, i64 1
- %tmp5245 = getelementptr inbounds float* %tmp5244, i64 1
- %tmp5246 = getelementptr inbounds float* %tmp5245, i64 1
- %tmp5247 = getelementptr inbounds float* %tmp5246, i64 1
- %tmp5248 = getelementptr inbounds float* %tmp5247, i64 1
- %tmp5249 = getelementptr inbounds float* %tmp5248, i64 1
- %tmp5250 = getelementptr inbounds float* %tmp5249, i64 1
- %tmp5251 = getelementptr inbounds float* %tmp5250, i64 1
- %tmp5252 = getelementptr inbounds float* %tmp5251, i64 1
- %tmp5253 = getelementptr inbounds float* %tmp5252, i64 1
- %tmp5254 = getelementptr inbounds float* %tmp5253, i64 1
- %tmp5255 = getelementptr inbounds float* %tmp5254, i64 1
- %tmp5256 = getelementptr inbounds float* %tmp5255, i64 1
- %tmp5257 = getelementptr inbounds float* %tmp5256, i64 1
- %tmp5258 = getelementptr inbounds float* %tmp5257, i64 1
- %tmp5259 = getelementptr inbounds float* %tmp5258, i64 1
- %tmp5260 = getelementptr inbounds float* %tmp5259, i64 1
- %tmp5261 = getelementptr inbounds float* %tmp5260, i64 1
- %tmp5262 = getelementptr inbounds float* %tmp5261, i64 1
- %tmp5263 = getelementptr inbounds float* %tmp5262, i64 1
- %tmp5264 = getelementptr inbounds float* %tmp5263, i64 1
- %tmp5265 = getelementptr inbounds float* %tmp5264, i64 1
- %tmp5266 = getelementptr inbounds float* %tmp5265, i64 1
- %tmp5267 = getelementptr inbounds float* %tmp5266, i64 1
- %tmp5268 = getelementptr inbounds float* %tmp5267, i64 1
- %tmp5269 = getelementptr inbounds float* %tmp5268, i64 1
- %tmp5270 = getelementptr inbounds float* %tmp5269, i64 1
- %tmp5271 = getelementptr inbounds float* %tmp5270, i64 1
- %tmp5272 = getelementptr inbounds float* %tmp5271, i64 1
- %tmp5273 = getelementptr inbounds float* %tmp5272, i64 1
- %tmp5274 = getelementptr inbounds float* %tmp5273, i64 1
- %tmp5275 = getelementptr inbounds float* %tmp5274, i64 1
- %tmp5276 = getelementptr inbounds float* %tmp5275, i64 1
- %tmp5277 = getelementptr inbounds float* %tmp5276, i64 1
- %tmp5278 = getelementptr inbounds float* %tmp5277, i64 1
- %tmp5279 = getelementptr inbounds float* %tmp5278, i64 1
- %tmp5280 = getelementptr inbounds float* %tmp5279, i64 1
- %tmp5281 = getelementptr inbounds float* %tmp5280, i64 1
- %tmp5282 = getelementptr inbounds float* %tmp5281, i64 1
- %tmp5283 = getelementptr inbounds float* %tmp5282, i64 1
- %tmp5284 = getelementptr inbounds float* %tmp5283, i64 1
- %tmp5285 = getelementptr inbounds float* %tmp5284, i64 1
- %tmp5286 = getelementptr inbounds float* %tmp5285, i64 1
- %tmp5287 = getelementptr inbounds float* %tmp5286, i64 1
- %tmp5288 = getelementptr inbounds float* %tmp5287, i64 1
- %tmp5289 = getelementptr inbounds float* %tmp5288, i64 1
- %tmp5290 = getelementptr inbounds float* %tmp5289, i64 1
- %tmp5291 = getelementptr inbounds float* %tmp5290, i64 1
- %tmp5292 = getelementptr inbounds float* %tmp5291, i64 1
- %tmp5293 = getelementptr inbounds float* %tmp5292, i64 1
- %tmp5294 = getelementptr inbounds float* %tmp5293, i64 1
- %tmp5295 = getelementptr inbounds float* %tmp5294, i64 1
- %tmp5296 = getelementptr inbounds float* %tmp5295, i64 1
- %tmp5297 = getelementptr inbounds float* %tmp5296, i64 1
- %tmp5298 = getelementptr inbounds float* %tmp5297, i64 1
- %tmp5299 = getelementptr inbounds float* %tmp5298, i64 1
- %tmp5300 = getelementptr inbounds float* %tmp5299, i64 1
- %tmp5301 = getelementptr inbounds float* %tmp5300, i64 1
- %tmp5302 = getelementptr inbounds float* %tmp5301, i64 1
- %tmp5303 = getelementptr inbounds float* %tmp5302, i64 1
- %tmp5304 = getelementptr inbounds float* %tmp5303, i64 1
- %tmp5305 = getelementptr inbounds float* %tmp5304, i64 1
- %tmp5306 = getelementptr inbounds float* %tmp5305, i64 1
- %tmp5307 = getelementptr inbounds float* %tmp5306, i64 1
- %tmp5308 = getelementptr inbounds float* %tmp5307, i64 1
- %tmp5309 = getelementptr inbounds float* %tmp5308, i64 1
- %tmp5310 = getelementptr inbounds float* %tmp5309, i64 1
- %tmp5311 = getelementptr inbounds float* %tmp5310, i64 1
- %tmp5312 = getelementptr inbounds float* %tmp5311, i64 1
- %tmp5313 = getelementptr inbounds float* %tmp5312, i64 1
- %tmp5314 = getelementptr inbounds float* %tmp5313, i64 1
- %tmp5315 = getelementptr inbounds float* %tmp5314, i64 1
- %tmp5316 = getelementptr inbounds float* %tmp5315, i64 1
- %tmp5317 = getelementptr inbounds float* %tmp5316, i64 1
- %tmp5318 = getelementptr inbounds float* %tmp5317, i64 1
- %tmp5319 = getelementptr inbounds float* %tmp5318, i64 1
- %tmp5320 = getelementptr inbounds float* %tmp5319, i64 1
- %tmp5321 = getelementptr inbounds float* %tmp5320, i64 1
- %tmp5322 = getelementptr inbounds float* %tmp5321, i64 1
- %tmp5323 = getelementptr inbounds float* %tmp5322, i64 1
- %tmp5324 = getelementptr inbounds float* %tmp5323, i64 1
- %tmp5325 = getelementptr inbounds float* %tmp5324, i64 1
- %tmp5326 = getelementptr inbounds float* %tmp5325, i64 1
- %tmp5327 = getelementptr inbounds float* %tmp5326, i64 1
- %tmp5328 = getelementptr inbounds float* %tmp5327, i64 1
- %tmp5329 = getelementptr inbounds float* %tmp5328, i64 1
- %tmp5330 = getelementptr inbounds float* %tmp5329, i64 1
- %tmp5331 = getelementptr inbounds float* %tmp5330, i64 1
- %tmp5332 = getelementptr inbounds float* %tmp5331, i64 1
- %tmp5333 = getelementptr inbounds float* %tmp5332, i64 1
- %tmp5334 = getelementptr inbounds float* %tmp5333, i64 1
- %tmp5335 = getelementptr inbounds float* %tmp5334, i64 1
- %tmp5336 = getelementptr inbounds float* %tmp5335, i64 1
- %tmp5337 = getelementptr inbounds float* %tmp5336, i64 1
- %tmp5338 = getelementptr inbounds float* %tmp5337, i64 1
- %tmp5339 = getelementptr inbounds float* %tmp5338, i64 1
- %tmp5340 = getelementptr inbounds float* %tmp5339, i64 1
- %tmp5341 = getelementptr inbounds float* %tmp5340, i64 1
- %tmp5342 = getelementptr inbounds float* %tmp5341, i64 1
- %tmp5343 = getelementptr inbounds float* %tmp5342, i64 1
- %tmp5344 = getelementptr inbounds float* %tmp5343, i64 1
- %tmp5345 = getelementptr inbounds float* %tmp5344, i64 1
- %tmp5346 = getelementptr inbounds float* %tmp5345, i64 1
- %tmp5347 = getelementptr inbounds float* %tmp5346, i64 1
- %tmp5348 = getelementptr inbounds float* %tmp5347, i64 1
- %tmp5349 = getelementptr inbounds float* %tmp5348, i64 1
- %tmp5350 = getelementptr inbounds float* %tmp5349, i64 1
- %tmp5351 = getelementptr inbounds float* %tmp5350, i64 1
- %tmp5352 = getelementptr inbounds float* %tmp5351, i64 1
- %tmp5353 = getelementptr inbounds float* %tmp5352, i64 1
- %tmp5354 = getelementptr inbounds float* %tmp5353, i64 1
- %tmp5355 = getelementptr inbounds float* %tmp5354, i64 1
- %tmp5356 = getelementptr inbounds float* %tmp5355, i64 1
- %tmp5357 = getelementptr inbounds float* %tmp5356, i64 1
- %tmp5358 = getelementptr inbounds float* %tmp5357, i64 1
- %tmp5359 = getelementptr inbounds float* %tmp5358, i64 1
- %tmp5360 = getelementptr inbounds float* %tmp5359, i64 1
- %tmp5361 = getelementptr inbounds float* %tmp5360, i64 1
- %tmp5362 = getelementptr inbounds float* %tmp5361, i64 1
- %tmp5363 = getelementptr inbounds float* %tmp5362, i64 1
- %tmp5364 = getelementptr inbounds float* %tmp5363, i64 1
- %tmp5365 = getelementptr inbounds float* %tmp5364, i64 1
- %tmp5366 = getelementptr inbounds float* %tmp5365, i64 1
- %tmp5367 = getelementptr inbounds float* %tmp5366, i64 1
- %tmp5368 = getelementptr inbounds float* %tmp5367, i64 1
- %tmp5369 = getelementptr inbounds float* %tmp5368, i64 1
- %tmp5370 = getelementptr inbounds float* %tmp5369, i64 1
- %tmp5371 = getelementptr inbounds float* %tmp5370, i64 1
- %tmp5372 = getelementptr inbounds float* %tmp5371, i64 1
- %tmp5373 = getelementptr inbounds float* %tmp5372, i64 1
- %tmp5374 = getelementptr inbounds float* %tmp5373, i64 1
- %tmp5375 = getelementptr inbounds float* %tmp5374, i64 1
- %tmp5376 = getelementptr inbounds float* %tmp5375, i64 1
- %tmp5377 = getelementptr inbounds float* %tmp5376, i64 1
- %tmp5378 = getelementptr inbounds float* %tmp5377, i64 1
- %tmp5379 = getelementptr inbounds float* %tmp5378, i64 1
- %tmp5380 = getelementptr inbounds float* %tmp5379, i64 1
- %tmp5381 = getelementptr inbounds float* %tmp5380, i64 1
- %tmp5382 = getelementptr inbounds float* %tmp5381, i64 1
- %tmp5383 = getelementptr inbounds float* %tmp5382, i64 1
- %tmp5384 = getelementptr inbounds float* %tmp5383, i64 1
- %tmp5385 = getelementptr inbounds float* %tmp5384, i64 1
- %tmp5386 = getelementptr inbounds float* %tmp5385, i64 1
- %tmp5387 = getelementptr inbounds float* %tmp5386, i64 1
- %tmp5388 = getelementptr inbounds float* %tmp5387, i64 1
- %tmp5389 = getelementptr inbounds float* %tmp5388, i64 1
- %tmp5390 = getelementptr inbounds float* %tmp5389, i64 1
- %tmp5391 = getelementptr inbounds float* %tmp5390, i64 1
- %tmp5392 = getelementptr inbounds float* %tmp5391, i64 1
- %tmp5393 = getelementptr inbounds float* %tmp5392, i64 1
- %tmp5394 = getelementptr inbounds float* %tmp5393, i64 1
- %tmp5395 = getelementptr inbounds float* %tmp5394, i64 1
- %tmp5396 = getelementptr inbounds float* %tmp5395, i64 1
- %tmp5397 = getelementptr inbounds float* %tmp5396, i64 1
- %tmp5398 = getelementptr inbounds float* %tmp5397, i64 1
- %tmp5399 = getelementptr inbounds float* %tmp5398, i64 1
- %tmp5400 = getelementptr inbounds float* %tmp5399, i64 1
- %tmp5401 = getelementptr inbounds float* %tmp5400, i64 1
- %tmp5402 = getelementptr inbounds float* %tmp5401, i64 1
- %tmp5403 = getelementptr inbounds float* %tmp5402, i64 1
- %tmp5404 = getelementptr inbounds float* %tmp5403, i64 1
- %tmp5405 = getelementptr inbounds float* %tmp5404, i64 1
- %tmp5406 = getelementptr inbounds float* %tmp5405, i64 1
- %tmp5407 = getelementptr inbounds float* %tmp5406, i64 1
- %tmp5408 = getelementptr inbounds float* %tmp5407, i64 1
- %tmp5409 = getelementptr inbounds float* %tmp5408, i64 1
- %tmp5410 = getelementptr inbounds float* %tmp5409, i64 1
- %tmp5411 = getelementptr inbounds float* %tmp5410, i64 1
- %tmp5412 = getelementptr inbounds float* %tmp5411, i64 1
- %tmp5413 = getelementptr inbounds float* %tmp5412, i64 1
- %tmp5414 = getelementptr inbounds float* %tmp5413, i64 1
- %tmp5415 = getelementptr inbounds float* %tmp5414, i64 1
- %tmp5416 = getelementptr inbounds float* %tmp5415, i64 1
- %tmp5417 = getelementptr inbounds float* %tmp5416, i64 1
- %tmp5418 = getelementptr inbounds float* %tmp5417, i64 1
- %tmp5419 = getelementptr inbounds float* %tmp5418, i64 1
- %tmp5420 = getelementptr inbounds float* %tmp5419, i64 1
- %tmp5421 = getelementptr inbounds float* %tmp5420, i64 1
- %tmp5422 = getelementptr inbounds float* %tmp5421, i64 1
- %tmp5423 = getelementptr inbounds float* %tmp5422, i64 1
- %tmp5424 = getelementptr inbounds float* %tmp5423, i64 1
- %tmp5425 = getelementptr inbounds float* %tmp5424, i64 1
- %tmp5426 = getelementptr inbounds float* %tmp5425, i64 1
- %tmp5427 = getelementptr inbounds float* %tmp5426, i64 1
- %tmp5428 = getelementptr inbounds float* %tmp5427, i64 1
- %tmp5429 = getelementptr inbounds float* %tmp5428, i64 1
- %tmp5430 = getelementptr inbounds float* %tmp5429, i64 1
- %tmp5431 = getelementptr inbounds float* %tmp5430, i64 1
- %tmp5432 = getelementptr inbounds float* %tmp5431, i64 1
- %tmp5433 = getelementptr inbounds float* %tmp5432, i64 1
- %tmp5434 = getelementptr inbounds float* %tmp5433, i64 1
- %tmp5435 = getelementptr inbounds float* %tmp5434, i64 1
- %tmp5436 = getelementptr inbounds float* %tmp5435, i64 1
- %tmp5437 = getelementptr inbounds float* %tmp5436, i64 1
- %tmp5438 = getelementptr inbounds float* %tmp5437, i64 1
- %tmp5439 = getelementptr inbounds float* %tmp5438, i64 1
- %tmp5440 = getelementptr inbounds float* %tmp5439, i64 1
- %tmp5441 = getelementptr inbounds float* %tmp5440, i64 1
- %tmp5442 = getelementptr inbounds float* %tmp5441, i64 1
- %tmp5443 = getelementptr inbounds float* %tmp5442, i64 1
- %tmp5444 = getelementptr inbounds float* %tmp5443, i64 1
- %tmp5445 = getelementptr inbounds float* %tmp5444, i64 1
- %tmp5446 = getelementptr inbounds float* %tmp5445, i64 1
- %tmp5447 = getelementptr inbounds float* %tmp5446, i64 1
- %tmp5448 = getelementptr inbounds float* %tmp5447, i64 1
- %tmp5449 = getelementptr inbounds float* %tmp5448, i64 1
- %tmp5450 = getelementptr inbounds float* %tmp5449, i64 1
- %tmp5451 = getelementptr inbounds float* %tmp5450, i64 1
- %tmp5452 = getelementptr inbounds float* %tmp5451, i64 1
- %tmp5453 = getelementptr inbounds float* %tmp5452, i64 1
- %tmp5454 = getelementptr inbounds float* %tmp5453, i64 1
- %tmp5455 = getelementptr inbounds float* %tmp5454, i64 1
- %tmp5456 = getelementptr inbounds float* %tmp5455, i64 1
- %tmp5457 = getelementptr inbounds float* %tmp5456, i64 1
- %tmp5458 = getelementptr inbounds float* %tmp5457, i64 1
- %tmp5459 = getelementptr inbounds float* %tmp5458, i64 1
- %tmp5460 = getelementptr inbounds float* %tmp5459, i64 1
- %tmp5461 = getelementptr inbounds float* %tmp5460, i64 1
- %tmp5462 = getelementptr inbounds float* %tmp5461, i64 1
- %tmp5463 = getelementptr inbounds float* %tmp5462, i64 1
- %tmp5464 = getelementptr inbounds float* %tmp5463, i64 1
- %tmp5465 = getelementptr inbounds float* %tmp5464, i64 1
- %tmp5466 = getelementptr inbounds float* %tmp5465, i64 1
- %tmp5467 = getelementptr inbounds float* %tmp5466, i64 1
- %tmp5468 = getelementptr inbounds float* %tmp5467, i64 1
- %tmp5469 = getelementptr inbounds float* %tmp5468, i64 1
- %tmp5470 = getelementptr inbounds float* %tmp5469, i64 1
- %tmp5471 = getelementptr inbounds float* %tmp5470, i64 1
- %tmp5472 = getelementptr inbounds float* %tmp5471, i64 1
- %tmp5473 = getelementptr inbounds float* %tmp5472, i64 1
- %tmp5474 = getelementptr inbounds float* %tmp5473, i64 1
- %tmp5475 = getelementptr inbounds float* %tmp5474, i64 1
- %tmp5476 = getelementptr inbounds float* %tmp5475, i64 1
- %tmp5477 = getelementptr inbounds float* %tmp5476, i64 1
- %tmp5478 = getelementptr inbounds float* %tmp5477, i64 1
- %tmp5479 = getelementptr inbounds float* %tmp5478, i64 1
- %tmp5480 = getelementptr inbounds float* %tmp5479, i64 1
- %tmp5481 = getelementptr inbounds float* %tmp5480, i64 1
- %tmp5482 = getelementptr inbounds float* %tmp5481, i64 1
- %tmp5483 = getelementptr inbounds float* %tmp5482, i64 1
- %tmp5484 = getelementptr inbounds float* %tmp5483, i64 1
- %tmp5485 = getelementptr inbounds float* %tmp5484, i64 1
- %tmp5486 = getelementptr inbounds float* %tmp5485, i64 1
- %tmp5487 = getelementptr inbounds float* %tmp5486, i64 1
- %tmp5488 = getelementptr inbounds float* %tmp5487, i64 1
- %tmp5489 = getelementptr inbounds float* %tmp5488, i64 1
- %tmp5490 = getelementptr inbounds float* %tmp5489, i64 1
- %tmp5491 = getelementptr inbounds float* %tmp5490, i64 1
- %tmp5492 = getelementptr inbounds float* %tmp5491, i64 1
- %tmp5493 = getelementptr inbounds float* %tmp5492, i64 1
- %tmp5494 = getelementptr inbounds float* %tmp5493, i64 1
- %tmp5495 = getelementptr inbounds float* %tmp5494, i64 1
- %tmp5496 = getelementptr inbounds float* %tmp5495, i64 1
- %tmp5497 = getelementptr inbounds float* %tmp5496, i64 1
- %tmp5498 = getelementptr inbounds float* %tmp5497, i64 1
- %tmp5499 = getelementptr inbounds float* %tmp5498, i64 1
- %tmp5500 = getelementptr inbounds float* %tmp5499, i64 1
- %tmp5501 = getelementptr inbounds float* %tmp5500, i64 1
- %tmp5502 = getelementptr inbounds float* %tmp5501, i64 1
- %tmp5503 = getelementptr inbounds float* %tmp5502, i64 1
- %tmp5504 = getelementptr inbounds float* %tmp5503, i64 1
- %tmp5505 = getelementptr inbounds float* %tmp5504, i64 1
- %tmp5506 = getelementptr inbounds float* %tmp5505, i64 1
- %tmp5507 = getelementptr inbounds float* %tmp5506, i64 1
- %tmp5508 = getelementptr inbounds float* %tmp5507, i64 1
- %tmp5509 = getelementptr inbounds float* %tmp5508, i64 1
- %tmp5510 = getelementptr inbounds float* %tmp5509, i64 1
- %tmp5511 = getelementptr inbounds float* %tmp5510, i64 1
- %tmp5512 = getelementptr inbounds float* %tmp5511, i64 1
- %tmp5513 = getelementptr inbounds float* %tmp5512, i64 1
- %tmp5514 = getelementptr inbounds float* %tmp5513, i64 1
- %tmp5515 = getelementptr inbounds float* %tmp5514, i64 1
- %tmp5516 = getelementptr inbounds float* %tmp5515, i64 1
- %tmp5517 = getelementptr inbounds float* %tmp5516, i64 1
- %tmp5518 = getelementptr inbounds float* %tmp5517, i64 1
- %tmp5519 = getelementptr inbounds float* %tmp5518, i64 1
- %tmp5520 = getelementptr inbounds float* %tmp5519, i64 1
- %tmp5521 = getelementptr inbounds float* %tmp5520, i64 1
- %tmp5522 = getelementptr inbounds float* %tmp5521, i64 1
- %tmp5523 = getelementptr inbounds float* %tmp5522, i64 1
- %tmp5524 = getelementptr inbounds float* %tmp5523, i64 1
- %tmp5525 = getelementptr inbounds float* %tmp5524, i64 1
- %tmp5526 = getelementptr inbounds float* %tmp5525, i64 1
- %tmp5527 = getelementptr inbounds float* %tmp5526, i64 1
- %tmp5528 = getelementptr inbounds float* %tmp5527, i64 1
- %tmp5529 = getelementptr inbounds float* %tmp5528, i64 1
- %tmp5530 = getelementptr inbounds float* %tmp5529, i64 1
- %tmp5531 = getelementptr inbounds float* %tmp5530, i64 1
- %tmp5532 = getelementptr inbounds float* %tmp5531, i64 1
- %tmp5533 = getelementptr inbounds float* %tmp5532, i64 1
- %tmp5534 = getelementptr inbounds float* %tmp5533, i64 1
- %tmp5535 = getelementptr inbounds float* %tmp5534, i64 1
- %tmp5536 = getelementptr inbounds float* %tmp5535, i64 1
- %tmp5537 = getelementptr inbounds float* %tmp5536, i64 1
- %tmp5538 = getelementptr inbounds float* %tmp5537, i64 1
- %tmp5539 = getelementptr inbounds float* %tmp5538, i64 1
- %tmp5540 = getelementptr inbounds float* %tmp5539, i64 1
- %tmp5541 = getelementptr inbounds float* %tmp5540, i64 1
- %tmp5542 = getelementptr inbounds float* %tmp5541, i64 1
- %tmp5543 = getelementptr inbounds float* %tmp5542, i64 1
- %tmp5544 = getelementptr inbounds float* %tmp5543, i64 1
- %tmp5545 = getelementptr inbounds float* %tmp5544, i64 1
- %tmp5546 = getelementptr inbounds float* %tmp5545, i64 1
- %tmp5547 = getelementptr inbounds float* %tmp5546, i64 1
- %tmp5548 = getelementptr inbounds float* %tmp5547, i64 1
- %tmp5549 = getelementptr inbounds float* %tmp5548, i64 1
- %tmp5550 = getelementptr inbounds float* %tmp5549, i64 1
- %tmp5551 = getelementptr inbounds float* %tmp5550, i64 1
- %tmp5552 = getelementptr inbounds float* %tmp5551, i64 1
- %tmp5553 = getelementptr inbounds float* %tmp5552, i64 1
- %tmp5554 = getelementptr inbounds float* %tmp5553, i64 1
- %tmp5555 = getelementptr inbounds float* %tmp5554, i64 1
- %tmp5556 = getelementptr inbounds float* %tmp5555, i64 1
- %tmp5557 = getelementptr inbounds float* %tmp5556, i64 1
- %tmp5558 = getelementptr inbounds float* %tmp5557, i64 1
- %tmp5559 = getelementptr inbounds float* %tmp5558, i64 1
- %tmp5560 = getelementptr inbounds float* %tmp5559, i64 1
- %tmp5561 = getelementptr inbounds float* %tmp5560, i64 1
- %tmp5562 = getelementptr inbounds float* %tmp5561, i64 1
- %tmp5563 = getelementptr inbounds float* %tmp5562, i64 1
- %tmp5564 = getelementptr inbounds float* %tmp5563, i64 1
- %tmp5565 = getelementptr inbounds float* %tmp5564, i64 1
- %tmp5566 = getelementptr inbounds float* %tmp5565, i64 1
- %tmp5567 = getelementptr inbounds float* %tmp5566, i64 1
- %tmp5568 = getelementptr inbounds float* %tmp5567, i64 1
- %tmp5569 = getelementptr inbounds float* %tmp5568, i64 1
- %tmp5570 = getelementptr inbounds float* %tmp5569, i64 1
- %tmp5571 = getelementptr inbounds float* %tmp5570, i64 1
- %tmp5572 = getelementptr inbounds float* %tmp5571, i64 1
- %tmp5573 = getelementptr inbounds float* %tmp5572, i64 1
- %tmp5574 = getelementptr inbounds float* %tmp5573, i64 1
- %tmp5575 = getelementptr inbounds float* %tmp5574, i64 1
- %tmp5576 = getelementptr inbounds float* %tmp5575, i64 1
- %tmp5577 = getelementptr inbounds float* %tmp5576, i64 1
- %tmp5578 = getelementptr inbounds float* %tmp5577, i64 1
- %tmp5579 = getelementptr inbounds float* %tmp5578, i64 1
- %tmp5580 = getelementptr inbounds float* %tmp5579, i64 1
- %tmp5581 = getelementptr inbounds float* %tmp5580, i64 1
- %tmp5582 = getelementptr inbounds float* %tmp5581, i64 1
- %tmp5583 = getelementptr inbounds float* %tmp5582, i64 1
- %tmp5584 = getelementptr inbounds float* %tmp5583, i64 1
- %tmp5585 = getelementptr inbounds float* %tmp5584, i64 1
- %tmp5586 = getelementptr inbounds float* %tmp5585, i64 1
- %tmp5587 = getelementptr inbounds float* %tmp5586, i64 1
- %tmp5588 = getelementptr inbounds float* %tmp5587, i64 1
- %tmp5589 = getelementptr inbounds float* %tmp5588, i64 1
- %tmp5590 = getelementptr inbounds float* %tmp5589, i64 1
- %tmp5591 = getelementptr inbounds float* %tmp5590, i64 1
- %tmp5592 = getelementptr inbounds float* %tmp5591, i64 1
- %tmp5593 = getelementptr inbounds float* %tmp5592, i64 1
- %tmp5594 = getelementptr inbounds float* %tmp5593, i64 1
- %tmp5595 = getelementptr inbounds float* %tmp5594, i64 1
- %tmp5596 = getelementptr inbounds float* %tmp5595, i64 1
- %tmp5597 = getelementptr inbounds float* %tmp5596, i64 1
- %tmp5598 = getelementptr inbounds float* %tmp5597, i64 1
- %tmp5599 = getelementptr inbounds float* %tmp5598, i64 1
- %tmp5600 = getelementptr inbounds float* %tmp5599, i64 1
- %tmp5601 = getelementptr inbounds float* %tmp5600, i64 1
- %tmp5602 = getelementptr inbounds float* %tmp5601, i64 1
- %tmp5603 = getelementptr inbounds float* %tmp5602, i64 1
- %tmp5604 = getelementptr inbounds float* %tmp5603, i64 1
- %tmp5605 = getelementptr inbounds float* %tmp5604, i64 1
- %tmp5606 = getelementptr inbounds float* %tmp5605, i64 1
- %tmp5607 = getelementptr inbounds float* %tmp5606, i64 1
- %tmp5608 = getelementptr inbounds float* %tmp5607, i64 1
- %tmp5609 = getelementptr inbounds float* %tmp5608, i64 1
- %tmp5610 = getelementptr inbounds float* %tmp5609, i64 1
- %tmp5611 = getelementptr inbounds float* %tmp5610, i64 1
- %tmp5612 = getelementptr inbounds float* %tmp5611, i64 1
- %tmp5613 = getelementptr inbounds float* %tmp5612, i64 1
- %tmp5614 = getelementptr inbounds float* %tmp5613, i64 1
- %tmp5615 = getelementptr inbounds float* %tmp5614, i64 1
- %tmp5616 = getelementptr inbounds float* %tmp5615, i64 1
- %tmp5617 = getelementptr inbounds float* %tmp5616, i64 1
- %tmp5618 = getelementptr inbounds float* %tmp5617, i64 1
- %tmp5619 = getelementptr inbounds float* %tmp5618, i64 1
- %tmp5620 = getelementptr inbounds float* %tmp5619, i64 1
- %tmp5621 = getelementptr inbounds float* %tmp5620, i64 1
- %tmp5622 = getelementptr inbounds float* %tmp5621, i64 1
- %tmp5623 = getelementptr inbounds float* %tmp5622, i64 1
- %tmp5624 = getelementptr inbounds float* %tmp5623, i64 1
- %tmp5625 = getelementptr inbounds float* %tmp5624, i64 1
- %tmp5626 = getelementptr inbounds float* %tmp5625, i64 1
- %tmp5627 = getelementptr inbounds float* %tmp5626, i64 1
- %tmp5628 = getelementptr inbounds float* %tmp5627, i64 1
- %tmp5629 = getelementptr inbounds float* %tmp5628, i64 1
- %tmp5630 = getelementptr inbounds float* %tmp5629, i64 1
- %tmp5631 = getelementptr inbounds float* %tmp5630, i64 1
- %tmp5632 = getelementptr inbounds float* %tmp5631, i64 1
- %tmp5633 = getelementptr inbounds float* %tmp5632, i64 1
- %tmp5634 = getelementptr inbounds float* %tmp5633, i64 1
- %tmp5635 = getelementptr inbounds float* %tmp5634, i64 1
- %tmp5636 = getelementptr inbounds float* %tmp5635, i64 1
- %tmp5637 = getelementptr inbounds float* %tmp5636, i64 1
- %tmp5638 = getelementptr inbounds float* %tmp5637, i64 1
- %tmp5639 = getelementptr inbounds float* %tmp5638, i64 1
- %tmp5640 = getelementptr inbounds float* %tmp5639, i64 1
- %tmp5641 = getelementptr inbounds float* %tmp5640, i64 1
- %tmp5642 = getelementptr inbounds float* %tmp5641, i64 1
- %tmp5643 = getelementptr inbounds float* %tmp5642, i64 1
- %tmp5644 = getelementptr inbounds float* %tmp5643, i64 1
- %tmp5645 = getelementptr inbounds float* %tmp5644, i64 1
- %tmp5646 = getelementptr inbounds float* %tmp5645, i64 1
- %tmp5647 = getelementptr inbounds float* %tmp5646, i64 1
- %tmp5648 = getelementptr inbounds float* %tmp5647, i64 1
- %tmp5649 = getelementptr inbounds float* %tmp5648, i64 1
- %tmp5650 = getelementptr inbounds float* %tmp5649, i64 1
- %tmp5651 = getelementptr inbounds float* %tmp5650, i64 1
- %tmp5652 = getelementptr inbounds float* %tmp5651, i64 1
- %tmp5653 = getelementptr inbounds float* %tmp5652, i64 1
- %tmp5654 = getelementptr inbounds float* %tmp5653, i64 1
- %tmp5655 = getelementptr inbounds float* %tmp5654, i64 1
- %tmp5656 = getelementptr inbounds float* %tmp5655, i64 1
- %tmp5657 = getelementptr inbounds float* %tmp5656, i64 1
- %tmp5658 = getelementptr inbounds float* %tmp5657, i64 1
- %tmp5659 = getelementptr inbounds float* %tmp5658, i64 1
- %tmp5660 = getelementptr inbounds float* %tmp5659, i64 1
- %tmp5661 = getelementptr inbounds float* %tmp5660, i64 1
- %tmp5662 = getelementptr inbounds float* %tmp5661, i64 1
- %tmp5663 = getelementptr inbounds float* %tmp5662, i64 1
- %tmp5664 = getelementptr inbounds float* %tmp5663, i64 1
- %tmp5665 = getelementptr inbounds float* %tmp5664, i64 1
- %tmp5666 = getelementptr inbounds float* %tmp5665, i64 1
- %tmp5667 = getelementptr inbounds float* %tmp5666, i64 1
- %tmp5668 = getelementptr inbounds float* %tmp5667, i64 1
- %tmp5669 = getelementptr inbounds float* %tmp5668, i64 1
- %tmp5670 = getelementptr inbounds float* %tmp5669, i64 1
- %tmp5671 = getelementptr inbounds float* %tmp5670, i64 1
- %tmp5672 = getelementptr inbounds float* %tmp5671, i64 1
- %tmp5673 = getelementptr inbounds float* %tmp5672, i64 1
- %tmp5674 = getelementptr inbounds float* %tmp5673, i64 1
- %tmp5675 = getelementptr inbounds float* %tmp5674, i64 1
- %tmp5676 = getelementptr inbounds float* %tmp5675, i64 1
- %tmp5677 = getelementptr inbounds float* %tmp5676, i64 1
- %tmp5678 = getelementptr inbounds float* %tmp5677, i64 1
- %tmp5679 = getelementptr inbounds float* %tmp5678, i64 1
- %tmp5680 = getelementptr inbounds float* %tmp5679, i64 1
- %tmp5681 = getelementptr inbounds float* %tmp5680, i64 1
- %tmp5682 = getelementptr inbounds float* %tmp5681, i64 1
- %tmp5683 = getelementptr inbounds float* %tmp5682, i64 1
- %tmp5684 = getelementptr inbounds float* %tmp5683, i64 1
- %tmp5685 = getelementptr inbounds float* %tmp5684, i64 1
- %tmp5686 = getelementptr inbounds float* %tmp5685, i64 1
- %tmp5687 = getelementptr inbounds float* %tmp5686, i64 1
- %tmp5688 = getelementptr inbounds float* %tmp5687, i64 1
- %tmp5689 = getelementptr inbounds float* %tmp5688, i64 1
- %tmp5690 = getelementptr inbounds float* %tmp5689, i64 1
- %tmp5691 = getelementptr inbounds float* %tmp5690, i64 1
- %tmp5692 = getelementptr inbounds float* %tmp5691, i64 1
- %tmp5693 = getelementptr inbounds float* %tmp5692, i64 1
- %tmp5694 = getelementptr inbounds float* %tmp5693, i64 1
- %tmp5695 = getelementptr inbounds float* %tmp5694, i64 1
- %tmp5696 = getelementptr inbounds float* %tmp5695, i64 1
- %tmp5697 = getelementptr inbounds float* %tmp5696, i64 1
- %tmp5698 = getelementptr inbounds float* %tmp5697, i64 1
- %tmp5699 = getelementptr inbounds float* %tmp5698, i64 1
- %tmp5700 = getelementptr inbounds float* %tmp5699, i64 1
- %tmp5701 = getelementptr inbounds float* %tmp5700, i64 1
- %tmp5702 = getelementptr inbounds float* %tmp5701, i64 1
- %tmp5703 = getelementptr inbounds float* %tmp5702, i64 1
- %tmp5704 = getelementptr inbounds float* %tmp5703, i64 1
- %tmp5705 = getelementptr inbounds float* %tmp5704, i64 1
- %tmp5706 = getelementptr inbounds float* %tmp5705, i64 1
- %tmp5707 = getelementptr inbounds float* %tmp5706, i64 1
- %tmp5708 = getelementptr inbounds float* %tmp5707, i64 1
- %tmp5709 = getelementptr inbounds float* %tmp5708, i64 1
- %tmp5710 = getelementptr inbounds float* %tmp5709, i64 1
- %tmp5711 = getelementptr inbounds float* %tmp5710, i64 1
- %tmp5712 = getelementptr inbounds float* %tmp5711, i64 1
- %tmp5713 = getelementptr inbounds float* %tmp5712, i64 1
- %tmp5714 = getelementptr inbounds float* %tmp5713, i64 1
- %tmp5715 = getelementptr inbounds float* %tmp5714, i64 1
- %tmp5716 = getelementptr inbounds float* %tmp5715, i64 1
- %tmp5717 = getelementptr inbounds float* %tmp5716, i64 1
- %tmp5718 = getelementptr inbounds float* %tmp5717, i64 1
- %tmp5719 = getelementptr inbounds float* %tmp5718, i64 1
- %tmp5720 = getelementptr inbounds float* %tmp5719, i64 1
- %tmp5721 = getelementptr inbounds float* %tmp5720, i64 1
- %tmp5722 = getelementptr inbounds float* %tmp5721, i64 1
- %tmp5723 = getelementptr inbounds float* %tmp5722, i64 1
- %tmp5724 = getelementptr inbounds float* %tmp5723, i64 1
- %tmp5725 = getelementptr inbounds float* %tmp5724, i64 1
- %tmp5726 = getelementptr inbounds float* %tmp5725, i64 1
- %tmp5727 = getelementptr inbounds float* %tmp5726, i64 1
- %tmp5728 = getelementptr inbounds float* %tmp5727, i64 1
- %tmp5729 = getelementptr inbounds float* %tmp5728, i64 1
- %tmp5730 = getelementptr inbounds float* %tmp5729, i64 1
- %tmp5731 = getelementptr inbounds float* %tmp5730, i64 1
- %tmp5732 = getelementptr inbounds float* %tmp5731, i64 1
- %tmp5733 = getelementptr inbounds float* %tmp5732, i64 1
- %tmp5734 = getelementptr inbounds float* %tmp5733, i64 1
- %tmp5735 = getelementptr inbounds float* %tmp5734, i64 1
- %tmp5736 = getelementptr inbounds float* %tmp5735, i64 1
- %tmp5737 = getelementptr inbounds float* %tmp5736, i64 1
- %tmp5738 = getelementptr inbounds float* %tmp5737, i64 1
- %tmp5739 = getelementptr inbounds float* %tmp5738, i64 1
- %tmp5740 = getelementptr inbounds float* %tmp5739, i64 1
- %tmp5741 = getelementptr inbounds float* %tmp5740, i64 1
- %tmp5742 = getelementptr inbounds float* %tmp5741, i64 1
- %tmp5743 = getelementptr inbounds float* %tmp5742, i64 1
- %tmp5744 = getelementptr inbounds float* %tmp5743, i64 1
- %tmp5745 = getelementptr inbounds float* %tmp5744, i64 1
- %tmp5746 = getelementptr inbounds float* %tmp5745, i64 1
- %tmp5747 = getelementptr inbounds float* %tmp5746, i64 1
- %tmp5748 = getelementptr inbounds float* %tmp5747, i64 1
- %tmp5749 = getelementptr inbounds float* %tmp5748, i64 1
- %tmp5750 = getelementptr inbounds float* %tmp5749, i64 1
- %tmp5751 = getelementptr inbounds float* %tmp5750, i64 1
- %tmp5752 = getelementptr inbounds float* %tmp5751, i64 1
- %tmp5753 = getelementptr inbounds float* %tmp5752, i64 1
- %tmp5754 = getelementptr inbounds float* %tmp5753, i64 1
- %tmp5755 = getelementptr inbounds float* %tmp5754, i64 1
- %tmp5756 = getelementptr inbounds float* %tmp5755, i64 1
- %tmp5757 = getelementptr inbounds float* %tmp5756, i64 1
- %tmp5758 = getelementptr inbounds float* %tmp5757, i64 1
- %tmp5759 = getelementptr inbounds float* %tmp5758, i64 1
- %tmp5760 = getelementptr inbounds float* %tmp5759, i64 1
- %tmp5761 = getelementptr inbounds float* %tmp5760, i64 1
- %tmp5762 = getelementptr inbounds float* %tmp5761, i64 1
- %tmp5763 = getelementptr inbounds float* %tmp5762, i64 1
- %tmp5764 = getelementptr inbounds float* %tmp5763, i64 1
- %tmp5765 = getelementptr inbounds float* %tmp5764, i64 1
- %tmp5766 = getelementptr inbounds float* %tmp5765, i64 1
- %tmp5767 = getelementptr inbounds float* %tmp5766, i64 1
- %tmp5768 = getelementptr inbounds float* %tmp5767, i64 1
- %tmp5769 = getelementptr inbounds float* %tmp5768, i64 1
- %tmp5770 = getelementptr inbounds float* %tmp5769, i64 1
- %tmp5771 = getelementptr inbounds float* %tmp5770, i64 1
- %tmp5772 = getelementptr inbounds float* %tmp5771, i64 1
- %tmp5773 = getelementptr inbounds float* %tmp5772, i64 1
- %tmp5774 = getelementptr inbounds float* %tmp5773, i64 1
- %tmp5775 = getelementptr inbounds float* %tmp5774, i64 1
- %tmp5776 = getelementptr inbounds float* %tmp5775, i64 1
- %tmp5777 = getelementptr inbounds float* %tmp5776, i64 1
- %tmp5778 = getelementptr inbounds float* %tmp5777, i64 1
- %tmp5779 = getelementptr inbounds float* %tmp5778, i64 1
- %tmp5780 = getelementptr inbounds float* %tmp5779, i64 1
- %tmp5781 = getelementptr inbounds float* %tmp5780, i64 1
- %tmp5782 = getelementptr inbounds float* %tmp5781, i64 1
- %tmp5783 = getelementptr inbounds float* %tmp5782, i64 1
- %tmp5784 = getelementptr inbounds float* %tmp5783, i64 1
- %tmp5785 = getelementptr inbounds float* %tmp5784, i64 1
- %tmp5786 = getelementptr inbounds float* %tmp5785, i64 1
- %tmp5787 = getelementptr inbounds float* %tmp5786, i64 1
- %tmp5788 = getelementptr inbounds float* %tmp5787, i64 1
- %tmp5789 = getelementptr inbounds float* %tmp5788, i64 1
- %tmp5790 = getelementptr inbounds float* %tmp5789, i64 1
- %tmp5791 = getelementptr inbounds float* %tmp5790, i64 1
- %tmp5792 = getelementptr inbounds float* %tmp5791, i64 1
- %tmp5793 = getelementptr inbounds float* %tmp5792, i64 1
- %tmp5794 = getelementptr inbounds float* %tmp5793, i64 1
- %tmp5795 = getelementptr inbounds float* %tmp5794, i64 1
- %tmp5796 = getelementptr inbounds float* %tmp5795, i64 1
- %tmp5797 = getelementptr inbounds float* %tmp5796, i64 1
- %tmp5798 = getelementptr inbounds float* %tmp5797, i64 1
- %tmp5799 = getelementptr inbounds float* %tmp5798, i64 1
- %tmp5800 = getelementptr inbounds float* %tmp5799, i64 1
- %tmp5801 = getelementptr inbounds float* %tmp5800, i64 1
- %tmp5802 = getelementptr inbounds float* %tmp5801, i64 1
- %tmp5803 = getelementptr inbounds float* %tmp5802, i64 1
- %tmp5804 = getelementptr inbounds float* %tmp5803, i64 1
- %tmp5805 = getelementptr inbounds float* %tmp5804, i64 1
- %tmp5806 = getelementptr inbounds float* %tmp5805, i64 1
- %tmp5807 = getelementptr inbounds float* %tmp5806, i64 1
- %tmp5808 = getelementptr inbounds float* %tmp5807, i64 1
- %tmp5809 = getelementptr inbounds float* %tmp5808, i64 1
- %tmp5810 = getelementptr inbounds float* %tmp5809, i64 1
- %tmp5811 = getelementptr inbounds float* %tmp5810, i64 1
- %tmp5812 = getelementptr inbounds float* %tmp5811, i64 1
- %tmp5813 = getelementptr inbounds float* %tmp5812, i64 1
- %tmp5814 = getelementptr inbounds float* %tmp5813, i64 1
- %tmp5815 = getelementptr inbounds float* %tmp5814, i64 1
- %tmp5816 = getelementptr inbounds float* %tmp5815, i64 1
- %tmp5817 = getelementptr inbounds float* %tmp5816, i64 1
- %tmp5818 = getelementptr inbounds float* %tmp5817, i64 1
- %tmp5819 = getelementptr inbounds float* %tmp5818, i64 1
- %tmp5820 = getelementptr inbounds float* %tmp5819, i64 1
- %tmp5821 = getelementptr inbounds float* %tmp5820, i64 1
- %tmp5822 = getelementptr inbounds float* %tmp5821, i64 1
- %tmp5823 = getelementptr inbounds float* %tmp5822, i64 1
- %tmp5824 = getelementptr inbounds float* %tmp5823, i64 1
- %tmp5825 = getelementptr inbounds float* %tmp5824, i64 1
- %tmp5826 = getelementptr inbounds float* %tmp5825, i64 1
- %tmp5827 = getelementptr inbounds float* %tmp5826, i64 1
- %tmp5828 = getelementptr inbounds float* %tmp5827, i64 1
- %tmp5829 = getelementptr inbounds float* %tmp5828, i64 1
- %tmp5830 = getelementptr inbounds float* %tmp5829, i64 1
- %tmp5831 = getelementptr inbounds float* %tmp5830, i64 1
- %tmp5832 = getelementptr inbounds float* %tmp5831, i64 1
- %tmp5833 = getelementptr inbounds float* %tmp5832, i64 1
- %tmp5834 = getelementptr inbounds float* %tmp5833, i64 1
- %tmp5835 = getelementptr inbounds float* %tmp5834, i64 1
- %tmp5836 = getelementptr inbounds float* %tmp5835, i64 1
- %tmp5837 = getelementptr inbounds float* %tmp5836, i64 1
- %tmp5838 = getelementptr inbounds float* %tmp5837, i64 1
- %tmp5839 = getelementptr inbounds float* %tmp5838, i64 1
- %tmp5840 = getelementptr inbounds float* %tmp5839, i64 1
- %tmp5841 = getelementptr inbounds float* %tmp5840, i64 1
- %tmp5842 = getelementptr inbounds float* %tmp5841, i64 1
- %tmp5843 = getelementptr inbounds float* %tmp5842, i64 1
- %tmp5844 = getelementptr inbounds float* %tmp5843, i64 1
- %tmp5845 = getelementptr inbounds float* %tmp5844, i64 1
- %tmp5846 = getelementptr inbounds float* %tmp5845, i64 1
- %tmp5847 = getelementptr inbounds float* %tmp5846, i64 1
- %tmp5848 = getelementptr inbounds float* %tmp5847, i64 1
- %tmp5849 = getelementptr inbounds float* %tmp5848, i64 1
- %tmp5850 = getelementptr inbounds float* %tmp5849, i64 1
- %tmp5851 = getelementptr inbounds float* %tmp5850, i64 1
- %tmp5852 = getelementptr inbounds float* %tmp5851, i64 1
- %tmp5853 = getelementptr inbounds float* %tmp5852, i64 1
- %tmp5854 = getelementptr inbounds float* %tmp5853, i64 1
- %tmp5855 = getelementptr inbounds float* %tmp5854, i64 1
- %tmp5856 = getelementptr inbounds float* %tmp5855, i64 1
- %tmp5857 = getelementptr inbounds float* %tmp5856, i64 1
- %tmp5858 = getelementptr inbounds float* %tmp5857, i64 1
- %tmp5859 = getelementptr inbounds float* %tmp5858, i64 1
- %tmp5860 = getelementptr inbounds float* %tmp5859, i64 1
- %tmp5861 = getelementptr inbounds float* %tmp5860, i64 1
- %tmp5862 = getelementptr inbounds float* %tmp5861, i64 1
- %tmp5863 = getelementptr inbounds float* %tmp5862, i64 1
- %tmp5864 = getelementptr inbounds float* %tmp5863, i64 1
- %tmp5865 = getelementptr inbounds float* %tmp5864, i64 1
- %tmp5866 = getelementptr inbounds float* %tmp5865, i64 1
- %tmp5867 = getelementptr inbounds float* %tmp5866, i64 1
- %tmp5868 = getelementptr inbounds float* %tmp5867, i64 1
- %tmp5869 = getelementptr inbounds float* %tmp5868, i64 1
- %tmp5870 = getelementptr inbounds float* %tmp5869, i64 1
- %tmp5871 = getelementptr inbounds float* %tmp5870, i64 1
- %tmp5872 = getelementptr inbounds float* %tmp5871, i64 1
- %tmp5873 = getelementptr inbounds float* %tmp5872, i64 1
- %tmp5874 = getelementptr inbounds float* %tmp5873, i64 1
- %tmp5875 = getelementptr inbounds float* %tmp5874, i64 1
- %tmp5876 = getelementptr inbounds float* %tmp5875, i64 1
- %tmp5877 = getelementptr inbounds float* %tmp5876, i64 1
- %tmp5878 = getelementptr inbounds float* %tmp5877, i64 1
- %tmp5879 = getelementptr inbounds float* %tmp5878, i64 1
- %tmp5880 = getelementptr inbounds float* %tmp5879, i64 1
- %tmp5881 = getelementptr inbounds float* %tmp5880, i64 1
- %tmp5882 = getelementptr inbounds float* %tmp5881, i64 1
- %tmp5883 = getelementptr inbounds float* %tmp5882, i64 1
- %tmp5884 = getelementptr inbounds float* %tmp5883, i64 1
- %tmp5885 = getelementptr inbounds float* %tmp5884, i64 1
- %tmp5886 = getelementptr inbounds float* %tmp5885, i64 1
- %tmp5887 = getelementptr inbounds float* %tmp5886, i64 1
- %tmp5888 = getelementptr inbounds float* %tmp5887, i64 1
- %tmp5889 = getelementptr inbounds float* %tmp5888, i64 1
- %tmp5890 = getelementptr inbounds float* %tmp5889, i64 1
- %tmp5891 = getelementptr inbounds float* %tmp5890, i64 1
- %tmp5892 = getelementptr inbounds float* %tmp5891, i64 1
- %tmp5893 = getelementptr inbounds float* %tmp5892, i64 1
- %tmp5894 = getelementptr inbounds float* %tmp5893, i64 1
- %tmp5895 = getelementptr inbounds float* %tmp5894, i64 1
- %tmp5896 = getelementptr inbounds float* %tmp5895, i64 1
- %tmp5897 = getelementptr inbounds float* %tmp5896, i64 1
- %tmp5898 = getelementptr inbounds float* %tmp5897, i64 1
- %tmp5899 = getelementptr inbounds float* %tmp5898, i64 1
- %tmp5900 = getelementptr inbounds float* %tmp5899, i64 1
- %tmp5901 = getelementptr inbounds float* %tmp5900, i64 1
- %tmp5902 = getelementptr inbounds float* %tmp5901, i64 1
- %tmp5903 = getelementptr inbounds float* %tmp5902, i64 1
- %tmp5904 = getelementptr inbounds float* %tmp5903, i64 1
- %tmp5905 = getelementptr inbounds float* %tmp5904, i64 1
- %tmp5906 = getelementptr inbounds float* %tmp5905, i64 1
- %tmp5907 = getelementptr inbounds float* %tmp5906, i64 1
- %tmp5908 = getelementptr inbounds float* %tmp5907, i64 1
- %tmp5909 = getelementptr inbounds float* %tmp5908, i64 1
- %tmp5910 = getelementptr inbounds float* %tmp5909, i64 1
- %tmp5911 = getelementptr inbounds float* %tmp5910, i64 1
- %tmp5912 = getelementptr inbounds float* %tmp5911, i64 1
- %tmp5913 = getelementptr inbounds float* %tmp5912, i64 1
- %tmp5914 = getelementptr inbounds float* %tmp5913, i64 1
- %tmp5915 = getelementptr inbounds float* %tmp5914, i64 1
- %tmp5916 = getelementptr inbounds float* %tmp5915, i64 1
- %tmp5917 = getelementptr inbounds float* %tmp5916, i64 1
- %tmp5918 = getelementptr inbounds float* %tmp5917, i64 1
- %tmp5919 = getelementptr inbounds float* %tmp5918, i64 1
- %tmp5920 = getelementptr inbounds float* %tmp5919, i64 1
- %tmp5921 = getelementptr inbounds float* %tmp5920, i64 1
- %tmp5922 = getelementptr inbounds float* %tmp5921, i64 1
- %tmp5923 = getelementptr inbounds float* %tmp5922, i64 1
- %tmp5924 = getelementptr inbounds float* %tmp5923, i64 1
- %tmp5925 = getelementptr inbounds float* %tmp5924, i64 1
- %tmp5926 = getelementptr inbounds float* %tmp5925, i64 1
- %tmp5927 = getelementptr inbounds float* %tmp5926, i64 1
- %tmp5928 = getelementptr inbounds float* %tmp5927, i64 1
- %tmp5929 = getelementptr inbounds float* %tmp5928, i64 1
- %tmp5930 = getelementptr inbounds float* %tmp5929, i64 1
- %tmp5931 = getelementptr inbounds float* %tmp5930, i64 1
- %tmp5932 = getelementptr inbounds float* %tmp5931, i64 1
- %tmp5933 = getelementptr inbounds float* %tmp5932, i64 1
- %tmp5934 = getelementptr inbounds float* %tmp5933, i64 1
- %tmp5935 = getelementptr inbounds float* %tmp5934, i64 1
- %tmp5936 = getelementptr inbounds float* %tmp5935, i64 1
- %tmp5937 = getelementptr inbounds float* %tmp5936, i64 1
- %tmp5938 = getelementptr inbounds float* %tmp5937, i64 1
- %tmp5939 = getelementptr inbounds float* %tmp5938, i64 1
- %tmp5940 = getelementptr inbounds float* %tmp5939, i64 1
- %tmp5941 = getelementptr inbounds float* %tmp5940, i64 1
- %tmp5942 = getelementptr inbounds float* %tmp5941, i64 1
- %tmp5943 = getelementptr inbounds float* %tmp5942, i64 1
- %tmp5944 = getelementptr inbounds float* %tmp5943, i64 1
- %tmp5945 = getelementptr inbounds float* %tmp5944, i64 1
- %tmp5946 = getelementptr inbounds float* %tmp5945, i64 1
- %tmp5947 = getelementptr inbounds float* %tmp5946, i64 1
- %tmp5948 = getelementptr inbounds float* %tmp5947, i64 1
- %tmp5949 = getelementptr inbounds float* %tmp5948, i64 1
- %tmp5950 = getelementptr inbounds float* %tmp5949, i64 1
- %tmp5951 = getelementptr inbounds float* %tmp5950, i64 1
- %tmp5952 = getelementptr inbounds float* %tmp5951, i64 1
- %tmp5953 = getelementptr inbounds float* %tmp5952, i64 1
- %tmp5954 = getelementptr inbounds float* %tmp5953, i64 1
- %tmp5955 = getelementptr inbounds float* %tmp5954, i64 1
- %tmp5956 = getelementptr inbounds float* %tmp5955, i64 1
- %tmp5957 = getelementptr inbounds float* %tmp5956, i64 1
- %tmp5958 = getelementptr inbounds float* %tmp5957, i64 1
- %tmp5959 = getelementptr inbounds float* %tmp5958, i64 1
- %tmp5960 = getelementptr inbounds float* %tmp5959, i64 1
- %tmp5961 = getelementptr inbounds float* %tmp5960, i64 1
- %tmp5962 = getelementptr inbounds float* %tmp5961, i64 1
- %tmp5963 = getelementptr inbounds float* %tmp5962, i64 1
- %tmp5964 = getelementptr inbounds float* %tmp5963, i64 1
- %tmp5965 = getelementptr inbounds float* %tmp5964, i64 1
- %tmp5966 = getelementptr inbounds float* %tmp5965, i64 1
- %tmp5967 = getelementptr inbounds float* %tmp5966, i64 1
- %tmp5968 = getelementptr inbounds float* %tmp5967, i64 1
- %tmp5969 = getelementptr inbounds float* %tmp5968, i64 1
- %tmp5970 = getelementptr inbounds float* %tmp5969, i64 1
- %tmp5971 = getelementptr inbounds float* %tmp5970, i64 1
- %tmp5972 = getelementptr inbounds float* %tmp5971, i64 1
- %tmp5973 = getelementptr inbounds float* %tmp5972, i64 1
- %tmp5974 = getelementptr inbounds float* %tmp5973, i64 1
- %tmp5975 = getelementptr inbounds float* %tmp5974, i64 1
- %tmp5976 = getelementptr inbounds float* %tmp5975, i64 1
- %tmp5977 = getelementptr inbounds float* %tmp5976, i64 1
- %tmp5978 = getelementptr inbounds float* %tmp5977, i64 1
- %tmp5979 = getelementptr inbounds float* %tmp5978, i64 1
- %tmp5980 = getelementptr inbounds float* %tmp5979, i64 1
- %tmp5981 = getelementptr inbounds float* %tmp5980, i64 1
- %tmp5982 = getelementptr inbounds float* %tmp5981, i64 1
- %tmp5983 = getelementptr inbounds float* %tmp5982, i64 1
- %tmp5984 = getelementptr inbounds float* %tmp5983, i64 1
- %tmp5985 = getelementptr inbounds float* %tmp5984, i64 1
- %tmp5986 = getelementptr inbounds float* %tmp5985, i64 1
- %tmp5987 = getelementptr inbounds float* %tmp5986, i64 1
- %tmp5988 = getelementptr inbounds float* %tmp5987, i64 1
- %tmp5989 = getelementptr inbounds float* %tmp5988, i64 1
- %tmp5990 = getelementptr inbounds float* %tmp5989, i64 1
- %tmp5991 = getelementptr inbounds float* %tmp5990, i64 1
- %tmp5992 = getelementptr inbounds float* %tmp5991, i64 1
- %tmp5993 = getelementptr inbounds float* %tmp5992, i64 1
- %tmp5994 = getelementptr inbounds float* %tmp5993, i64 1
- %tmp5995 = getelementptr inbounds float* %tmp5994, i64 1
- %tmp5996 = getelementptr inbounds float* %tmp5995, i64 1
- %tmp5997 = getelementptr inbounds float* %tmp5996, i64 1
- %tmp5998 = getelementptr inbounds float* %tmp5997, i64 1
- %tmp5999 = getelementptr inbounds float* %tmp5998, i64 1
- %tmp6000 = getelementptr inbounds float* %tmp5999, i64 1
- %tmp6001 = getelementptr inbounds float* %tmp6000, i64 1
- %tmp6002 = getelementptr inbounds float* %tmp6001, i64 1
- %tmp6003 = getelementptr inbounds float* %tmp6002, i64 1
- %tmp6004 = getelementptr inbounds float* %tmp6003, i64 1
- %tmp6005 = getelementptr inbounds float* %tmp6004, i64 1
- %tmp6006 = getelementptr inbounds float* %tmp6005, i64 1
- %tmp6007 = getelementptr inbounds float* %tmp6006, i64 1
- %tmp6008 = getelementptr inbounds float* %tmp6007, i64 1
- %tmp6009 = getelementptr inbounds float* %tmp6008, i64 1
- %tmp6010 = getelementptr inbounds float* %tmp6009, i64 1
- %tmp6011 = getelementptr inbounds float* %tmp6010, i64 1
- %tmp6012 = getelementptr inbounds float* %tmp6011, i64 1
- %tmp6013 = getelementptr inbounds float* %tmp6012, i64 1
- %tmp6014 = getelementptr inbounds float* %tmp6013, i64 1
- %tmp6015 = getelementptr inbounds float* %tmp6014, i64 1
- %tmp6016 = getelementptr inbounds float* %tmp6015, i64 1
- %tmp6017 = getelementptr inbounds float* %tmp6016, i64 1
- %tmp6018 = getelementptr inbounds float* %tmp6017, i64 1
- %tmp6019 = getelementptr inbounds float* %tmp6018, i64 1
- %tmp6020 = getelementptr inbounds float* %tmp6019, i64 1
- %tmp6021 = getelementptr inbounds float* %tmp6020, i64 1
- %tmp6022 = getelementptr inbounds float* %tmp6021, i64 1
- %tmp6023 = getelementptr inbounds float* %tmp6022, i64 1
- %tmp6024 = getelementptr inbounds float* %tmp6023, i64 1
- %tmp6025 = getelementptr inbounds float* %tmp6024, i64 1
- %tmp6026 = getelementptr inbounds float* %tmp6025, i64 1
- %tmp6027 = getelementptr inbounds float* %tmp6026, i64 1
- %tmp6028 = getelementptr inbounds float* %tmp6027, i64 1
- %tmp6029 = getelementptr inbounds float* %tmp6028, i64 1
- %tmp6030 = getelementptr inbounds float* %tmp6029, i64 1
- %tmp6031 = getelementptr inbounds float* %tmp6030, i64 1
- %tmp6032 = getelementptr inbounds float* %tmp6031, i64 1
- %tmp6033 = getelementptr inbounds float* %tmp6032, i64 1
- %tmp6034 = getelementptr inbounds float* %tmp6033, i64 1
- %tmp6035 = getelementptr inbounds float* %tmp6034, i64 1
- %tmp6036 = getelementptr inbounds float* %tmp6035, i64 1
- %tmp6037 = getelementptr inbounds float* %tmp6036, i64 1
- %tmp6038 = getelementptr inbounds float* %tmp6037, i64 1
- %tmp6039 = getelementptr inbounds float* %tmp6038, i64 1
- %tmp6040 = getelementptr inbounds float* %tmp6039, i64 1
- %tmp6041 = getelementptr inbounds float* %tmp6040, i64 1
- %tmp6042 = getelementptr inbounds float* %tmp6041, i64 1
- %tmp6043 = getelementptr inbounds float* %tmp6042, i64 1
- %tmp6044 = getelementptr inbounds float* %tmp6043, i64 1
- %tmp6045 = getelementptr inbounds float* %tmp6044, i64 1
- %tmp6046 = getelementptr inbounds float* %tmp6045, i64 1
- %tmp6047 = getelementptr inbounds float* %tmp6046, i64 1
- %tmp6048 = getelementptr inbounds float* %tmp6047, i64 1
- %tmp6049 = getelementptr inbounds float* %tmp6048, i64 1
- %tmp6050 = getelementptr inbounds float* %tmp6049, i64 1
- %tmp6051 = getelementptr inbounds float* %tmp6050, i64 1
- %tmp6052 = getelementptr inbounds float* %tmp6051, i64 1
- %tmp6053 = getelementptr inbounds float* %tmp6052, i64 1
- %tmp6054 = getelementptr inbounds float* %tmp6053, i64 1
- %tmp6055 = getelementptr inbounds float* %tmp6054, i64 1
- %tmp6056 = getelementptr inbounds float* %tmp6055, i64 1
- %tmp6057 = getelementptr inbounds float* %tmp6056, i64 1
- %tmp6058 = getelementptr inbounds float* %tmp6057, i64 1
- %tmp6059 = getelementptr inbounds float* %tmp6058, i64 1
- %tmp6060 = getelementptr inbounds float* %tmp6059, i64 1
- %tmp6061 = getelementptr inbounds float* %tmp6060, i64 1
- %tmp6062 = getelementptr inbounds float* %tmp6061, i64 1
- %tmp6063 = getelementptr inbounds float* %tmp6062, i64 1
- %tmp6064 = getelementptr inbounds float* %tmp6063, i64 1
- %tmp6065 = getelementptr inbounds float* %tmp6064, i64 1
- %tmp6066 = getelementptr inbounds float* %tmp6065, i64 1
- %tmp6067 = getelementptr inbounds float* %tmp6066, i64 1
- %tmp6068 = getelementptr inbounds float* %tmp6067, i64 1
- %tmp6069 = getelementptr inbounds float* %tmp6068, i64 1
- %tmp6070 = getelementptr inbounds float* %tmp6069, i64 1
- %tmp6071 = getelementptr inbounds float* %tmp6070, i64 1
- %tmp6072 = getelementptr inbounds float* %tmp6071, i64 1
- %tmp6073 = getelementptr inbounds float* %tmp6072, i64 1
- %tmp6074 = getelementptr inbounds float* %tmp6073, i64 1
- %tmp6075 = getelementptr inbounds float* %tmp6074, i64 1
- %tmp6076 = getelementptr inbounds float* %tmp6075, i64 1
- %tmp6077 = getelementptr inbounds float* %tmp6076, i64 1
- %tmp6078 = getelementptr inbounds float* %tmp6077, i64 1
- %tmp6079 = getelementptr inbounds float* %tmp6078, i64 1
- %tmp6080 = getelementptr inbounds float* %tmp6079, i64 1
- %tmp6081 = getelementptr inbounds float* %tmp6080, i64 1
- %tmp6082 = getelementptr inbounds float* %tmp6081, i64 1
- %tmp6083 = getelementptr inbounds float* %tmp6082, i64 1
- %tmp6084 = getelementptr inbounds float* %tmp6083, i64 1
- %tmp6085 = getelementptr inbounds float* %tmp6084, i64 1
- %tmp6086 = getelementptr inbounds float* %tmp6085, i64 1
- %tmp6087 = getelementptr inbounds float* %tmp6086, i64 1
- %tmp6088 = getelementptr inbounds float* %tmp6087, i64 1
- %tmp6089 = getelementptr inbounds float* %tmp6088, i64 1
- %tmp6090 = getelementptr inbounds float* %tmp6089, i64 1
- %tmp6091 = getelementptr inbounds float* %tmp6090, i64 1
- %tmp6092 = getelementptr inbounds float* %tmp6091, i64 1
- %tmp6093 = getelementptr inbounds float* %tmp6092, i64 1
- %tmp6094 = getelementptr inbounds float* %tmp6093, i64 1
- %tmp6095 = getelementptr inbounds float* %tmp6094, i64 1
- %tmp6096 = getelementptr inbounds float* %tmp6095, i64 1
- %tmp6097 = getelementptr inbounds float* %tmp6096, i64 1
- %tmp6098 = getelementptr inbounds float* %tmp6097, i64 1
- %tmp6099 = getelementptr inbounds float* %tmp6098, i64 1
- %tmp6100 = getelementptr inbounds float* %tmp6099, i64 1
- %tmp6101 = getelementptr inbounds float* %tmp6100, i64 1
- %tmp6102 = getelementptr inbounds float* %tmp6101, i64 1
- %tmp6103 = getelementptr inbounds float* %tmp6102, i64 1
- %tmp6104 = getelementptr inbounds float* %tmp6103, i64 1
- %tmp6105 = getelementptr inbounds float* %tmp6104, i64 1
- %tmp6106 = getelementptr inbounds float* %tmp6105, i64 1
- %tmp6107 = getelementptr inbounds float* %tmp6106, i64 1
- %tmp6108 = getelementptr inbounds float* %tmp6107, i64 1
- %tmp6109 = getelementptr inbounds float* %tmp6108, i64 1
- %tmp6110 = getelementptr inbounds float* %tmp6109, i64 1
- %tmp6111 = getelementptr inbounds float* %tmp6110, i64 1
- %tmp6112 = getelementptr inbounds float* %tmp6111, i64 1
- %tmp6113 = getelementptr inbounds float* %tmp6112, i64 1
- %tmp6114 = getelementptr inbounds float* %tmp6113, i64 1
- %tmp6115 = getelementptr inbounds float* %tmp6114, i64 1
- %tmp6116 = getelementptr inbounds float* %tmp6115, i64 1
- %tmp6117 = getelementptr inbounds float* %tmp6116, i64 1
- %tmp6118 = getelementptr inbounds float* %tmp6117, i64 1
- %tmp6119 = getelementptr inbounds float* %tmp6118, i64 1
- %tmp6120 = getelementptr inbounds float* %tmp6119, i64 1
- %tmp6121 = getelementptr inbounds float* %tmp6120, i64 1
- %tmp6122 = getelementptr inbounds float* %tmp6121, i64 1
- %tmp6123 = getelementptr inbounds float* %tmp6122, i64 1
- %tmp6124 = getelementptr inbounds float* %tmp6123, i64 1
- %tmp6125 = getelementptr inbounds float* %tmp6124, i64 1
- %tmp6126 = getelementptr inbounds float* %tmp6125, i64 1
- %tmp6127 = getelementptr inbounds float* %tmp6126, i64 1
- %tmp6128 = getelementptr inbounds float* %tmp6127, i64 1
- %tmp6129 = getelementptr inbounds float* %tmp6128, i64 1
- %tmp6130 = getelementptr inbounds float* %tmp6129, i64 1
- %tmp6131 = getelementptr inbounds float* %tmp6130, i64 1
- %tmp6132 = getelementptr inbounds float* %tmp6131, i64 1
- %tmp6133 = getelementptr inbounds float* %tmp6132, i64 1
- %tmp6134 = getelementptr inbounds float* %tmp6133, i64 1
- %tmp6135 = getelementptr inbounds float* %tmp6134, i64 1
- %tmp6136 = getelementptr inbounds float* %tmp6135, i64 1
- %tmp6137 = getelementptr inbounds float* %tmp6136, i64 1
- %tmp6138 = getelementptr inbounds float* %tmp6137, i64 1
- %tmp6139 = getelementptr inbounds float* %tmp6138, i64 1
- %tmp6140 = getelementptr inbounds float* %tmp6139, i64 1
- %tmp6141 = getelementptr inbounds float* %tmp6140, i64 1
- %tmp6142 = getelementptr inbounds float* %tmp6141, i64 1
- %tmp6143 = getelementptr inbounds float* %tmp6142, i64 1
- %tmp6144 = getelementptr inbounds float* %tmp6143, i64 1
- %tmp6145 = getelementptr inbounds float* %tmp6144, i64 1
- %tmp6146 = getelementptr inbounds float* %tmp6145, i64 1
- %tmp6147 = getelementptr inbounds float* %tmp6146, i64 1
- %tmp6148 = getelementptr inbounds float* %tmp6147, i64 1
- %tmp6149 = getelementptr inbounds float* %tmp6148, i64 1
- %tmp6150 = getelementptr inbounds float* %tmp6149, i64 1
- %tmp6151 = getelementptr inbounds float* %tmp6150, i64 1
- %tmp6152 = getelementptr inbounds float* %tmp6151, i64 1
- %tmp6153 = getelementptr inbounds float* %tmp6152, i64 1
- %tmp6154 = getelementptr inbounds float* %tmp6153, i64 1
- %tmp6155 = getelementptr inbounds float* %tmp6154, i64 1
- %tmp6156 = getelementptr inbounds float* %tmp6155, i64 1
- %tmp6157 = getelementptr inbounds float* %tmp6156, i64 1
- %tmp6158 = getelementptr inbounds float* %tmp6157, i64 1
- %tmp6159 = getelementptr inbounds float* %tmp6158, i64 1
- %tmp6160 = getelementptr inbounds float* %tmp6159, i64 1
- %tmp6161 = getelementptr inbounds float* %tmp6160, i64 1
- %tmp6162 = getelementptr inbounds float* %tmp6161, i64 1
- %tmp6163 = getelementptr inbounds float* %tmp6162, i64 1
- %tmp6164 = getelementptr inbounds float* %tmp6163, i64 1
- %tmp6165 = getelementptr inbounds float* %tmp6164, i64 1
- %tmp6166 = getelementptr inbounds float* %tmp6165, i64 1
- %tmp6167 = getelementptr inbounds float* %tmp6166, i64 1
- %tmp6168 = getelementptr inbounds float* %tmp6167, i64 1
- %tmp6169 = getelementptr inbounds float* %tmp6168, i64 1
- %tmp6170 = getelementptr inbounds float* %tmp6169, i64 1
- %tmp6171 = getelementptr inbounds float* %tmp6170, i64 1
- %tmp6172 = getelementptr inbounds float* %tmp6171, i64 1
- %tmp6173 = getelementptr inbounds float* %tmp6172, i64 1
- %tmp6174 = getelementptr inbounds float* %tmp6173, i64 1
- %tmp6175 = getelementptr inbounds float* %tmp6174, i64 1
- %tmp6176 = getelementptr inbounds float* %tmp6175, i64 1
- %tmp6177 = getelementptr inbounds float* %tmp6176, i64 1
- %tmp6178 = getelementptr inbounds float* %tmp6177, i64 1
- %tmp6179 = getelementptr inbounds float* %tmp6178, i64 1
- %tmp6180 = getelementptr inbounds float* %tmp6179, i64 1
- %tmp6181 = getelementptr inbounds float* %tmp6180, i64 1
- %tmp6182 = getelementptr inbounds float* %tmp6181, i64 1
- %tmp6183 = getelementptr inbounds float* %tmp6182, i64 1
- %tmp6184 = getelementptr inbounds float* %tmp6183, i64 1
- %tmp6185 = getelementptr inbounds float* %tmp6184, i64 1
- %tmp6186 = getelementptr inbounds float* %tmp6185, i64 1
- %tmp6187 = getelementptr inbounds float* %tmp6186, i64 1
- %tmp6188 = getelementptr inbounds float* %tmp6187, i64 1
- %tmp6189 = getelementptr inbounds float* %tmp6188, i64 1
- %tmp6190 = getelementptr inbounds float* %tmp6189, i64 1
- %tmp6191 = getelementptr inbounds float* %tmp6190, i64 1
- %tmp6192 = getelementptr inbounds float* %tmp6191, i64 1
- %tmp6193 = getelementptr inbounds float* %tmp6192, i64 1
- %tmp6194 = getelementptr inbounds float* %tmp6193, i64 1
- %tmp6195 = getelementptr inbounds float* %tmp6194, i64 1
- %tmp6196 = getelementptr inbounds float* %tmp6195, i64 1
- %tmp6197 = getelementptr inbounds float* %tmp6196, i64 1
- %tmp6198 = getelementptr inbounds float* %tmp6197, i64 1
- %tmp6199 = getelementptr inbounds float* %tmp6198, i64 1
- %tmp6200 = getelementptr inbounds float* %tmp6199, i64 1
- %tmp6201 = getelementptr inbounds float* %tmp6200, i64 1
- %tmp6202 = getelementptr inbounds float* %tmp6201, i64 1
- %tmp6203 = getelementptr inbounds float* %tmp6202, i64 1
- %tmp6204 = getelementptr inbounds float* %tmp6203, i64 1
- %tmp6205 = getelementptr inbounds float* %tmp6204, i64 1
- %tmp6206 = getelementptr inbounds float* %tmp6205, i64 1
- %tmp6207 = getelementptr inbounds float* %tmp6206, i64 1
- %tmp6208 = getelementptr inbounds float* %tmp6207, i64 1
- %tmp6209 = getelementptr inbounds float* %tmp6208, i64 1
- %tmp6210 = getelementptr inbounds float* %tmp6209, i64 1
- %tmp6211 = getelementptr inbounds float* %tmp6210, i64 1
- %tmp6212 = getelementptr inbounds float* %tmp6211, i64 1
- %tmp6213 = getelementptr inbounds float* %tmp6212, i64 1
- %tmp6214 = getelementptr inbounds float* %tmp6213, i64 1
- %tmp6215 = getelementptr inbounds float* %tmp6214, i64 1
- %tmp6216 = getelementptr inbounds float* %tmp6215, i64 1
- %tmp6217 = getelementptr inbounds float* %tmp6216, i64 1
- %tmp6218 = getelementptr inbounds float* %tmp6217, i64 1
- %tmp6219 = getelementptr inbounds float* %tmp6218, i64 1
- %tmp6220 = getelementptr inbounds float* %tmp6219, i64 1
- %tmp6221 = getelementptr inbounds float* %tmp6220, i64 1
- %tmp6222 = getelementptr inbounds float* %tmp6221, i64 1
- %tmp6223 = getelementptr inbounds float* %tmp6222, i64 1
- %tmp6224 = getelementptr inbounds float* %tmp6223, i64 1
- %tmp6225 = getelementptr inbounds float* %tmp6224, i64 1
- %tmp6226 = getelementptr inbounds float* %tmp6225, i64 1
- %tmp6227 = getelementptr inbounds float* %tmp6226, i64 1
- %tmp6228 = getelementptr inbounds float* %tmp6227, i64 1
- %tmp6229 = getelementptr inbounds float* %tmp6228, i64 1
- %tmp6230 = getelementptr inbounds float* %tmp6229, i64 1
- %tmp6231 = getelementptr inbounds float* %tmp6230, i64 1
- %tmp6232 = getelementptr inbounds float* %tmp6231, i64 1
- %tmp6233 = getelementptr inbounds float* %tmp6232, i64 1
- %tmp6234 = getelementptr inbounds float* %tmp6233, i64 1
- %tmp6235 = getelementptr inbounds float* %tmp6234, i64 1
- %tmp6236 = getelementptr inbounds float* %tmp6235, i64 1
- %tmp6237 = getelementptr inbounds float* %tmp6236, i64 1
- %tmp6238 = getelementptr inbounds float* %tmp6237, i64 1
- %tmp6239 = getelementptr inbounds float* %tmp6238, i64 1
- %tmp6240 = getelementptr inbounds float* %tmp6239, i64 1
- %tmp6241 = getelementptr inbounds float* %tmp6240, i64 1
- %tmp6242 = getelementptr inbounds float* %tmp6241, i64 1
- %tmp6243 = getelementptr inbounds float* %tmp6242, i64 1
- %tmp6244 = getelementptr inbounds float* %tmp6243, i64 1
- %tmp6245 = getelementptr inbounds float* %tmp6244, i64 1
- %tmp6246 = getelementptr inbounds float* %tmp6245, i64 1
- %tmp6247 = getelementptr inbounds float* %tmp6246, i64 1
- %tmp6248 = getelementptr inbounds float* %tmp6247, i64 1
- %tmp6249 = getelementptr inbounds float* %tmp6248, i64 1
- %tmp6250 = getelementptr inbounds float* %tmp6249, i64 1
- %tmp6251 = getelementptr inbounds float* %tmp6250, i64 1
- %tmp6252 = getelementptr inbounds float* %tmp6251, i64 1
- %tmp6253 = getelementptr inbounds float* %tmp6252, i64 1
- %tmp6254 = getelementptr inbounds float* %tmp6253, i64 1
- %tmp6255 = getelementptr inbounds float* %tmp6254, i64 1
- %tmp6256 = getelementptr inbounds float* %tmp6255, i64 1
- %tmp6257 = getelementptr inbounds float* %tmp6256, i64 1
- %tmp6258 = getelementptr inbounds float* %tmp6257, i64 1
- %tmp6259 = getelementptr inbounds float* %tmp6258, i64 1
- %tmp6260 = getelementptr inbounds float* %tmp6259, i64 1
- %tmp6261 = getelementptr inbounds float* %tmp6260, i64 1
- %tmp6262 = getelementptr inbounds float* %tmp6261, i64 1
- %tmp6263 = getelementptr inbounds float* %tmp6262, i64 1
- %tmp6264 = getelementptr inbounds float* %tmp6263, i64 1
- %tmp6265 = getelementptr inbounds float* %tmp6264, i64 1
- %tmp6266 = getelementptr inbounds float* %tmp6265, i64 1
- %tmp6267 = getelementptr inbounds float* %tmp6266, i64 1
- %tmp6268 = getelementptr inbounds float* %tmp6267, i64 1
- %tmp6269 = getelementptr inbounds float* %tmp6268, i64 1
- %tmp6270 = getelementptr inbounds float* %tmp6269, i64 1
- %tmp6271 = getelementptr inbounds float* %tmp6270, i64 1
- %tmp6272 = getelementptr inbounds float* %tmp6271, i64 1
- %tmp6273 = getelementptr inbounds float* %tmp6272, i64 1
- %tmp6274 = getelementptr inbounds float* %tmp6273, i64 1
- %tmp6275 = getelementptr inbounds float* %tmp6274, i64 1
- %tmp6276 = getelementptr inbounds float* %tmp6275, i64 1
- %tmp6277 = getelementptr inbounds float* %tmp6276, i64 1
- %tmp6278 = getelementptr inbounds float* %tmp6277, i64 1
- %tmp6279 = getelementptr inbounds float* %tmp6278, i64 1
- %tmp6280 = getelementptr inbounds float* %tmp6279, i64 1
- %tmp6281 = getelementptr inbounds float* %tmp6280, i64 1
- %tmp6282 = getelementptr inbounds float* %tmp6281, i64 1
- %tmp6283 = getelementptr inbounds float* %tmp6282, i64 1
- %tmp6284 = getelementptr inbounds float* %tmp6283, i64 1
- %tmp6285 = getelementptr inbounds float* %tmp6284, i64 1
- %tmp6286 = getelementptr inbounds float* %tmp6285, i64 1
- %tmp6287 = getelementptr inbounds float* %tmp6286, i64 1
- %tmp6288 = getelementptr inbounds float* %tmp6287, i64 1
- %tmp6289 = getelementptr inbounds float* %tmp6288, i64 1
- %tmp6290 = getelementptr inbounds float* %tmp6289, i64 1
- %tmp6291 = getelementptr inbounds float* %tmp6290, i64 1
- %tmp6292 = getelementptr inbounds float* %tmp6291, i64 1
- %tmp6293 = getelementptr inbounds float* %tmp6292, i64 1
- %tmp6294 = getelementptr inbounds float* %tmp6293, i64 1
- %tmp6295 = getelementptr inbounds float* %tmp6294, i64 1
- %tmp6296 = getelementptr inbounds float* %tmp6295, i64 1
- %tmp6297 = getelementptr inbounds float* %tmp6296, i64 1
- %tmp6298 = getelementptr inbounds float* %tmp6297, i64 1
- %tmp6299 = getelementptr inbounds float* %tmp6298, i64 1
- %tmp6300 = getelementptr inbounds float* %tmp6299, i64 1
- %tmp6301 = getelementptr inbounds float* %tmp6300, i64 1
- %tmp6302 = getelementptr inbounds float* %tmp6301, i64 1
- %tmp6303 = getelementptr inbounds float* %tmp6302, i64 1
- %tmp6304 = getelementptr inbounds float* %tmp6303, i64 1
- %tmp6305 = getelementptr inbounds float* %tmp6304, i64 1
- %tmp6306 = getelementptr inbounds float* %tmp6305, i64 1
- %tmp6307 = getelementptr inbounds float* %tmp6306, i64 1
- %tmp6308 = getelementptr inbounds float* %tmp6307, i64 1
- %tmp6309 = getelementptr inbounds float* %tmp6308, i64 1
- %tmp6310 = getelementptr inbounds float* %tmp6309, i64 1
- %tmp6311 = getelementptr inbounds float* %tmp6310, i64 1
- %tmp6312 = getelementptr inbounds float* %tmp6311, i64 1
- %tmp6313 = getelementptr inbounds float* %tmp6312, i64 1
- %tmp6314 = getelementptr inbounds float* %tmp6313, i64 1
- %tmp6315 = getelementptr inbounds float* %tmp6314, i64 1
- %tmp6316 = getelementptr inbounds float* %tmp6315, i64 1
- %tmp6317 = getelementptr inbounds float* %tmp6316, i64 1
- %tmp6318 = getelementptr inbounds float* %tmp6317, i64 1
- %tmp6319 = getelementptr inbounds float* %tmp6318, i64 1
- %tmp6320 = getelementptr inbounds float* %tmp6319, i64 1
- %tmp6321 = getelementptr inbounds float* %tmp6320, i64 1
- %tmp6322 = getelementptr inbounds float* %tmp6321, i64 1
- %tmp6323 = getelementptr inbounds float* %tmp6322, i64 1
- %tmp6324 = getelementptr inbounds float* %tmp6323, i64 1
- %tmp6325 = getelementptr inbounds float* %tmp6324, i64 1
- %tmp6326 = getelementptr inbounds float* %tmp6325, i64 1
- %tmp6327 = getelementptr inbounds float* %tmp6326, i64 1
- %tmp6328 = getelementptr inbounds float* %tmp6327, i64 1
- %tmp6329 = getelementptr inbounds float* %tmp6328, i64 1
- %tmp6330 = getelementptr inbounds float* %tmp6329, i64 1
- %tmp6331 = getelementptr inbounds float* %tmp6330, i64 1
- %tmp6332 = getelementptr inbounds float* %tmp6331, i64 1
- %tmp6333 = getelementptr inbounds float* %tmp6332, i64 1
- %tmp6334 = getelementptr inbounds float* %tmp6333, i64 1
- %tmp6335 = getelementptr inbounds float* %tmp6334, i64 1
- %tmp6336 = getelementptr inbounds float* %tmp6335, i64 1
- %tmp6337 = getelementptr inbounds float* %tmp6336, i64 1
- %tmp6338 = getelementptr inbounds float* %tmp6337, i64 1
- %tmp6339 = getelementptr inbounds float* %tmp6338, i64 1
- %tmp6340 = getelementptr inbounds float* %tmp6339, i64 1
- %tmp6341 = getelementptr inbounds float* %tmp6340, i64 1
- %tmp6342 = getelementptr inbounds float* %tmp6341, i64 1
- %tmp6343 = getelementptr inbounds float* %tmp6342, i64 1
- %tmp6344 = getelementptr inbounds float* %tmp6343, i64 1
- %tmp6345 = getelementptr inbounds float* %tmp6344, i64 1
- %tmp6346 = getelementptr inbounds float* %tmp6345, i64 1
- %tmp6347 = getelementptr inbounds float* %tmp6346, i64 1
- %tmp6348 = getelementptr inbounds float* %tmp6347, i64 1
- %tmp6349 = getelementptr inbounds float* %tmp6348, i64 1
- %tmp6350 = getelementptr inbounds float* %tmp6349, i64 1
- %tmp6351 = getelementptr inbounds float* %tmp6350, i64 1
- %tmp6352 = getelementptr inbounds float* %tmp6351, i64 1
- %tmp6353 = getelementptr inbounds float* %tmp6352, i64 1
- %tmp6354 = getelementptr inbounds float* %tmp6353, i64 1
- %tmp6355 = getelementptr inbounds float* %tmp6354, i64 1
- %tmp6356 = getelementptr inbounds float* %tmp6355, i64 1
- %tmp6357 = getelementptr inbounds float* %tmp6356, i64 1
- %tmp6358 = getelementptr inbounds float* %tmp6357, i64 1
- %tmp6359 = getelementptr inbounds float* %tmp6358, i64 1
- %tmp6360 = getelementptr inbounds float* %tmp6359, i64 1
- %tmp6361 = getelementptr inbounds float* %tmp6360, i64 1
- %tmp6362 = getelementptr inbounds float* %tmp6361, i64 1
- %tmp6363 = getelementptr inbounds float* %tmp6362, i64 1
- %tmp6364 = getelementptr inbounds float* %tmp6363, i64 1
- %tmp6365 = getelementptr inbounds float* %tmp6364, i64 1
- %tmp6366 = getelementptr inbounds float* %tmp6365, i64 1
- %tmp6367 = getelementptr inbounds float* %tmp6366, i64 1
- %tmp6368 = getelementptr inbounds float* %tmp6367, i64 1
- %tmp6369 = getelementptr inbounds float* %tmp6368, i64 1
- %tmp6370 = getelementptr inbounds float* %tmp6369, i64 1
- %tmp6371 = getelementptr inbounds float* %tmp6370, i64 1
- %tmp6372 = getelementptr inbounds float* %tmp6371, i64 1
- %tmp6373 = getelementptr inbounds float* %tmp6372, i64 1
- %tmp6374 = getelementptr inbounds float* %tmp6373, i64 1
- %tmp6375 = getelementptr inbounds float* %tmp6374, i64 1
- %tmp6376 = getelementptr inbounds float* %tmp6375, i64 1
- %tmp6377 = getelementptr inbounds float* %tmp6376, i64 1
- %tmp6378 = getelementptr inbounds float* %tmp6377, i64 1
- %tmp6379 = getelementptr inbounds float* %tmp6378, i64 1
- %tmp6380 = getelementptr inbounds float* %tmp6379, i64 1
- %tmp6381 = getelementptr inbounds float* %tmp6380, i64 1
- %tmp6382 = getelementptr inbounds float* %tmp6381, i64 1
- %tmp6383 = getelementptr inbounds float* %tmp6382, i64 1
- %tmp6384 = getelementptr inbounds float* %tmp6383, i64 1
- %tmp6385 = getelementptr inbounds float* %tmp6384, i64 1
- %tmp6386 = getelementptr inbounds float* %tmp6385, i64 1
- %tmp6387 = getelementptr inbounds float* %tmp6386, i64 1
- %tmp6388 = getelementptr inbounds float* %tmp6387, i64 1
- %tmp6389 = getelementptr inbounds float* %tmp6388, i64 1
- %tmp6390 = getelementptr inbounds float* %tmp6389, i64 1
- %tmp6391 = getelementptr inbounds float* %tmp6390, i64 1
- %tmp6392 = getelementptr inbounds float* %tmp6391, i64 1
- %tmp6393 = getelementptr inbounds float* %tmp6392, i64 1
- %tmp6394 = getelementptr inbounds float* %tmp6393, i64 1
- %tmp6395 = getelementptr inbounds float* %tmp6394, i64 1
- %tmp6396 = getelementptr inbounds float* %tmp6395, i64 1
- %tmp6397 = getelementptr inbounds float* %tmp6396, i64 1
- %tmp6398 = getelementptr inbounds float* %tmp6397, i64 1
- %tmp6399 = getelementptr inbounds float* %tmp6398, i64 1
- %tmp6400 = getelementptr inbounds float* %tmp6399, i64 1
- %tmp6401 = getelementptr inbounds float* %tmp6400, i64 1
- %tmp6402 = getelementptr inbounds float* %tmp6401, i64 1
- %tmp6403 = getelementptr inbounds float* %tmp6402, i64 1
- %tmp6404 = getelementptr inbounds float* %tmp6403, i64 1
- %tmp6405 = getelementptr inbounds float* %tmp6404, i64 1
- %tmp6406 = getelementptr inbounds float* %tmp6405, i64 1
- %tmp6407 = getelementptr inbounds float* %tmp6406, i64 1
- %tmp6408 = getelementptr inbounds float* %tmp6407, i64 1
- %tmp6409 = getelementptr inbounds float* %tmp6408, i64 1
- %tmp6410 = getelementptr inbounds float* %tmp6409, i64 1
- %tmp6411 = getelementptr inbounds float* %tmp6410, i64 1
- %tmp6412 = getelementptr inbounds float* %tmp6411, i64 1
- %tmp6413 = getelementptr inbounds float* %tmp6412, i64 1
- %tmp6414 = getelementptr inbounds float* %tmp6413, i64 1
- %tmp6415 = getelementptr inbounds float* %tmp6414, i64 1
- %tmp6416 = getelementptr inbounds float* %tmp6415, i64 1
- %tmp6417 = getelementptr inbounds float* %tmp6416, i64 1
- %tmp6418 = getelementptr inbounds float* %tmp6417, i64 1
- %tmp6419 = getelementptr inbounds float* %tmp6418, i64 1
- %tmp6420 = getelementptr inbounds float* %tmp6419, i64 1
- %tmp6421 = getelementptr inbounds float* %tmp6420, i64 1
- %tmp6422 = getelementptr inbounds float* %tmp6421, i64 1
- %tmp6423 = getelementptr inbounds float* %tmp6422, i64 1
- %tmp6424 = getelementptr inbounds float* %tmp6423, i64 1
- %tmp6425 = getelementptr inbounds float* %tmp6424, i64 1
- %tmp6426 = getelementptr inbounds float* %tmp6425, i64 1
- %tmp6427 = getelementptr inbounds float* %tmp6426, i64 1
- %tmp6428 = getelementptr inbounds float* %tmp6427, i64 1
- %tmp6429 = getelementptr inbounds float* %tmp6428, i64 1
- %tmp6430 = getelementptr inbounds float* %tmp6429, i64 1
- %tmp6431 = getelementptr inbounds float* %tmp6430, i64 1
- %tmp6432 = getelementptr inbounds float* %tmp6431, i64 1
- %tmp6433 = getelementptr inbounds float* %tmp6432, i64 1
- %tmp6434 = getelementptr inbounds float* %tmp6433, i64 1
- %tmp6435 = getelementptr inbounds float* %tmp6434, i64 1
- %tmp6436 = getelementptr inbounds float* %tmp6435, i64 1
- %tmp6437 = getelementptr inbounds float* %tmp6436, i64 1
- %tmp6438 = getelementptr inbounds float* %tmp6437, i64 1
- %tmp6439 = getelementptr inbounds float* %tmp6438, i64 1
- %tmp6440 = getelementptr inbounds float* %tmp6439, i64 1
- %tmp6441 = getelementptr inbounds float* %tmp6440, i64 1
- %tmp6442 = getelementptr inbounds float* %tmp6441, i64 1
- %tmp6443 = getelementptr inbounds float* %tmp6442, i64 1
- %tmp6444 = getelementptr inbounds float* %tmp6443, i64 1
- %tmp6445 = getelementptr inbounds float* %tmp6444, i64 1
- %tmp6446 = getelementptr inbounds float* %tmp6445, i64 1
- %tmp6447 = getelementptr inbounds float* %tmp6446, i64 1
- %tmp6448 = getelementptr inbounds float* %tmp6447, i64 1
- %tmp6449 = getelementptr inbounds float* %tmp6448, i64 1
- %tmp6450 = getelementptr inbounds float* %tmp6449, i64 1
- %tmp6451 = getelementptr inbounds float* %tmp6450, i64 1
- %tmp6452 = getelementptr inbounds float* %tmp6451, i64 1
- %tmp6453 = getelementptr inbounds float* %tmp6452, i64 1
- %tmp6454 = getelementptr inbounds float* %tmp6453, i64 1
- %tmp6455 = getelementptr inbounds float* %tmp6454, i64 1
- %tmp6456 = getelementptr inbounds float* %tmp6455, i64 1
- %tmp6457 = getelementptr inbounds float* %tmp6456, i64 1
- %tmp6458 = getelementptr inbounds float* %tmp6457, i64 1
- %tmp6459 = getelementptr inbounds float* %tmp6458, i64 1
- %tmp6460 = getelementptr inbounds float* %tmp6459, i64 1
- %tmp6461 = getelementptr inbounds float* %tmp6460, i64 1
- %tmp6462 = getelementptr inbounds float* %tmp6461, i64 1
- %tmp6463 = getelementptr inbounds float* %tmp6462, i64 1
- %tmp6464 = getelementptr inbounds float* %tmp6463, i64 1
- %tmp6465 = getelementptr inbounds float* %tmp6464, i64 1
- %tmp6466 = getelementptr inbounds float* %tmp6465, i64 1
- %tmp6467 = getelementptr inbounds float* %tmp6466, i64 1
- %tmp6468 = getelementptr inbounds float* %tmp6467, i64 1
- %tmp6469 = getelementptr inbounds float* %tmp6468, i64 1
- %tmp6470 = getelementptr inbounds float* %tmp6469, i64 1
- %tmp6471 = getelementptr inbounds float* %tmp6470, i64 1
- %tmp6472 = getelementptr inbounds float* %tmp6471, i64 1
- %tmp6473 = getelementptr inbounds float* %tmp6472, i64 1
- %tmp6474 = getelementptr inbounds float* %tmp6473, i64 1
- %tmp6475 = getelementptr inbounds float* %tmp6474, i64 1
- %tmp6476 = getelementptr inbounds float* %tmp6475, i64 1
- %tmp6477 = getelementptr inbounds float* %tmp6476, i64 1
- %tmp6478 = getelementptr inbounds float* %tmp6477, i64 1
- %tmp6479 = getelementptr inbounds float* %tmp6478, i64 1
- %tmp6480 = getelementptr inbounds float* %tmp6479, i64 1
- %tmp6481 = getelementptr inbounds float* %tmp6480, i64 1
- %tmp6482 = getelementptr inbounds float* %tmp6481, i64 1
- %tmp6483 = getelementptr inbounds float* %tmp6482, i64 1
- %tmp6484 = getelementptr inbounds float* %tmp6483, i64 1
- %tmp6485 = getelementptr inbounds float* %tmp6484, i64 1
- %tmp6486 = getelementptr inbounds float* %tmp6485, i64 1
- %tmp6487 = getelementptr inbounds float* %tmp6486, i64 1
- %tmp6488 = getelementptr inbounds float* %tmp6487, i64 1
- %tmp6489 = getelementptr inbounds float* %tmp6488, i64 1
- %tmp6490 = getelementptr inbounds float* %tmp6489, i64 1
- %tmp6491 = getelementptr inbounds float* %tmp6490, i64 1
- %tmp6492 = getelementptr inbounds float* %tmp6491, i64 1
- %tmp6493 = getelementptr inbounds float* %tmp6492, i64 1
- %tmp6494 = getelementptr inbounds float* %tmp6493, i64 1
- %tmp6495 = getelementptr inbounds float* %tmp6494, i64 1
- %tmp6496 = getelementptr inbounds float* %tmp6495, i64 1
- %tmp6497 = getelementptr inbounds float* %tmp6496, i64 1
- %tmp6498 = getelementptr inbounds float* %tmp6497, i64 1
- %tmp6499 = getelementptr inbounds float* %tmp6498, i64 1
- %tmp6500 = getelementptr inbounds float* %tmp6499, i64 1
- %tmp6501 = getelementptr inbounds float* %tmp6500, i64 1
- %tmp6502 = getelementptr inbounds float* %tmp6501, i64 1
- %tmp6503 = getelementptr inbounds float* %tmp6502, i64 1
- %tmp6504 = getelementptr inbounds float* %tmp6503, i64 1
- %tmp6505 = getelementptr inbounds float* %tmp6504, i64 1
- %tmp6506 = getelementptr inbounds float* %tmp6505, i64 1
- %tmp6507 = getelementptr inbounds float* %tmp6506, i64 1
- %tmp6508 = getelementptr inbounds float* %tmp6507, i64 1
- %tmp6509 = getelementptr inbounds float* %tmp6508, i64 1
- %tmp6510 = getelementptr inbounds float* %tmp6509, i64 1
- %tmp6511 = getelementptr inbounds float* %tmp6510, i64 1
- %tmp6512 = getelementptr inbounds float* %tmp6511, i64 1
- %tmp6513 = getelementptr inbounds float* %tmp6512, i64 1
- %tmp6514 = getelementptr inbounds float* %tmp6513, i64 1
- %tmp6515 = getelementptr inbounds float* %tmp6514, i64 1
- %tmp6516 = getelementptr inbounds float* %tmp6515, i64 1
- %tmp6517 = getelementptr inbounds float* %tmp6516, i64 1
- %tmp6518 = getelementptr inbounds float* %tmp6517, i64 1
- %tmp6519 = getelementptr inbounds float* %tmp6518, i64 1
- %tmp6520 = getelementptr inbounds float* %tmp6519, i64 1
- %tmp6521 = getelementptr inbounds float* %tmp6520, i64 1
- %tmp6522 = getelementptr inbounds float* %tmp6521, i64 1
- %tmp6523 = getelementptr inbounds float* %tmp6522, i64 1
- %tmp6524 = getelementptr inbounds float* %tmp6523, i64 1
- %tmp6525 = getelementptr inbounds float* %tmp6524, i64 1
- %tmp6526 = getelementptr inbounds float* %tmp6525, i64 1
- %tmp6527 = getelementptr inbounds float* %tmp6526, i64 1
- %tmp6528 = getelementptr inbounds float* %tmp6527, i64 1
- %tmp6529 = getelementptr inbounds float* %tmp6528, i64 1
- %tmp6530 = getelementptr inbounds float* %tmp6529, i64 1
- %tmp6531 = getelementptr inbounds float* %tmp6530, i64 1
- %tmp6532 = getelementptr inbounds float* %tmp6531, i64 1
- %tmp6533 = getelementptr inbounds float* %tmp6532, i64 1
- %tmp6534 = getelementptr inbounds float* %tmp6533, i64 1
- %tmp6535 = getelementptr inbounds float* %tmp6534, i64 1
- %tmp6536 = getelementptr inbounds float* %tmp6535, i64 1
- %tmp6537 = getelementptr inbounds float* %tmp6536, i64 1
- %tmp6538 = getelementptr inbounds float* %tmp6537, i64 1
- %tmp6539 = getelementptr inbounds float* %tmp6538, i64 1
- %tmp6540 = getelementptr inbounds float* %tmp6539, i64 1
- %tmp6541 = getelementptr inbounds float* %tmp6540, i64 1
- %tmp6542 = getelementptr inbounds float* %tmp6541, i64 1
- %tmp6543 = getelementptr inbounds float* %tmp6542, i64 1
- %tmp6544 = getelementptr inbounds float* %tmp6543, i64 1
- %tmp6545 = getelementptr inbounds float* %tmp6544, i64 1
- %tmp6546 = getelementptr inbounds float* %tmp6545, i64 1
- %tmp6547 = getelementptr inbounds float* %tmp6546, i64 1
- %tmp6548 = getelementptr inbounds float* %tmp6547, i64 1
- %tmp6549 = getelementptr inbounds float* %tmp6548, i64 1
- %tmp6550 = getelementptr inbounds float* %tmp6549, i64 1
- %tmp6551 = getelementptr inbounds float* %tmp6550, i64 1
- %tmp6552 = getelementptr inbounds float* %tmp6551, i64 1
- %tmp6553 = getelementptr inbounds float* %tmp6552, i64 1
- %tmp6554 = getelementptr inbounds float* %tmp6553, i64 1
- %tmp6555 = getelementptr inbounds float* %tmp6554, i64 1
- %tmp6556 = getelementptr inbounds float* %tmp6555, i64 1
- %tmp6557 = getelementptr inbounds float* %tmp6556, i64 1
- %tmp6558 = getelementptr inbounds float* %tmp6557, i64 1
- %tmp6559 = getelementptr inbounds float* %tmp6558, i64 1
- %tmp6560 = getelementptr inbounds float* %tmp6559, i64 1
- %tmp6561 = getelementptr inbounds float* %tmp6560, i64 1
- %tmp6562 = getelementptr inbounds float* %tmp6561, i64 1
- %tmp6563 = getelementptr inbounds float* %tmp6562, i64 1
- %tmp6564 = getelementptr inbounds float* %tmp6563, i64 1
- %tmp6565 = getelementptr inbounds float* %tmp6564, i64 1
- %tmp6566 = getelementptr inbounds float* %tmp6565, i64 1
- %tmp6567 = getelementptr inbounds float* %tmp6566, i64 1
- %tmp6568 = getelementptr inbounds float* %tmp6567, i64 1
- %tmp6569 = getelementptr inbounds float* %tmp6568, i64 1
- %tmp6570 = getelementptr inbounds float* %tmp6569, i64 1
- %tmp6571 = getelementptr inbounds float* %tmp6570, i64 1
- %tmp6572 = getelementptr inbounds float* %tmp6571, i64 1
- %tmp6573 = getelementptr inbounds float* %tmp6572, i64 1
- %tmp6574 = getelementptr inbounds float* %tmp6573, i64 1
- %tmp6575 = getelementptr inbounds float* %tmp6574, i64 1
- %tmp6576 = getelementptr inbounds float* %tmp6575, i64 1
- %tmp6577 = getelementptr inbounds float* %tmp6576, i64 1
- %tmp6578 = getelementptr inbounds float* %tmp6577, i64 1
- %tmp6579 = getelementptr inbounds float* %tmp6578, i64 1
- %tmp6580 = getelementptr inbounds float* %tmp6579, i64 1
- %tmp6581 = getelementptr inbounds float* %tmp6580, i64 1
- %tmp6582 = getelementptr inbounds float* %tmp6581, i64 1
- %tmp6583 = getelementptr inbounds float* %tmp6582, i64 1
- %tmp6584 = getelementptr inbounds float* %tmp6583, i64 1
- %tmp6585 = getelementptr inbounds float* %tmp6584, i64 1
- %tmp6586 = getelementptr inbounds float* %tmp6585, i64 1
- %tmp6587 = getelementptr inbounds float* %tmp6586, i64 1
- %tmp6588 = getelementptr inbounds float* %tmp6587, i64 1
- %tmp6589 = getelementptr inbounds float* %tmp6588, i64 1
- %tmp6590 = getelementptr inbounds float* %tmp6589, i64 1
- %tmp6591 = getelementptr inbounds float* %tmp6590, i64 1
- %tmp6592 = getelementptr inbounds float* %tmp6591, i64 1
- %tmp6593 = getelementptr inbounds float* %tmp6592, i64 1
- %tmp6594 = getelementptr inbounds float* %tmp6593, i64 1
- %tmp6595 = getelementptr inbounds float* %tmp6594, i64 1
- %tmp6596 = getelementptr inbounds float* %tmp6595, i64 1
- %tmp6597 = getelementptr inbounds float* %tmp6596, i64 1
- %tmp6598 = getelementptr inbounds float* %tmp6597, i64 1
- %tmp6599 = getelementptr inbounds float* %tmp6598, i64 1
- %tmp6600 = getelementptr inbounds float* %tmp6599, i64 1
- %tmp6601 = getelementptr inbounds float* %tmp6600, i64 1
- %tmp6602 = getelementptr inbounds float* %tmp6601, i64 1
- %tmp6603 = getelementptr inbounds float* %tmp6602, i64 1
- %tmp6604 = getelementptr inbounds float* %tmp6603, i64 1
- %tmp6605 = getelementptr inbounds float* %tmp6604, i64 1
- %tmp6606 = getelementptr inbounds float* %tmp6605, i64 1
- %tmp6607 = getelementptr inbounds float* %tmp6606, i64 1
- %tmp6608 = getelementptr inbounds float* %tmp6607, i64 1
- %tmp6609 = getelementptr inbounds float* %tmp6608, i64 1
- %tmp6610 = getelementptr inbounds float* %tmp6609, i64 1
- %tmp6611 = getelementptr inbounds float* %tmp6610, i64 1
- %tmp6612 = getelementptr inbounds float* %tmp6611, i64 1
- %tmp6613 = getelementptr inbounds float* %tmp6612, i64 1
- %tmp6614 = getelementptr inbounds float* %tmp6613, i64 1
- %tmp6615 = getelementptr inbounds float* %tmp6614, i64 1
- %tmp6616 = getelementptr inbounds float* %tmp6615, i64 1
- %tmp6617 = getelementptr inbounds float* %tmp6616, i64 1
- %tmp6618 = getelementptr inbounds float* %tmp6617, i64 1
- %tmp6619 = getelementptr inbounds float* %tmp6618, i64 1
- %tmp6620 = getelementptr inbounds float* %tmp6619, i64 1
- %tmp6621 = getelementptr inbounds float* %tmp6620, i64 1
- %tmp6622 = getelementptr inbounds float* %tmp6621, i64 1
- %tmp6623 = getelementptr inbounds float* %tmp6622, i64 1
- %tmp6624 = getelementptr inbounds float* %tmp6623, i64 1
- %tmp6625 = getelementptr inbounds float* %tmp6624, i64 1
- %tmp6626 = getelementptr inbounds float* %tmp6625, i64 1
- %tmp6627 = getelementptr inbounds float* %tmp6626, i64 1
- %tmp6628 = getelementptr inbounds float* %tmp6627, i64 1
- %tmp6629 = getelementptr inbounds float* %tmp6628, i64 1
- %tmp6630 = getelementptr inbounds float* %tmp6629, i64 1
- %tmp6631 = getelementptr inbounds float* %tmp6630, i64 1
- %tmp6632 = getelementptr inbounds float* %tmp6631, i64 1
- %tmp6633 = getelementptr inbounds float* %tmp6632, i64 1
- %tmp6634 = getelementptr inbounds float* %tmp6633, i64 1
- %tmp6635 = getelementptr inbounds float* %tmp6634, i64 1
- %tmp6636 = getelementptr inbounds float* %tmp6635, i64 1
- %tmp6637 = getelementptr inbounds float* %tmp6636, i64 1
- %tmp6638 = getelementptr inbounds float* %tmp6637, i64 1
- %tmp6639 = getelementptr inbounds float* %tmp6638, i64 1
- %tmp6640 = getelementptr inbounds float* %tmp6639, i64 1
- %tmp6641 = getelementptr inbounds float* %tmp6640, i64 1
- %tmp6642 = getelementptr inbounds float* %tmp6641, i64 1
- %tmp6643 = getelementptr inbounds float* %tmp6642, i64 1
- %tmp6644 = getelementptr inbounds float* %tmp6643, i64 1
- %tmp6645 = getelementptr inbounds float* %tmp6644, i64 1
- %tmp6646 = getelementptr inbounds float* %tmp6645, i64 1
- %tmp6647 = getelementptr inbounds float* %tmp6646, i64 1
- %tmp6648 = getelementptr inbounds float* %tmp6647, i64 1
- %tmp6649 = getelementptr inbounds float* %tmp6648, i64 1
- %tmp6650 = getelementptr inbounds float* %tmp6649, i64 1
- %tmp6651 = getelementptr inbounds float* %tmp6650, i64 1
- %tmp6652 = getelementptr inbounds float* %tmp6651, i64 1
- %tmp6653 = getelementptr inbounds float* %tmp6652, i64 1
- %tmp6654 = getelementptr inbounds float* %tmp6653, i64 1
- %tmp6655 = getelementptr inbounds float* %tmp6654, i64 1
- %tmp6656 = getelementptr inbounds float* %tmp6655, i64 1
- %tmp6657 = getelementptr inbounds float* %tmp6656, i64 1
- %tmp6658 = getelementptr inbounds float* %tmp6657, i64 1
- %tmp6659 = getelementptr inbounds float* %tmp6658, i64 1
- %tmp6660 = getelementptr inbounds float* %tmp6659, i64 1
- %tmp6661 = getelementptr inbounds float* %tmp6660, i64 1
- %tmp6662 = getelementptr inbounds float* %tmp6661, i64 1
- %tmp6663 = getelementptr inbounds float* %tmp6662, i64 1
- %tmp6664 = getelementptr inbounds float* %tmp6663, i64 1
- %tmp6665 = getelementptr inbounds float* %tmp6664, i64 1
- %tmp6666 = getelementptr inbounds float* %tmp6665, i64 1
- %tmp6667 = getelementptr inbounds float* %tmp6666, i64 1
- %tmp6668 = getelementptr inbounds float* %tmp6667, i64 1
- %tmp6669 = getelementptr inbounds float* %tmp6668, i64 1
- %tmp6670 = getelementptr inbounds float* %tmp6669, i64 1
- %tmp6671 = getelementptr inbounds float* %tmp6670, i64 1
- %tmp6672 = getelementptr inbounds float* %tmp6671, i64 1
- %tmp6673 = getelementptr inbounds float* %tmp6672, i64 1
- %tmp6674 = getelementptr inbounds float* %tmp6673, i64 1
- %tmp6675 = getelementptr inbounds float* %tmp6674, i64 1
- %tmp6676 = getelementptr inbounds float* %tmp6675, i64 1
- %tmp6677 = getelementptr inbounds float* %tmp6676, i64 1
- %tmp6678 = getelementptr inbounds float* %tmp6677, i64 1
- %tmp6679 = getelementptr inbounds float* %tmp6678, i64 1
- %tmp6680 = getelementptr inbounds float* %tmp6679, i64 1
- %tmp6681 = getelementptr inbounds float* %tmp6680, i64 1
- %tmp6682 = getelementptr inbounds float* %tmp6681, i64 1
- %tmp6683 = getelementptr inbounds float* %tmp6682, i64 1
- %tmp6684 = getelementptr inbounds float* %tmp6683, i64 1
- %tmp6685 = getelementptr inbounds float* %tmp6684, i64 1
- %tmp6686 = getelementptr inbounds float* %tmp6685, i64 1
- %tmp6687 = getelementptr inbounds float* %tmp6686, i64 1
- %tmp6688 = getelementptr inbounds float* %tmp6687, i64 1
- %tmp6689 = getelementptr inbounds float* %tmp6688, i64 1
- %tmp6690 = getelementptr inbounds float* %tmp6689, i64 1
- %tmp6691 = getelementptr inbounds float* %tmp6690, i64 1
- %tmp6692 = getelementptr inbounds float* %tmp6691, i64 1
- %tmp6693 = getelementptr inbounds float* %tmp6692, i64 1
- %tmp6694 = getelementptr inbounds float* %tmp6693, i64 1
- %tmp6695 = getelementptr inbounds float* %tmp6694, i64 1
- %tmp6696 = getelementptr inbounds float* %tmp6695, i64 1
- %tmp6697 = getelementptr inbounds float* %tmp6696, i64 1
- %tmp6698 = getelementptr inbounds float* %tmp6697, i64 1
- %tmp6699 = getelementptr inbounds float* %tmp6698, i64 1
- %tmp6700 = getelementptr inbounds float* %tmp6699, i64 1
- %tmp6701 = getelementptr inbounds float* %tmp6700, i64 1
- %tmp6702 = getelementptr inbounds float* %tmp6701, i64 1
- %tmp6703 = getelementptr inbounds float* %tmp6702, i64 1
- %tmp6704 = getelementptr inbounds float* %tmp6703, i64 1
- %tmp6705 = getelementptr inbounds float* %tmp6704, i64 1
- %tmp6706 = getelementptr inbounds float* %tmp6705, i64 1
- %tmp6707 = getelementptr inbounds float* %tmp6706, i64 1
- %tmp6708 = getelementptr inbounds float* %tmp6707, i64 1
- %tmp6709 = getelementptr inbounds float* %tmp6708, i64 1
- %tmp6710 = getelementptr inbounds float* %tmp6709, i64 1
- %tmp6711 = getelementptr inbounds float* %tmp6710, i64 1
- %tmp6712 = getelementptr inbounds float* %tmp6711, i64 1
- %tmp6713 = getelementptr inbounds float* %tmp6712, i64 1
- %tmp6714 = getelementptr inbounds float* %tmp6713, i64 1
- %tmp6715 = getelementptr inbounds float* %tmp6714, i64 1
- %tmp6716 = getelementptr inbounds float* %tmp6715, i64 1
- %tmp6717 = getelementptr inbounds float* %tmp6716, i64 1
- %tmp6718 = getelementptr inbounds float* %tmp6717, i64 1
- %tmp6719 = getelementptr inbounds float* %tmp6718, i64 1
- %tmp6720 = getelementptr inbounds float* %tmp6719, i64 1
- %tmp6721 = getelementptr inbounds float* %tmp6720, i64 1
- %tmp6722 = getelementptr inbounds float* %tmp6721, i64 1
- %tmp6723 = getelementptr inbounds float* %tmp6722, i64 1
- %tmp6724 = getelementptr inbounds float* %tmp6723, i64 1
- %tmp6725 = getelementptr inbounds float* %tmp6724, i64 1
- %tmp6726 = getelementptr inbounds float* %tmp6725, i64 1
- %tmp6727 = getelementptr inbounds float* %tmp6726, i64 1
- %tmp6728 = getelementptr inbounds float* %tmp6727, i64 1
- %tmp6729 = getelementptr inbounds float* %tmp6728, i64 1
- %tmp6730 = getelementptr inbounds float* %tmp6729, i64 1
- %tmp6731 = getelementptr inbounds float* %tmp6730, i64 1
- %tmp6732 = getelementptr inbounds float* %tmp6731, i64 1
- %tmp6733 = getelementptr inbounds float* %tmp6732, i64 1
- %tmp6734 = getelementptr inbounds float* %tmp6733, i64 1
- %tmp6735 = getelementptr inbounds float* %tmp6734, i64 1
- %tmp6736 = getelementptr inbounds float* %tmp6735, i64 1
- %tmp6737 = getelementptr inbounds float* %tmp6736, i64 1
- %tmp6738 = getelementptr inbounds float* %tmp6737, i64 1
- %tmp6739 = getelementptr inbounds float* %tmp6738, i64 1
- %tmp6740 = getelementptr inbounds float* %tmp6739, i64 1
- %tmp6741 = getelementptr inbounds float* %tmp6740, i64 1
- %tmp6742 = getelementptr inbounds float* %tmp6741, i64 1
- %tmp6743 = getelementptr inbounds float* %tmp6742, i64 1
- %tmp6744 = getelementptr inbounds float* %tmp6743, i64 1
- %tmp6745 = getelementptr inbounds float* %tmp6744, i64 1
- %tmp6746 = getelementptr inbounds float* %tmp6745, i64 1
- %tmp6747 = getelementptr inbounds float* %tmp6746, i64 1
- %tmp6748 = getelementptr inbounds float* %tmp6747, i64 1
- %tmp6749 = getelementptr inbounds float* %tmp6748, i64 1
- %tmp6750 = getelementptr inbounds float* %tmp6749, i64 1
- %tmp6751 = getelementptr inbounds float* %tmp6750, i64 1
- %tmp6752 = getelementptr inbounds float* %tmp6751, i64 1
- %tmp6753 = getelementptr inbounds float* %tmp6752, i64 1
- %tmp6754 = getelementptr inbounds float* %tmp6753, i64 1
- %tmp6755 = getelementptr inbounds float* %tmp6754, i64 1
- %tmp6756 = getelementptr inbounds float* %tmp6755, i64 1
- %tmp6757 = getelementptr inbounds float* %tmp6756, i64 1
- %tmp6758 = getelementptr inbounds float* %tmp6757, i64 1
- %tmp6759 = getelementptr inbounds float* %tmp6758, i64 1
- %tmp6760 = getelementptr inbounds float* %tmp6759, i64 1
- %tmp6761 = getelementptr inbounds float* %tmp6760, i64 1
- %tmp6762 = getelementptr inbounds float* %tmp6761, i64 1
- %tmp6763 = getelementptr inbounds float* %tmp6762, i64 1
- %tmp6764 = getelementptr inbounds float* %tmp6763, i64 1
- %tmp6765 = getelementptr inbounds float* %tmp6764, i64 1
- %tmp6766 = getelementptr inbounds float* %tmp6765, i64 1
- %tmp6767 = getelementptr inbounds float* %tmp6766, i64 1
- %tmp6768 = getelementptr inbounds float* %tmp6767, i64 1
- %tmp6769 = getelementptr inbounds float* %tmp6768, i64 1
- %tmp6770 = getelementptr inbounds float* %tmp6769, i64 1
- %tmp6771 = getelementptr inbounds float* %tmp6770, i64 1
- %tmp6772 = getelementptr inbounds float* %tmp6771, i64 1
- %tmp6773 = getelementptr inbounds float* %tmp6772, i64 1
- %tmp6774 = getelementptr inbounds float* %tmp6773, i64 1
- %tmp6775 = getelementptr inbounds float* %tmp6774, i64 1
- %tmp6776 = getelementptr inbounds float* %tmp6775, i64 1
- %tmp6777 = getelementptr inbounds float* %tmp6776, i64 1
- %tmp6778 = getelementptr inbounds float* %tmp6777, i64 1
- %tmp6779 = getelementptr inbounds float* %tmp6778, i64 1
- %tmp6780 = getelementptr inbounds float* %tmp6779, i64 1
- %tmp6781 = getelementptr inbounds float* %tmp6780, i64 1
- %tmp6782 = getelementptr inbounds float* %tmp6781, i64 1
- %tmp6783 = getelementptr inbounds float* %tmp6782, i64 1
- %tmp6784 = getelementptr inbounds float* %tmp6783, i64 1
- %tmp6785 = getelementptr inbounds float* %tmp6784, i64 1
- %tmp6786 = getelementptr inbounds float* %tmp6785, i64 1
- %tmp6787 = getelementptr inbounds float* %tmp6786, i64 1
- %tmp6788 = getelementptr inbounds float* %tmp6787, i64 1
- %tmp6789 = getelementptr inbounds float* %tmp6788, i64 1
- %tmp6790 = getelementptr inbounds float* %tmp6789, i64 1
- %tmp6791 = getelementptr inbounds float* %tmp6790, i64 1
- %tmp6792 = getelementptr inbounds float* %tmp6791, i64 1
- %tmp6793 = getelementptr inbounds float* %tmp6792, i64 1
- %tmp6794 = getelementptr inbounds float* %tmp6793, i64 1
- %tmp6795 = getelementptr inbounds float* %tmp6794, i64 1
- %tmp6796 = getelementptr inbounds float* %tmp6795, i64 1
- %tmp6797 = getelementptr inbounds float* %tmp6796, i64 1
- %tmp6798 = getelementptr inbounds float* %tmp6797, i64 1
- %tmp6799 = getelementptr inbounds float* %tmp6798, i64 1
- %tmp6800 = getelementptr inbounds float* %tmp6799, i64 1
- %tmp6801 = getelementptr inbounds float* %tmp6800, i64 1
- %tmp6802 = getelementptr inbounds float* %tmp6801, i64 1
- %tmp6803 = getelementptr inbounds float* %tmp6802, i64 1
- %tmp6804 = getelementptr inbounds float* %tmp6803, i64 1
- %tmp6805 = getelementptr inbounds float* %tmp6804, i64 1
- %tmp6806 = getelementptr inbounds float* %tmp6805, i64 1
- %tmp6807 = getelementptr inbounds float* %tmp6806, i64 1
- %tmp6808 = getelementptr inbounds float* %tmp6807, i64 1
- %tmp6809 = getelementptr inbounds float* %tmp6808, i64 1
- %tmp6810 = getelementptr inbounds float* %tmp6809, i64 1
- %tmp6811 = getelementptr inbounds float* %tmp6810, i64 1
- %tmp6812 = getelementptr inbounds float* %tmp6811, i64 1
- %tmp6813 = getelementptr inbounds float* %tmp6812, i64 1
- %tmp6814 = getelementptr inbounds float* %tmp6813, i64 1
- %tmp6815 = getelementptr inbounds float* %tmp6814, i64 1
- %tmp6816 = getelementptr inbounds float* %tmp6815, i64 1
- %tmp6817 = getelementptr inbounds float* %tmp6816, i64 1
- %tmp6818 = getelementptr inbounds float* %tmp6817, i64 1
- %tmp6819 = getelementptr inbounds float* %tmp6818, i64 1
- %tmp6820 = getelementptr inbounds float* %tmp6819, i64 1
- %tmp6821 = getelementptr inbounds float* %tmp6820, i64 1
- %tmp6822 = getelementptr inbounds float* %tmp6821, i64 1
- %tmp6823 = getelementptr inbounds float* %tmp6822, i64 1
- %tmp6824 = getelementptr inbounds float* %tmp6823, i64 1
- %tmp6825 = getelementptr inbounds float* %tmp6824, i64 1
- %tmp6826 = getelementptr inbounds float* %tmp6825, i64 1
- %tmp6827 = getelementptr inbounds float* %tmp6826, i64 1
- %tmp6828 = getelementptr inbounds float* %tmp6827, i64 1
- %tmp6829 = getelementptr inbounds float* %tmp6828, i64 1
- %tmp6830 = getelementptr inbounds float* %tmp6829, i64 1
- %tmp6831 = getelementptr inbounds float* %tmp6830, i64 1
- %tmp6832 = getelementptr inbounds float* %tmp6831, i64 1
- %tmp6833 = getelementptr inbounds float* %tmp6832, i64 1
- %tmp6834 = getelementptr inbounds float* %tmp6833, i64 1
- %tmp6835 = getelementptr inbounds float* %tmp6834, i64 1
- %tmp6836 = getelementptr inbounds float* %tmp6835, i64 1
- %tmp6837 = getelementptr inbounds float* %tmp6836, i64 1
- %tmp6838 = getelementptr inbounds float* %tmp6837, i64 1
- %tmp6839 = getelementptr inbounds float* %tmp6838, i64 1
- %tmp6840 = getelementptr inbounds float* %tmp6839, i64 1
- %tmp6841 = getelementptr inbounds float* %tmp6840, i64 1
- %tmp6842 = getelementptr inbounds float* %tmp6841, i64 1
- %tmp6843 = getelementptr inbounds float* %tmp6842, i64 1
- %tmp6844 = getelementptr inbounds float* %tmp6843, i64 1
- %tmp6845 = getelementptr inbounds float* %tmp6844, i64 1
- %tmp6846 = getelementptr inbounds float* %tmp6845, i64 1
- %tmp6847 = getelementptr inbounds float* %tmp6846, i64 1
- %tmp6848 = getelementptr inbounds float* %tmp6847, i64 1
- %tmp6849 = getelementptr inbounds float* %tmp6848, i64 1
- %tmp6850 = getelementptr inbounds float* %tmp6849, i64 1
- %tmp6851 = getelementptr inbounds float* %tmp6850, i64 1
- %tmp6852 = getelementptr inbounds float* %tmp6851, i64 1
- %tmp6853 = getelementptr inbounds float* %tmp6852, i64 1
- %tmp6854 = getelementptr inbounds float* %tmp6853, i64 1
- %tmp6855 = getelementptr inbounds float* %tmp6854, i64 1
- %tmp6856 = getelementptr inbounds float* %tmp6855, i64 1
- %tmp6857 = getelementptr inbounds float* %tmp6856, i64 1
- %tmp6858 = getelementptr inbounds float* %tmp6857, i64 1
- %tmp6859 = getelementptr inbounds float* %tmp6858, i64 1
- %tmp6860 = getelementptr inbounds float* %tmp6859, i64 1
- %tmp6861 = getelementptr inbounds float* %tmp6860, i64 1
- %tmp6862 = getelementptr inbounds float* %tmp6861, i64 1
- %tmp6863 = getelementptr inbounds float* %tmp6862, i64 1
- %tmp6864 = getelementptr inbounds float* %tmp6863, i64 1
- %tmp6865 = getelementptr inbounds float* %tmp6864, i64 1
- %tmp6866 = getelementptr inbounds float* %tmp6865, i64 1
- %tmp6867 = getelementptr inbounds float* %tmp6866, i64 1
- %tmp6868 = getelementptr inbounds float* %tmp6867, i64 1
- %tmp6869 = getelementptr inbounds float* %tmp6868, i64 1
- %tmp6870 = getelementptr inbounds float* %tmp6869, i64 1
- %tmp6871 = getelementptr inbounds float* %tmp6870, i64 1
- %tmp6872 = getelementptr inbounds float* %tmp6871, i64 1
- %tmp6873 = getelementptr inbounds float* %tmp6872, i64 1
- %tmp6874 = getelementptr inbounds float* %tmp6873, i64 1
- %tmp6875 = getelementptr inbounds float* %tmp6874, i64 1
- %tmp6876 = getelementptr inbounds float* %tmp6875, i64 1
- %tmp6877 = getelementptr inbounds float* %tmp6876, i64 1
- %tmp6878 = getelementptr inbounds float* %tmp6877, i64 1
- %tmp6879 = getelementptr inbounds float* %tmp6878, i64 1
- %tmp6880 = getelementptr inbounds float* %tmp6879, i64 1
- %tmp6881 = getelementptr inbounds float* %tmp6880, i64 1
- %tmp6882 = getelementptr inbounds float* %tmp6881, i64 1
- %tmp6883 = getelementptr inbounds float* %tmp6882, i64 1
- %tmp6884 = getelementptr inbounds float* %tmp6883, i64 1
- %tmp6885 = getelementptr inbounds float* %tmp6884, i64 1
- %tmp6886 = getelementptr inbounds float* %tmp6885, i64 1
- %tmp6887 = getelementptr inbounds float* %tmp6886, i64 1
- %tmp6888 = getelementptr inbounds float* %tmp6887, i64 1
- %tmp6889 = getelementptr inbounds float* %tmp6888, i64 1
- %tmp6890 = getelementptr inbounds float* %tmp6889, i64 1
- %tmp6891 = getelementptr inbounds float* %tmp6890, i64 1
- %tmp6892 = getelementptr inbounds float* %tmp6891, i64 1
- %tmp6893 = getelementptr inbounds float* %tmp6892, i64 1
- %tmp6894 = getelementptr inbounds float* %tmp6893, i64 1
- %tmp6895 = getelementptr inbounds float* %tmp6894, i64 1
- %tmp6896 = getelementptr inbounds float* %tmp6895, i64 1
- %tmp6897 = getelementptr inbounds float* %tmp6896, i64 1
- %tmp6898 = getelementptr inbounds float* %tmp6897, i64 1
- %tmp6899 = getelementptr inbounds float* %tmp6898, i64 1
- %tmp6900 = getelementptr inbounds float* %tmp6899, i64 1
- %tmp6901 = getelementptr inbounds float* %tmp6900, i64 1
- %tmp6902 = getelementptr inbounds float* %tmp6901, i64 1
- %tmp6903 = getelementptr inbounds float* %tmp6902, i64 1
- %tmp6904 = getelementptr inbounds float* %tmp6903, i64 1
- %tmp6905 = getelementptr inbounds float* %tmp6904, i64 1
- %tmp6906 = getelementptr inbounds float* %tmp6905, i64 1
- %tmp6907 = getelementptr inbounds float* %tmp6906, i64 1
- %tmp6908 = getelementptr inbounds float* %tmp6907, i64 1
- %tmp6909 = getelementptr inbounds float* %tmp6908, i64 1
- %tmp6910 = getelementptr inbounds float* %tmp6909, i64 1
- %tmp6911 = getelementptr inbounds float* %tmp6910, i64 1
- %tmp6912 = getelementptr inbounds float* %tmp6911, i64 1
- %tmp6913 = getelementptr inbounds float* %tmp6912, i64 1
- %tmp6914 = getelementptr inbounds float* %tmp6913, i64 1
- %tmp6915 = getelementptr inbounds float* %tmp6914, i64 1
- %tmp6916 = getelementptr inbounds float* %tmp6915, i64 1
- %tmp6917 = getelementptr inbounds float* %tmp6916, i64 1
- %tmp6918 = getelementptr inbounds float* %tmp6917, i64 1
- %tmp6919 = getelementptr inbounds float* %tmp6918, i64 1
- %tmp6920 = getelementptr inbounds float* %tmp6919, i64 1
- %tmp6921 = getelementptr inbounds float* %tmp6920, i64 1
- %tmp6922 = getelementptr inbounds float* %tmp6921, i64 1
- %tmp6923 = getelementptr inbounds float* %tmp6922, i64 1
- %tmp6924 = getelementptr inbounds float* %tmp6923, i64 1
- %tmp6925 = getelementptr inbounds float* %tmp6924, i64 1
- %tmp6926 = getelementptr inbounds float* %tmp6925, i64 1
- %tmp6927 = getelementptr inbounds float* %tmp6926, i64 1
- %tmp6928 = getelementptr inbounds float* %tmp6927, i64 1
- %tmp6929 = getelementptr inbounds float* %tmp6928, i64 1
- %tmp6930 = getelementptr inbounds float* %tmp6929, i64 1
- %tmp6931 = getelementptr inbounds float* %tmp6930, i64 1
- %tmp6932 = getelementptr inbounds float* %tmp6931, i64 1
- %tmp6933 = getelementptr inbounds float* %tmp6932, i64 1
- %tmp6934 = getelementptr inbounds float* %tmp6933, i64 1
- %tmp6935 = getelementptr inbounds float* %tmp6934, i64 1
- %tmp6936 = getelementptr inbounds float* %tmp6935, i64 1
- %tmp6937 = getelementptr inbounds float* %tmp6936, i64 1
- %tmp6938 = getelementptr inbounds float* %tmp6937, i64 1
- %tmp6939 = getelementptr inbounds float* %tmp6938, i64 1
- %tmp6940 = getelementptr inbounds float* %tmp6939, i64 1
- %tmp6941 = getelementptr inbounds float* %tmp6940, i64 1
- %tmp6942 = getelementptr inbounds float* %tmp6941, i64 1
- %tmp6943 = getelementptr inbounds float* %tmp6942, i64 1
- %tmp6944 = getelementptr inbounds float* %tmp6943, i64 1
- %tmp6945 = getelementptr inbounds float* %tmp6944, i64 1
- %tmp6946 = getelementptr inbounds float* %tmp6945, i64 1
- %tmp6947 = getelementptr inbounds float* %tmp6946, i64 1
- %tmp6948 = getelementptr inbounds float* %tmp6947, i64 1
- %tmp6949 = getelementptr inbounds float* %tmp6948, i64 1
- %tmp6950 = getelementptr inbounds float* %tmp6949, i64 1
- %tmp6951 = getelementptr inbounds float* %tmp6950, i64 1
- %tmp6952 = getelementptr inbounds float* %tmp6951, i64 1
- %tmp6953 = getelementptr inbounds float* %tmp6952, i64 1
- %tmp6954 = getelementptr inbounds float* %tmp6953, i64 1
- %tmp6955 = getelementptr inbounds float* %tmp6954, i64 1
- %tmp6956 = getelementptr inbounds float* %tmp6955, i64 1
- %tmp6957 = getelementptr inbounds float* %tmp6956, i64 1
- %tmp6958 = getelementptr inbounds float* %tmp6957, i64 1
- %tmp6959 = getelementptr inbounds float* %tmp6958, i64 1
- %tmp6960 = getelementptr inbounds float* %tmp6959, i64 1
- %tmp6961 = getelementptr inbounds float* %tmp6960, i64 1
- %tmp6962 = getelementptr inbounds float* %tmp6961, i64 1
- %tmp6963 = getelementptr inbounds float* %tmp6962, i64 1
- %tmp6964 = getelementptr inbounds float* %tmp6963, i64 1
- %tmp6965 = getelementptr inbounds float* %tmp6964, i64 1
- %tmp6966 = getelementptr inbounds float* %tmp6965, i64 1
- %tmp6967 = getelementptr inbounds float* %tmp6966, i64 1
- %tmp6968 = getelementptr inbounds float* %tmp6967, i64 1
- %tmp6969 = getelementptr inbounds float* %tmp6968, i64 1
- %tmp6970 = getelementptr inbounds float* %tmp6969, i64 1
- %tmp6971 = getelementptr inbounds float* %tmp6970, i64 1
- %tmp6972 = getelementptr inbounds float* %tmp6971, i64 1
- %tmp6973 = getelementptr inbounds float* %tmp6972, i64 1
- %tmp6974 = getelementptr inbounds float* %tmp6973, i64 1
- %tmp6975 = getelementptr inbounds float* %tmp6974, i64 1
- %tmp6976 = getelementptr inbounds float* %tmp6975, i64 1
- %tmp6977 = getelementptr inbounds float* %tmp6976, i64 1
- %tmp6978 = getelementptr inbounds float* %tmp6977, i64 1
- %tmp6979 = getelementptr inbounds float* %tmp6978, i64 1
- %tmp6980 = getelementptr inbounds float* %tmp6979, i64 1
- %tmp6981 = getelementptr inbounds float* %tmp6980, i64 1
- %tmp6982 = getelementptr inbounds float* %tmp6981, i64 1
- %tmp6983 = getelementptr inbounds float* %tmp6982, i64 1
- %tmp6984 = getelementptr inbounds float* %tmp6983, i64 1
- %tmp6985 = getelementptr inbounds float* %tmp6984, i64 1
- %tmp6986 = getelementptr inbounds float* %tmp6985, i64 1
- %tmp6987 = getelementptr inbounds float* %tmp6986, i64 1
- %tmp6988 = getelementptr inbounds float* %tmp6987, i64 1
- %tmp6989 = getelementptr inbounds float* %tmp6988, i64 1
- %tmp6990 = getelementptr inbounds float* %tmp6989, i64 1
- %tmp6991 = getelementptr inbounds float* %tmp6990, i64 1
- %tmp6992 = getelementptr inbounds float* %tmp6991, i64 1
- %tmp6993 = getelementptr inbounds float* %tmp6992, i64 1
- %tmp6994 = getelementptr inbounds float* %tmp6993, i64 1
- %tmp6995 = getelementptr inbounds float* %tmp6994, i64 1
- %tmp6996 = getelementptr inbounds float* %tmp6995, i64 1
- %tmp6997 = getelementptr inbounds float* %tmp6996, i64 1
- %tmp6998 = getelementptr inbounds float* %tmp6997, i64 1
- %tmp6999 = getelementptr inbounds float* %tmp6998, i64 1
- %tmp7000 = getelementptr inbounds float* %tmp6999, i64 1
- %tmp7001 = getelementptr inbounds float* %tmp7000, i64 1
- %tmp7002 = getelementptr inbounds float* %tmp7001, i64 1
- %tmp7003 = getelementptr inbounds float* %tmp7002, i64 1
- %tmp7004 = getelementptr inbounds float* %tmp7003, i64 1
- %tmp7005 = getelementptr inbounds float* %tmp7004, i64 1
- %tmp7006 = getelementptr inbounds float* %tmp7005, i64 1
- %tmp7007 = getelementptr inbounds float* %tmp7006, i64 1
- %tmp7008 = getelementptr inbounds float* %tmp7007, i64 1
- %tmp7009 = getelementptr inbounds float* %tmp7008, i64 1
- %tmp7010 = getelementptr inbounds float* %tmp7009, i64 1
- %tmp7011 = getelementptr inbounds float* %tmp7010, i64 1
- %tmp7012 = getelementptr inbounds float* %tmp7011, i64 1
- %tmp7013 = getelementptr inbounds float* %tmp7012, i64 1
- %tmp7014 = getelementptr inbounds float* %tmp7013, i64 1
- %tmp7015 = getelementptr inbounds float* %tmp7014, i64 1
- %tmp7016 = getelementptr inbounds float* %tmp7015, i64 1
- %tmp7017 = getelementptr inbounds float* %tmp7016, i64 1
- %tmp7018 = getelementptr inbounds float* %tmp7017, i64 1
- %tmp7019 = getelementptr inbounds float* %tmp7018, i64 1
- %tmp7020 = getelementptr inbounds float* %tmp7019, i64 1
- %tmp7021 = getelementptr inbounds float* %tmp7020, i64 1
- %tmp7022 = getelementptr inbounds float* %tmp7021, i64 1
- %tmp7023 = getelementptr inbounds float* %tmp7022, i64 1
- %tmp7024 = getelementptr inbounds float* %tmp7023, i64 1
- %tmp7025 = getelementptr inbounds float* %tmp7024, i64 1
- %tmp7026 = getelementptr inbounds float* %tmp7025, i64 1
- %tmp7027 = getelementptr inbounds float* %tmp7026, i64 1
- %tmp7028 = getelementptr inbounds float* %tmp7027, i64 1
- %tmp7029 = getelementptr inbounds float* %tmp7028, i64 1
- %tmp7030 = getelementptr inbounds float* %tmp7029, i64 1
- %tmp7031 = getelementptr inbounds float* %tmp7030, i64 1
- %tmp7032 = getelementptr inbounds float* %tmp7031, i64 1
- %tmp7033 = getelementptr inbounds float* %tmp7032, i64 1
- %tmp7034 = getelementptr inbounds float* %tmp7033, i64 1
- %tmp7035 = getelementptr inbounds float* %tmp7034, i64 1
- %tmp7036 = getelementptr inbounds float* %tmp7035, i64 1
- %tmp7037 = getelementptr inbounds float* %tmp7036, i64 1
- %tmp7038 = getelementptr inbounds float* %tmp7037, i64 1
- %tmp7039 = getelementptr inbounds float* %tmp7038, i64 1
- %tmp7040 = getelementptr inbounds float* %tmp7039, i64 1
- %tmp7041 = getelementptr inbounds float* %tmp7040, i64 1
- %tmp7042 = getelementptr inbounds float* %tmp7041, i64 1
- %tmp7043 = getelementptr inbounds float* %tmp7042, i64 1
- %tmp7044 = getelementptr inbounds float* %tmp7043, i64 1
- %tmp7045 = getelementptr inbounds float* %tmp7044, i64 1
- %tmp7046 = getelementptr inbounds float* %tmp7045, i64 1
- %tmp7047 = getelementptr inbounds float* %tmp7046, i64 1
- %tmp7048 = getelementptr inbounds float* %tmp7047, i64 1
- %tmp7049 = getelementptr inbounds float* %tmp7048, i64 1
- %tmp7050 = getelementptr inbounds float* %tmp7049, i64 1
- %tmp7051 = getelementptr inbounds float* %tmp7050, i64 1
- %tmp7052 = getelementptr inbounds float* %tmp7051, i64 1
- %tmp7053 = getelementptr inbounds float* %tmp7052, i64 1
- %tmp7054 = getelementptr inbounds float* %tmp7053, i64 1
- %tmp7055 = getelementptr inbounds float* %tmp7054, i64 1
- %tmp7056 = getelementptr inbounds float* %tmp7055, i64 1
- %tmp7057 = getelementptr inbounds float* %tmp7056, i64 1
- %tmp7058 = getelementptr inbounds float* %tmp7057, i64 1
- %tmp7059 = getelementptr inbounds float* %tmp7058, i64 1
- %tmp7060 = getelementptr inbounds float* %tmp7059, i64 1
- %tmp7061 = getelementptr inbounds float* %tmp7060, i64 1
- %tmp7062 = getelementptr inbounds float* %tmp7061, i64 1
- %tmp7063 = getelementptr inbounds float* %tmp7062, i64 1
- %tmp7064 = getelementptr inbounds float* %tmp7063, i64 1
- %tmp7065 = getelementptr inbounds float* %tmp7064, i64 1
- %tmp7066 = getelementptr inbounds float* %tmp7065, i64 1
- %tmp7067 = getelementptr inbounds float* %tmp7066, i64 1
- %tmp7068 = getelementptr inbounds float* %tmp7067, i64 1
- %tmp7069 = getelementptr inbounds float* %tmp7068, i64 1
- %tmp7070 = getelementptr inbounds float* %tmp7069, i64 1
- %tmp7071 = getelementptr inbounds float* %tmp7070, i64 1
- %tmp7072 = getelementptr inbounds float* %tmp7071, i64 1
- %tmp7073 = getelementptr inbounds float* %tmp7072, i64 1
- %tmp7074 = getelementptr inbounds float* %tmp7073, i64 1
- %tmp7075 = getelementptr inbounds float* %tmp7074, i64 1
- %tmp7076 = getelementptr inbounds float* %tmp7075, i64 1
- %tmp7077 = getelementptr inbounds float* %tmp7076, i64 1
- %tmp7078 = getelementptr inbounds float* %tmp7077, i64 1
- %tmp7079 = getelementptr inbounds float* %tmp7078, i64 1
- %tmp7080 = getelementptr inbounds float* %tmp7079, i64 1
- %tmp7081 = getelementptr inbounds float* %tmp7080, i64 1
- %tmp7082 = getelementptr inbounds float* %tmp7081, i64 1
- %tmp7083 = getelementptr inbounds float* %tmp7082, i64 1
- %tmp7084 = getelementptr inbounds float* %tmp7083, i64 1
- %tmp7085 = getelementptr inbounds float* %tmp7084, i64 1
- %tmp7086 = getelementptr inbounds float* %tmp7085, i64 1
- %tmp7087 = getelementptr inbounds float* %tmp7086, i64 1
- %tmp7088 = getelementptr inbounds float* %tmp7087, i64 1
- %tmp7089 = getelementptr inbounds float* %tmp7088, i64 1
- %tmp7090 = getelementptr inbounds float* %tmp7089, i64 1
- %tmp7091 = getelementptr inbounds float* %tmp7090, i64 1
- %tmp7092 = getelementptr inbounds float* %tmp7091, i64 1
- %tmp7093 = getelementptr inbounds float* %tmp7092, i64 1
- %tmp7094 = getelementptr inbounds float* %tmp7093, i64 1
- %tmp7095 = getelementptr inbounds float* %tmp7094, i64 1
- %tmp7096 = getelementptr inbounds float* %tmp7095, i64 1
- %tmp7097 = getelementptr inbounds float* %tmp7096, i64 1
- %tmp7098 = getelementptr inbounds float* %tmp7097, i64 1
- %tmp7099 = getelementptr inbounds float* %tmp7098, i64 1
- %tmp7100 = getelementptr inbounds float* %tmp7099, i64 1
- %tmp7101 = getelementptr inbounds float* %tmp7100, i64 1
- %tmp7102 = getelementptr inbounds float* %tmp7101, i64 1
- %tmp7103 = getelementptr inbounds float* %tmp7102, i64 1
- %tmp7104 = getelementptr inbounds float* %tmp7103, i64 1
- %tmp7105 = getelementptr inbounds float* %tmp7104, i64 1
- %tmp7106 = getelementptr inbounds float* %tmp7105, i64 1
- %tmp7107 = getelementptr inbounds float* %tmp7106, i64 1
- %tmp7108 = getelementptr inbounds float* %tmp7107, i64 1
- %tmp7109 = getelementptr inbounds float* %tmp7108, i64 1
- %tmp7110 = getelementptr inbounds float* %tmp7109, i64 1
- %tmp7111 = getelementptr inbounds float* %tmp7110, i64 1
- %tmp7112 = getelementptr inbounds float* %tmp7111, i64 1
- %tmp7113 = getelementptr inbounds float* %tmp7112, i64 1
- %tmp7114 = getelementptr inbounds float* %tmp7113, i64 1
- %tmp7115 = getelementptr inbounds float* %tmp7114, i64 1
- %tmp7116 = getelementptr inbounds float* %tmp7115, i64 1
- %tmp7117 = getelementptr inbounds float* %tmp7116, i64 1
- %tmp7118 = getelementptr inbounds float* %tmp7117, i64 1
- %tmp7119 = getelementptr inbounds float* %tmp7118, i64 1
- %tmp7120 = getelementptr inbounds float* %tmp7119, i64 1
- %tmp7121 = getelementptr inbounds float* %tmp7120, i64 1
- %tmp7122 = getelementptr inbounds float* %tmp7121, i64 1
- %tmp7123 = getelementptr inbounds float* %tmp7122, i64 1
- %tmp7124 = getelementptr inbounds float* %tmp7123, i64 1
- %tmp7125 = getelementptr inbounds float* %tmp7124, i64 1
- %tmp7126 = getelementptr inbounds float* %tmp7125, i64 1
- %tmp7127 = getelementptr inbounds float* %tmp7126, i64 1
- %tmp7128 = getelementptr inbounds float* %tmp7127, i64 1
- %tmp7129 = getelementptr inbounds float* %tmp7128, i64 1
- %tmp7130 = getelementptr inbounds float* %tmp7129, i64 1
- %tmp7131 = getelementptr inbounds float* %tmp7130, i64 1
- %tmp7132 = getelementptr inbounds float* %tmp7131, i64 1
- %tmp7133 = getelementptr inbounds float* %tmp7132, i64 1
- %tmp7134 = getelementptr inbounds float* %tmp7133, i64 1
- %tmp7135 = getelementptr inbounds float* %tmp7134, i64 1
- %tmp7136 = getelementptr inbounds float* %tmp7135, i64 1
- %tmp7137 = getelementptr inbounds float* %tmp7136, i64 1
- %tmp7138 = getelementptr inbounds float* %tmp7137, i64 1
- %tmp7139 = getelementptr inbounds float* %tmp7138, i64 1
- %tmp7140 = getelementptr inbounds float* %tmp7139, i64 1
- %tmp7141 = getelementptr inbounds float* %tmp7140, i64 1
- %tmp7142 = getelementptr inbounds float* %tmp7141, i64 1
- %tmp7143 = getelementptr inbounds float* %tmp7142, i64 1
- %tmp7144 = getelementptr inbounds float* %tmp7143, i64 1
- %tmp7145 = getelementptr inbounds float* %tmp7144, i64 1
- %tmp7146 = getelementptr inbounds float* %tmp7145, i64 1
- %tmp7147 = getelementptr inbounds float* %tmp7146, i64 1
- %tmp7148 = getelementptr inbounds float* %tmp7147, i64 1
- %tmp7149 = getelementptr inbounds float* %tmp7148, i64 1
- %tmp7150 = getelementptr inbounds float* %tmp7149, i64 1
- %tmp7151 = getelementptr inbounds float* %tmp7150, i64 1
- %tmp7152 = getelementptr inbounds float* %tmp7151, i64 1
- %tmp7153 = getelementptr inbounds float* %tmp7152, i64 1
- %tmp7154 = getelementptr inbounds float* %tmp7153, i64 1
- %tmp7155 = getelementptr inbounds float* %tmp7154, i64 1
- %tmp7156 = getelementptr inbounds float* %tmp7155, i64 1
- %tmp7157 = getelementptr inbounds float* %tmp7156, i64 1
- %tmp7158 = getelementptr inbounds float* %tmp7157, i64 1
- %tmp7159 = getelementptr inbounds float* %tmp7158, i64 1
- %tmp7160 = getelementptr inbounds float* %tmp7159, i64 1
- %tmp7161 = getelementptr inbounds float* %tmp7160, i64 1
- %tmp7162 = getelementptr inbounds float* %tmp7161, i64 1
- %tmp7163 = getelementptr inbounds float* %tmp7162, i64 1
- %tmp7164 = getelementptr inbounds float* %tmp7163, i64 1
- %tmp7165 = getelementptr inbounds float* %tmp7164, i64 1
- %tmp7166 = getelementptr inbounds float* %tmp7165, i64 1
- %tmp7167 = getelementptr inbounds float* %tmp7166, i64 1
- %tmp7168 = getelementptr inbounds float* %tmp7167, i64 1
- %tmp7169 = getelementptr inbounds float* %tmp7168, i64 1
- %tmp7170 = getelementptr inbounds float* %tmp7169, i64 1
- %tmp7171 = getelementptr inbounds float* %tmp7170, i64 1
- %tmp7172 = getelementptr inbounds float* %tmp7171, i64 1
- %tmp7173 = getelementptr inbounds float* %tmp7172, i64 1
- %tmp7174 = getelementptr inbounds float* %tmp7173, i64 1
- %tmp7175 = getelementptr inbounds float* %tmp7174, i64 1
- %tmp7176 = getelementptr inbounds float* %tmp7175, i64 1
- %tmp7177 = getelementptr inbounds float* %tmp7176, i64 1
- %tmp7178 = getelementptr inbounds float* %tmp7177, i64 1
- %tmp7179 = getelementptr inbounds float* %tmp7178, i64 1
- %tmp7180 = getelementptr inbounds float* %tmp7179, i64 1
- %tmp7181 = getelementptr inbounds float* %tmp7180, i64 1
- %tmp7182 = getelementptr inbounds float* %tmp7181, i64 1
- %tmp7183 = getelementptr inbounds float* %tmp7182, i64 1
- %tmp7184 = getelementptr inbounds float* %tmp7183, i64 1
- %tmp7185 = getelementptr inbounds float* %tmp7184, i64 1
- %tmp7186 = getelementptr inbounds float* %tmp7185, i64 1
- %tmp7187 = getelementptr inbounds float* %tmp7186, i64 1
- %tmp7188 = getelementptr inbounds float* %tmp7187, i64 1
- %tmp7189 = getelementptr inbounds float* %tmp7188, i64 1
- %tmp7190 = getelementptr inbounds float* %tmp7189, i64 1
- %tmp7191 = getelementptr inbounds float* %tmp7190, i64 1
- %tmp7192 = getelementptr inbounds float* %tmp7191, i64 1
- %tmp7193 = getelementptr inbounds float* %tmp7192, i64 1
- %tmp7194 = getelementptr inbounds float* %tmp7193, i64 1
- %tmp7195 = getelementptr inbounds float* %tmp7194, i64 1
- %tmp7196 = getelementptr inbounds float* %tmp7195, i64 1
- %tmp7197 = getelementptr inbounds float* %tmp7196, i64 1
- %tmp7198 = getelementptr inbounds float* %tmp7197, i64 1
- %tmp7199 = getelementptr inbounds float* %tmp7198, i64 1
- %tmp7200 = getelementptr inbounds float* %tmp7199, i64 1
- %tmp7201 = getelementptr inbounds float* %tmp7200, i64 1
- %tmp7202 = getelementptr inbounds float* %tmp7201, i64 1
- %tmp7203 = getelementptr inbounds float* %tmp7202, i64 1
- %tmp7204 = getelementptr inbounds float* %tmp7203, i64 1
- %tmp7205 = getelementptr inbounds float* %tmp7204, i64 1
- %tmp7206 = getelementptr inbounds float* %tmp7205, i64 1
- %tmp7207 = getelementptr inbounds float* %tmp7206, i64 1
- %tmp7208 = getelementptr inbounds float* %tmp7207, i64 1
- %tmp7209 = getelementptr inbounds float* %tmp7208, i64 1
- %tmp7210 = getelementptr inbounds float* %tmp7209, i64 1
- %tmp7211 = getelementptr inbounds float* %tmp7210, i64 1
- %tmp7212 = getelementptr inbounds float* %tmp7211, i64 1
- %tmp7213 = getelementptr inbounds float* %tmp7212, i64 1
- %tmp7214 = getelementptr inbounds float* %tmp7213, i64 1
- %tmp7215 = getelementptr inbounds float* %tmp7214, i64 1
- %tmp7216 = getelementptr inbounds float* %tmp7215, i64 1
- %tmp7217 = getelementptr inbounds float* %tmp7216, i64 1
- %tmp7218 = getelementptr inbounds float* %tmp7217, i64 1
- %tmp7219 = getelementptr inbounds float* %tmp7218, i64 1
- %tmp7220 = getelementptr inbounds float* %tmp7219, i64 1
- %tmp7221 = getelementptr inbounds float* %tmp7220, i64 1
- %tmp7222 = getelementptr inbounds float* %tmp7221, i64 1
- %tmp7223 = getelementptr inbounds float* %tmp7222, i64 1
- %tmp7224 = getelementptr inbounds float* %tmp7223, i64 1
- %tmp7225 = getelementptr inbounds float* %tmp7224, i64 1
- %tmp7226 = getelementptr inbounds float* %tmp7225, i64 1
- %tmp7227 = getelementptr inbounds float* %tmp7226, i64 1
- %tmp7228 = getelementptr inbounds float* %tmp7227, i64 1
- %tmp7229 = getelementptr inbounds float* %tmp7228, i64 1
- %tmp7230 = getelementptr inbounds float* %tmp7229, i64 1
- %tmp7231 = getelementptr inbounds float* %tmp7230, i64 1
- %tmp7232 = getelementptr inbounds float* %tmp7231, i64 1
- %tmp7233 = getelementptr inbounds float* %tmp7232, i64 1
- %tmp7234 = getelementptr inbounds float* %tmp7233, i64 1
- %tmp7235 = getelementptr inbounds float* %tmp7234, i64 1
- %tmp7236 = getelementptr inbounds float* %tmp7235, i64 1
- %tmp7237 = getelementptr inbounds float* %tmp7236, i64 1
- %tmp7238 = getelementptr inbounds float* %tmp7237, i64 1
- %tmp7239 = getelementptr inbounds float* %tmp7238, i64 1
- %tmp7240 = getelementptr inbounds float* %tmp7239, i64 1
- %tmp7241 = getelementptr inbounds float* %tmp7240, i64 1
- %tmp7242 = getelementptr inbounds float* %tmp7241, i64 1
- %tmp7243 = getelementptr inbounds float* %tmp7242, i64 1
- %tmp7244 = getelementptr inbounds float* %tmp7243, i64 1
- %tmp7245 = getelementptr inbounds float* %tmp7244, i64 1
- %tmp7246 = getelementptr inbounds float* %tmp7245, i64 1
- %tmp7247 = getelementptr inbounds float* %tmp7246, i64 1
- %tmp7248 = getelementptr inbounds float* %tmp7247, i64 1
- %tmp7249 = getelementptr inbounds float* %tmp7248, i64 1
- %tmp7250 = getelementptr inbounds float* %tmp7249, i64 1
- %tmp7251 = getelementptr inbounds float* %tmp7250, i64 1
- %tmp7252 = getelementptr inbounds float* %tmp7251, i64 1
- %tmp7253 = getelementptr inbounds float* %tmp7252, i64 1
- %tmp7254 = getelementptr inbounds float* %tmp7253, i64 1
- %tmp7255 = getelementptr inbounds float* %tmp7254, i64 1
- %tmp7256 = getelementptr inbounds float* %tmp7255, i64 1
- %tmp7257 = getelementptr inbounds float* %tmp7256, i64 1
- %tmp7258 = getelementptr inbounds float* %tmp7257, i64 1
- %tmp7259 = getelementptr inbounds float* %tmp7258, i64 1
- %tmp7260 = getelementptr inbounds float* %tmp7259, i64 1
- %tmp7261 = getelementptr inbounds float* %tmp7260, i64 1
- %tmp7262 = getelementptr inbounds float* %tmp7261, i64 1
- %tmp7263 = getelementptr inbounds float* %tmp7262, i64 1
- %tmp7264 = getelementptr inbounds float* %tmp7263, i64 1
- %tmp7265 = getelementptr inbounds float* %tmp7264, i64 1
- %tmp7266 = getelementptr inbounds float* %tmp7265, i64 1
- %tmp7267 = getelementptr inbounds float* %tmp7266, i64 1
- %tmp7268 = getelementptr inbounds float* %tmp7267, i64 1
- %tmp7269 = getelementptr inbounds float* %tmp7268, i64 1
- %tmp7270 = getelementptr inbounds float* %tmp7269, i64 1
- %tmp7271 = getelementptr inbounds float* %tmp7270, i64 1
- %tmp7272 = getelementptr inbounds float* %tmp7271, i64 1
- %tmp7273 = getelementptr inbounds float* %tmp7272, i64 1
- %tmp7274 = getelementptr inbounds float* %tmp7273, i64 1
- %tmp7275 = getelementptr inbounds float* %tmp7274, i64 1
- %tmp7276 = getelementptr inbounds float* %tmp7275, i64 1
- %tmp7277 = getelementptr inbounds float* %tmp7276, i64 1
- %tmp7278 = getelementptr inbounds float* %tmp7277, i64 1
- %tmp7279 = getelementptr inbounds float* %tmp7278, i64 1
- %tmp7280 = getelementptr inbounds float* %tmp7279, i64 1
- %tmp7281 = getelementptr inbounds float* %tmp7280, i64 1
- %tmp7282 = getelementptr inbounds float* %tmp7281, i64 1
- %tmp7283 = getelementptr inbounds float* %tmp7282, i64 1
- %tmp7284 = getelementptr inbounds float* %tmp7283, i64 1
- %tmp7285 = getelementptr inbounds float* %tmp7284, i64 1
- %tmp7286 = getelementptr inbounds float* %tmp7285, i64 1
- %tmp7287 = getelementptr inbounds float* %tmp7286, i64 1
- %tmp7288 = getelementptr inbounds float* %tmp7287, i64 1
- %tmp7289 = getelementptr inbounds float* %tmp7288, i64 1
- %tmp7290 = getelementptr inbounds float* %tmp7289, i64 1
- %tmp7291 = getelementptr inbounds float* %tmp7290, i64 1
- %tmp7292 = getelementptr inbounds float* %tmp7291, i64 1
- %tmp7293 = getelementptr inbounds float* %tmp7292, i64 1
- %tmp7294 = getelementptr inbounds float* %tmp7293, i64 1
- %tmp7295 = getelementptr inbounds float* %tmp7294, i64 1
- %tmp7296 = getelementptr inbounds float* %tmp7295, i64 1
- %tmp7297 = getelementptr inbounds float* %tmp7296, i64 1
- %tmp7298 = getelementptr inbounds float* %tmp7297, i64 1
- %tmp7299 = getelementptr inbounds float* %tmp7298, i64 1
- %tmp7300 = getelementptr inbounds float* %tmp7299, i64 1
- %tmp7301 = getelementptr inbounds float* %tmp7300, i64 1
- %tmp7302 = getelementptr inbounds float* %tmp7301, i64 1
- %tmp7303 = getelementptr inbounds float* %tmp7302, i64 1
- %tmp7304 = getelementptr inbounds float* %tmp7303, i64 1
- %tmp7305 = getelementptr inbounds float* %tmp7304, i64 1
- %tmp7306 = getelementptr inbounds float* %tmp7305, i64 1
- %tmp7307 = getelementptr inbounds float* %tmp7306, i64 1
- %tmp7308 = getelementptr inbounds float* %tmp7307, i64 1
- %tmp7309 = getelementptr inbounds float* %tmp7308, i64 1
- %tmp7310 = getelementptr inbounds float* %tmp7309, i64 1
- %tmp7311 = getelementptr inbounds float* %tmp7310, i64 1
- %tmp7312 = getelementptr inbounds float* %tmp7311, i64 1
- %tmp7313 = getelementptr inbounds float* %tmp7312, i64 1
- %tmp7314 = getelementptr inbounds float* %tmp7313, i64 1
- %tmp7315 = getelementptr inbounds float* %tmp7314, i64 1
- %tmp7316 = getelementptr inbounds float* %tmp7315, i64 1
- %tmp7317 = getelementptr inbounds float* %tmp7316, i64 1
- %tmp7318 = getelementptr inbounds float* %tmp7317, i64 1
- %tmp7319 = getelementptr inbounds float* %tmp7318, i64 1
- %tmp7320 = getelementptr inbounds float* %tmp7319, i64 1
- %tmp7321 = getelementptr inbounds float* %tmp7320, i64 1
- %tmp7322 = getelementptr inbounds float* %tmp7321, i64 1
- %tmp7323 = getelementptr inbounds float* %tmp7322, i64 1
- %tmp7324 = getelementptr inbounds float* %tmp7323, i64 1
- %tmp7325 = getelementptr inbounds float* %tmp7324, i64 1
- %tmp7326 = getelementptr inbounds float* %tmp7325, i64 1
- %tmp7327 = getelementptr inbounds float* %tmp7326, i64 1
- %tmp7328 = getelementptr inbounds float* %tmp7327, i64 1
- %tmp7329 = getelementptr inbounds float* %tmp7328, i64 1
- %tmp7330 = getelementptr inbounds float* %tmp7329, i64 1
- %tmp7331 = getelementptr inbounds float* %tmp7330, i64 1
- %tmp7332 = getelementptr inbounds float* %tmp7331, i64 1
- %tmp7333 = getelementptr inbounds float* %tmp7332, i64 1
- %tmp7334 = getelementptr inbounds float* %tmp7333, i64 1
- %tmp7335 = getelementptr inbounds float* %tmp7334, i64 1
- %tmp7336 = getelementptr inbounds float* %tmp7335, i64 1
- %tmp7337 = getelementptr inbounds float* %tmp7336, i64 1
- %tmp7338 = getelementptr inbounds float* %tmp7337, i64 1
- %tmp7339 = getelementptr inbounds float* %tmp7338, i64 1
- %tmp7340 = getelementptr inbounds float* %tmp7339, i64 1
- %tmp7341 = getelementptr inbounds float* %tmp7340, i64 1
- %tmp7342 = getelementptr inbounds float* %tmp7341, i64 1
- %tmp7343 = getelementptr inbounds float* %tmp7342, i64 1
- %tmp7344 = getelementptr inbounds float* %tmp7343, i64 1
- %tmp7345 = getelementptr inbounds float* %tmp7344, i64 1
- %tmp7346 = getelementptr inbounds float* %tmp7345, i64 1
- %tmp7347 = getelementptr inbounds float* %tmp7346, i64 1
- %tmp7348 = getelementptr inbounds float* %tmp7347, i64 1
- %tmp7349 = getelementptr inbounds float* %tmp7348, i64 1
- %tmp7350 = getelementptr inbounds float* %tmp7349, i64 1
- %tmp7351 = getelementptr inbounds float* %tmp7350, i64 1
- %tmp7352 = getelementptr inbounds float* %tmp7351, i64 1
- %tmp7353 = getelementptr inbounds float* %tmp7352, i64 1
- %tmp7354 = getelementptr inbounds float* %tmp7353, i64 1
- %tmp7355 = getelementptr inbounds float* %tmp7354, i64 1
- %tmp7356 = getelementptr inbounds float* %tmp7355, i64 1
- %tmp7357 = getelementptr inbounds float* %tmp7356, i64 1
- %tmp7358 = getelementptr inbounds float* %tmp7357, i64 1
- %tmp7359 = getelementptr inbounds float* %tmp7358, i64 1
- %tmp7360 = getelementptr inbounds float* %tmp7359, i64 1
- %tmp7361 = getelementptr inbounds float* %tmp7360, i64 1
- %tmp7362 = getelementptr inbounds float* %tmp7361, i64 1
- %tmp7363 = getelementptr inbounds float* %tmp7362, i64 1
- %tmp7364 = getelementptr inbounds float* %tmp7363, i64 1
- %tmp7365 = getelementptr inbounds float* %tmp7364, i64 1
- %tmp7366 = getelementptr inbounds float* %tmp7365, i64 1
- %tmp7367 = getelementptr inbounds float* %tmp7366, i64 1
- %tmp7368 = getelementptr inbounds float* %tmp7367, i64 1
- %tmp7369 = getelementptr inbounds float* %tmp7368, i64 1
- %tmp7370 = getelementptr inbounds float* %tmp7369, i64 1
- %tmp7371 = getelementptr inbounds float* %tmp7370, i64 1
- %tmp7372 = getelementptr inbounds float* %tmp7371, i64 1
- %tmp7373 = getelementptr inbounds float* %tmp7372, i64 1
- %tmp7374 = getelementptr inbounds float* %tmp7373, i64 1
- %tmp7375 = getelementptr inbounds float* %tmp7374, i64 1
- %tmp7376 = getelementptr inbounds float* %tmp7375, i64 1
- %tmp7377 = getelementptr inbounds float* %tmp7376, i64 1
- %tmp7378 = getelementptr inbounds float* %tmp7377, i64 1
- %tmp7379 = getelementptr inbounds float* %tmp7378, i64 1
- %tmp7380 = getelementptr inbounds float* %tmp7379, i64 1
- %tmp7381 = getelementptr inbounds float* %tmp7380, i64 1
- %tmp7382 = getelementptr inbounds float* %tmp7381, i64 1
- %tmp7383 = getelementptr inbounds float* %tmp7382, i64 1
- %tmp7384 = getelementptr inbounds float* %tmp7383, i64 1
- %tmp7385 = getelementptr inbounds float* %tmp7384, i64 1
- %tmp7386 = getelementptr inbounds float* %tmp7385, i64 1
- %tmp7387 = getelementptr inbounds float* %tmp7386, i64 1
- %tmp7388 = getelementptr inbounds float* %tmp7387, i64 1
- %tmp7389 = getelementptr inbounds float* %tmp7388, i64 1
- %tmp7390 = getelementptr inbounds float* %tmp7389, i64 1
- %tmp7391 = getelementptr inbounds float* %tmp7390, i64 1
- %tmp7392 = getelementptr inbounds float* %tmp7391, i64 1
- %tmp7393 = getelementptr inbounds float* %tmp7392, i64 1
- %tmp7394 = getelementptr inbounds float* %tmp7393, i64 1
- %tmp7395 = getelementptr inbounds float* %tmp7394, i64 1
- %tmp7396 = getelementptr inbounds float* %tmp7395, i64 1
- %tmp7397 = getelementptr inbounds float* %tmp7396, i64 1
- %tmp7398 = getelementptr inbounds float* %tmp7397, i64 1
- %tmp7399 = getelementptr inbounds float* %tmp7398, i64 1
- %tmp7400 = getelementptr inbounds float* %tmp7399, i64 1
- %tmp7401 = getelementptr inbounds float* %tmp7400, i64 1
- %tmp7402 = getelementptr inbounds float* %tmp7401, i64 1
- %tmp7403 = getelementptr inbounds float* %tmp7402, i64 1
- %tmp7404 = getelementptr inbounds float* %tmp7403, i64 1
- %tmp7405 = getelementptr inbounds float* %tmp7404, i64 1
- %tmp7406 = getelementptr inbounds float* %tmp7405, i64 1
- %tmp7407 = getelementptr inbounds float* %tmp7406, i64 1
- %tmp7408 = getelementptr inbounds float* %tmp7407, i64 1
- %tmp7409 = getelementptr inbounds float* %tmp7408, i64 1
- %tmp7410 = getelementptr inbounds float* %tmp7409, i64 1
- %tmp7411 = getelementptr inbounds float* %tmp7410, i64 1
- %tmp7412 = getelementptr inbounds float* %tmp7411, i64 1
- %tmp7413 = getelementptr inbounds float* %tmp7412, i64 1
- %tmp7414 = getelementptr inbounds float* %tmp7413, i64 1
- %tmp7415 = getelementptr inbounds float* %tmp7414, i64 1
- %tmp7416 = getelementptr inbounds float* %tmp7415, i64 1
- %tmp7417 = getelementptr inbounds float* %tmp7416, i64 1
- %tmp7418 = getelementptr inbounds float* %tmp7417, i64 1
- %tmp7419 = getelementptr inbounds float* %tmp7418, i64 1
- %tmp7420 = getelementptr inbounds float* %tmp7419, i64 1
- %tmp7421 = getelementptr inbounds float* %tmp7420, i64 1
- %tmp7422 = getelementptr inbounds float* %tmp7421, i64 1
- %tmp7423 = getelementptr inbounds float* %tmp7422, i64 1
- %tmp7424 = getelementptr inbounds float* %tmp7423, i64 1
- %tmp7425 = getelementptr inbounds float* %tmp7424, i64 1
- %tmp7426 = getelementptr inbounds float* %tmp7425, i64 1
- %tmp7427 = getelementptr inbounds float* %tmp7426, i64 1
- %tmp7428 = getelementptr inbounds float* %tmp7427, i64 1
- %tmp7429 = getelementptr inbounds float* %tmp7428, i64 1
- %tmp7430 = getelementptr inbounds float* %tmp7429, i64 1
- %tmp7431 = getelementptr inbounds float* %tmp7430, i64 1
- %tmp7432 = getelementptr inbounds float* %tmp7431, i64 1
- %tmp7433 = getelementptr inbounds float* %tmp7432, i64 1
- %tmp7434 = getelementptr inbounds float* %tmp7433, i64 1
- %tmp7435 = getelementptr inbounds float* %tmp7434, i64 1
- %tmp7436 = getelementptr inbounds float* %tmp7435, i64 1
- %tmp7437 = getelementptr inbounds float* %tmp7436, i64 1
- %tmp7438 = getelementptr inbounds float* %tmp7437, i64 1
- %tmp7439 = getelementptr inbounds float* %tmp7438, i64 1
- %tmp7440 = getelementptr inbounds float* %tmp7439, i64 1
- %tmp7441 = getelementptr inbounds float* %tmp7440, i64 1
- %tmp7442 = getelementptr inbounds float* %tmp7441, i64 1
- %tmp7443 = getelementptr inbounds float* %tmp7442, i64 1
- %tmp7444 = getelementptr inbounds float* %tmp7443, i64 1
- %tmp7445 = getelementptr inbounds float* %tmp7444, i64 1
- %tmp7446 = getelementptr inbounds float* %tmp7445, i64 1
- %tmp7447 = getelementptr inbounds float* %tmp7446, i64 1
- %tmp7448 = getelementptr inbounds float* %tmp7447, i64 1
- %tmp7449 = getelementptr inbounds float* %tmp7448, i64 1
- %tmp7450 = getelementptr inbounds float* %tmp7449, i64 1
- %tmp7451 = getelementptr inbounds float* %tmp7450, i64 1
- %tmp7452 = getelementptr inbounds float* %tmp7451, i64 1
- %tmp7453 = getelementptr inbounds float* %tmp7452, i64 1
- %tmp7454 = getelementptr inbounds float* %tmp7453, i64 1
- %tmp7455 = getelementptr inbounds float* %tmp7454, i64 1
- %tmp7456 = getelementptr inbounds float* %tmp7455, i64 1
- %tmp7457 = getelementptr inbounds float* %tmp7456, i64 1
- %tmp7458 = getelementptr inbounds float* %tmp7457, i64 1
- %tmp7459 = getelementptr inbounds float* %tmp7458, i64 1
- %tmp7460 = getelementptr inbounds float* %tmp7459, i64 1
- %tmp7461 = getelementptr inbounds float* %tmp7460, i64 1
- %tmp7462 = getelementptr inbounds float* %tmp7461, i64 1
- %tmp7463 = getelementptr inbounds float* %tmp7462, i64 1
- %tmp7464 = getelementptr inbounds float* %tmp7463, i64 1
- %tmp7465 = getelementptr inbounds float* %tmp7464, i64 1
- %tmp7466 = getelementptr inbounds float* %tmp7465, i64 1
- %tmp7467 = getelementptr inbounds float* %tmp7466, i64 1
- %tmp7468 = getelementptr inbounds float* %tmp7467, i64 1
- %tmp7469 = getelementptr inbounds float* %tmp7468, i64 1
- %tmp7470 = getelementptr inbounds float* %tmp7469, i64 1
- %tmp7471 = getelementptr inbounds float* %tmp7470, i64 1
- %tmp7472 = getelementptr inbounds float* %tmp7471, i64 1
- %tmp7473 = getelementptr inbounds float* %tmp7472, i64 1
- %tmp7474 = getelementptr inbounds float* %tmp7473, i64 1
- %tmp7475 = getelementptr inbounds float* %tmp7474, i64 1
- %tmp7476 = getelementptr inbounds float* %tmp7475, i64 1
- %tmp7477 = getelementptr inbounds float* %tmp7476, i64 1
- %tmp7478 = getelementptr inbounds float* %tmp7477, i64 1
- %tmp7479 = getelementptr inbounds float* %tmp7478, i64 1
- %tmp7480 = getelementptr inbounds float* %tmp7479, i64 1
- %tmp7481 = getelementptr inbounds float* %tmp7480, i64 1
- %tmp7482 = getelementptr inbounds float* %tmp7481, i64 1
- %tmp7483 = getelementptr inbounds float* %tmp7482, i64 1
- %tmp7484 = getelementptr inbounds float* %tmp7483, i64 1
- %tmp7485 = getelementptr inbounds float* %tmp7484, i64 1
- %tmp7486 = getelementptr inbounds float* %tmp7485, i64 1
- %tmp7487 = getelementptr inbounds float* %tmp7486, i64 1
- %tmp7488 = getelementptr inbounds float* %tmp7487, i64 1
- %tmp7489 = getelementptr inbounds float* %tmp7488, i64 1
- %tmp7490 = getelementptr inbounds float* %tmp7489, i64 1
- %tmp7491 = getelementptr inbounds float* %tmp7490, i64 1
- %tmp7492 = getelementptr inbounds float* %tmp7491, i64 1
- %tmp7493 = getelementptr inbounds float* %tmp7492, i64 1
- %tmp7494 = getelementptr inbounds float* %tmp7493, i64 1
- %tmp7495 = getelementptr inbounds float* %tmp7494, i64 1
- %tmp7496 = getelementptr inbounds float* %tmp7495, i64 1
- %tmp7497 = getelementptr inbounds float* %tmp7496, i64 1
- %tmp7498 = getelementptr inbounds float* %tmp7497, i64 1
- %tmp7499 = getelementptr inbounds float* %tmp7498, i64 1
- %tmp7500 = getelementptr inbounds float* %tmp7499, i64 1
- %tmp7501 = getelementptr inbounds float* %tmp7500, i64 1
- %tmp7502 = getelementptr inbounds float* %tmp7501, i64 1
- %tmp7503 = getelementptr inbounds float* %tmp7502, i64 1
- %tmp7504 = getelementptr inbounds float* %tmp7503, i64 1
- %tmp7505 = getelementptr inbounds float* %tmp7504, i64 1
- %tmp7506 = getelementptr inbounds float* %tmp7505, i64 1
- %tmp7507 = getelementptr inbounds float* %tmp7506, i64 1
- %tmp7508 = getelementptr inbounds float* %tmp7507, i64 1
- %tmp7509 = getelementptr inbounds float* %tmp7508, i64 1
- %tmp7510 = getelementptr inbounds float* %tmp7509, i64 1
- %tmp7511 = getelementptr inbounds float* %tmp7510, i64 1
- %tmp7512 = getelementptr inbounds float* %tmp7511, i64 1
- %tmp7513 = getelementptr inbounds float* %tmp7512, i64 1
- %tmp7514 = getelementptr inbounds float* %tmp7513, i64 1
- %tmp7515 = getelementptr inbounds float* %tmp7514, i64 1
- %tmp7516 = getelementptr inbounds float* %tmp7515, i64 1
- %tmp7517 = getelementptr inbounds float* %tmp7516, i64 1
- %tmp7518 = getelementptr inbounds float* %tmp7517, i64 1
- %tmp7519 = getelementptr inbounds float* %tmp7518, i64 1
- %tmp7520 = getelementptr inbounds float* %tmp7519, i64 1
- %tmp7521 = getelementptr inbounds float* %tmp7520, i64 1
- %tmp7522 = getelementptr inbounds float* %tmp7521, i64 1
- %tmp7523 = getelementptr inbounds float* %tmp7522, i64 1
- %tmp7524 = getelementptr inbounds float* %tmp7523, i64 1
- %tmp7525 = getelementptr inbounds float* %tmp7524, i64 1
- %tmp7526 = getelementptr inbounds float* %tmp7525, i64 1
- %tmp7527 = getelementptr inbounds float* %tmp7526, i64 1
- %tmp7528 = getelementptr inbounds float* %tmp7527, i64 1
- %tmp7529 = getelementptr inbounds float* %tmp7528, i64 1
- %tmp7530 = getelementptr inbounds float* %tmp7529, i64 1
- %tmp7531 = getelementptr inbounds float* %tmp7530, i64 1
- %tmp7532 = getelementptr inbounds float* %tmp7531, i64 1
- %tmp7533 = getelementptr inbounds float* %tmp7532, i64 1
- %tmp7534 = getelementptr inbounds float* %tmp7533, i64 1
- %tmp7535 = getelementptr inbounds float* %tmp7534, i64 1
- %tmp7536 = getelementptr inbounds float* %tmp7535, i64 1
- %tmp7537 = getelementptr inbounds float* %tmp7536, i64 1
- %tmp7538 = getelementptr inbounds float* %tmp7537, i64 1
- %tmp7539 = getelementptr inbounds float* %tmp7538, i64 1
- %tmp7540 = getelementptr inbounds float* %tmp7539, i64 1
- %tmp7541 = getelementptr inbounds float* %tmp7540, i64 1
- %tmp7542 = getelementptr inbounds float* %tmp7541, i64 1
- %tmp7543 = getelementptr inbounds float* %tmp7542, i64 1
- %tmp7544 = getelementptr inbounds float* %tmp7543, i64 1
- %tmp7545 = getelementptr inbounds float* %tmp7544, i64 1
- %tmp7546 = getelementptr inbounds float* %tmp7545, i64 1
- %tmp7547 = getelementptr inbounds float* %tmp7546, i64 1
- %tmp7548 = getelementptr inbounds float* %tmp7547, i64 1
- %tmp7549 = getelementptr inbounds float* %tmp7548, i64 1
- %tmp7550 = getelementptr inbounds float* %tmp7549, i64 1
- %tmp7551 = getelementptr inbounds float* %tmp7550, i64 1
- %tmp7552 = getelementptr inbounds float* %tmp7551, i64 1
- %tmp7553 = getelementptr inbounds float* %tmp7552, i64 1
- %tmp7554 = getelementptr inbounds float* %tmp7553, i64 1
- %tmp7555 = getelementptr inbounds float* %tmp7554, i64 1
- %tmp7556 = getelementptr inbounds float* %tmp7555, i64 1
- %tmp7557 = getelementptr inbounds float* %tmp7556, i64 1
- %tmp7558 = getelementptr inbounds float* %tmp7557, i64 1
- %tmp7559 = getelementptr inbounds float* %tmp7558, i64 1
- %tmp7560 = getelementptr inbounds float* %tmp7559, i64 1
- %tmp7561 = getelementptr inbounds float* %tmp7560, i64 1
- %tmp7562 = getelementptr inbounds float* %tmp7561, i64 1
- %tmp7563 = getelementptr inbounds float* %tmp7562, i64 1
- %tmp7564 = getelementptr inbounds float* %tmp7563, i64 1
- %tmp7565 = getelementptr inbounds float* %tmp7564, i64 1
- %tmp7566 = getelementptr inbounds float* %tmp7565, i64 1
- %tmp7567 = getelementptr inbounds float* %tmp7566, i64 1
- %tmp7568 = getelementptr inbounds float* %tmp7567, i64 1
- %tmp7569 = getelementptr inbounds float* %tmp7568, i64 1
- %tmp7570 = getelementptr inbounds float* %tmp7569, i64 1
- %tmp7571 = getelementptr inbounds float* %tmp7570, i64 1
- %tmp7572 = getelementptr inbounds float* %tmp7571, i64 1
- %tmp7573 = getelementptr inbounds float* %tmp7572, i64 1
- %tmp7574 = getelementptr inbounds float* %tmp7573, i64 1
- %tmp7575 = getelementptr inbounds float* %tmp7574, i64 1
- %tmp7576 = getelementptr inbounds float* %tmp7575, i64 1
- %tmp7577 = getelementptr inbounds float* %tmp7576, i64 1
- %tmp7578 = getelementptr inbounds float* %tmp7577, i64 1
- %tmp7579 = getelementptr inbounds float* %tmp7578, i64 1
- %tmp7580 = getelementptr inbounds float* %tmp7579, i64 1
- %tmp7581 = getelementptr inbounds float* %tmp7580, i64 1
- %tmp7582 = getelementptr inbounds float* %tmp7581, i64 1
- %tmp7583 = getelementptr inbounds float* %tmp7582, i64 1
- %tmp7584 = getelementptr inbounds float* %tmp7583, i64 1
- %tmp7585 = getelementptr inbounds float* %tmp7584, i64 1
- %tmp7586 = getelementptr inbounds float* %tmp7585, i64 1
- %tmp7587 = getelementptr inbounds float* %tmp7586, i64 1
- %tmp7588 = getelementptr inbounds float* %tmp7587, i64 1
- %tmp7589 = getelementptr inbounds float* %tmp7588, i64 1
- %tmp7590 = getelementptr inbounds float* %tmp7589, i64 1
- %tmp7591 = getelementptr inbounds float* %tmp7590, i64 1
- %tmp7592 = getelementptr inbounds float* %tmp7591, i64 1
- %tmp7593 = getelementptr inbounds float* %tmp7592, i64 1
- %tmp7594 = getelementptr inbounds float* %tmp7593, i64 1
- %tmp7595 = getelementptr inbounds float* %tmp7594, i64 1
- %tmp7596 = getelementptr inbounds float* %tmp7595, i64 1
- %tmp7597 = getelementptr inbounds float* %tmp7596, i64 1
- %tmp7598 = getelementptr inbounds float* %tmp7597, i64 1
- %tmp7599 = getelementptr inbounds float* %tmp7598, i64 1
- %tmp7600 = getelementptr inbounds float* %tmp7599, i64 1
- %tmp7601 = getelementptr inbounds float* %tmp7600, i64 1
- %tmp7602 = getelementptr inbounds float* %tmp7601, i64 1
- %tmp7603 = getelementptr inbounds float* %tmp7602, i64 1
- %tmp7604 = getelementptr inbounds float* %tmp7603, i64 1
- %tmp7605 = getelementptr inbounds float* %tmp7604, i64 1
- %tmp7606 = getelementptr inbounds float* %tmp7605, i64 1
- %tmp7607 = getelementptr inbounds float* %tmp7606, i64 1
- %tmp7608 = getelementptr inbounds float* %tmp7607, i64 1
- %tmp7609 = getelementptr inbounds float* %tmp7608, i64 1
- %tmp7610 = getelementptr inbounds float* %tmp7609, i64 1
- %tmp7611 = getelementptr inbounds float* %tmp7610, i64 1
- %tmp7612 = getelementptr inbounds float* %tmp7611, i64 1
- %tmp7613 = getelementptr inbounds float* %tmp7612, i64 1
- %tmp7614 = getelementptr inbounds float* %tmp7613, i64 1
- %tmp7615 = getelementptr inbounds float* %tmp7614, i64 1
- %tmp7616 = getelementptr inbounds float* %tmp7615, i64 1
- %tmp7617 = getelementptr inbounds float* %tmp7616, i64 1
- %tmp7618 = getelementptr inbounds float* %tmp7617, i64 1
- %tmp7619 = getelementptr inbounds float* %tmp7618, i64 1
- %tmp7620 = getelementptr inbounds float* %tmp7619, i64 1
- %tmp7621 = getelementptr inbounds float* %tmp7620, i64 1
- %tmp7622 = getelementptr inbounds float* %tmp7621, i64 1
- %tmp7623 = getelementptr inbounds float* %tmp7622, i64 1
- %tmp7624 = getelementptr inbounds float* %tmp7623, i64 1
- %tmp7625 = getelementptr inbounds float* %tmp7624, i64 1
- %tmp7626 = getelementptr inbounds float* %tmp7625, i64 1
- %tmp7627 = getelementptr inbounds float* %tmp7626, i64 1
- %tmp7628 = getelementptr inbounds float* %tmp7627, i64 1
- %tmp7629 = getelementptr inbounds float* %tmp7628, i64 1
- %tmp7630 = getelementptr inbounds float* %tmp7629, i64 1
- %tmp7631 = getelementptr inbounds float* %tmp7630, i64 1
- %tmp7632 = getelementptr inbounds float* %tmp7631, i64 1
- %tmp7633 = getelementptr inbounds float* %tmp7632, i64 1
- %tmp7634 = getelementptr inbounds float* %tmp7633, i64 1
- %tmp7635 = getelementptr inbounds float* %tmp7634, i64 1
- %tmp7636 = getelementptr inbounds float* %tmp7635, i64 1
- %tmp7637 = getelementptr inbounds float* %tmp7636, i64 1
- %tmp7638 = getelementptr inbounds float* %tmp7637, i64 1
- %tmp7639 = getelementptr inbounds float* %tmp7638, i64 1
- %tmp7640 = getelementptr inbounds float* %tmp7639, i64 1
- %tmp7641 = getelementptr inbounds float* %tmp7640, i64 1
- %tmp7642 = getelementptr inbounds float* %tmp7641, i64 1
- %tmp7643 = getelementptr inbounds float* %tmp7642, i64 1
- %tmp7644 = getelementptr inbounds float* %tmp7643, i64 1
- %tmp7645 = getelementptr inbounds float* %tmp7644, i64 1
- %tmp7646 = getelementptr inbounds float* %tmp7645, i64 1
- %tmp7647 = getelementptr inbounds float* %tmp7646, i64 1
- %tmp7648 = getelementptr inbounds float* %tmp7647, i64 1
- %tmp7649 = getelementptr inbounds float* %tmp7648, i64 1
- %tmp7650 = getelementptr inbounds float* %tmp7649, i64 1
- %tmp7651 = getelementptr inbounds float* %tmp7650, i64 1
- %tmp7652 = getelementptr inbounds float* %tmp7651, i64 1
- %tmp7653 = getelementptr inbounds float* %tmp7652, i64 1
- %tmp7654 = getelementptr inbounds float* %tmp7653, i64 1
- %tmp7655 = getelementptr inbounds float* %tmp7654, i64 1
- %tmp7656 = getelementptr inbounds float* %tmp7655, i64 1
- %tmp7657 = getelementptr inbounds float* %tmp7656, i64 1
- %tmp7658 = getelementptr inbounds float* %tmp7657, i64 1
- %tmp7659 = getelementptr inbounds float* %tmp7658, i64 1
- %tmp7660 = getelementptr inbounds float* %tmp7659, i64 1
- %tmp7661 = getelementptr inbounds float* %tmp7660, i64 1
- %tmp7662 = getelementptr inbounds float* %tmp7661, i64 1
- %tmp7663 = getelementptr inbounds float* %tmp7662, i64 1
- %tmp7664 = getelementptr inbounds float* %tmp7663, i64 1
- %tmp7665 = getelementptr inbounds float* %tmp7664, i64 1
- %tmp7666 = getelementptr inbounds float* %tmp7665, i64 1
- %tmp7667 = getelementptr inbounds float* %tmp7666, i64 1
- %tmp7668 = getelementptr inbounds float* %tmp7667, i64 1
- %tmp7669 = getelementptr inbounds float* %tmp7668, i64 1
- %tmp7670 = getelementptr inbounds float* %tmp7669, i64 1
- %tmp7671 = getelementptr inbounds float* %tmp7670, i64 1
- %tmp7672 = getelementptr inbounds float* %tmp7671, i64 1
- %tmp7673 = getelementptr inbounds float* %tmp7672, i64 1
- %tmp7674 = getelementptr inbounds float* %tmp7673, i64 1
- %tmp7675 = getelementptr inbounds float* %tmp7674, i64 1
- %tmp7676 = getelementptr inbounds float* %tmp7675, i64 1
- %tmp7677 = getelementptr inbounds float* %tmp7676, i64 1
- %tmp7678 = getelementptr inbounds float* %tmp7677, i64 1
- %tmp7679 = getelementptr inbounds float* %tmp7678, i64 1
- %tmp7680 = getelementptr inbounds float* %tmp7679, i64 1
- %tmp7681 = getelementptr inbounds float* %tmp7680, i64 1
- %tmp7682 = getelementptr inbounds float* %tmp7681, i64 1
- %tmp7683 = getelementptr inbounds float* %tmp7682, i64 1
- %tmp7684 = getelementptr inbounds float* %tmp7683, i64 1
- %tmp7685 = getelementptr inbounds float* %tmp7684, i64 1
- %tmp7686 = getelementptr inbounds float* %tmp7685, i64 1
- %tmp7687 = getelementptr inbounds float* %tmp7686, i64 1
- %tmp7688 = getelementptr inbounds float* %tmp7687, i64 1
- %tmp7689 = getelementptr inbounds float* %tmp7688, i64 1
- %tmp7690 = getelementptr inbounds float* %tmp7689, i64 1
- %tmp7691 = getelementptr inbounds float* %tmp7690, i64 1
- %tmp7692 = getelementptr inbounds float* %tmp7691, i64 1
- %tmp7693 = getelementptr inbounds float* %tmp7692, i64 1
- %tmp7694 = getelementptr inbounds float* %tmp7693, i64 1
- %tmp7695 = getelementptr inbounds float* %tmp7694, i64 1
- %tmp7696 = getelementptr inbounds float* %tmp7695, i64 1
- %tmp7697 = getelementptr inbounds float* %tmp7696, i64 1
- %tmp7698 = getelementptr inbounds float* %tmp7697, i64 1
- %tmp7699 = getelementptr inbounds float* %tmp7698, i64 1
- %tmp7700 = getelementptr inbounds float* %tmp7699, i64 1
- %tmp7701 = getelementptr inbounds float* %tmp7700, i64 1
- %tmp7702 = getelementptr inbounds float* %tmp7701, i64 1
- %tmp7703 = getelementptr inbounds float* %tmp7702, i64 1
- %tmp7704 = getelementptr inbounds float* %tmp7703, i64 1
- %tmp7705 = getelementptr inbounds float* %tmp7704, i64 1
- %tmp7706 = getelementptr inbounds float* %tmp7705, i64 1
- %tmp7707 = getelementptr inbounds float* %tmp7706, i64 1
- %tmp7708 = getelementptr inbounds float* %tmp7707, i64 1
- %tmp7709 = getelementptr inbounds float* %tmp7708, i64 1
- %tmp7710 = getelementptr inbounds float* %tmp7709, i64 1
- %tmp7711 = getelementptr inbounds float* %tmp7710, i64 1
- %tmp7712 = getelementptr inbounds float* %tmp7711, i64 1
- %tmp7713 = getelementptr inbounds float* %tmp7712, i64 1
- %tmp7714 = getelementptr inbounds float* %tmp7713, i64 1
- %tmp7715 = getelementptr inbounds float* %tmp7714, i64 1
- %tmp7716 = getelementptr inbounds float* %tmp7715, i64 1
- %tmp7717 = getelementptr inbounds float* %tmp7716, i64 1
- %tmp7718 = getelementptr inbounds float* %tmp7717, i64 1
- %tmp7719 = getelementptr inbounds float* %tmp7718, i64 1
- %tmp7720 = getelementptr inbounds float* %tmp7719, i64 1
- %tmp7721 = getelementptr inbounds float* %tmp7720, i64 1
- %tmp7722 = getelementptr inbounds float* %tmp7721, i64 1
- %tmp7723 = getelementptr inbounds float* %tmp7722, i64 1
- %tmp7724 = getelementptr inbounds float* %tmp7723, i64 1
- %tmp7725 = getelementptr inbounds float* %tmp7724, i64 1
- %tmp7726 = getelementptr inbounds float* %tmp7725, i64 1
- %tmp7727 = getelementptr inbounds float* %tmp7726, i64 1
- %tmp7728 = getelementptr inbounds float* %tmp7727, i64 1
- %tmp7729 = getelementptr inbounds float* %tmp7728, i64 1
- %tmp7730 = getelementptr inbounds float* %tmp7729, i64 1
- %tmp7731 = getelementptr inbounds float* %tmp7730, i64 1
- %tmp7732 = getelementptr inbounds float* %tmp7731, i64 1
- %tmp7733 = getelementptr inbounds float* %tmp7732, i64 1
- %tmp7734 = getelementptr inbounds float* %tmp7733, i64 1
- %tmp7735 = getelementptr inbounds float* %tmp7734, i64 1
- %tmp7736 = getelementptr inbounds float* %tmp7735, i64 1
- %tmp7737 = getelementptr inbounds float* %tmp7736, i64 1
- %tmp7738 = getelementptr inbounds float* %tmp7737, i64 1
- %tmp7739 = getelementptr inbounds float* %tmp7738, i64 1
- %tmp7740 = getelementptr inbounds float* %tmp7739, i64 1
- %tmp7741 = getelementptr inbounds float* %tmp7740, i64 1
- %tmp7742 = getelementptr inbounds float* %tmp7741, i64 1
- %tmp7743 = getelementptr inbounds float* %tmp7742, i64 1
- %tmp7744 = getelementptr inbounds float* %tmp7743, i64 1
- %tmp7745 = getelementptr inbounds float* %tmp7744, i64 1
- %tmp7746 = getelementptr inbounds float* %tmp7745, i64 1
- %tmp7747 = getelementptr inbounds float* %tmp7746, i64 1
- %tmp7748 = getelementptr inbounds float* %tmp7747, i64 1
- %tmp7749 = getelementptr inbounds float* %tmp7748, i64 1
- %tmp7750 = getelementptr inbounds float* %tmp7749, i64 1
- %tmp7751 = getelementptr inbounds float* %tmp7750, i64 1
- %tmp7752 = getelementptr inbounds float* %tmp7751, i64 1
- %tmp7753 = getelementptr inbounds float* %tmp7752, i64 1
- %tmp7754 = getelementptr inbounds float* %tmp7753, i64 1
- %tmp7755 = getelementptr inbounds float* %tmp7754, i64 1
- %tmp7756 = getelementptr inbounds float* %tmp7755, i64 1
- %tmp7757 = getelementptr inbounds float* %tmp7756, i64 1
- %tmp7758 = getelementptr inbounds float* %tmp7757, i64 1
- %tmp7759 = getelementptr inbounds float* %tmp7758, i64 1
- %tmp7760 = getelementptr inbounds float* %tmp7759, i64 1
- %tmp7761 = getelementptr inbounds float* %tmp7760, i64 1
- %tmp7762 = getelementptr inbounds float* %tmp7761, i64 1
- %tmp7763 = getelementptr inbounds float* %tmp7762, i64 1
- %tmp7764 = getelementptr inbounds float* %tmp7763, i64 1
- %tmp7765 = getelementptr inbounds float* %tmp7764, i64 1
- %tmp7766 = getelementptr inbounds float* %tmp7765, i64 1
- %tmp7767 = getelementptr inbounds float* %tmp7766, i64 1
- %tmp7768 = getelementptr inbounds float* %tmp7767, i64 1
- %tmp7769 = getelementptr inbounds float* %tmp7768, i64 1
- %tmp7770 = getelementptr inbounds float* %tmp7769, i64 1
- %tmp7771 = getelementptr inbounds float* %tmp7770, i64 1
- %tmp7772 = getelementptr inbounds float* %tmp7771, i64 1
- %tmp7773 = getelementptr inbounds float* %tmp7772, i64 1
- %tmp7774 = getelementptr inbounds float* %tmp7773, i64 1
- %tmp7775 = getelementptr inbounds float* %tmp7774, i64 1
- %tmp7776 = getelementptr inbounds float* %tmp7775, i64 1
- %tmp7777 = getelementptr inbounds float* %tmp7776, i64 1
- %tmp7778 = getelementptr inbounds float* %tmp7777, i64 1
- %tmp7779 = getelementptr inbounds float* %tmp7778, i64 1
- %tmp7780 = getelementptr inbounds float* %tmp7779, i64 1
- %tmp7781 = getelementptr inbounds float* %tmp7780, i64 1
- %tmp7782 = getelementptr inbounds float* %tmp7781, i64 1
- %tmp7783 = getelementptr inbounds float* %tmp7782, i64 1
- %tmp7784 = getelementptr inbounds float* %tmp7783, i64 1
- %tmp7785 = getelementptr inbounds float* %tmp7784, i64 1
- %tmp7786 = getelementptr inbounds float* %tmp7785, i64 1
- %tmp7787 = getelementptr inbounds float* %tmp7786, i64 1
- %tmp7788 = getelementptr inbounds float* %tmp7787, i64 1
- %tmp7789 = getelementptr inbounds float* %tmp7788, i64 1
- %tmp7790 = getelementptr inbounds float* %tmp7789, i64 1
- %tmp7791 = getelementptr inbounds float* %tmp7790, i64 1
- %tmp7792 = getelementptr inbounds float* %tmp7791, i64 1
- %tmp7793 = getelementptr inbounds float* %tmp7792, i64 1
- %tmp7794 = getelementptr inbounds float* %tmp7793, i64 1
- %tmp7795 = getelementptr inbounds float* %tmp7794, i64 1
- %tmp7796 = getelementptr inbounds float* %tmp7795, i64 1
- %tmp7797 = getelementptr inbounds float* %tmp7796, i64 1
- %tmp7798 = getelementptr inbounds float* %tmp7797, i64 1
- %tmp7799 = getelementptr inbounds float* %tmp7798, i64 1
- %tmp7800 = getelementptr inbounds float* %tmp7799, i64 1
- %tmp7801 = getelementptr inbounds float* %tmp7800, i64 1
- %tmp7802 = getelementptr inbounds float* %tmp7801, i64 1
- %tmp7803 = getelementptr inbounds float* %tmp7802, i64 1
- %tmp7804 = getelementptr inbounds float* %tmp7803, i64 1
- %tmp7805 = getelementptr inbounds float* %tmp7804, i64 1
- %tmp7806 = getelementptr inbounds float* %tmp7805, i64 1
- %tmp7807 = getelementptr inbounds float* %tmp7806, i64 1
- %tmp7808 = getelementptr inbounds float* %tmp7807, i64 1
- %tmp7809 = getelementptr inbounds float* %tmp7808, i64 1
- %tmp7810 = getelementptr inbounds float* %tmp7809, i64 1
- %tmp7811 = getelementptr inbounds float* %tmp7810, i64 1
- %tmp7812 = getelementptr inbounds float* %tmp7811, i64 1
- %tmp7813 = getelementptr inbounds float* %tmp7812, i64 1
- %tmp7814 = getelementptr inbounds float* %tmp7813, i64 1
- %tmp7815 = getelementptr inbounds float* %tmp7814, i64 1
- %tmp7816 = getelementptr inbounds float* %tmp7815, i64 1
- %tmp7817 = getelementptr inbounds float* %tmp7816, i64 1
- %tmp7818 = getelementptr inbounds float* %tmp7817, i64 1
- %tmp7819 = getelementptr inbounds float* %tmp7818, i64 1
- %tmp7820 = getelementptr inbounds float* %tmp7819, i64 1
- %tmp7821 = getelementptr inbounds float* %tmp7820, i64 1
- %tmp7822 = getelementptr inbounds float* %tmp7821, i64 1
- %tmp7823 = getelementptr inbounds float* %tmp7822, i64 1
- %tmp7824 = getelementptr inbounds float* %tmp7823, i64 1
- %tmp7825 = getelementptr inbounds float* %tmp7824, i64 1
- %tmp7826 = getelementptr inbounds float* %tmp7825, i64 1
- %tmp7827 = getelementptr inbounds float* %tmp7826, i64 1
- %tmp7828 = getelementptr inbounds float* %tmp7827, i64 1
- %tmp7829 = getelementptr inbounds float* %tmp7828, i64 1
- %tmp7830 = getelementptr inbounds float* %tmp7829, i64 1
- %tmp7831 = getelementptr inbounds float* %tmp7830, i64 1
- %tmp7832 = getelementptr inbounds float* %tmp7831, i64 1
- %tmp7833 = getelementptr inbounds float* %tmp7832, i64 1
- %tmp7834 = getelementptr inbounds float* %tmp7833, i64 1
- %tmp7835 = getelementptr inbounds float* %tmp7834, i64 1
- %tmp7836 = getelementptr inbounds float* %tmp7835, i64 1
- %tmp7837 = getelementptr inbounds float* %tmp7836, i64 1
- %tmp7838 = getelementptr inbounds float* %tmp7837, i64 1
- %tmp7839 = getelementptr inbounds float* %tmp7838, i64 1
- %tmp7840 = getelementptr inbounds float* %tmp7839, i64 1
- %tmp7841 = getelementptr inbounds float* %tmp7840, i64 1
- %tmp7842 = getelementptr inbounds float* %tmp7841, i64 1
- %tmp7843 = getelementptr inbounds float* %tmp7842, i64 1
- %tmp7844 = getelementptr inbounds float* %tmp7843, i64 1
- %tmp7845 = getelementptr inbounds float* %tmp7844, i64 1
- %tmp7846 = getelementptr inbounds float* %tmp7845, i64 1
- %tmp7847 = getelementptr inbounds float* %tmp7846, i64 1
- %tmp7848 = getelementptr inbounds float* %tmp7847, i64 1
- %tmp7849 = getelementptr inbounds float* %tmp7848, i64 1
- %tmp7850 = getelementptr inbounds float* %tmp7849, i64 1
- %tmp7851 = getelementptr inbounds float* %tmp7850, i64 1
- %tmp7852 = getelementptr inbounds float* %tmp7851, i64 1
- %tmp7853 = getelementptr inbounds float* %tmp7852, i64 1
- %tmp7854 = getelementptr inbounds float* %tmp7853, i64 1
- %tmp7855 = getelementptr inbounds float* %tmp7854, i64 1
- %tmp7856 = getelementptr inbounds float* %tmp7855, i64 1
- %tmp7857 = getelementptr inbounds float* %tmp7856, i64 1
- %tmp7858 = getelementptr inbounds float* %tmp7857, i64 1
- %tmp7859 = getelementptr inbounds float* %tmp7858, i64 1
- %tmp7860 = getelementptr inbounds float* %tmp7859, i64 1
- %tmp7861 = getelementptr inbounds float* %tmp7860, i64 1
- %tmp7862 = getelementptr inbounds float* %tmp7861, i64 1
- %tmp7863 = getelementptr inbounds float* %tmp7862, i64 1
- %tmp7864 = getelementptr inbounds float* %tmp7863, i64 1
- %tmp7865 = getelementptr inbounds float* %tmp7864, i64 1
- %tmp7866 = getelementptr inbounds float* %tmp7865, i64 1
- %tmp7867 = getelementptr inbounds float* %tmp7866, i64 1
- %tmp7868 = getelementptr inbounds float* %tmp7867, i64 1
- %tmp7869 = getelementptr inbounds float* %tmp7868, i64 1
- %tmp7870 = getelementptr inbounds float* %tmp7869, i64 1
- %tmp7871 = getelementptr inbounds float* %tmp7870, i64 1
- %tmp7872 = getelementptr inbounds float* %tmp7871, i64 1
- %tmp7873 = getelementptr inbounds float* %tmp7872, i64 1
- %tmp7874 = getelementptr inbounds float* %tmp7873, i64 1
- %tmp7875 = getelementptr inbounds float* %tmp7874, i64 1
- %tmp7876 = getelementptr inbounds float* %tmp7875, i64 1
- %tmp7877 = getelementptr inbounds float* %tmp7876, i64 1
- %tmp7878 = getelementptr inbounds float* %tmp7877, i64 1
- %tmp7879 = getelementptr inbounds float* %tmp7878, i64 1
- %tmp7880 = getelementptr inbounds float* %tmp7879, i64 1
- %tmp7881 = getelementptr inbounds float* %tmp7880, i64 1
- %tmp7882 = getelementptr inbounds float* %tmp7881, i64 1
- %tmp7883 = getelementptr inbounds float* %tmp7882, i64 1
- %tmp7884 = getelementptr inbounds float* %tmp7883, i64 1
- %tmp7885 = getelementptr inbounds float* %tmp7884, i64 1
- %tmp7886 = getelementptr inbounds float* %tmp7885, i64 1
- %tmp7887 = getelementptr inbounds float* %tmp7886, i64 1
- %tmp7888 = getelementptr inbounds float* %tmp7887, i64 1
- %tmp7889 = getelementptr inbounds float* %tmp7888, i64 1
- %tmp7890 = getelementptr inbounds float* %tmp7889, i64 1
- %tmp7891 = getelementptr inbounds float* %tmp7890, i64 1
- %tmp7892 = getelementptr inbounds float* %tmp7891, i64 1
- %tmp7893 = getelementptr inbounds float* %tmp7892, i64 1
- %tmp7894 = getelementptr inbounds float* %tmp7893, i64 1
- %tmp7895 = getelementptr inbounds float* %tmp7894, i64 1
- %tmp7896 = getelementptr inbounds float* %tmp7895, i64 1
- %tmp7897 = getelementptr inbounds float* %tmp7896, i64 1
- %tmp7898 = getelementptr inbounds float* %tmp7897, i64 1
- %tmp7899 = getelementptr inbounds float* %tmp7898, i64 1
- %tmp7900 = getelementptr inbounds float* %tmp7899, i64 1
- %tmp7901 = getelementptr inbounds float* %tmp7900, i64 1
- %tmp7902 = getelementptr inbounds float* %tmp7901, i64 1
- %tmp7903 = getelementptr inbounds float* %tmp7902, i64 1
- %tmp7904 = getelementptr inbounds float* %tmp7903, i64 1
- %tmp7905 = getelementptr inbounds float* %tmp7904, i64 1
- %tmp7906 = getelementptr inbounds float* %tmp7905, i64 1
- %tmp7907 = getelementptr inbounds float* %tmp7906, i64 1
- %tmp7908 = getelementptr inbounds float* %tmp7907, i64 1
- %tmp7909 = getelementptr inbounds float* %tmp7908, i64 1
- %tmp7910 = getelementptr inbounds float* %tmp7909, i64 1
- %tmp7911 = getelementptr inbounds float* %tmp7910, i64 1
- %tmp7912 = getelementptr inbounds float* %tmp7911, i64 1
- %tmp7913 = getelementptr inbounds float* %tmp7912, i64 1
- %tmp7914 = getelementptr inbounds float* %tmp7913, i64 1
- %tmp7915 = getelementptr inbounds float* %tmp7914, i64 1
- %tmp7916 = getelementptr inbounds float* %tmp7915, i64 1
- %tmp7917 = getelementptr inbounds float* %tmp7916, i64 1
- %tmp7918 = getelementptr inbounds float* %tmp7917, i64 1
- %tmp7919 = getelementptr inbounds float* %tmp7918, i64 1
- %tmp7920 = getelementptr inbounds float* %tmp7919, i64 1
- %tmp7921 = getelementptr inbounds float* %tmp7920, i64 1
- %tmp7922 = getelementptr inbounds float* %tmp7921, i64 1
- %tmp7923 = getelementptr inbounds float* %tmp7922, i64 1
- %tmp7924 = getelementptr inbounds float* %tmp7923, i64 1
- %tmp7925 = getelementptr inbounds float* %tmp7924, i64 1
- %tmp7926 = getelementptr inbounds float* %tmp7925, i64 1
- %tmp7927 = getelementptr inbounds float* %tmp7926, i64 1
- %tmp7928 = getelementptr inbounds float* %tmp7927, i64 1
- %tmp7929 = getelementptr inbounds float* %tmp7928, i64 1
- %tmp7930 = getelementptr inbounds float* %tmp7929, i64 1
- %tmp7931 = getelementptr inbounds float* %tmp7930, i64 1
- %tmp7932 = getelementptr inbounds float* %tmp7931, i64 1
- %tmp7933 = getelementptr inbounds float* %tmp7932, i64 1
- %tmp7934 = getelementptr inbounds float* %tmp7933, i64 1
- %tmp7935 = getelementptr inbounds float* %tmp7934, i64 1
- %tmp7936 = getelementptr inbounds float* %tmp7935, i64 1
- %tmp7937 = getelementptr inbounds float* %tmp7936, i64 1
- %tmp7938 = getelementptr inbounds float* %tmp7937, i64 1
- %tmp7939 = getelementptr inbounds float* %tmp7938, i64 1
- %tmp7940 = getelementptr inbounds float* %tmp7939, i64 1
- %tmp7941 = getelementptr inbounds float* %tmp7940, i64 1
- %tmp7942 = getelementptr inbounds float* %tmp7941, i64 1
- %tmp7943 = getelementptr inbounds float* %tmp7942, i64 1
- %tmp7944 = getelementptr inbounds float* %tmp7943, i64 1
- %tmp7945 = getelementptr inbounds float* %tmp7944, i64 1
- %tmp7946 = getelementptr inbounds float* %tmp7945, i64 1
- %tmp7947 = getelementptr inbounds float* %tmp7946, i64 1
- %tmp7948 = getelementptr inbounds float* %tmp7947, i64 1
- %tmp7949 = getelementptr inbounds float* %tmp7948, i64 1
- %tmp7950 = getelementptr inbounds float* %tmp7949, i64 1
- %tmp7951 = getelementptr inbounds float* %tmp7950, i64 1
- %tmp7952 = getelementptr inbounds float* %tmp7951, i64 1
- %tmp7953 = getelementptr inbounds float* %tmp7952, i64 1
- %tmp7954 = getelementptr inbounds float* %tmp7953, i64 1
- %tmp7955 = getelementptr inbounds float* %tmp7954, i64 1
- %tmp7956 = getelementptr inbounds float* %tmp7955, i64 1
- %tmp7957 = getelementptr inbounds float* %tmp7956, i64 1
- %tmp7958 = getelementptr inbounds float* %tmp7957, i64 1
- %tmp7959 = getelementptr inbounds float* %tmp7958, i64 1
- %tmp7960 = getelementptr inbounds float* %tmp7959, i64 1
- %tmp7961 = getelementptr inbounds float* %tmp7960, i64 1
- %tmp7962 = getelementptr inbounds float* %tmp7961, i64 1
- %tmp7963 = getelementptr inbounds float* %tmp7962, i64 1
- %tmp7964 = getelementptr inbounds float* %tmp7963, i64 1
- %tmp7965 = getelementptr inbounds float* %tmp7964, i64 1
- %tmp7966 = getelementptr inbounds float* %tmp7965, i64 1
- %tmp7967 = getelementptr inbounds float* %tmp7966, i64 1
- %tmp7968 = getelementptr inbounds float* %tmp7967, i64 1
- %tmp7969 = getelementptr inbounds float* %tmp7968, i64 1
- %tmp7970 = getelementptr inbounds float* %tmp7969, i64 1
- %tmp7971 = getelementptr inbounds float* %tmp7970, i64 1
- %tmp7972 = getelementptr inbounds float* %tmp7971, i64 1
- %tmp7973 = getelementptr inbounds float* %tmp7972, i64 1
- %tmp7974 = getelementptr inbounds float* %tmp7973, i64 1
- %tmp7975 = getelementptr inbounds float* %tmp7974, i64 1
- %tmp7976 = getelementptr inbounds float* %tmp7975, i64 1
- %tmp7977 = getelementptr inbounds float* %tmp7976, i64 1
- %tmp7978 = getelementptr inbounds float* %tmp7977, i64 1
- %tmp7979 = getelementptr inbounds float* %tmp7978, i64 1
- %tmp7980 = getelementptr inbounds float* %tmp7979, i64 1
- %tmp7981 = getelementptr inbounds float* %tmp7980, i64 1
- %tmp7982 = getelementptr inbounds float* %tmp7981, i64 1
- %tmp7983 = getelementptr inbounds float* %tmp7982, i64 1
- %tmp7984 = getelementptr inbounds float* %tmp7983, i64 1
- %tmp7985 = getelementptr inbounds float* %tmp7984, i64 1
- %tmp7986 = getelementptr inbounds float* %tmp7985, i64 1
- %tmp7987 = getelementptr inbounds float* %tmp7986, i64 1
- %tmp7988 = getelementptr inbounds float* %tmp7987, i64 1
- %tmp7989 = getelementptr inbounds float* %tmp7988, i64 1
- %tmp7990 = getelementptr inbounds float* %tmp7989, i64 1
- %tmp7991 = getelementptr inbounds float* %tmp7990, i64 1
- %tmp7992 = getelementptr inbounds float* %tmp7991, i64 1
- %tmp7993 = getelementptr inbounds float* %tmp7992, i64 1
- %tmp7994 = getelementptr inbounds float* %tmp7993, i64 1
- %tmp7995 = getelementptr inbounds float* %tmp7994, i64 1
- %tmp7996 = getelementptr inbounds float* %tmp7995, i64 1
- %tmp7997 = getelementptr inbounds float* %tmp7996, i64 1
- %tmp7998 = getelementptr inbounds float* %tmp7997, i64 1
- %tmp7999 = getelementptr inbounds float* %tmp7998, i64 1
- %tmp8000 = getelementptr inbounds float* %tmp7999, i64 1
- %tmp8001 = getelementptr inbounds float* %tmp8000, i64 1
- %tmp8002 = getelementptr inbounds float* %tmp8001, i64 1
- %tmp8003 = getelementptr inbounds float* %tmp8002, i64 1
- %tmp8004 = getelementptr inbounds float* %tmp8003, i64 1
- %tmp8005 = getelementptr inbounds float* %tmp8004, i64 1
- %tmp8006 = getelementptr inbounds float* %tmp8005, i64 1
- %tmp8007 = getelementptr inbounds float* %tmp8006, i64 1
- %tmp8008 = getelementptr inbounds float* %tmp8007, i64 1
- %tmp8009 = getelementptr inbounds float* %tmp8008, i64 1
- %tmp8010 = getelementptr inbounds float* %tmp8009, i64 1
- %tmp8011 = getelementptr inbounds float* %tmp8010, i64 1
- %tmp8012 = getelementptr inbounds float* %tmp8011, i64 1
- %tmp8013 = getelementptr inbounds float* %tmp8012, i64 1
- %tmp8014 = getelementptr inbounds float* %tmp8013, i64 1
- %tmp8015 = getelementptr inbounds float* %tmp8014, i64 1
- %tmp8016 = getelementptr inbounds float* %tmp8015, i64 1
- %tmp8017 = getelementptr inbounds float* %tmp8016, i64 1
- %tmp8018 = getelementptr inbounds float* %tmp8017, i64 1
- %tmp8019 = getelementptr inbounds float* %tmp8018, i64 1
- %tmp8020 = getelementptr inbounds float* %tmp8019, i64 1
- %tmp8021 = getelementptr inbounds float* %tmp8020, i64 1
- %tmp8022 = getelementptr inbounds float* %tmp8021, i64 1
- %tmp8023 = getelementptr inbounds float* %tmp8022, i64 1
- %tmp8024 = getelementptr inbounds float* %tmp8023, i64 1
- %tmp8025 = getelementptr inbounds float* %tmp8024, i64 1
- %tmp8026 = getelementptr inbounds float* %tmp8025, i64 1
- %tmp8027 = getelementptr inbounds float* %tmp8026, i64 1
- %tmp8028 = getelementptr inbounds float* %tmp8027, i64 1
- %tmp8029 = getelementptr inbounds float* %tmp8028, i64 1
- %tmp8030 = getelementptr inbounds float* %tmp8029, i64 1
- %tmp8031 = getelementptr inbounds float* %tmp8030, i64 1
- %tmp8032 = getelementptr inbounds float* %tmp8031, i64 1
- %tmp8033 = getelementptr inbounds float* %tmp8032, i64 1
- %tmp8034 = getelementptr inbounds float* %tmp8033, i64 1
- %tmp8035 = getelementptr inbounds float* %tmp8034, i64 1
- %tmp8036 = getelementptr inbounds float* %tmp8035, i64 1
- %tmp8037 = getelementptr inbounds float* %tmp8036, i64 1
- %tmp8038 = getelementptr inbounds float* %tmp8037, i64 1
- %tmp8039 = getelementptr inbounds float* %tmp8038, i64 1
- %tmp8040 = getelementptr inbounds float* %tmp8039, i64 1
- %tmp8041 = getelementptr inbounds float* %tmp8040, i64 1
- %tmp8042 = getelementptr inbounds float* %tmp8041, i64 1
- %tmp8043 = getelementptr inbounds float* %tmp8042, i64 1
- %tmp8044 = getelementptr inbounds float* %tmp8043, i64 1
- %tmp8045 = getelementptr inbounds float* %tmp8044, i64 1
- %tmp8046 = getelementptr inbounds float* %tmp8045, i64 1
- %tmp8047 = getelementptr inbounds float* %tmp8046, i64 1
- %tmp8048 = getelementptr inbounds float* %tmp8047, i64 1
- %tmp8049 = getelementptr inbounds float* %tmp8048, i64 1
- %tmp8050 = getelementptr inbounds float* %tmp8049, i64 1
- %tmp8051 = getelementptr inbounds float* %tmp8050, i64 1
- %tmp8052 = getelementptr inbounds float* %tmp8051, i64 1
- %tmp8053 = getelementptr inbounds float* %tmp8052, i64 1
- %tmp8054 = getelementptr inbounds float* %tmp8053, i64 1
- %tmp8055 = getelementptr inbounds float* %tmp8054, i64 1
- %tmp8056 = getelementptr inbounds float* %tmp8055, i64 1
- %tmp8057 = getelementptr inbounds float* %tmp8056, i64 1
- %tmp8058 = getelementptr inbounds float* %tmp8057, i64 1
- %tmp8059 = getelementptr inbounds float* %tmp8058, i64 1
- %tmp8060 = getelementptr inbounds float* %tmp8059, i64 1
- %tmp8061 = getelementptr inbounds float* %tmp8060, i64 1
- %tmp8062 = getelementptr inbounds float* %tmp8061, i64 1
- %tmp8063 = getelementptr inbounds float* %tmp8062, i64 1
- %tmp8064 = getelementptr inbounds float* %tmp8063, i64 1
- %tmp8065 = getelementptr inbounds float* %tmp8064, i64 1
- %tmp8066 = getelementptr inbounds float* %tmp8065, i64 1
- %tmp8067 = getelementptr inbounds float* %tmp8066, i64 1
- %tmp8068 = getelementptr inbounds float* %tmp8067, i64 1
- %tmp8069 = getelementptr inbounds float* %tmp8068, i64 1
- %tmp8070 = getelementptr inbounds float* %tmp8069, i64 1
- %tmp8071 = getelementptr inbounds float* %tmp8070, i64 1
- %tmp8072 = getelementptr inbounds float* %tmp8071, i64 1
- %tmp8073 = getelementptr inbounds float* %tmp8072, i64 1
- %tmp8074 = getelementptr inbounds float* %tmp8073, i64 1
- %tmp8075 = getelementptr inbounds float* %tmp8074, i64 1
- %tmp8076 = getelementptr inbounds float* %tmp8075, i64 1
- %tmp8077 = getelementptr inbounds float* %tmp8076, i64 1
- %tmp8078 = getelementptr inbounds float* %tmp8077, i64 1
- %tmp8079 = getelementptr inbounds float* %tmp8078, i64 1
- %tmp8080 = getelementptr inbounds float* %tmp8079, i64 1
- %tmp8081 = getelementptr inbounds float* %tmp8080, i64 1
- %tmp8082 = getelementptr inbounds float* %tmp8081, i64 1
- %tmp8083 = getelementptr inbounds float* %tmp8082, i64 1
- %tmp8084 = getelementptr inbounds float* %tmp8083, i64 1
- %tmp8085 = getelementptr inbounds float* %tmp8084, i64 1
- %tmp8086 = getelementptr inbounds float* %tmp8085, i64 1
- %tmp8087 = getelementptr inbounds float* %tmp8086, i64 1
- %tmp8088 = getelementptr inbounds float* %tmp8087, i64 1
- %tmp8089 = getelementptr inbounds float* %tmp8088, i64 1
- %tmp8090 = getelementptr inbounds float* %tmp8089, i64 1
- %tmp8091 = getelementptr inbounds float* %tmp8090, i64 1
- %tmp8092 = getelementptr inbounds float* %tmp8091, i64 1
- %tmp8093 = getelementptr inbounds float* %tmp8092, i64 1
- %tmp8094 = getelementptr inbounds float* %tmp8093, i64 1
- %tmp8095 = getelementptr inbounds float* %tmp8094, i64 1
- %tmp8096 = getelementptr inbounds float* %tmp8095, i64 1
- %tmp8097 = getelementptr inbounds float* %tmp8096, i64 1
- %tmp8098 = getelementptr inbounds float* %tmp8097, i64 1
- %tmp8099 = getelementptr inbounds float* %tmp8098, i64 1
- %tmp8100 = getelementptr inbounds float* %tmp8099, i64 1
- %tmp8101 = getelementptr inbounds float* %tmp8100, i64 1
- %tmp8102 = getelementptr inbounds float* %tmp8101, i64 1
- %tmp8103 = getelementptr inbounds float* %tmp8102, i64 1
- %tmp8104 = getelementptr inbounds float* %tmp8103, i64 1
- %tmp8105 = getelementptr inbounds float* %tmp8104, i64 1
- %tmp8106 = getelementptr inbounds float* %tmp8105, i64 1
- %tmp8107 = getelementptr inbounds float* %tmp8106, i64 1
- %tmp8108 = getelementptr inbounds float* %tmp8107, i64 1
- %tmp8109 = getelementptr inbounds float* %tmp8108, i64 1
- %tmp8110 = getelementptr inbounds float* %tmp8109, i64 1
- %tmp8111 = getelementptr inbounds float* %tmp8110, i64 1
- %tmp8112 = getelementptr inbounds float* %tmp8111, i64 1
- %tmp8113 = getelementptr inbounds float* %tmp8112, i64 1
- %tmp8114 = getelementptr inbounds float* %tmp8113, i64 1
- %tmp8115 = getelementptr inbounds float* %tmp8114, i64 1
- %tmp8116 = getelementptr inbounds float* %tmp8115, i64 1
- %tmp8117 = getelementptr inbounds float* %tmp8116, i64 1
- %tmp8118 = getelementptr inbounds float* %tmp8117, i64 1
- %tmp8119 = getelementptr inbounds float* %tmp8118, i64 1
- %tmp8120 = getelementptr inbounds float* %tmp8119, i64 1
- %tmp8121 = getelementptr inbounds float* %tmp8120, i64 1
- %tmp8122 = getelementptr inbounds float* %tmp8121, i64 1
- %tmp8123 = getelementptr inbounds float* %tmp8122, i64 1
- %tmp8124 = getelementptr inbounds float* %tmp8123, i64 1
- %tmp8125 = getelementptr inbounds float* %tmp8124, i64 1
- %tmp8126 = getelementptr inbounds float* %tmp8125, i64 1
- %tmp8127 = getelementptr inbounds float* %tmp8126, i64 1
- %tmp8128 = getelementptr inbounds float* %tmp8127, i64 1
- %tmp8129 = getelementptr inbounds float* %tmp8128, i64 1
- %tmp8130 = getelementptr inbounds float* %tmp8129, i64 1
- %tmp8131 = getelementptr inbounds float* %tmp8130, i64 1
- %tmp8132 = getelementptr inbounds float* %tmp8131, i64 1
- %tmp8133 = getelementptr inbounds float* %tmp8132, i64 1
- %tmp8134 = getelementptr inbounds float* %tmp8133, i64 1
- %tmp8135 = getelementptr inbounds float* %tmp8134, i64 1
- %tmp8136 = getelementptr inbounds float* %tmp8135, i64 1
- %tmp8137 = getelementptr inbounds float* %tmp8136, i64 1
- %tmp8138 = getelementptr inbounds float* %tmp8137, i64 1
- %tmp8139 = getelementptr inbounds float* %tmp8138, i64 1
- %tmp8140 = getelementptr inbounds float* %tmp8139, i64 1
- %tmp8141 = getelementptr inbounds float* %tmp8140, i64 1
- %tmp8142 = getelementptr inbounds float* %tmp8141, i64 1
- %tmp8143 = getelementptr inbounds float* %tmp8142, i64 1
- %tmp8144 = getelementptr inbounds float* %tmp8143, i64 1
- %tmp8145 = getelementptr inbounds float* %tmp8144, i64 1
- %tmp8146 = getelementptr inbounds float* %tmp8145, i64 1
- %tmp8147 = getelementptr inbounds float* %tmp8146, i64 1
- %tmp8148 = getelementptr inbounds float* %tmp8147, i64 1
- %tmp8149 = getelementptr inbounds float* %tmp8148, i64 1
- %tmp8150 = getelementptr inbounds float* %tmp8149, i64 1
- %tmp8151 = getelementptr inbounds float* %tmp8150, i64 1
- %tmp8152 = getelementptr inbounds float* %tmp8151, i64 1
- %tmp8153 = getelementptr inbounds float* %tmp8152, i64 1
- %tmp8154 = getelementptr inbounds float* %tmp8153, i64 1
- %tmp8155 = getelementptr inbounds float* %tmp8154, i64 1
- %tmp8156 = getelementptr inbounds float* %tmp8155, i64 1
- %tmp8157 = getelementptr inbounds float* %tmp8156, i64 1
- %tmp8158 = getelementptr inbounds float* %tmp8157, i64 1
- %tmp8159 = getelementptr inbounds float* %tmp8158, i64 1
- %tmp8160 = getelementptr inbounds float* %tmp8159, i64 1
- %tmp8161 = getelementptr inbounds float* %tmp8160, i64 1
- %tmp8162 = getelementptr inbounds float* %tmp8161, i64 1
- %tmp8163 = getelementptr inbounds float* %tmp8162, i64 1
- %tmp8164 = getelementptr inbounds float* %tmp8163, i64 1
- %tmp8165 = getelementptr inbounds float* %tmp8164, i64 1
- %tmp8166 = getelementptr inbounds float* %tmp8165, i64 1
- %tmp8167 = getelementptr inbounds float* %tmp8166, i64 1
- %tmp8168 = getelementptr inbounds float* %tmp8167, i64 1
- %tmp8169 = getelementptr inbounds float* %tmp8168, i64 1
- %tmp8170 = getelementptr inbounds float* %tmp8169, i64 1
- %tmp8171 = getelementptr inbounds float* %tmp8170, i64 1
- %tmp8172 = getelementptr inbounds float* %tmp8171, i64 1
- %tmp8173 = getelementptr inbounds float* %tmp8172, i64 1
- %tmp8174 = getelementptr inbounds float* %tmp8173, i64 1
- %tmp8175 = getelementptr inbounds float* %tmp8174, i64 1
- %tmp8176 = getelementptr inbounds float* %tmp8175, i64 1
- %tmp8177 = getelementptr inbounds float* %tmp8176, i64 1
- %tmp8178 = getelementptr inbounds float* %tmp8177, i64 1
- %tmp8179 = getelementptr inbounds float* %tmp8178, i64 1
- %tmp8180 = getelementptr inbounds float* %tmp8179, i64 1
- %tmp8181 = getelementptr inbounds float* %tmp8180, i64 1
- %tmp8182 = getelementptr inbounds float* %tmp8181, i64 1
- %tmp8183 = getelementptr inbounds float* %tmp8182, i64 1
- %tmp8184 = getelementptr inbounds float* %tmp8183, i64 1
- %tmp8185 = getelementptr inbounds float* %tmp8184, i64 1
- %tmp8186 = getelementptr inbounds float* %tmp8185, i64 1
- %tmp8187 = getelementptr inbounds float* %tmp8186, i64 1
- %tmp8188 = getelementptr inbounds float* %tmp8187, i64 1
- %tmp8189 = getelementptr inbounds float* %tmp8188, i64 1
- %tmp8190 = getelementptr inbounds float* %tmp8189, i64 1
- %tmp8191 = getelementptr inbounds float* %tmp8190, i64 1
- %tmp8192 = getelementptr inbounds float* %tmp8191, i64 1
- %tmp8193 = getelementptr inbounds float* %tmp8192, i64 1
- %tmp8194 = getelementptr inbounds float* %tmp8193, i64 1
- %tmp8195 = getelementptr inbounds float* %tmp8194, i64 1
- %tmp8196 = getelementptr inbounds float* %tmp8195, i64 1
- %tmp8197 = getelementptr inbounds float* %tmp8196, i64 1
- %tmp8198 = getelementptr inbounds float* %tmp8197, i64 1
- %tmp8199 = getelementptr inbounds float* %tmp8198, i64 1
- %tmp8200 = getelementptr inbounds float* %tmp8199, i64 1
- %tmp8201 = getelementptr inbounds float* %tmp8200, i64 1
- %tmp8202 = getelementptr inbounds float* %tmp8201, i64 1
- %tmp8203 = getelementptr inbounds float* %tmp8202, i64 1
- %tmp8204 = getelementptr inbounds float* %tmp8203, i64 1
- %tmp8205 = getelementptr inbounds float* %tmp8204, i64 1
- %tmp8206 = getelementptr inbounds float* %tmp8205, i64 1
- %tmp8207 = getelementptr inbounds float* %tmp8206, i64 1
- %tmp8208 = getelementptr inbounds float* %tmp8207, i64 1
- %tmp8209 = getelementptr inbounds float* %tmp8208, i64 1
- %tmp8210 = getelementptr inbounds float* %tmp8209, i64 1
- %tmp8211 = getelementptr inbounds float* %tmp8210, i64 1
- %tmp8212 = getelementptr inbounds float* %tmp8211, i64 1
- %tmp8213 = getelementptr inbounds float* %tmp8212, i64 1
- %tmp8214 = getelementptr inbounds float* %tmp8213, i64 1
- %tmp8215 = getelementptr inbounds float* %tmp8214, i64 1
- %tmp8216 = getelementptr inbounds float* %tmp8215, i64 1
- %tmp8217 = getelementptr inbounds float* %tmp8216, i64 1
- %tmp8218 = getelementptr inbounds float* %tmp8217, i64 1
- %tmp8219 = getelementptr inbounds float* %tmp8218, i64 1
- %tmp8220 = getelementptr inbounds float* %tmp8219, i64 1
- %tmp8221 = getelementptr inbounds float* %tmp8220, i64 1
- %tmp8222 = getelementptr inbounds float* %tmp8221, i64 1
- %tmp8223 = getelementptr inbounds float* %tmp8222, i64 1
- %tmp8224 = getelementptr inbounds float* %tmp8223, i64 1
- %tmp8225 = getelementptr inbounds float* %tmp8224, i64 1
- %tmp8226 = getelementptr inbounds float* %tmp8225, i64 1
- %tmp8227 = getelementptr inbounds float* %tmp8226, i64 1
- %tmp8228 = getelementptr inbounds float* %tmp8227, i64 1
- %tmp8229 = getelementptr inbounds float* %tmp8228, i64 1
- %tmp8230 = getelementptr inbounds float* %tmp8229, i64 1
- %tmp8231 = getelementptr inbounds float* %tmp8230, i64 1
- %tmp8232 = getelementptr inbounds float* %tmp8231, i64 1
- %tmp8233 = getelementptr inbounds float* %tmp8232, i64 1
- %tmp8234 = getelementptr inbounds float* %tmp8233, i64 1
- %tmp8235 = getelementptr inbounds float* %tmp8234, i64 1
- %tmp8236 = getelementptr inbounds float* %tmp8235, i64 1
- %tmp8237 = getelementptr inbounds float* %tmp8236, i64 1
- %tmp8238 = getelementptr inbounds float* %tmp8237, i64 1
- %tmp8239 = getelementptr inbounds float* %tmp8238, i64 1
- %tmp8240 = getelementptr inbounds float* %tmp8239, i64 1
- %tmp8241 = getelementptr inbounds float* %tmp8240, i64 1
- %tmp8242 = getelementptr inbounds float* %tmp8241, i64 1
- %tmp8243 = getelementptr inbounds float* %tmp8242, i64 1
- %tmp8244 = getelementptr inbounds float* %tmp8243, i64 1
- %tmp8245 = getelementptr inbounds float* %tmp8244, i64 1
- %tmp8246 = getelementptr inbounds float* %tmp8245, i64 1
- %tmp8247 = getelementptr inbounds float* %tmp8246, i64 1
- %tmp8248 = getelementptr inbounds float* %tmp8247, i64 1
- %tmp8249 = getelementptr inbounds float* %tmp8248, i64 1
- %tmp8250 = getelementptr inbounds float* %tmp8249, i64 1
- %tmp8251 = getelementptr inbounds float* %tmp8250, i64 1
- %tmp8252 = getelementptr inbounds float* %tmp8251, i64 1
- %tmp8253 = getelementptr inbounds float* %tmp8252, i64 1
- %tmp8254 = getelementptr inbounds float* %tmp8253, i64 1
- %tmp8255 = getelementptr inbounds float* %tmp8254, i64 1
- %tmp8256 = getelementptr inbounds float* %tmp8255, i64 1
- %tmp8257 = getelementptr inbounds float* %tmp8256, i64 1
- %tmp8258 = getelementptr inbounds float* %tmp8257, i64 1
- %tmp8259 = getelementptr inbounds float* %tmp8258, i64 1
- %tmp8260 = getelementptr inbounds float* %tmp8259, i64 1
- %tmp8261 = getelementptr inbounds float* %tmp8260, i64 1
- %tmp8262 = getelementptr inbounds float* %tmp8261, i64 1
- %tmp8263 = getelementptr inbounds float* %tmp8262, i64 1
- %tmp8264 = getelementptr inbounds float* %tmp8263, i64 1
- %tmp8265 = getelementptr inbounds float* %tmp8264, i64 1
- %tmp8266 = getelementptr inbounds float* %tmp8265, i64 1
- %tmp8267 = getelementptr inbounds float* %tmp8266, i64 1
- %tmp8268 = getelementptr inbounds float* %tmp8267, i64 1
- %tmp8269 = getelementptr inbounds float* %tmp8268, i64 1
- %tmp8270 = getelementptr inbounds float* %tmp8269, i64 1
- %tmp8271 = getelementptr inbounds float* %tmp8270, i64 1
- %tmp8272 = getelementptr inbounds float* %tmp8271, i64 1
- %tmp8273 = getelementptr inbounds float* %tmp8272, i64 1
- %tmp8274 = getelementptr inbounds float* %tmp8273, i64 1
- %tmp8275 = getelementptr inbounds float* %tmp8274, i64 1
- %tmp8276 = getelementptr inbounds float* %tmp8275, i64 1
- %tmp8277 = getelementptr inbounds float* %tmp8276, i64 1
- %tmp8278 = getelementptr inbounds float* %tmp8277, i64 1
- %tmp8279 = getelementptr inbounds float* %tmp8278, i64 1
- %tmp8280 = getelementptr inbounds float* %tmp8279, i64 1
- %tmp8281 = getelementptr inbounds float* %tmp8280, i64 1
- %tmp8282 = getelementptr inbounds float* %tmp8281, i64 1
- %tmp8283 = getelementptr inbounds float* %tmp8282, i64 1
- %tmp8284 = getelementptr inbounds float* %tmp8283, i64 1
- %tmp8285 = getelementptr inbounds float* %tmp8284, i64 1
- %tmp8286 = getelementptr inbounds float* %tmp8285, i64 1
- %tmp8287 = getelementptr inbounds float* %tmp8286, i64 1
- %tmp8288 = getelementptr inbounds float* %tmp8287, i64 1
- %tmp8289 = getelementptr inbounds float* %tmp8288, i64 1
- %tmp8290 = getelementptr inbounds float* %tmp8289, i64 1
- %tmp8291 = getelementptr inbounds float* %tmp8290, i64 1
- %tmp8292 = getelementptr inbounds float* %tmp8291, i64 1
- %tmp8293 = getelementptr inbounds float* %tmp8292, i64 1
- %tmp8294 = getelementptr inbounds float* %tmp8293, i64 1
- %tmp8295 = getelementptr inbounds float* %tmp8294, i64 1
- %tmp8296 = getelementptr inbounds float* %tmp8295, i64 1
- %tmp8297 = getelementptr inbounds float* %tmp8296, i64 1
- %tmp8298 = getelementptr inbounds float* %tmp8297, i64 1
- %tmp8299 = getelementptr inbounds float* %tmp8298, i64 1
- %tmp8300 = getelementptr inbounds float* %tmp8299, i64 1
- %tmp8301 = getelementptr inbounds float* %tmp8300, i64 1
- %tmp8302 = getelementptr inbounds float* %tmp8301, i64 1
- %tmp8303 = getelementptr inbounds float* %tmp8302, i64 1
- %tmp8304 = getelementptr inbounds float* %tmp8303, i64 1
- %tmp8305 = getelementptr inbounds float* %tmp8304, i64 1
- %tmp8306 = getelementptr inbounds float* %tmp8305, i64 1
- %tmp8307 = getelementptr inbounds float* %tmp8306, i64 1
- %tmp8308 = getelementptr inbounds float* %tmp8307, i64 1
- %tmp8309 = getelementptr inbounds float* %tmp8308, i64 1
- %tmp8310 = getelementptr inbounds float* %tmp8309, i64 1
- %tmp8311 = getelementptr inbounds float* %tmp8310, i64 1
- %tmp8312 = getelementptr inbounds float* %tmp8311, i64 1
- %tmp8313 = getelementptr inbounds float* %tmp8312, i64 1
- %tmp8314 = getelementptr inbounds float* %tmp8313, i64 1
- %tmp8315 = getelementptr inbounds float* %tmp8314, i64 1
- %tmp8316 = getelementptr inbounds float* %tmp8315, i64 1
- %tmp8317 = getelementptr inbounds float* %tmp8316, i64 1
- %tmp8318 = getelementptr inbounds float* %tmp8317, i64 1
- %tmp8319 = getelementptr inbounds float* %tmp8318, i64 1
- %tmp8320 = getelementptr inbounds float* %tmp8319, i64 1
- %tmp8321 = getelementptr inbounds float* %tmp8320, i64 1
- %tmp8322 = getelementptr inbounds float* %tmp8321, i64 1
- %tmp8323 = getelementptr inbounds float* %tmp8322, i64 1
- %tmp8324 = getelementptr inbounds float* %tmp8323, i64 1
- %tmp8325 = getelementptr inbounds float* %tmp8324, i64 1
- %tmp8326 = getelementptr inbounds float* %tmp8325, i64 1
- %tmp8327 = getelementptr inbounds float* %tmp8326, i64 1
- %tmp8328 = getelementptr inbounds float* %tmp8327, i64 1
- %tmp8329 = getelementptr inbounds float* %tmp8328, i64 1
- %tmp8330 = getelementptr inbounds float* %tmp8329, i64 1
- %tmp8331 = getelementptr inbounds float* %tmp8330, i64 1
- %tmp8332 = getelementptr inbounds float* %tmp8331, i64 1
- %tmp8333 = getelementptr inbounds float* %tmp8332, i64 1
- %tmp8334 = getelementptr inbounds float* %tmp8333, i64 1
- %tmp8335 = getelementptr inbounds float* %tmp8334, i64 1
- %tmp8336 = getelementptr inbounds float* %tmp8335, i64 1
- %tmp8337 = getelementptr inbounds float* %tmp8336, i64 1
- %tmp8338 = getelementptr inbounds float* %tmp8337, i64 1
- %tmp8339 = getelementptr inbounds float* %tmp8338, i64 1
- %tmp8340 = getelementptr inbounds float* %tmp8339, i64 1
- %tmp8341 = getelementptr inbounds float* %tmp8340, i64 1
- %tmp8342 = getelementptr inbounds float* %tmp8341, i64 1
- %tmp8343 = getelementptr inbounds float* %tmp8342, i64 1
- %tmp8344 = getelementptr inbounds float* %tmp8343, i64 1
- %tmp8345 = getelementptr inbounds float* %tmp8344, i64 1
- %tmp8346 = getelementptr inbounds float* %tmp8345, i64 1
- %tmp8347 = getelementptr inbounds float* %tmp8346, i64 1
- %tmp8348 = getelementptr inbounds float* %tmp8347, i64 1
- %tmp8349 = getelementptr inbounds float* %tmp8348, i64 1
- %tmp8350 = getelementptr inbounds float* %tmp8349, i64 1
- %tmp8351 = getelementptr inbounds float* %tmp8350, i64 1
- %tmp8352 = getelementptr inbounds float* %tmp8351, i64 1
- %tmp8353 = getelementptr inbounds float* %tmp8352, i64 1
- %tmp8354 = getelementptr inbounds float* %tmp8353, i64 1
- %tmp8355 = getelementptr inbounds float* %tmp8354, i64 1
- %tmp8356 = getelementptr inbounds float* %tmp8355, i64 1
- %tmp8357 = getelementptr inbounds float* %tmp8356, i64 1
- %tmp8358 = getelementptr inbounds float* %tmp8357, i64 1
- %tmp8359 = getelementptr inbounds float* %tmp8358, i64 1
- %tmp8360 = getelementptr inbounds float* %tmp8359, i64 1
- %tmp8361 = getelementptr inbounds float* %tmp8360, i64 1
- %tmp8362 = getelementptr inbounds float* %tmp8361, i64 1
- %tmp8363 = getelementptr inbounds float* %tmp8362, i64 1
- %tmp8364 = getelementptr inbounds float* %tmp8363, i64 1
- %tmp8365 = getelementptr inbounds float* %tmp8364, i64 1
- %tmp8366 = getelementptr inbounds float* %tmp8365, i64 1
- %tmp8367 = getelementptr inbounds float* %tmp8366, i64 1
- %tmp8368 = getelementptr inbounds float* %tmp8367, i64 1
- %tmp8369 = getelementptr inbounds float* %tmp8368, i64 1
- %tmp8370 = getelementptr inbounds float* %tmp8369, i64 1
- %tmp8371 = getelementptr inbounds float* %tmp8370, i64 1
- %tmp8372 = getelementptr inbounds float* %tmp8371, i64 1
- %tmp8373 = getelementptr inbounds float* %tmp8372, i64 1
- %tmp8374 = getelementptr inbounds float* %tmp8373, i64 1
- %tmp8375 = getelementptr inbounds float* %tmp8374, i64 1
- %tmp8376 = getelementptr inbounds float* %tmp8375, i64 1
- %tmp8377 = getelementptr inbounds float* %tmp8376, i64 1
- %tmp8378 = getelementptr inbounds float* %tmp8377, i64 1
- %tmp8379 = getelementptr inbounds float* %tmp8378, i64 1
- %tmp8380 = getelementptr inbounds float* %tmp8379, i64 1
- %tmp8381 = getelementptr inbounds float* %tmp8380, i64 1
- %tmp8382 = getelementptr inbounds float* %tmp8381, i64 1
- %tmp8383 = getelementptr inbounds float* %tmp8382, i64 1
- %tmp8384 = getelementptr inbounds float* %tmp8383, i64 1
- %tmp8385 = getelementptr inbounds float* %tmp8384, i64 1
- %tmp8386 = getelementptr inbounds float* %tmp8385, i64 1
- %tmp8387 = getelementptr inbounds float* %tmp8386, i64 1
- %tmp8388 = getelementptr inbounds float* %tmp8387, i64 1
- %tmp8389 = getelementptr inbounds float* %tmp8388, i64 1
- %tmp8390 = getelementptr inbounds float* %tmp8389, i64 1
- %tmp8391 = getelementptr inbounds float* %tmp8390, i64 1
- %tmp8392 = getelementptr inbounds float* %tmp8391, i64 1
- %tmp8393 = getelementptr inbounds float* %tmp8392, i64 1
- %tmp8394 = getelementptr inbounds float* %tmp8393, i64 1
- %tmp8395 = getelementptr inbounds float* %tmp8394, i64 1
- %tmp8396 = getelementptr inbounds float* %tmp8395, i64 1
- %tmp8397 = getelementptr inbounds float* %tmp8396, i64 1
- %tmp8398 = getelementptr inbounds float* %tmp8397, i64 1
- %tmp8399 = getelementptr inbounds float* %tmp8398, i64 1
- %tmp8400 = getelementptr inbounds float* %tmp8399, i64 1
- %tmp8401 = getelementptr inbounds float* %tmp8400, i64 1
- %tmp8402 = getelementptr inbounds float* %tmp8401, i64 1
- %tmp8403 = getelementptr inbounds float* %tmp8402, i64 1
- %tmp8404 = getelementptr inbounds float* %tmp8403, i64 1
- %tmp8405 = getelementptr inbounds float* %tmp8404, i64 1
- %tmp8406 = getelementptr inbounds float* %tmp8405, i64 1
- %tmp8407 = getelementptr inbounds float* %tmp8406, i64 1
- %tmp8408 = getelementptr inbounds float* %tmp8407, i64 1
- %tmp8409 = getelementptr inbounds float* %tmp8408, i64 1
- %tmp8410 = getelementptr inbounds float* %tmp8409, i64 1
- %tmp8411 = getelementptr inbounds float* %tmp8410, i64 1
- %tmp8412 = getelementptr inbounds float* %tmp8411, i64 1
- %tmp8413 = getelementptr inbounds float* %tmp8412, i64 1
- %tmp8414 = getelementptr inbounds float* %tmp8413, i64 1
- %tmp8415 = getelementptr inbounds float* %tmp8414, i64 1
- %tmp8416 = getelementptr inbounds float* %tmp8415, i64 1
- %tmp8417 = getelementptr inbounds float* %tmp8416, i64 1
- %tmp8418 = getelementptr inbounds float* %tmp8417, i64 1
- %tmp8419 = getelementptr inbounds float* %tmp8418, i64 1
- %tmp8420 = getelementptr inbounds float* %tmp8419, i64 1
- %tmp8421 = getelementptr inbounds float* %tmp8420, i64 1
- %tmp8422 = getelementptr inbounds float* %tmp8421, i64 1
- %tmp8423 = getelementptr inbounds float* %tmp8422, i64 1
- %tmp8424 = getelementptr inbounds float* %tmp8423, i64 1
- %tmp8425 = getelementptr inbounds float* %tmp8424, i64 1
- %tmp8426 = getelementptr inbounds float* %tmp8425, i64 1
- %tmp8427 = getelementptr inbounds float* %tmp8426, i64 1
- %tmp8428 = getelementptr inbounds float* %tmp8427, i64 1
- %tmp8429 = getelementptr inbounds float* %tmp8428, i64 1
- %tmp8430 = getelementptr inbounds float* %tmp8429, i64 1
- %tmp8431 = getelementptr inbounds float* %tmp8430, i64 1
- %tmp8432 = getelementptr inbounds float* %tmp8431, i64 1
- %tmp8433 = getelementptr inbounds float* %tmp8432, i64 1
- %tmp8434 = getelementptr inbounds float* %tmp8433, i64 1
- %tmp8435 = getelementptr inbounds float* %tmp8434, i64 1
- %tmp8436 = getelementptr inbounds float* %tmp8435, i64 1
- %tmp8437 = getelementptr inbounds float* %tmp8436, i64 1
- %tmp8438 = getelementptr inbounds float* %tmp8437, i64 1
- %tmp8439 = getelementptr inbounds float* %tmp8438, i64 1
- %tmp8440 = getelementptr inbounds float* %tmp8439, i64 1
- %tmp8441 = getelementptr inbounds float* %tmp8440, i64 1
- %tmp8442 = getelementptr inbounds float* %tmp8441, i64 1
- %tmp8443 = getelementptr inbounds float* %tmp8442, i64 1
- %tmp8444 = getelementptr inbounds float* %tmp8443, i64 1
- %tmp8445 = getelementptr inbounds float* %tmp8444, i64 1
- %tmp8446 = getelementptr inbounds float* %tmp8445, i64 1
- %tmp8447 = getelementptr inbounds float* %tmp8446, i64 1
- %tmp8448 = getelementptr inbounds float* %tmp8447, i64 1
- %tmp8449 = getelementptr inbounds float* %tmp8448, i64 1
- %tmp8450 = getelementptr inbounds float* %tmp8449, i64 1
- %tmp8451 = getelementptr inbounds float* %tmp8450, i64 1
- %tmp8452 = getelementptr inbounds float* %tmp8451, i64 1
- %tmp8453 = getelementptr inbounds float* %tmp8452, i64 1
- %tmp8454 = getelementptr inbounds float* %tmp8453, i64 1
- %tmp8455 = getelementptr inbounds float* %tmp8454, i64 1
- %tmp8456 = getelementptr inbounds float* %tmp8455, i64 1
- %tmp8457 = getelementptr inbounds float* %tmp8456, i64 1
- %tmp8458 = getelementptr inbounds float* %tmp8457, i64 1
- %tmp8459 = getelementptr inbounds float* %tmp8458, i64 1
- %tmp8460 = getelementptr inbounds float* %tmp8459, i64 1
- %tmp8461 = getelementptr inbounds float* %tmp8460, i64 1
- %tmp8462 = getelementptr inbounds float* %tmp8461, i64 1
- %tmp8463 = getelementptr inbounds float* %tmp8462, i64 1
- %tmp8464 = getelementptr inbounds float* %tmp8463, i64 1
- %tmp8465 = getelementptr inbounds float* %tmp8464, i64 1
- %tmp8466 = getelementptr inbounds float* %tmp8465, i64 1
- %tmp8467 = getelementptr inbounds float* %tmp8466, i64 1
- %tmp8468 = getelementptr inbounds float* %tmp8467, i64 1
- %tmp8469 = getelementptr inbounds float* %tmp8468, i64 1
- %tmp8470 = getelementptr inbounds float* %tmp8469, i64 1
- %tmp8471 = getelementptr inbounds float* %tmp8470, i64 1
- %tmp8472 = getelementptr inbounds float* %tmp8471, i64 1
- %tmp8473 = getelementptr inbounds float* %tmp8472, i64 1
- %tmp8474 = getelementptr inbounds float* %tmp8473, i64 1
- %tmp8475 = getelementptr inbounds float* %tmp8474, i64 1
- %tmp8476 = getelementptr inbounds float* %tmp8475, i64 1
- %tmp8477 = getelementptr inbounds float* %tmp8476, i64 1
- %tmp8478 = getelementptr inbounds float* %tmp8477, i64 1
- %tmp8479 = getelementptr inbounds float* %tmp8478, i64 1
- %tmp8480 = getelementptr inbounds float* %tmp8479, i64 1
- %tmp8481 = getelementptr inbounds float* %tmp8480, i64 1
- %tmp8482 = getelementptr inbounds float* %tmp8481, i64 1
- %tmp8483 = getelementptr inbounds float* %tmp8482, i64 1
- %tmp8484 = getelementptr inbounds float* %tmp8483, i64 1
- %tmp8485 = getelementptr inbounds float* %tmp8484, i64 1
- %tmp8486 = getelementptr inbounds float* %tmp8485, i64 1
- %tmp8487 = getelementptr inbounds float* %tmp8486, i64 1
- %tmp8488 = getelementptr inbounds float* %tmp8487, i64 1
- %tmp8489 = getelementptr inbounds float* %tmp8488, i64 1
- %tmp8490 = getelementptr inbounds float* %tmp8489, i64 1
- %tmp8491 = getelementptr inbounds float* %tmp8490, i64 1
- %tmp8492 = getelementptr inbounds float* %tmp8491, i64 1
- %tmp8493 = getelementptr inbounds float* %tmp8492, i64 1
- %tmp8494 = getelementptr inbounds float* %tmp8493, i64 1
- %tmp8495 = getelementptr inbounds float* %tmp8494, i64 1
- %tmp8496 = getelementptr inbounds float* %tmp8495, i64 1
- %tmp8497 = getelementptr inbounds float* %tmp8496, i64 1
- %tmp8498 = getelementptr inbounds float* %tmp8497, i64 1
- %tmp8499 = getelementptr inbounds float* %tmp8498, i64 1
- %tmp8500 = getelementptr inbounds float* %tmp8499, i64 1
- %tmp8501 = getelementptr inbounds float* %tmp8500, i64 1
- %tmp8502 = getelementptr inbounds float* %tmp8501, i64 1
- %tmp8503 = getelementptr inbounds float* %tmp8502, i64 1
- %tmp8504 = getelementptr inbounds float* %tmp8503, i64 1
- %tmp8505 = getelementptr inbounds float* %tmp8504, i64 1
- %tmp8506 = getelementptr inbounds float* %tmp8505, i64 1
- %tmp8507 = getelementptr inbounds float* %tmp8506, i64 1
- %tmp8508 = getelementptr inbounds float* %tmp8507, i64 1
- %tmp8509 = getelementptr inbounds float* %tmp8508, i64 1
- %tmp8510 = getelementptr inbounds float* %tmp8509, i64 1
- %tmp8511 = getelementptr inbounds float* %tmp8510, i64 1
- %tmp8512 = getelementptr inbounds float* %tmp8511, i64 1
- %tmp8513 = getelementptr inbounds float* %tmp8512, i64 1
- %tmp8514 = getelementptr inbounds float* %tmp8513, i64 1
- %tmp8515 = getelementptr inbounds float* %tmp8514, i64 1
- %tmp8516 = getelementptr inbounds float* %tmp8515, i64 1
- %tmp8517 = getelementptr inbounds float* %tmp8516, i64 1
- %tmp8518 = getelementptr inbounds float* %tmp8517, i64 1
- %tmp8519 = getelementptr inbounds float* %tmp8518, i64 1
- %tmp8520 = getelementptr inbounds float* %tmp8519, i64 1
- %tmp8521 = getelementptr inbounds float* %tmp8520, i64 1
- %tmp8522 = getelementptr inbounds float* %tmp8521, i64 1
- %tmp8523 = getelementptr inbounds float* %tmp8522, i64 1
- %tmp8524 = getelementptr inbounds float* %tmp8523, i64 1
- %tmp8525 = getelementptr inbounds float* %tmp8524, i64 1
- %tmp8526 = getelementptr inbounds float* %tmp8525, i64 1
- %tmp8527 = getelementptr inbounds float* %tmp8526, i64 1
- %tmp8528 = getelementptr inbounds float* %tmp8527, i64 1
- %tmp8529 = getelementptr inbounds float* %tmp8528, i64 1
- %tmp8530 = getelementptr inbounds float* %tmp8529, i64 1
- %tmp8531 = getelementptr inbounds float* %tmp8530, i64 1
- %tmp8532 = getelementptr inbounds float* %tmp8531, i64 1
- %tmp8533 = getelementptr inbounds float* %tmp8532, i64 1
- %tmp8534 = getelementptr inbounds float* %tmp8533, i64 1
- %tmp8535 = getelementptr inbounds float* %tmp8534, i64 1
- %tmp8536 = getelementptr inbounds float* %tmp8535, i64 1
- %tmp8537 = getelementptr inbounds float* %tmp8536, i64 1
- %tmp8538 = getelementptr inbounds float* %tmp8537, i64 1
- %tmp8539 = getelementptr inbounds float* %tmp8538, i64 1
- %tmp8540 = getelementptr inbounds float* %tmp8539, i64 1
- %tmp8541 = getelementptr inbounds float* %tmp8540, i64 1
- %tmp8542 = getelementptr inbounds float* %tmp8541, i64 1
- %tmp8543 = getelementptr inbounds float* %tmp8542, i64 1
- %tmp8544 = getelementptr inbounds float* %tmp8543, i64 1
- %tmp8545 = getelementptr inbounds float* %tmp8544, i64 1
- %tmp8546 = getelementptr inbounds float* %tmp8545, i64 1
- %tmp8547 = getelementptr inbounds float* %tmp8546, i64 1
- %tmp8548 = getelementptr inbounds float* %tmp8547, i64 1
- %tmp8549 = getelementptr inbounds float* %tmp8548, i64 1
- %tmp8550 = getelementptr inbounds float* %tmp8549, i64 1
- %tmp8551 = getelementptr inbounds float* %tmp8550, i64 1
- %tmp8552 = getelementptr inbounds float* %tmp8551, i64 1
- %tmp8553 = getelementptr inbounds float* %tmp8552, i64 1
- %tmp8554 = getelementptr inbounds float* %tmp8553, i64 1
- %tmp8555 = getelementptr inbounds float* %tmp8554, i64 1
- %tmp8556 = getelementptr inbounds float* %tmp8555, i64 1
- %tmp8557 = getelementptr inbounds float* %tmp8556, i64 1
- %tmp8558 = getelementptr inbounds float* %tmp8557, i64 1
- %tmp8559 = getelementptr inbounds float* %tmp8558, i64 1
- %tmp8560 = getelementptr inbounds float* %tmp8559, i64 1
- %tmp8561 = getelementptr inbounds float* %tmp8560, i64 1
- %tmp8562 = getelementptr inbounds float* %tmp8561, i64 1
- %tmp8563 = getelementptr inbounds float* %tmp8562, i64 1
- %tmp8564 = getelementptr inbounds float* %tmp8563, i64 1
- %tmp8565 = getelementptr inbounds float* %tmp8564, i64 1
- %tmp8566 = getelementptr inbounds float* %tmp8565, i64 1
- %tmp8567 = getelementptr inbounds float* %tmp8566, i64 1
- %tmp8568 = getelementptr inbounds float* %tmp8567, i64 1
- %tmp8569 = getelementptr inbounds float* %tmp8568, i64 1
- %tmp8570 = getelementptr inbounds float* %tmp8569, i64 1
- %tmp8571 = getelementptr inbounds float* %tmp8570, i64 1
- %tmp8572 = getelementptr inbounds float* %tmp8571, i64 1
- %tmp8573 = getelementptr inbounds float* %tmp8572, i64 1
- %tmp8574 = getelementptr inbounds float* %tmp8573, i64 1
- %tmp8575 = getelementptr inbounds float* %tmp8574, i64 1
- %tmp8576 = getelementptr inbounds float* %tmp8575, i64 1
- %tmp8577 = getelementptr inbounds float* %tmp8576, i64 1
- %tmp8578 = getelementptr inbounds float* %tmp8577, i64 1
- %tmp8579 = getelementptr inbounds float* %tmp8578, i64 1
- %tmp8580 = getelementptr inbounds float* %tmp8579, i64 1
- %tmp8581 = getelementptr inbounds float* %tmp8580, i64 1
- %tmp8582 = getelementptr inbounds float* %tmp8581, i64 1
- %tmp8583 = getelementptr inbounds float* %tmp8582, i64 1
- %tmp8584 = getelementptr inbounds float* %tmp8583, i64 1
- %tmp8585 = getelementptr inbounds float* %tmp8584, i64 1
- %tmp8586 = getelementptr inbounds float* %tmp8585, i64 1
- %tmp8587 = getelementptr inbounds float* %tmp8586, i64 1
- %tmp8588 = getelementptr inbounds float* %tmp8587, i64 1
- %tmp8589 = getelementptr inbounds float* %tmp8588, i64 1
- %tmp8590 = getelementptr inbounds float* %tmp8589, i64 1
- %tmp8591 = getelementptr inbounds float* %tmp8590, i64 1
- %tmp8592 = getelementptr inbounds float* %tmp8591, i64 1
- %tmp8593 = getelementptr inbounds float* %tmp8592, i64 1
- %tmp8594 = getelementptr inbounds float* %tmp8593, i64 1
- %tmp8595 = getelementptr inbounds float* %tmp8594, i64 1
- %tmp8596 = getelementptr inbounds float* %tmp8595, i64 1
- %tmp8597 = getelementptr inbounds float* %tmp8596, i64 1
- %tmp8598 = getelementptr inbounds float* %tmp8597, i64 1
- %tmp8599 = getelementptr inbounds float* %tmp8598, i64 1
- %tmp8600 = getelementptr inbounds float* %tmp8599, i64 1
- %tmp8601 = getelementptr inbounds float* %tmp8600, i64 1
- %tmp8602 = getelementptr inbounds float* %tmp8601, i64 1
- %tmp8603 = getelementptr inbounds float* %tmp8602, i64 1
- %tmp8604 = getelementptr inbounds float* %tmp8603, i64 1
- %tmp8605 = getelementptr inbounds float* %tmp8604, i64 1
- %tmp8606 = getelementptr inbounds float* %tmp8605, i64 1
- %tmp8607 = getelementptr inbounds float* %tmp8606, i64 1
- %tmp8608 = getelementptr inbounds float* %tmp8607, i64 1
- %tmp8609 = getelementptr inbounds float* %tmp8608, i64 1
- %tmp8610 = getelementptr inbounds float* %tmp8609, i64 1
- %tmp8611 = getelementptr inbounds float* %tmp8610, i64 1
- %tmp8612 = getelementptr inbounds float* %tmp8611, i64 1
- %tmp8613 = getelementptr inbounds float* %tmp8612, i64 1
- %tmp8614 = getelementptr inbounds float* %tmp8613, i64 1
- %tmp8615 = getelementptr inbounds float* %tmp8614, i64 1
- %tmp8616 = getelementptr inbounds float* %tmp8615, i64 1
- %tmp8617 = getelementptr inbounds float* %tmp8616, i64 1
- %tmp8618 = getelementptr inbounds float* %tmp8617, i64 1
- %tmp8619 = getelementptr inbounds float* %tmp8618, i64 1
- %tmp8620 = getelementptr inbounds float* %tmp8619, i64 1
- %tmp8621 = getelementptr inbounds float* %tmp8620, i64 1
- %tmp8622 = getelementptr inbounds float* %tmp8621, i64 1
- %tmp8623 = getelementptr inbounds float* %tmp8622, i64 1
- %tmp8624 = getelementptr inbounds float* %tmp8623, i64 1
- %tmp8625 = getelementptr inbounds float* %tmp8624, i64 1
- %tmp8626 = getelementptr inbounds float* %tmp8625, i64 1
- %tmp8627 = getelementptr inbounds float* %tmp8626, i64 1
- %tmp8628 = getelementptr inbounds float* %tmp8627, i64 1
- %tmp8629 = getelementptr inbounds float* %tmp8628, i64 1
- %tmp8630 = getelementptr inbounds float* %tmp8629, i64 1
- %tmp8631 = getelementptr inbounds float* %tmp8630, i64 1
- %tmp8632 = getelementptr inbounds float* %tmp8631, i64 1
- %tmp8633 = getelementptr inbounds float* %tmp8632, i64 1
- %tmp8634 = getelementptr inbounds float* %tmp8633, i64 1
- %tmp8635 = getelementptr inbounds float* %tmp8634, i64 1
- %tmp8636 = getelementptr inbounds float* %tmp8635, i64 1
- %tmp8637 = getelementptr inbounds float* %tmp8636, i64 1
- %tmp8638 = getelementptr inbounds float* %tmp8637, i64 1
- %tmp8639 = getelementptr inbounds float* %tmp8638, i64 1
- %tmp8640 = getelementptr inbounds float* %tmp8639, i64 1
- %tmp8641 = getelementptr inbounds float* %tmp8640, i64 1
- %tmp8642 = getelementptr inbounds float* %tmp8641, i64 1
- %tmp8643 = getelementptr inbounds float* %tmp8642, i64 1
- %tmp8644 = getelementptr inbounds float* %tmp8643, i64 1
- %tmp8645 = getelementptr inbounds float* %tmp8644, i64 1
- %tmp8646 = getelementptr inbounds float* %tmp8645, i64 1
- %tmp8647 = getelementptr inbounds float* %tmp8646, i64 1
- %tmp8648 = getelementptr inbounds float* %tmp8647, i64 1
- %tmp8649 = getelementptr inbounds float* %tmp8648, i64 1
- %tmp8650 = getelementptr inbounds float* %tmp8649, i64 1
- %tmp8651 = getelementptr inbounds float* %tmp8650, i64 1
- %tmp8652 = getelementptr inbounds float* %tmp8651, i64 1
- %tmp8653 = getelementptr inbounds float* %tmp8652, i64 1
- %tmp8654 = getelementptr inbounds float* %tmp8653, i64 1
- %tmp8655 = getelementptr inbounds float* %tmp8654, i64 1
- %tmp8656 = getelementptr inbounds float* %tmp8655, i64 1
- %tmp8657 = getelementptr inbounds float* %tmp8656, i64 1
- %tmp8658 = getelementptr inbounds float* %tmp8657, i64 1
- %tmp8659 = getelementptr inbounds float* %tmp8658, i64 1
- %tmp8660 = getelementptr inbounds float* %tmp8659, i64 1
- %tmp8661 = getelementptr inbounds float* %tmp8660, i64 1
- %tmp8662 = getelementptr inbounds float* %tmp8661, i64 1
- %tmp8663 = getelementptr inbounds float* %tmp8662, i64 1
- %tmp8664 = getelementptr inbounds float* %tmp8663, i64 1
- %tmp8665 = getelementptr inbounds float* %tmp8664, i64 1
- %tmp8666 = getelementptr inbounds float* %tmp8665, i64 1
- %tmp8667 = getelementptr inbounds float* %tmp8666, i64 1
- %tmp8668 = getelementptr inbounds float* %tmp8667, i64 1
- %tmp8669 = getelementptr inbounds float* %tmp8668, i64 1
- %tmp8670 = getelementptr inbounds float* %tmp8669, i64 1
- %tmp8671 = getelementptr inbounds float* %tmp8670, i64 1
- %tmp8672 = getelementptr inbounds float* %tmp8671, i64 1
- %tmp8673 = getelementptr inbounds float* %tmp8672, i64 1
- %tmp8674 = getelementptr inbounds float* %tmp8673, i64 1
- %tmp8675 = getelementptr inbounds float* %tmp8674, i64 1
- %tmp8676 = getelementptr inbounds float* %tmp8675, i64 1
- %tmp8677 = getelementptr inbounds float* %tmp8676, i64 1
- %tmp8678 = getelementptr inbounds float* %tmp8677, i64 1
- %tmp8679 = getelementptr inbounds float* %tmp8678, i64 1
- %tmp8680 = getelementptr inbounds float* %tmp8679, i64 1
- %tmp8681 = getelementptr inbounds float* %tmp8680, i64 1
- %tmp8682 = getelementptr inbounds float* %tmp8681, i64 1
- %tmp8683 = getelementptr inbounds float* %tmp8682, i64 1
- %tmp8684 = getelementptr inbounds float* %tmp8683, i64 1
- %tmp8685 = getelementptr inbounds float* %tmp8684, i64 1
- %tmp8686 = getelementptr inbounds float* %tmp8685, i64 1
- %tmp8687 = getelementptr inbounds float* %tmp8686, i64 1
- %tmp8688 = getelementptr inbounds float* %tmp8687, i64 1
- %tmp8689 = getelementptr inbounds float* %tmp8688, i64 1
- %tmp8690 = getelementptr inbounds float* %tmp8689, i64 1
- %tmp8691 = getelementptr inbounds float* %tmp8690, i64 1
- %tmp8692 = getelementptr inbounds float* %tmp8691, i64 1
- %tmp8693 = getelementptr inbounds float* %tmp8692, i64 1
- %tmp8694 = getelementptr inbounds float* %tmp8693, i64 1
- %tmp8695 = getelementptr inbounds float* %tmp8694, i64 1
- %tmp8696 = getelementptr inbounds float* %tmp8695, i64 1
- %tmp8697 = getelementptr inbounds float* %tmp8696, i64 1
- %tmp8698 = getelementptr inbounds float* %tmp8697, i64 1
- %tmp8699 = getelementptr inbounds float* %tmp8698, i64 1
- %tmp8700 = getelementptr inbounds float* %tmp8699, i64 1
- %tmp8701 = getelementptr inbounds float* %tmp8700, i64 1
- %tmp8702 = getelementptr inbounds float* %tmp8701, i64 1
- %tmp8703 = getelementptr inbounds float* %tmp8702, i64 1
- %tmp8704 = getelementptr inbounds float* %tmp8703, i64 1
- %tmp8705 = getelementptr inbounds float* %tmp8704, i64 1
- %tmp8706 = getelementptr inbounds float* %tmp8705, i64 1
- %tmp8707 = getelementptr inbounds float* %tmp8706, i64 1
- %tmp8708 = getelementptr inbounds float* %tmp8707, i64 1
- %tmp8709 = getelementptr inbounds float* %tmp8708, i64 1
- %tmp8710 = getelementptr inbounds float* %tmp8709, i64 1
- %tmp8711 = getelementptr inbounds float* %tmp8710, i64 1
- %tmp8712 = getelementptr inbounds float* %tmp8711, i64 1
- %tmp8713 = getelementptr inbounds float* %tmp8712, i64 1
- %tmp8714 = getelementptr inbounds float* %tmp8713, i64 1
- %tmp8715 = getelementptr inbounds float* %tmp8714, i64 1
- %tmp8716 = getelementptr inbounds float* %tmp8715, i64 1
- %tmp8717 = getelementptr inbounds float* %tmp8716, i64 1
- %tmp8718 = getelementptr inbounds float* %tmp8717, i64 1
- %tmp8719 = getelementptr inbounds float* %tmp8718, i64 1
- %tmp8720 = getelementptr inbounds float* %tmp8719, i64 1
- %tmp8721 = getelementptr inbounds float* %tmp8720, i64 1
- %tmp8722 = getelementptr inbounds float* %tmp8721, i64 1
- %tmp8723 = getelementptr inbounds float* %tmp8722, i64 1
- %tmp8724 = getelementptr inbounds float* %tmp8723, i64 1
- %tmp8725 = getelementptr inbounds float* %tmp8724, i64 1
- %tmp8726 = getelementptr inbounds float* %tmp8725, i64 1
- %tmp8727 = getelementptr inbounds float* %tmp8726, i64 1
- %tmp8728 = getelementptr inbounds float* %tmp8727, i64 1
- %tmp8729 = getelementptr inbounds float* %tmp8728, i64 1
- %tmp8730 = getelementptr inbounds float* %tmp8729, i64 1
- %tmp8731 = getelementptr inbounds float* %tmp8730, i64 1
- %tmp8732 = getelementptr inbounds float* %tmp8731, i64 1
- %tmp8733 = getelementptr inbounds float* %tmp8732, i64 1
- %tmp8734 = getelementptr inbounds float* %tmp8733, i64 1
- %tmp8735 = getelementptr inbounds float* %tmp8734, i64 1
- %tmp8736 = getelementptr inbounds float* %tmp8735, i64 1
- %tmp8737 = getelementptr inbounds float* %tmp8736, i64 1
- %tmp8738 = getelementptr inbounds float* %tmp8737, i64 1
- %tmp8739 = getelementptr inbounds float* %tmp8738, i64 1
- %tmp8740 = getelementptr inbounds float* %tmp8739, i64 1
- %tmp8741 = getelementptr inbounds float* %tmp8740, i64 1
- %tmp8742 = getelementptr inbounds float* %tmp8741, i64 1
- %tmp8743 = getelementptr inbounds float* %tmp8742, i64 1
- %tmp8744 = getelementptr inbounds float* %tmp8743, i64 1
- %tmp8745 = getelementptr inbounds float* %tmp8744, i64 1
- %tmp8746 = getelementptr inbounds float* %tmp8745, i64 1
- %tmp8747 = getelementptr inbounds float* %tmp8746, i64 1
- %tmp8748 = getelementptr inbounds float* %tmp8747, i64 1
- %tmp8749 = getelementptr inbounds float* %tmp8748, i64 1
- %tmp8750 = getelementptr inbounds float* %tmp8749, i64 1
- %tmp8751 = getelementptr inbounds float* %tmp8750, i64 1
- %tmp8752 = getelementptr inbounds float* %tmp8751, i64 1
- %tmp8753 = getelementptr inbounds float* %tmp8752, i64 1
- %tmp8754 = getelementptr inbounds float* %tmp8753, i64 1
- %tmp8755 = getelementptr inbounds float* %tmp8754, i64 1
- %tmp8756 = getelementptr inbounds float* %tmp8755, i64 1
- %tmp8757 = getelementptr inbounds float* %tmp8756, i64 1
- %tmp8758 = getelementptr inbounds float* %tmp8757, i64 1
- %tmp8759 = getelementptr inbounds float* %tmp8758, i64 1
- %tmp8760 = getelementptr inbounds float* %tmp8759, i64 1
- %tmp8761 = getelementptr inbounds float* %tmp8760, i64 1
- %tmp8762 = getelementptr inbounds float* %tmp8761, i64 1
- %tmp8763 = getelementptr inbounds float* %tmp8762, i64 1
- %tmp8764 = getelementptr inbounds float* %tmp8763, i64 1
- %tmp8765 = getelementptr inbounds float* %tmp8764, i64 1
- %tmp8766 = getelementptr inbounds float* %tmp8765, i64 1
- %tmp8767 = getelementptr inbounds float* %tmp8766, i64 1
- %tmp8768 = getelementptr inbounds float* %tmp8767, i64 1
- %tmp8769 = getelementptr inbounds float* %tmp8768, i64 1
- %tmp8770 = getelementptr inbounds float* %tmp8769, i64 1
- %tmp8771 = getelementptr inbounds float* %tmp8770, i64 1
- %tmp8772 = getelementptr inbounds float* %tmp8771, i64 1
- %tmp8773 = getelementptr inbounds float* %tmp8772, i64 1
- %tmp8774 = getelementptr inbounds float* %tmp8773, i64 1
- %tmp8775 = getelementptr inbounds float* %tmp8774, i64 1
- %tmp8776 = getelementptr inbounds float* %tmp8775, i64 1
- %tmp8777 = getelementptr inbounds float* %tmp8776, i64 1
- %tmp8778 = getelementptr inbounds float* %tmp8777, i64 1
- %tmp8779 = getelementptr inbounds float* %tmp8778, i64 1
- %tmp8780 = getelementptr inbounds float* %tmp8779, i64 1
- %tmp8781 = getelementptr inbounds float* %tmp8780, i64 1
- %tmp8782 = getelementptr inbounds float* %tmp8781, i64 1
- %tmp8783 = getelementptr inbounds float* %tmp8782, i64 1
- %tmp8784 = getelementptr inbounds float* %tmp8783, i64 1
- %tmp8785 = getelementptr inbounds float* %tmp8784, i64 1
- %tmp8786 = getelementptr inbounds float* %tmp8785, i64 1
- %tmp8787 = getelementptr inbounds float* %tmp8786, i64 1
- %tmp8788 = getelementptr inbounds float* %tmp8787, i64 1
- %tmp8789 = getelementptr inbounds float* %tmp8788, i64 1
- %tmp8790 = getelementptr inbounds float* %tmp8789, i64 1
- %tmp8791 = getelementptr inbounds float* %tmp8790, i64 1
- %tmp8792 = getelementptr inbounds float* %tmp8791, i64 1
- %tmp8793 = getelementptr inbounds float* %tmp8792, i64 1
- %tmp8794 = getelementptr inbounds float* %tmp8793, i64 1
- %tmp8795 = getelementptr inbounds float* %tmp8794, i64 1
- %tmp8796 = getelementptr inbounds float* %tmp8795, i64 1
- %tmp8797 = getelementptr inbounds float* %tmp8796, i64 1
- %tmp8798 = getelementptr inbounds float* %tmp8797, i64 1
- %tmp8799 = getelementptr inbounds float* %tmp8798, i64 1
- %tmp8800 = getelementptr inbounds float* %tmp8799, i64 1
- %tmp8801 = getelementptr inbounds float* %tmp8800, i64 1
- %tmp8802 = getelementptr inbounds float* %tmp8801, i64 1
- %tmp8803 = getelementptr inbounds float* %tmp8802, i64 1
- %tmp8804 = getelementptr inbounds float* %tmp8803, i64 1
- %tmp8805 = getelementptr inbounds float* %tmp8804, i64 1
- %tmp8806 = getelementptr inbounds float* %tmp8805, i64 1
- %tmp8807 = getelementptr inbounds float* %tmp8806, i64 1
- %tmp8808 = getelementptr inbounds float* %tmp8807, i64 1
- %tmp8809 = getelementptr inbounds float* %tmp8808, i64 1
- %tmp8810 = getelementptr inbounds float* %tmp8809, i64 1
- %tmp8811 = getelementptr inbounds float* %tmp8810, i64 1
- %tmp8812 = getelementptr inbounds float* %tmp8811, i64 1
- %tmp8813 = getelementptr inbounds float* %tmp8812, i64 1
- %tmp8814 = getelementptr inbounds float* %tmp8813, i64 1
- %tmp8815 = getelementptr inbounds float* %tmp8814, i64 1
- %tmp8816 = getelementptr inbounds float* %tmp8815, i64 1
- %tmp8817 = getelementptr inbounds float* %tmp8816, i64 1
- %tmp8818 = getelementptr inbounds float* %tmp8817, i64 1
- %tmp8819 = getelementptr inbounds float* %tmp8818, i64 1
- %tmp8820 = getelementptr inbounds float* %tmp8819, i64 1
- %tmp8821 = getelementptr inbounds float* %tmp8820, i64 1
- %tmp8822 = getelementptr inbounds float* %tmp8821, i64 1
- %tmp8823 = getelementptr inbounds float* %tmp8822, i64 1
- %tmp8824 = getelementptr inbounds float* %tmp8823, i64 1
- %tmp8825 = getelementptr inbounds float* %tmp8824, i64 1
- %tmp8826 = getelementptr inbounds float* %tmp8825, i64 1
- %tmp8827 = getelementptr inbounds float* %tmp8826, i64 1
- %tmp8828 = getelementptr inbounds float* %tmp8827, i64 1
- %tmp8829 = getelementptr inbounds float* %tmp8828, i64 1
- %tmp8830 = getelementptr inbounds float* %tmp8829, i64 1
- %tmp8831 = getelementptr inbounds float* %tmp8830, i64 1
- %tmp8832 = getelementptr inbounds float* %tmp8831, i64 1
- %tmp8833 = getelementptr inbounds float* %tmp8832, i64 1
- %tmp8834 = getelementptr inbounds float* %tmp8833, i64 1
- %tmp8835 = getelementptr inbounds float* %tmp8834, i64 1
- %tmp8836 = getelementptr inbounds float* %tmp8835, i64 1
- %tmp8837 = getelementptr inbounds float* %tmp8836, i64 1
- %tmp8838 = getelementptr inbounds float* %tmp8837, i64 1
- %tmp8839 = getelementptr inbounds float* %tmp8838, i64 1
- %tmp8840 = getelementptr inbounds float* %tmp8839, i64 1
- %tmp8841 = getelementptr inbounds float* %tmp8840, i64 1
- %tmp8842 = getelementptr inbounds float* %tmp8841, i64 1
- %tmp8843 = getelementptr inbounds float* %tmp8842, i64 1
- %tmp8844 = getelementptr inbounds float* %tmp8843, i64 1
- %tmp8845 = getelementptr inbounds float* %tmp8844, i64 1
- %tmp8846 = getelementptr inbounds float* %tmp8845, i64 1
- %tmp8847 = getelementptr inbounds float* %tmp8846, i64 1
- %tmp8848 = getelementptr inbounds float* %tmp8847, i64 1
- %tmp8849 = getelementptr inbounds float* %tmp8848, i64 1
- %tmp8850 = getelementptr inbounds float* %tmp8849, i64 1
- %tmp8851 = getelementptr inbounds float* %tmp8850, i64 1
- %tmp8852 = getelementptr inbounds float* %tmp8851, i64 1
- %tmp8853 = getelementptr inbounds float* %tmp8852, i64 1
- %tmp8854 = getelementptr inbounds float* %tmp8853, i64 1
- %tmp8855 = getelementptr inbounds float* %tmp8854, i64 1
- %tmp8856 = getelementptr inbounds float* %tmp8855, i64 1
- %tmp8857 = getelementptr inbounds float* %tmp8856, i64 1
- %tmp8858 = getelementptr inbounds float* %tmp8857, i64 1
- %tmp8859 = getelementptr inbounds float* %tmp8858, i64 1
- %tmp8860 = getelementptr inbounds float* %tmp8859, i64 1
- %tmp8861 = getelementptr inbounds float* %tmp8860, i64 1
- %tmp8862 = getelementptr inbounds float* %tmp8861, i64 1
- %tmp8863 = getelementptr inbounds float* %tmp8862, i64 1
- %tmp8864 = getelementptr inbounds float* %tmp8863, i64 1
- %tmp8865 = getelementptr inbounds float* %tmp8864, i64 1
- %tmp8866 = getelementptr inbounds float* %tmp8865, i64 1
- %tmp8867 = getelementptr inbounds float* %tmp8866, i64 1
- %tmp8868 = getelementptr inbounds float* %tmp8867, i64 1
- %tmp8869 = getelementptr inbounds float* %tmp8868, i64 1
- %tmp8870 = getelementptr inbounds float* %tmp8869, i64 1
- %tmp8871 = getelementptr inbounds float* %tmp8870, i64 1
- %tmp8872 = getelementptr inbounds float* %tmp8871, i64 1
- %tmp8873 = getelementptr inbounds float* %tmp8872, i64 1
- %tmp8874 = getelementptr inbounds float* %tmp8873, i64 1
- %tmp8875 = getelementptr inbounds float* %tmp8874, i64 1
- %tmp8876 = getelementptr inbounds float* %tmp8875, i64 1
- %tmp8877 = getelementptr inbounds float* %tmp8876, i64 1
- %tmp8878 = getelementptr inbounds float* %tmp8877, i64 1
- %tmp8879 = getelementptr inbounds float* %tmp8878, i64 1
- %tmp8880 = getelementptr inbounds float* %tmp8879, i64 1
- %tmp8881 = getelementptr inbounds float* %tmp8880, i64 1
- %tmp8882 = getelementptr inbounds float* %tmp8881, i64 1
- %tmp8883 = getelementptr inbounds float* %tmp8882, i64 1
- %tmp8884 = getelementptr inbounds float* %tmp8883, i64 1
- %tmp8885 = getelementptr inbounds float* %tmp8884, i64 1
- %tmp8886 = getelementptr inbounds float* %tmp8885, i64 1
- %tmp8887 = getelementptr inbounds float* %tmp8886, i64 1
- %tmp8888 = getelementptr inbounds float* %tmp8887, i64 1
- %tmp8889 = getelementptr inbounds float* %tmp8888, i64 1
- %tmp8890 = getelementptr inbounds float* %tmp8889, i64 1
- %tmp8891 = getelementptr inbounds float* %tmp8890, i64 1
- %tmp8892 = getelementptr inbounds float* %tmp8891, i64 1
- %tmp8893 = getelementptr inbounds float* %tmp8892, i64 1
- %tmp8894 = getelementptr inbounds float* %tmp8893, i64 1
- %tmp8895 = getelementptr inbounds float* %tmp8894, i64 1
- %tmp8896 = getelementptr inbounds float* %tmp8895, i64 1
- %tmp8897 = getelementptr inbounds float* %tmp8896, i64 1
- %tmp8898 = getelementptr inbounds float* %tmp8897, i64 1
- %tmp8899 = getelementptr inbounds float* %tmp8898, i64 1
- %tmp8900 = getelementptr inbounds float* %tmp8899, i64 1
- %tmp8901 = getelementptr inbounds float* %tmp8900, i64 1
- %tmp8902 = getelementptr inbounds float* %tmp8901, i64 1
- %tmp8903 = getelementptr inbounds float* %tmp8902, i64 1
- %tmp8904 = getelementptr inbounds float* %tmp8903, i64 1
- %tmp8905 = getelementptr inbounds float* %tmp8904, i64 1
- %tmp8906 = getelementptr inbounds float* %tmp8905, i64 1
- %tmp8907 = getelementptr inbounds float* %tmp8906, i64 1
- %tmp8908 = getelementptr inbounds float* %tmp8907, i64 1
- %tmp8909 = getelementptr inbounds float* %tmp8908, i64 1
- %tmp8910 = getelementptr inbounds float* %tmp8909, i64 1
- %tmp8911 = getelementptr inbounds float* %tmp8910, i64 1
- %tmp8912 = getelementptr inbounds float* %tmp8911, i64 1
- %tmp8913 = getelementptr inbounds float* %tmp8912, i64 1
- %tmp8914 = getelementptr inbounds float* %tmp8913, i64 1
- %tmp8915 = getelementptr inbounds float* %tmp8914, i64 1
- %tmp8916 = getelementptr inbounds float* %tmp8915, i64 1
- %tmp8917 = getelementptr inbounds float* %tmp8916, i64 1
- %tmp8918 = getelementptr inbounds float* %tmp8917, i64 1
- %tmp8919 = getelementptr inbounds float* %tmp8918, i64 1
- %tmp8920 = getelementptr inbounds float* %tmp8919, i64 1
- %tmp8921 = getelementptr inbounds float* %tmp8920, i64 1
- %tmp8922 = getelementptr inbounds float* %tmp8921, i64 1
- %tmp8923 = getelementptr inbounds float* %tmp8922, i64 1
- %tmp8924 = getelementptr inbounds float* %tmp8923, i64 1
- %tmp8925 = getelementptr inbounds float* %tmp8924, i64 1
- %tmp8926 = getelementptr inbounds float* %tmp8925, i64 1
- %tmp8927 = getelementptr inbounds float* %tmp8926, i64 1
- %tmp8928 = getelementptr inbounds float* %tmp8927, i64 1
- %tmp8929 = getelementptr inbounds float* %tmp8928, i64 1
- %tmp8930 = getelementptr inbounds float* %tmp8929, i64 1
- %tmp8931 = getelementptr inbounds float* %tmp8930, i64 1
- %tmp8932 = getelementptr inbounds float* %tmp8931, i64 1
- %tmp8933 = getelementptr inbounds float* %tmp8932, i64 1
- %tmp8934 = getelementptr inbounds float* %tmp8933, i64 1
- %tmp8935 = getelementptr inbounds float* %tmp8934, i64 1
- %tmp8936 = getelementptr inbounds float* %tmp8935, i64 1
- %tmp8937 = getelementptr inbounds float* %tmp8936, i64 1
- %tmp8938 = getelementptr inbounds float* %tmp8937, i64 1
- %tmp8939 = getelementptr inbounds float* %tmp8938, i64 1
- %tmp8940 = getelementptr inbounds float* %tmp8939, i64 1
- %tmp8941 = getelementptr inbounds float* %tmp8940, i64 1
- %tmp8942 = getelementptr inbounds float* %tmp8941, i64 1
- %tmp8943 = getelementptr inbounds float* %tmp8942, i64 1
- %tmp8944 = getelementptr inbounds float* %tmp8943, i64 1
- %tmp8945 = getelementptr inbounds float* %tmp8944, i64 1
- %tmp8946 = getelementptr inbounds float* %tmp8945, i64 1
- %tmp8947 = getelementptr inbounds float* %tmp8946, i64 1
- %tmp8948 = getelementptr inbounds float* %tmp8947, i64 1
- %tmp8949 = getelementptr inbounds float* %tmp8948, i64 1
- %tmp8950 = getelementptr inbounds float* %tmp8949, i64 1
- %tmp8951 = getelementptr inbounds float* %tmp8950, i64 1
- %tmp8952 = getelementptr inbounds float* %tmp8951, i64 1
- %tmp8953 = getelementptr inbounds float* %tmp8952, i64 1
- %tmp8954 = getelementptr inbounds float* %tmp8953, i64 1
- %tmp8955 = getelementptr inbounds float* %tmp8954, i64 1
- %tmp8956 = getelementptr inbounds float* %tmp8955, i64 1
- %tmp8957 = getelementptr inbounds float* %tmp8956, i64 1
- %tmp8958 = getelementptr inbounds float* %tmp8957, i64 1
- %tmp8959 = getelementptr inbounds float* %tmp8958, i64 1
- %tmp8960 = getelementptr inbounds float* %tmp8959, i64 1
- %tmp8961 = getelementptr inbounds float* %tmp8960, i64 1
- %tmp8962 = getelementptr inbounds float* %tmp8961, i64 1
- %tmp8963 = getelementptr inbounds float* %tmp8962, i64 1
- %tmp8964 = getelementptr inbounds float* %tmp8963, i64 1
- %tmp8965 = getelementptr inbounds float* %tmp8964, i64 1
- %tmp8966 = getelementptr inbounds float* %tmp8965, i64 1
- %tmp8967 = getelementptr inbounds float* %tmp8966, i64 1
- %tmp8968 = getelementptr inbounds float* %tmp8967, i64 1
- %tmp8969 = getelementptr inbounds float* %tmp8968, i64 1
- %tmp8970 = getelementptr inbounds float* %tmp8969, i64 1
- %tmp8971 = getelementptr inbounds float* %tmp8970, i64 1
- %tmp8972 = getelementptr inbounds float* %tmp8971, i64 1
- %tmp8973 = getelementptr inbounds float* %tmp8972, i64 1
- %tmp8974 = getelementptr inbounds float* %tmp8973, i64 1
- %tmp8975 = getelementptr inbounds float* %tmp8974, i64 1
- %tmp8976 = getelementptr inbounds float* %tmp8975, i64 1
- %tmp8977 = getelementptr inbounds float* %tmp8976, i64 1
- %tmp8978 = getelementptr inbounds float* %tmp8977, i64 1
- %tmp8979 = getelementptr inbounds float* %tmp8978, i64 1
- %tmp8980 = getelementptr inbounds float* %tmp8979, i64 1
- %tmp8981 = getelementptr inbounds float* %tmp8980, i64 1
- %tmp8982 = getelementptr inbounds float* %tmp8981, i64 1
- %tmp8983 = getelementptr inbounds float* %tmp8982, i64 1
- %tmp8984 = getelementptr inbounds float* %tmp8983, i64 1
- %tmp8985 = getelementptr inbounds float* %tmp8984, i64 1
- %tmp8986 = getelementptr inbounds float* %tmp8985, i64 1
- %tmp8987 = getelementptr inbounds float* %tmp8986, i64 1
- %tmp8988 = getelementptr inbounds float* %tmp8987, i64 1
- %tmp8989 = getelementptr inbounds float* %tmp8988, i64 1
- %tmp8990 = getelementptr inbounds float* %tmp8989, i64 1
- %tmp8991 = getelementptr inbounds float* %tmp8990, i64 1
- %tmp8992 = getelementptr inbounds float* %tmp8991, i64 1
- %tmp8993 = getelementptr inbounds float* %tmp8992, i64 1
- %tmp8994 = getelementptr inbounds float* %tmp8993, i64 1
- %tmp8995 = getelementptr inbounds float* %tmp8994, i64 1
- %tmp8996 = getelementptr inbounds float* %tmp8995, i64 1
- %tmp8997 = getelementptr inbounds float* %tmp8996, i64 1
- %tmp8998 = getelementptr inbounds float* %tmp8997, i64 1
- %tmp8999 = getelementptr inbounds float* %tmp8998, i64 1
- %tmp9000 = getelementptr inbounds float* %tmp8999, i64 1
- %tmp9001 = getelementptr inbounds float* %tmp9000, i64 1
- %tmp9002 = getelementptr inbounds float* %tmp9001, i64 1
- %tmp9003 = getelementptr inbounds float* %tmp9002, i64 1
- %tmp9004 = getelementptr inbounds float* %tmp9003, i64 1
- %tmp9005 = getelementptr inbounds float* %tmp9004, i64 1
- %tmp9006 = getelementptr inbounds float* %tmp9005, i64 1
- %tmp9007 = getelementptr inbounds float* %tmp9006, i64 1
- %tmp9008 = getelementptr inbounds float* %tmp9007, i64 1
- %tmp9009 = getelementptr inbounds float* %tmp9008, i64 1
- %tmp9010 = getelementptr inbounds float* %tmp9009, i64 1
- %tmp9011 = getelementptr inbounds float* %tmp9010, i64 1
- %tmp9012 = getelementptr inbounds float* %tmp9011, i64 1
- %tmp9013 = getelementptr inbounds float* %tmp9012, i64 1
- %tmp9014 = getelementptr inbounds float* %tmp9013, i64 1
- %tmp9015 = getelementptr inbounds float* %tmp9014, i64 1
- %tmp9016 = getelementptr inbounds float* %tmp9015, i64 1
- %tmp9017 = getelementptr inbounds float* %tmp9016, i64 1
- %tmp9018 = getelementptr inbounds float* %tmp9017, i64 1
- %tmp9019 = getelementptr inbounds float* %tmp9018, i64 1
- %tmp9020 = getelementptr inbounds float* %tmp9019, i64 1
- %tmp9021 = getelementptr inbounds float* %tmp9020, i64 1
- %tmp9022 = getelementptr inbounds float* %tmp9021, i64 1
- %tmp9023 = getelementptr inbounds float* %tmp9022, i64 1
- %tmp9024 = getelementptr inbounds float* %tmp9023, i64 1
- %tmp9025 = getelementptr inbounds float* %tmp9024, i64 1
- %tmp9026 = getelementptr inbounds float* %tmp9025, i64 1
- %tmp9027 = getelementptr inbounds float* %tmp9026, i64 1
- %tmp9028 = getelementptr inbounds float* %tmp9027, i64 1
- %tmp9029 = getelementptr inbounds float* %tmp9028, i64 1
- %tmp9030 = getelementptr inbounds float* %tmp9029, i64 1
- %tmp9031 = getelementptr inbounds float* %tmp9030, i64 1
- %tmp9032 = getelementptr inbounds float* %tmp9031, i64 1
- %tmp9033 = getelementptr inbounds float* %tmp9032, i64 1
- %tmp9034 = getelementptr inbounds float* %tmp9033, i64 1
- %tmp9035 = getelementptr inbounds float* %tmp9034, i64 1
- %tmp9036 = getelementptr inbounds float* %tmp9035, i64 1
- %tmp9037 = getelementptr inbounds float* %tmp9036, i64 1
- %tmp9038 = getelementptr inbounds float* %tmp9037, i64 1
- %tmp9039 = getelementptr inbounds float* %tmp9038, i64 1
- %tmp9040 = getelementptr inbounds float* %tmp9039, i64 1
- %tmp9041 = getelementptr inbounds float* %tmp9040, i64 1
- %tmp9042 = getelementptr inbounds float* %tmp9041, i64 1
- %tmp9043 = getelementptr inbounds float* %tmp9042, i64 1
- %tmp9044 = getelementptr inbounds float* %tmp9043, i64 1
- %tmp9045 = getelementptr inbounds float* %tmp9044, i64 1
- %tmp9046 = getelementptr inbounds float* %tmp9045, i64 1
- %tmp9047 = getelementptr inbounds float* %tmp9046, i64 1
- %tmp9048 = getelementptr inbounds float* %tmp9047, i64 1
- %tmp9049 = getelementptr inbounds float* %tmp9048, i64 1
- %tmp9050 = getelementptr inbounds float* %tmp9049, i64 1
- %tmp9051 = getelementptr inbounds float* %tmp9050, i64 1
- %tmp9052 = getelementptr inbounds float* %tmp9051, i64 1
- %tmp9053 = getelementptr inbounds float* %tmp9052, i64 1
- %tmp9054 = getelementptr inbounds float* %tmp9053, i64 1
- %tmp9055 = getelementptr inbounds float* %tmp9054, i64 1
- %tmp9056 = getelementptr inbounds float* %tmp9055, i64 1
- %tmp9057 = getelementptr inbounds float* %tmp9056, i64 1
- %tmp9058 = getelementptr inbounds float* %tmp9057, i64 1
- %tmp9059 = getelementptr inbounds float* %tmp9058, i64 1
- %tmp9060 = getelementptr inbounds float* %tmp9059, i64 1
- %tmp9061 = getelementptr inbounds float* %tmp9060, i64 1
- %tmp9062 = getelementptr inbounds float* %tmp9061, i64 1
- %tmp9063 = getelementptr inbounds float* %tmp9062, i64 1
- %tmp9064 = getelementptr inbounds float* %tmp9063, i64 1
- %tmp9065 = getelementptr inbounds float* %tmp9064, i64 1
- %tmp9066 = getelementptr inbounds float* %tmp9065, i64 1
- %tmp9067 = getelementptr inbounds float* %tmp9066, i64 1
- %tmp9068 = getelementptr inbounds float* %tmp9067, i64 1
- %tmp9069 = getelementptr inbounds float* %tmp9068, i64 1
- %tmp9070 = getelementptr inbounds float* %tmp9069, i64 1
- %tmp9071 = getelementptr inbounds float* %tmp9070, i64 1
- %tmp9072 = getelementptr inbounds float* %tmp9071, i64 1
- %tmp9073 = getelementptr inbounds float* %tmp9072, i64 1
- %tmp9074 = getelementptr inbounds float* %tmp9073, i64 1
- %tmp9075 = getelementptr inbounds float* %tmp9074, i64 1
- %tmp9076 = getelementptr inbounds float* %tmp9075, i64 1
- %tmp9077 = getelementptr inbounds float* %tmp9076, i64 1
- %tmp9078 = getelementptr inbounds float* %tmp9077, i64 1
- %tmp9079 = getelementptr inbounds float* %tmp9078, i64 1
- %tmp9080 = getelementptr inbounds float* %tmp9079, i64 1
- %tmp9081 = getelementptr inbounds float* %tmp9080, i64 1
- %tmp9082 = getelementptr inbounds float* %tmp9081, i64 1
- %tmp9083 = getelementptr inbounds float* %tmp9082, i64 1
- %tmp9084 = getelementptr inbounds float* %tmp9083, i64 1
- %tmp9085 = getelementptr inbounds float* %tmp9084, i64 1
- %tmp9086 = getelementptr inbounds float* %tmp9085, i64 1
- %tmp9087 = getelementptr inbounds float* %tmp9086, i64 1
- %tmp9088 = getelementptr inbounds float* %tmp9087, i64 1
- %tmp9089 = getelementptr inbounds float* %tmp9088, i64 1
- %tmp9090 = getelementptr inbounds float* %tmp9089, i64 1
- %tmp9091 = getelementptr inbounds float* %tmp9090, i64 1
- %tmp9092 = getelementptr inbounds float* %tmp9091, i64 1
- %tmp9093 = getelementptr inbounds float* %tmp9092, i64 1
- %tmp9094 = getelementptr inbounds float* %tmp9093, i64 1
- %tmp9095 = getelementptr inbounds float* %tmp9094, i64 1
- %tmp9096 = getelementptr inbounds float* %tmp9095, i64 1
- %tmp9097 = getelementptr inbounds float* %tmp9096, i64 1
- %tmp9098 = getelementptr inbounds float* %tmp9097, i64 1
- %tmp9099 = getelementptr inbounds float* %tmp9098, i64 1
- %tmp9100 = getelementptr inbounds float* %tmp9099, i64 1
- %tmp9101 = getelementptr inbounds float* %tmp9100, i64 1
- %tmp9102 = getelementptr inbounds float* %tmp9101, i64 1
- %tmp9103 = getelementptr inbounds float* %tmp9102, i64 1
- %tmp9104 = getelementptr inbounds float* %tmp9103, i64 1
- %tmp9105 = getelementptr inbounds float* %tmp9104, i64 1
- %tmp9106 = getelementptr inbounds float* %tmp9105, i64 1
- %tmp9107 = getelementptr inbounds float* %tmp9106, i64 1
- %tmp9108 = getelementptr inbounds float* %tmp9107, i64 1
- %tmp9109 = getelementptr inbounds float* %tmp9108, i64 1
- %tmp9110 = getelementptr inbounds float* %tmp9109, i64 1
- %tmp9111 = getelementptr inbounds float* %tmp9110, i64 1
- %tmp9112 = getelementptr inbounds float* %tmp9111, i64 1
- %tmp9113 = getelementptr inbounds float* %tmp9112, i64 1
- %tmp9114 = getelementptr inbounds float* %tmp9113, i64 1
- %tmp9115 = getelementptr inbounds float* %tmp9114, i64 1
- %tmp9116 = getelementptr inbounds float* %tmp9115, i64 1
- %tmp9117 = getelementptr inbounds float* %tmp9116, i64 1
- %tmp9118 = getelementptr inbounds float* %tmp9117, i64 1
- %tmp9119 = getelementptr inbounds float* %tmp9118, i64 1
- %tmp9120 = getelementptr inbounds float* %tmp9119, i64 1
- %tmp9121 = getelementptr inbounds float* %tmp9120, i64 1
- %tmp9122 = getelementptr inbounds float* %tmp9121, i64 1
- %tmp9123 = getelementptr inbounds float* %tmp9122, i64 1
- %tmp9124 = getelementptr inbounds float* %tmp9123, i64 1
- %tmp9125 = getelementptr inbounds float* %tmp9124, i64 1
- %tmp9126 = getelementptr inbounds float* %tmp9125, i64 1
- %tmp9127 = getelementptr inbounds float* %tmp9126, i64 1
- %tmp9128 = getelementptr inbounds float* %tmp9127, i64 1
- %tmp9129 = getelementptr inbounds float* %tmp9128, i64 1
- %tmp9130 = getelementptr inbounds float* %tmp9129, i64 1
- %tmp9131 = getelementptr inbounds float* %tmp9130, i64 1
- %tmp9132 = getelementptr inbounds float* %tmp9131, i64 1
- %tmp9133 = getelementptr inbounds float* %tmp9132, i64 1
- %tmp9134 = getelementptr inbounds float* %tmp9133, i64 1
- %tmp9135 = getelementptr inbounds float* %tmp9134, i64 1
- %tmp9136 = getelementptr inbounds float* %tmp9135, i64 1
- %tmp9137 = getelementptr inbounds float* %tmp9136, i64 1
- %tmp9138 = getelementptr inbounds float* %tmp9137, i64 1
- %tmp9139 = getelementptr inbounds float* %tmp9138, i64 1
- %tmp9140 = getelementptr inbounds float* %tmp9139, i64 1
- %tmp9141 = getelementptr inbounds float* %tmp9140, i64 1
- %tmp9142 = getelementptr inbounds float* %tmp9141, i64 1
- %tmp9143 = getelementptr inbounds float* %tmp9142, i64 1
- %tmp9144 = getelementptr inbounds float* %tmp9143, i64 1
- %tmp9145 = getelementptr inbounds float* %tmp9144, i64 1
- %tmp9146 = getelementptr inbounds float* %tmp9145, i64 1
- %tmp9147 = getelementptr inbounds float* %tmp9146, i64 1
- %tmp9148 = getelementptr inbounds float* %tmp9147, i64 1
- %tmp9149 = getelementptr inbounds float* %tmp9148, i64 1
- %tmp9150 = getelementptr inbounds float* %tmp9149, i64 1
- %tmp9151 = getelementptr inbounds float* %tmp9150, i64 1
- %tmp9152 = getelementptr inbounds float* %tmp9151, i64 1
- %tmp9153 = getelementptr inbounds float* %tmp9152, i64 1
- %tmp9154 = getelementptr inbounds float* %tmp9153, i64 1
- %tmp9155 = getelementptr inbounds float* %tmp9154, i64 1
- %tmp9156 = getelementptr inbounds float* %tmp9155, i64 1
- %tmp9157 = getelementptr inbounds float* %tmp9156, i64 1
- %tmp9158 = getelementptr inbounds float* %tmp9157, i64 1
- %tmp9159 = getelementptr inbounds float* %tmp9158, i64 1
- %tmp9160 = getelementptr inbounds float* %tmp9159, i64 1
- %tmp9161 = getelementptr inbounds float* %tmp9160, i64 1
- %tmp9162 = getelementptr inbounds float* %tmp9161, i64 1
- %tmp9163 = getelementptr inbounds float* %tmp9162, i64 1
- %tmp9164 = getelementptr inbounds float* %tmp9163, i64 1
- %tmp9165 = getelementptr inbounds float* %tmp9164, i64 1
- %tmp9166 = getelementptr inbounds float* %tmp9165, i64 1
- %tmp9167 = getelementptr inbounds float* %tmp9166, i64 1
- %tmp9168 = getelementptr inbounds float* %tmp9167, i64 1
- %tmp9169 = getelementptr inbounds float* %tmp9168, i64 1
- %tmp9170 = getelementptr inbounds float* %tmp9169, i64 1
- %tmp9171 = getelementptr inbounds float* %tmp9170, i64 1
- %tmp9172 = getelementptr inbounds float* %tmp9171, i64 1
- %tmp9173 = getelementptr inbounds float* %tmp9172, i64 1
- %tmp9174 = getelementptr inbounds float* %tmp9173, i64 1
- %tmp9175 = getelementptr inbounds float* %tmp9174, i64 1
- %tmp9176 = getelementptr inbounds float* %tmp9175, i64 1
- %tmp9177 = getelementptr inbounds float* %tmp9176, i64 1
- %tmp9178 = getelementptr inbounds float* %tmp9177, i64 1
- %tmp9179 = getelementptr inbounds float* %tmp9178, i64 1
- %tmp9180 = getelementptr inbounds float* %tmp9179, i64 1
- %tmp9181 = getelementptr inbounds float* %tmp9180, i64 1
- %tmp9182 = getelementptr inbounds float* %tmp9181, i64 1
- %tmp9183 = getelementptr inbounds float* %tmp9182, i64 1
- %tmp9184 = getelementptr inbounds float* %tmp9183, i64 1
- %tmp9185 = getelementptr inbounds float* %tmp9184, i64 1
- %tmp9186 = getelementptr inbounds float* %tmp9185, i64 1
- %tmp9187 = getelementptr inbounds float* %tmp9186, i64 1
- %tmp9188 = getelementptr inbounds float* %tmp9187, i64 1
- %tmp9189 = getelementptr inbounds float* %tmp9188, i64 1
- %tmp9190 = getelementptr inbounds float* %tmp9189, i64 1
- %tmp9191 = getelementptr inbounds float* %tmp9190, i64 1
- %tmp9192 = getelementptr inbounds float* %tmp9191, i64 1
- %tmp9193 = getelementptr inbounds float* %tmp9192, i64 1
- %tmp9194 = getelementptr inbounds float* %tmp9193, i64 1
- %tmp9195 = getelementptr inbounds float* %tmp9194, i64 1
- %tmp9196 = getelementptr inbounds float* %tmp9195, i64 1
- %tmp9197 = getelementptr inbounds float* %tmp9196, i64 1
- %tmp9198 = getelementptr inbounds float* %tmp9197, i64 1
- %tmp9199 = getelementptr inbounds float* %tmp9198, i64 1
- %tmp9200 = getelementptr inbounds float* %tmp9199, i64 1
- %tmp9201 = getelementptr inbounds float* %tmp9200, i64 1
- %tmp9202 = getelementptr inbounds float* %tmp9201, i64 1
- %tmp9203 = getelementptr inbounds float* %tmp9202, i64 1
- %tmp9204 = getelementptr inbounds float* %tmp9203, i64 1
- %tmp9205 = getelementptr inbounds float* %tmp9204, i64 1
- %tmp9206 = getelementptr inbounds float* %tmp9205, i64 1
- %tmp9207 = getelementptr inbounds float* %tmp9206, i64 1
- %tmp9208 = getelementptr inbounds float* %tmp9207, i64 1
- %tmp9209 = getelementptr inbounds float* %tmp9208, i64 1
- %tmp9210 = getelementptr inbounds float* %tmp9209, i64 1
- %tmp9211 = getelementptr inbounds float* %tmp9210, i64 1
- %tmp9212 = getelementptr inbounds float* %tmp9211, i64 1
- %tmp9213 = getelementptr inbounds float* %tmp9212, i64 1
- %tmp9214 = getelementptr inbounds float* %tmp9213, i64 1
- %tmp9215 = getelementptr inbounds float* %tmp9214, i64 1
- %tmp9216 = getelementptr inbounds float* %tmp9215, i64 1
- %tmp9217 = getelementptr inbounds float* %tmp9216, i64 1
- %tmp9218 = getelementptr inbounds float* %tmp9217, i64 1
- %tmp9219 = getelementptr inbounds float* %tmp9218, i64 1
- %tmp9220 = getelementptr inbounds float* %tmp9219, i64 1
- %tmp9221 = getelementptr inbounds float* %tmp9220, i64 1
- %tmp9222 = getelementptr inbounds float* %tmp9221, i64 1
- %tmp9223 = getelementptr inbounds float* %tmp9222, i64 1
- %tmp9224 = getelementptr inbounds float* %tmp9223, i64 1
- %tmp9225 = getelementptr inbounds float* %tmp9224, i64 1
- %tmp9226 = getelementptr inbounds float* %tmp9225, i64 1
- %tmp9227 = getelementptr inbounds float* %tmp9226, i64 1
- %tmp9228 = getelementptr inbounds float* %tmp9227, i64 1
- %tmp9229 = getelementptr inbounds float* %tmp9228, i64 1
- %tmp9230 = getelementptr inbounds float* %tmp9229, i64 1
- %tmp9231 = getelementptr inbounds float* %tmp9230, i64 1
- %tmp9232 = getelementptr inbounds float* %tmp9231, i64 1
- %tmp9233 = getelementptr inbounds float* %tmp9232, i64 1
- %tmp9234 = getelementptr inbounds float* %tmp9233, i64 1
- %tmp9235 = getelementptr inbounds float* %tmp9234, i64 1
- %tmp9236 = getelementptr inbounds float* %tmp9235, i64 1
- %tmp9237 = getelementptr inbounds float* %tmp9236, i64 1
- %tmp9238 = getelementptr inbounds float* %tmp9237, i64 1
- %tmp9239 = getelementptr inbounds float* %tmp9238, i64 1
- %tmp9240 = getelementptr inbounds float* %tmp9239, i64 1
- %tmp9241 = getelementptr inbounds float* %tmp9240, i64 1
- %tmp9242 = getelementptr inbounds float* %tmp9241, i64 1
- %tmp9243 = getelementptr inbounds float* %tmp9242, i64 1
- %tmp9244 = getelementptr inbounds float* %tmp9243, i64 1
- %tmp9245 = getelementptr inbounds float* %tmp9244, i64 1
- %tmp9246 = getelementptr inbounds float* %tmp9245, i64 1
- %tmp9247 = getelementptr inbounds float* %tmp9246, i64 1
- %tmp9248 = getelementptr inbounds float* %tmp9247, i64 1
- %tmp9249 = getelementptr inbounds float* %tmp9248, i64 1
- %tmp9250 = getelementptr inbounds float* %tmp9249, i64 1
- %tmp9251 = getelementptr inbounds float* %tmp9250, i64 1
- %tmp9252 = getelementptr inbounds float* %tmp9251, i64 1
- %tmp9253 = getelementptr inbounds float* %tmp9252, i64 1
- %tmp9254 = getelementptr inbounds float* %tmp9253, i64 1
- %tmp9255 = getelementptr inbounds float* %tmp9254, i64 1
- %tmp9256 = getelementptr inbounds float* %tmp9255, i64 1
- %tmp9257 = getelementptr inbounds float* %tmp9256, i64 1
- %tmp9258 = getelementptr inbounds float* %tmp9257, i64 1
- %tmp9259 = getelementptr inbounds float* %tmp9258, i64 1
- %tmp9260 = getelementptr inbounds float* %tmp9259, i64 1
- %tmp9261 = getelementptr inbounds float* %tmp9260, i64 1
- %tmp9262 = getelementptr inbounds float* %tmp9261, i64 1
- %tmp9263 = getelementptr inbounds float* %tmp9262, i64 1
- %tmp9264 = getelementptr inbounds float* %tmp9263, i64 1
- %tmp9265 = getelementptr inbounds float* %tmp9264, i64 1
- %tmp9266 = getelementptr inbounds float* %tmp9265, i64 1
- %tmp9267 = getelementptr inbounds float* %tmp9266, i64 1
- %tmp9268 = getelementptr inbounds float* %tmp9267, i64 1
- %tmp9269 = getelementptr inbounds float* %tmp9268, i64 1
- %tmp9270 = getelementptr inbounds float* %tmp9269, i64 1
- %tmp9271 = getelementptr inbounds float* %tmp9270, i64 1
- %tmp9272 = getelementptr inbounds float* %tmp9271, i64 1
- %tmp9273 = getelementptr inbounds float* %tmp9272, i64 1
- %tmp9274 = getelementptr inbounds float* %tmp9273, i64 1
- %tmp9275 = getelementptr inbounds float* %tmp9274, i64 1
- %tmp9276 = getelementptr inbounds float* %tmp9275, i64 1
- %tmp9277 = getelementptr inbounds float* %tmp9276, i64 1
- %tmp9278 = getelementptr inbounds float* %tmp9277, i64 1
- %tmp9279 = getelementptr inbounds float* %tmp9278, i64 1
- %tmp9280 = getelementptr inbounds float* %tmp9279, i64 1
- %tmp9281 = getelementptr inbounds float* %tmp9280, i64 1
- %tmp9282 = getelementptr inbounds float* %tmp9281, i64 1
- %tmp9283 = getelementptr inbounds float* %tmp9282, i64 1
- %tmp9284 = getelementptr inbounds float* %tmp9283, i64 1
- %tmp9285 = getelementptr inbounds float* %tmp9284, i64 1
- %tmp9286 = getelementptr inbounds float* %tmp9285, i64 1
- %tmp9287 = getelementptr inbounds float* %tmp9286, i64 1
- %tmp9288 = getelementptr inbounds float* %tmp9287, i64 1
- %tmp9289 = getelementptr inbounds float* %tmp9288, i64 1
- %tmp9290 = getelementptr inbounds float* %tmp9289, i64 1
- %tmp9291 = getelementptr inbounds float* %tmp9290, i64 1
- %tmp9292 = getelementptr inbounds float* %tmp9291, i64 1
- %tmp9293 = getelementptr inbounds float* %tmp9292, i64 1
- %tmp9294 = getelementptr inbounds float* %tmp9293, i64 1
- %tmp9295 = getelementptr inbounds float* %tmp9294, i64 1
- %tmp9296 = getelementptr inbounds float* %tmp9295, i64 1
- %tmp9297 = getelementptr inbounds float* %tmp9296, i64 1
- %tmp9298 = getelementptr inbounds float* %tmp9297, i64 1
- %tmp9299 = getelementptr inbounds float* %tmp9298, i64 1
- %tmp9300 = getelementptr inbounds float* %tmp9299, i64 1
- %tmp9301 = getelementptr inbounds float* %tmp9300, i64 1
- %tmp9302 = getelementptr inbounds float* %tmp9301, i64 1
- %tmp9303 = getelementptr inbounds float* %tmp9302, i64 1
- %tmp9304 = getelementptr inbounds float* %tmp9303, i64 1
- %tmp9305 = getelementptr inbounds float* %tmp9304, i64 1
- %tmp9306 = getelementptr inbounds float* %tmp9305, i64 1
- %tmp9307 = getelementptr inbounds float* %tmp9306, i64 1
- %tmp9308 = getelementptr inbounds float* %tmp9307, i64 1
- %tmp9309 = getelementptr inbounds float* %tmp9308, i64 1
- %tmp9310 = getelementptr inbounds float* %tmp9309, i64 1
- %tmp9311 = getelementptr inbounds float* %tmp9310, i64 1
- %tmp9312 = getelementptr inbounds float* %tmp9311, i64 1
- %tmp9313 = getelementptr inbounds float* %tmp9312, i64 1
- %tmp9314 = getelementptr inbounds float* %tmp9313, i64 1
- %tmp9315 = getelementptr inbounds float* %tmp9314, i64 1
- %tmp9316 = getelementptr inbounds float* %tmp9315, i64 1
- %tmp9317 = getelementptr inbounds float* %tmp9316, i64 1
- %tmp9318 = getelementptr inbounds float* %tmp9317, i64 1
- %tmp9319 = getelementptr inbounds float* %tmp9318, i64 1
- %tmp9320 = getelementptr inbounds float* %tmp9319, i64 1
- %tmp9321 = getelementptr inbounds float* %tmp9320, i64 1
- %tmp9322 = getelementptr inbounds float* %tmp9321, i64 1
- %tmp9323 = getelementptr inbounds float* %tmp9322, i64 1
- %tmp9324 = getelementptr inbounds float* %tmp9323, i64 1
- %tmp9325 = getelementptr inbounds float* %tmp9324, i64 1
- %tmp9326 = getelementptr inbounds float* %tmp9325, i64 1
- %tmp9327 = getelementptr inbounds float* %tmp9326, i64 1
- %tmp9328 = getelementptr inbounds float* %tmp9327, i64 1
- %tmp9329 = getelementptr inbounds float* %tmp9328, i64 1
- %tmp9330 = getelementptr inbounds float* %tmp9329, i64 1
- %tmp9331 = getelementptr inbounds float* %tmp9330, i64 1
- %tmp9332 = getelementptr inbounds float* %tmp9331, i64 1
- %tmp9333 = getelementptr inbounds float* %tmp9332, i64 1
- %tmp9334 = getelementptr inbounds float* %tmp9333, i64 1
- %tmp9335 = getelementptr inbounds float* %tmp9334, i64 1
- %tmp9336 = getelementptr inbounds float* %tmp9335, i64 1
- %tmp9337 = getelementptr inbounds float* %tmp9336, i64 1
- %tmp9338 = getelementptr inbounds float* %tmp9337, i64 1
- %tmp9339 = getelementptr inbounds float* %tmp9338, i64 1
- %tmp9340 = getelementptr inbounds float* %tmp9339, i64 1
- %tmp9341 = getelementptr inbounds float* %tmp9340, i64 1
- %tmp9342 = getelementptr inbounds float* %tmp9341, i64 1
- %tmp9343 = getelementptr inbounds float* %tmp9342, i64 1
- %tmp9344 = getelementptr inbounds float* %tmp9343, i64 1
- %tmp9345 = getelementptr inbounds float* %tmp9344, i64 1
- %tmp9346 = getelementptr inbounds float* %tmp9345, i64 1
- %tmp9347 = getelementptr inbounds float* %tmp9346, i64 1
- %tmp9348 = getelementptr inbounds float* %tmp9347, i64 1
- %tmp9349 = getelementptr inbounds float* %tmp9348, i64 1
- %tmp9350 = getelementptr inbounds float* %tmp9349, i64 1
- %tmp9351 = getelementptr inbounds float* %tmp9350, i64 1
- %tmp9352 = getelementptr inbounds float* %tmp9351, i64 1
- %tmp9353 = getelementptr inbounds float* %tmp9352, i64 1
- %tmp9354 = getelementptr inbounds float* %tmp9353, i64 1
- %tmp9355 = getelementptr inbounds float* %tmp9354, i64 1
- %tmp9356 = getelementptr inbounds float* %tmp9355, i64 1
- %tmp9357 = getelementptr inbounds float* %tmp9356, i64 1
- %tmp9358 = getelementptr inbounds float* %tmp9357, i64 1
- %tmp9359 = getelementptr inbounds float* %tmp9358, i64 1
- %tmp9360 = getelementptr inbounds float* %tmp9359, i64 1
- %tmp9361 = getelementptr inbounds float* %tmp9360, i64 1
- %tmp9362 = getelementptr inbounds float* %tmp9361, i64 1
- %tmp9363 = getelementptr inbounds float* %tmp9362, i64 1
- %tmp9364 = getelementptr inbounds float* %tmp9363, i64 1
- %tmp9365 = getelementptr inbounds float* %tmp9364, i64 1
- %tmp9366 = getelementptr inbounds float* %tmp9365, i64 1
- %tmp9367 = getelementptr inbounds float* %tmp9366, i64 1
- %tmp9368 = getelementptr inbounds float* %tmp9367, i64 1
- %tmp9369 = getelementptr inbounds float* %tmp9368, i64 1
- %tmp9370 = getelementptr inbounds float* %tmp9369, i64 1
- %tmp9371 = getelementptr inbounds float* %tmp9370, i64 1
- %tmp9372 = getelementptr inbounds float* %tmp9371, i64 1
- %tmp9373 = getelementptr inbounds float* %tmp9372, i64 1
- %tmp9374 = getelementptr inbounds float* %tmp9373, i64 1
- %tmp9375 = getelementptr inbounds float* %tmp9374, i64 1
- %tmp9376 = getelementptr inbounds float* %tmp9375, i64 1
- %tmp9377 = getelementptr inbounds float* %tmp9376, i64 1
- %tmp9378 = getelementptr inbounds float* %tmp9377, i64 1
- %tmp9379 = getelementptr inbounds float* %tmp9378, i64 1
- %tmp9380 = getelementptr inbounds float* %tmp9379, i64 1
- %tmp9381 = getelementptr inbounds float* %tmp9380, i64 1
- %tmp9382 = getelementptr inbounds float* %tmp9381, i64 1
- %tmp9383 = getelementptr inbounds float* %tmp9382, i64 1
- %tmp9384 = getelementptr inbounds float* %tmp9383, i64 1
- %tmp9385 = getelementptr inbounds float* %tmp9384, i64 1
- %tmp9386 = getelementptr inbounds float* %tmp9385, i64 1
- %tmp9387 = getelementptr inbounds float* %tmp9386, i64 1
- %tmp9388 = getelementptr inbounds float* %tmp9387, i64 1
- %tmp9389 = getelementptr inbounds float* %tmp9388, i64 1
- %tmp9390 = getelementptr inbounds float* %tmp9389, i64 1
- %tmp9391 = getelementptr inbounds float* %tmp9390, i64 1
- %tmp9392 = getelementptr inbounds float* %tmp9391, i64 1
- %tmp9393 = getelementptr inbounds float* %tmp9392, i64 1
- %tmp9394 = getelementptr inbounds float* %tmp9393, i64 1
- %tmp9395 = getelementptr inbounds float* %tmp9394, i64 1
- %tmp9396 = getelementptr inbounds float* %tmp9395, i64 1
- %tmp9397 = getelementptr inbounds float* %tmp9396, i64 1
- %tmp9398 = getelementptr inbounds float* %tmp9397, i64 1
- %tmp9399 = getelementptr inbounds float* %tmp9398, i64 1
- %tmp9400 = getelementptr inbounds float* %tmp9399, i64 1
- %tmp9401 = getelementptr inbounds float* %tmp9400, i64 1
- %tmp9402 = getelementptr inbounds float* %tmp9401, i64 1
- %tmp9403 = getelementptr inbounds float* %tmp9402, i64 1
- %tmp9404 = getelementptr inbounds float* %tmp9403, i64 1
- %tmp9405 = getelementptr inbounds float* %tmp9404, i64 1
- %tmp9406 = getelementptr inbounds float* %tmp9405, i64 1
- %tmp9407 = getelementptr inbounds float* %tmp9406, i64 1
- %tmp9408 = getelementptr inbounds float* %tmp9407, i64 1
- %tmp9409 = getelementptr inbounds float* %tmp9408, i64 1
- %tmp9410 = getelementptr inbounds float* %tmp9409, i64 1
- %tmp9411 = getelementptr inbounds float* %tmp9410, i64 1
- %tmp9412 = getelementptr inbounds float* %tmp9411, i64 1
- %tmp9413 = getelementptr inbounds float* %tmp9412, i64 1
- %tmp9414 = getelementptr inbounds float* %tmp9413, i64 1
- %tmp9415 = getelementptr inbounds float* %tmp9414, i64 1
- %tmp9416 = getelementptr inbounds float* %tmp9415, i64 1
- %tmp9417 = getelementptr inbounds float* %tmp9416, i64 1
- %tmp9418 = getelementptr inbounds float* %tmp9417, i64 1
- %tmp9419 = getelementptr inbounds float* %tmp9418, i64 1
- %tmp9420 = getelementptr inbounds float* %tmp9419, i64 1
- %tmp9421 = getelementptr inbounds float* %tmp9420, i64 1
- %tmp9422 = getelementptr inbounds float* %tmp9421, i64 1
- %tmp9423 = getelementptr inbounds float* %tmp9422, i64 1
- %tmp9424 = getelementptr inbounds float* %tmp9423, i64 1
- %tmp9425 = getelementptr inbounds float* %tmp9424, i64 1
- %tmp9426 = getelementptr inbounds float* %tmp9425, i64 1
- %tmp9427 = getelementptr inbounds float* %tmp9426, i64 1
- %tmp9428 = getelementptr inbounds float* %tmp9427, i64 1
- %tmp9429 = getelementptr inbounds float* %tmp9428, i64 1
- %tmp9430 = getelementptr inbounds float* %tmp9429, i64 1
- %tmp9431 = getelementptr inbounds float* %tmp9430, i64 1
- %tmp9432 = getelementptr inbounds float* %tmp9431, i64 1
- %tmp9433 = getelementptr inbounds float* %tmp9432, i64 1
- %tmp9434 = getelementptr inbounds float* %tmp9433, i64 1
- %tmp9435 = getelementptr inbounds float* %tmp9434, i64 1
- %tmp9436 = getelementptr inbounds float* %tmp9435, i64 1
- %tmp9437 = getelementptr inbounds float* %tmp9436, i64 1
- %tmp9438 = getelementptr inbounds float* %tmp9437, i64 1
- %tmp9439 = getelementptr inbounds float* %tmp9438, i64 1
- %tmp9440 = getelementptr inbounds float* %tmp9439, i64 1
- %tmp9441 = getelementptr inbounds float* %tmp9440, i64 1
- %tmp9442 = getelementptr inbounds float* %tmp9441, i64 1
- %tmp9443 = getelementptr inbounds float* %tmp9442, i64 1
- %tmp9444 = getelementptr inbounds float* %tmp9443, i64 1
- %tmp9445 = getelementptr inbounds float* %tmp9444, i64 1
- %tmp9446 = getelementptr inbounds float* %tmp9445, i64 1
- %tmp9447 = getelementptr inbounds float* %tmp9446, i64 1
- %tmp9448 = getelementptr inbounds float* %tmp9447, i64 1
- %tmp9449 = getelementptr inbounds float* %tmp9448, i64 1
- %tmp9450 = getelementptr inbounds float* %tmp9449, i64 1
- %tmp9451 = getelementptr inbounds float* %tmp9450, i64 1
- %tmp9452 = getelementptr inbounds float* %tmp9451, i64 1
- %tmp9453 = getelementptr inbounds float* %tmp9452, i64 1
- %tmp9454 = getelementptr inbounds float* %tmp9453, i64 1
- %tmp9455 = getelementptr inbounds float* %tmp9454, i64 1
- %tmp9456 = getelementptr inbounds float* %tmp9455, i64 1
- %tmp9457 = getelementptr inbounds float* %tmp9456, i64 1
- %tmp9458 = getelementptr inbounds float* %tmp9457, i64 1
- %tmp9459 = getelementptr inbounds float* %tmp9458, i64 1
- %tmp9460 = getelementptr inbounds float* %tmp9459, i64 1
- %tmp9461 = getelementptr inbounds float* %tmp9460, i64 1
- %tmp9462 = getelementptr inbounds float* %tmp9461, i64 1
- %tmp9463 = getelementptr inbounds float* %tmp9462, i64 1
- %tmp9464 = getelementptr inbounds float* %tmp9463, i64 1
- %tmp9465 = getelementptr inbounds float* %tmp9464, i64 1
- %tmp9466 = getelementptr inbounds float* %tmp9465, i64 1
- %tmp9467 = getelementptr inbounds float* %tmp9466, i64 1
- %tmp9468 = getelementptr inbounds float* %tmp9467, i64 1
- %tmp9469 = getelementptr inbounds float* %tmp9468, i64 1
- %tmp9470 = getelementptr inbounds float* %tmp9469, i64 1
- %tmp9471 = getelementptr inbounds float* %tmp9470, i64 1
- %tmp9472 = getelementptr inbounds float* %tmp9471, i64 1
- %tmp9473 = getelementptr inbounds float* %tmp9472, i64 1
- %tmp9474 = getelementptr inbounds float* %tmp9473, i64 1
- %tmp9475 = getelementptr inbounds float* %tmp9474, i64 1
- %tmp9476 = getelementptr inbounds float* %tmp9475, i64 1
- %tmp9477 = getelementptr inbounds float* %tmp9476, i64 1
- %tmp9478 = getelementptr inbounds float* %tmp9477, i64 1
- %tmp9479 = getelementptr inbounds float* %tmp9478, i64 1
- %tmp9480 = getelementptr inbounds float* %tmp9479, i64 1
- %tmp9481 = getelementptr inbounds float* %tmp9480, i64 1
- %tmp9482 = getelementptr inbounds float* %tmp9481, i64 1
- %tmp9483 = getelementptr inbounds float* %tmp9482, i64 1
- %tmp9484 = getelementptr inbounds float* %tmp9483, i64 1
- %tmp9485 = getelementptr inbounds float* %tmp9484, i64 1
- %tmp9486 = getelementptr inbounds float* %tmp9485, i64 1
- %tmp9487 = getelementptr inbounds float* %tmp9486, i64 1
- %tmp9488 = getelementptr inbounds float* %tmp9487, i64 1
- %tmp9489 = getelementptr inbounds float* %tmp9488, i64 1
- %tmp9490 = getelementptr inbounds float* %tmp9489, i64 1
- %tmp9491 = getelementptr inbounds float* %tmp9490, i64 1
- %tmp9492 = getelementptr inbounds float* %tmp9491, i64 1
- %tmp9493 = getelementptr inbounds float* %tmp9492, i64 1
- %tmp9494 = getelementptr inbounds float* %tmp9493, i64 1
- %tmp9495 = getelementptr inbounds float* %tmp9494, i64 1
- %tmp9496 = getelementptr inbounds float* %tmp9495, i64 1
- %tmp9497 = getelementptr inbounds float* %tmp9496, i64 1
- %tmp9498 = getelementptr inbounds float* %tmp9497, i64 1
- %tmp9499 = getelementptr inbounds float* %tmp9498, i64 1
- %tmp9500 = getelementptr inbounds float* %tmp9499, i64 1
- %tmp9501 = getelementptr inbounds float* %tmp9500, i64 1
- %tmp9502 = getelementptr inbounds float* %tmp9501, i64 1
- %tmp9503 = getelementptr inbounds float* %tmp9502, i64 1
- %tmp9504 = getelementptr inbounds float* %tmp9503, i64 1
- %tmp9505 = getelementptr inbounds float* %tmp9504, i64 1
- %tmp9506 = getelementptr inbounds float* %tmp9505, i64 1
- %tmp9507 = getelementptr inbounds float* %tmp9506, i64 1
- %tmp9508 = getelementptr inbounds float* %tmp9507, i64 1
- %tmp9509 = getelementptr inbounds float* %tmp9508, i64 1
- %tmp9510 = getelementptr inbounds float* %tmp9509, i64 1
- %tmp9511 = getelementptr inbounds float* %tmp9510, i64 1
- %tmp9512 = getelementptr inbounds float* %tmp9511, i64 1
- %tmp9513 = getelementptr inbounds float* %tmp9512, i64 1
- %tmp9514 = getelementptr inbounds float* %tmp9513, i64 1
- %tmp9515 = getelementptr inbounds float* %tmp9514, i64 1
- %tmp9516 = getelementptr inbounds float* %tmp9515, i64 1
- %tmp9517 = getelementptr inbounds float* %tmp9516, i64 1
- %tmp9518 = getelementptr inbounds float* %tmp9517, i64 1
- %tmp9519 = getelementptr inbounds float* %tmp9518, i64 1
- %tmp9520 = getelementptr inbounds float* %tmp9519, i64 1
- %tmp9521 = getelementptr inbounds float* %tmp9520, i64 1
- %tmp9522 = getelementptr inbounds float* %tmp9521, i64 1
- %tmp9523 = getelementptr inbounds float* %tmp9522, i64 1
- %tmp9524 = getelementptr inbounds float* %tmp9523, i64 1
- %tmp9525 = getelementptr inbounds float* %tmp9524, i64 1
- %tmp9526 = getelementptr inbounds float* %tmp9525, i64 1
- %tmp9527 = getelementptr inbounds float* %tmp9526, i64 1
- %tmp9528 = getelementptr inbounds float* %tmp9527, i64 1
- %tmp9529 = getelementptr inbounds float* %tmp9528, i64 1
- %tmp9530 = getelementptr inbounds float* %tmp9529, i64 1
- %tmp9531 = getelementptr inbounds float* %tmp9530, i64 1
- %tmp9532 = getelementptr inbounds float* %tmp9531, i64 1
- %tmp9533 = getelementptr inbounds float* %tmp9532, i64 1
- %tmp9534 = getelementptr inbounds float* %tmp9533, i64 1
- %tmp9535 = getelementptr inbounds float* %tmp9534, i64 1
- %tmp9536 = getelementptr inbounds float* %tmp9535, i64 1
- %tmp9537 = getelementptr inbounds float* %tmp9536, i64 1
- %tmp9538 = getelementptr inbounds float* %tmp9537, i64 1
- %tmp9539 = getelementptr inbounds float* %tmp9538, i64 1
- %tmp9540 = getelementptr inbounds float* %tmp9539, i64 1
- %tmp9541 = getelementptr inbounds float* %tmp9540, i64 1
- %tmp9542 = getelementptr inbounds float* %tmp9541, i64 1
- %tmp9543 = getelementptr inbounds float* %tmp9542, i64 1
- %tmp9544 = getelementptr inbounds float* %tmp9543, i64 1
- %tmp9545 = getelementptr inbounds float* %tmp9544, i64 1
- %tmp9546 = getelementptr inbounds float* %tmp9545, i64 1
- %tmp9547 = getelementptr inbounds float* %tmp9546, i64 1
- %tmp9548 = getelementptr inbounds float* %tmp9547, i64 1
- %tmp9549 = getelementptr inbounds float* %tmp9548, i64 1
- %tmp9550 = getelementptr inbounds float* %tmp9549, i64 1
- %tmp9551 = getelementptr inbounds float* %tmp9550, i64 1
- %tmp9552 = getelementptr inbounds float* %tmp9551, i64 1
- %tmp9553 = getelementptr inbounds float* %tmp9552, i64 1
- %tmp9554 = getelementptr inbounds float* %tmp9553, i64 1
- %tmp9555 = getelementptr inbounds float* %tmp9554, i64 1
- %tmp9556 = getelementptr inbounds float* %tmp9555, i64 1
- %tmp9557 = getelementptr inbounds float* %tmp9556, i64 1
- %tmp9558 = getelementptr inbounds float* %tmp9557, i64 1
- %tmp9559 = getelementptr inbounds float* %tmp9558, i64 1
- %tmp9560 = getelementptr inbounds float* %tmp9559, i64 1
- %tmp9561 = getelementptr inbounds float* %tmp9560, i64 1
- %tmp9562 = getelementptr inbounds float* %tmp9561, i64 1
- %tmp9563 = getelementptr inbounds float* %tmp9562, i64 1
- %tmp9564 = getelementptr inbounds float* %tmp9563, i64 1
- %tmp9565 = getelementptr inbounds float* %tmp9564, i64 1
- %tmp9566 = getelementptr inbounds float* %tmp9565, i64 1
- %tmp9567 = getelementptr inbounds float* %tmp9566, i64 1
- %tmp9568 = getelementptr inbounds float* %tmp9567, i64 1
- %tmp9569 = getelementptr inbounds float* %tmp9568, i64 1
- %tmp9570 = getelementptr inbounds float* %tmp9569, i64 1
- %tmp9571 = getelementptr inbounds float* %tmp9570, i64 1
- %tmp9572 = getelementptr inbounds float* %tmp9571, i64 1
- %tmp9573 = getelementptr inbounds float* %tmp9572, i64 1
- %tmp9574 = getelementptr inbounds float* %tmp9573, i64 1
- %tmp9575 = getelementptr inbounds float* %tmp9574, i64 1
- %tmp9576 = getelementptr inbounds float* %tmp9575, i64 1
- %tmp9577 = getelementptr inbounds float* %tmp9576, i64 1
- %tmp9578 = getelementptr inbounds float* %tmp9577, i64 1
- %tmp9579 = getelementptr inbounds float* %tmp9578, i64 1
- %tmp9580 = getelementptr inbounds float* %tmp9579, i64 1
- %tmp9581 = getelementptr inbounds float* %tmp9580, i64 1
- %tmp9582 = getelementptr inbounds float* %tmp9581, i64 1
- %tmp9583 = getelementptr inbounds float* %tmp9582, i64 1
- %tmp9584 = getelementptr inbounds float* %tmp9583, i64 1
- %tmp9585 = getelementptr inbounds float* %tmp9584, i64 1
- %tmp9586 = getelementptr inbounds float* %tmp9585, i64 1
- %tmp9587 = getelementptr inbounds float* %tmp9586, i64 1
- %tmp9588 = getelementptr inbounds float* %tmp9587, i64 1
- %tmp9589 = getelementptr inbounds float* %tmp9588, i64 1
- %tmp9590 = getelementptr inbounds float* %tmp9589, i64 1
- %tmp9591 = getelementptr inbounds float* %tmp9590, i64 1
- %tmp9592 = getelementptr inbounds float* %tmp9591, i64 1
- %tmp9593 = getelementptr inbounds float* %tmp9592, i64 1
- %tmp9594 = getelementptr inbounds float* %tmp9593, i64 1
- %tmp9595 = getelementptr inbounds float* %tmp9594, i64 1
- %tmp9596 = getelementptr inbounds float* %tmp9595, i64 1
- %tmp9597 = getelementptr inbounds float* %tmp9596, i64 1
- %tmp9598 = getelementptr inbounds float* %tmp9597, i64 1
- %tmp9599 = getelementptr inbounds float* %tmp9598, i64 1
- %tmp9600 = getelementptr inbounds float* %tmp9599, i64 1
- %tmp9601 = getelementptr inbounds float* %tmp9600, i64 1
- %tmp9602 = getelementptr inbounds float* %tmp9601, i64 1
- %tmp9603 = getelementptr inbounds float* %tmp9602, i64 1
- %tmp9604 = getelementptr inbounds float* %tmp9603, i64 1
- %tmp9605 = getelementptr inbounds float* %tmp9604, i64 1
- %tmp9606 = getelementptr inbounds float* %tmp9605, i64 1
- %tmp9607 = getelementptr inbounds float* %tmp9606, i64 1
- %tmp9608 = getelementptr inbounds float* %tmp9607, i64 1
- %tmp9609 = getelementptr inbounds float* %tmp9608, i64 1
- %tmp9610 = getelementptr inbounds float* %tmp9609, i64 1
- %tmp9611 = getelementptr inbounds float* %tmp9610, i64 1
- %tmp9612 = getelementptr inbounds float* %tmp9611, i64 1
- %tmp9613 = getelementptr inbounds float* %tmp9612, i64 1
- %tmp9614 = getelementptr inbounds float* %tmp9613, i64 1
- %tmp9615 = getelementptr inbounds float* %tmp9614, i64 1
- %tmp9616 = getelementptr inbounds float* %tmp9615, i64 1
- %tmp9617 = getelementptr inbounds float* %tmp9616, i64 1
- %tmp9618 = getelementptr inbounds float* %tmp9617, i64 1
- %tmp9619 = getelementptr inbounds float* %tmp9618, i64 1
- %tmp9620 = getelementptr inbounds float* %tmp9619, i64 1
- %tmp9621 = getelementptr inbounds float* %tmp9620, i64 1
- %tmp9622 = getelementptr inbounds float* %tmp9621, i64 1
- %tmp9623 = getelementptr inbounds float* %tmp9622, i64 1
- %tmp9624 = getelementptr inbounds float* %tmp9623, i64 1
- %tmp9625 = getelementptr inbounds float* %tmp9624, i64 1
- %tmp9626 = getelementptr inbounds float* %tmp9625, i64 1
- %tmp9627 = getelementptr inbounds float* %tmp9626, i64 1
- %tmp9628 = getelementptr inbounds float* %tmp9627, i64 1
- %tmp9629 = getelementptr inbounds float* %tmp9628, i64 1
- %tmp9630 = getelementptr inbounds float* %tmp9629, i64 1
- %tmp9631 = getelementptr inbounds float* %tmp9630, i64 1
- %tmp9632 = getelementptr inbounds float* %tmp9631, i64 1
- %tmp9633 = getelementptr inbounds float* %tmp9632, i64 1
- %tmp9634 = getelementptr inbounds float* %tmp9633, i64 1
- %tmp9635 = getelementptr inbounds float* %tmp9634, i64 1
- %tmp9636 = getelementptr inbounds float* %tmp9635, i64 1
- %tmp9637 = getelementptr inbounds float* %tmp9636, i64 1
- %tmp9638 = getelementptr inbounds float* %tmp9637, i64 1
- %tmp9639 = getelementptr inbounds float* %tmp9638, i64 1
- %tmp9640 = getelementptr inbounds float* %tmp9639, i64 1
- %tmp9641 = getelementptr inbounds float* %tmp9640, i64 1
- %tmp9642 = getelementptr inbounds float* %tmp9641, i64 1
- %tmp9643 = getelementptr inbounds float* %tmp9642, i64 1
- %tmp9644 = getelementptr inbounds float* %tmp9643, i64 1
- %tmp9645 = getelementptr inbounds float* %tmp9644, i64 1
- %tmp9646 = getelementptr inbounds float* %tmp9645, i64 1
- %tmp9647 = getelementptr inbounds float* %tmp9646, i64 1
- %tmp9648 = getelementptr inbounds float* %tmp9647, i64 1
- %tmp9649 = getelementptr inbounds float* %tmp9648, i64 1
- %tmp9650 = getelementptr inbounds float* %tmp9649, i64 1
- %tmp9651 = getelementptr inbounds float* %tmp9650, i64 1
- %tmp9652 = getelementptr inbounds float* %tmp9651, i64 1
- %tmp9653 = getelementptr inbounds float* %tmp9652, i64 1
- %tmp9654 = getelementptr inbounds float* %tmp9653, i64 1
- %tmp9655 = getelementptr inbounds float* %tmp9654, i64 1
- %tmp9656 = getelementptr inbounds float* %tmp9655, i64 1
- %tmp9657 = getelementptr inbounds float* %tmp9656, i64 1
- %tmp9658 = getelementptr inbounds float* %tmp9657, i64 1
- %tmp9659 = getelementptr inbounds float* %tmp9658, i64 1
- %tmp9660 = getelementptr inbounds float* %tmp9659, i64 1
- %tmp9661 = getelementptr inbounds float* %tmp9660, i64 1
- %tmp9662 = getelementptr inbounds float* %tmp9661, i64 1
- %tmp9663 = getelementptr inbounds float* %tmp9662, i64 1
- %tmp9664 = getelementptr inbounds float* %tmp9663, i64 1
- %tmp9665 = getelementptr inbounds float* %tmp9664, i64 1
- %tmp9666 = getelementptr inbounds float* %tmp9665, i64 1
- %tmp9667 = getelementptr inbounds float* %tmp9666, i64 1
- %tmp9668 = getelementptr inbounds float* %tmp9667, i64 1
- %tmp9669 = getelementptr inbounds float* %tmp9668, i64 1
- %tmp9670 = getelementptr inbounds float* %tmp9669, i64 1
- %tmp9671 = getelementptr inbounds float* %tmp9670, i64 1
- %tmp9672 = getelementptr inbounds float* %tmp9671, i64 1
- %tmp9673 = getelementptr inbounds float* %tmp9672, i64 1
- %tmp9674 = getelementptr inbounds float* %tmp9673, i64 1
- %tmp9675 = getelementptr inbounds float* %tmp9674, i64 1
- %tmp9676 = getelementptr inbounds float* %tmp9675, i64 1
- %tmp9677 = getelementptr inbounds float* %tmp9676, i64 1
- %tmp9678 = getelementptr inbounds float* %tmp9677, i64 1
- %tmp9679 = getelementptr inbounds float* %tmp9678, i64 1
- %tmp9680 = getelementptr inbounds float* %tmp9679, i64 1
- %tmp9681 = getelementptr inbounds float* %tmp9680, i64 1
- %tmp9682 = getelementptr inbounds float* %tmp9681, i64 1
- %tmp9683 = getelementptr inbounds float* %tmp9682, i64 1
- %tmp9684 = getelementptr inbounds float* %tmp9683, i64 1
- %tmp9685 = getelementptr inbounds float* %tmp9684, i64 1
- %tmp9686 = getelementptr inbounds float* %tmp9685, i64 1
- %tmp9687 = getelementptr inbounds float* %tmp9686, i64 1
- %tmp9688 = getelementptr inbounds float* %tmp9687, i64 1
- %tmp9689 = getelementptr inbounds float* %tmp9688, i64 1
- %tmp9690 = getelementptr inbounds float* %tmp9689, i64 1
- %tmp9691 = getelementptr inbounds float* %tmp9690, i64 1
- %tmp9692 = getelementptr inbounds float* %tmp9691, i64 1
- %tmp9693 = getelementptr inbounds float* %tmp9692, i64 1
- %tmp9694 = getelementptr inbounds float* %tmp9693, i64 1
- %tmp9695 = getelementptr inbounds float* %tmp9694, i64 1
- %tmp9696 = getelementptr inbounds float* %tmp9695, i64 1
- %tmp9697 = getelementptr inbounds float* %tmp9696, i64 1
- %tmp9698 = getelementptr inbounds float* %tmp9697, i64 1
- %tmp9699 = getelementptr inbounds float* %tmp9698, i64 1
- %tmp9700 = getelementptr inbounds float* %tmp9699, i64 1
- %tmp9701 = getelementptr inbounds float* %tmp9700, i64 1
- %tmp9702 = getelementptr inbounds float* %tmp9701, i64 1
- %tmp9703 = getelementptr inbounds float* %tmp9702, i64 1
- %tmp9704 = getelementptr inbounds float* %tmp9703, i64 1
- %tmp9705 = getelementptr inbounds float* %tmp9704, i64 1
- %tmp9706 = getelementptr inbounds float* %tmp9705, i64 1
- %tmp9707 = getelementptr inbounds float* %tmp9706, i64 1
- %tmp9708 = getelementptr inbounds float* %tmp9707, i64 1
- %tmp9709 = getelementptr inbounds float* %tmp9708, i64 1
- %tmp9710 = getelementptr inbounds float* %tmp9709, i64 1
- %tmp9711 = getelementptr inbounds float* %tmp9710, i64 1
- %tmp9712 = getelementptr inbounds float* %tmp9711, i64 1
- %tmp9713 = getelementptr inbounds float* %tmp9712, i64 1
- %tmp9714 = getelementptr inbounds float* %tmp9713, i64 1
- %tmp9715 = getelementptr inbounds float* %tmp9714, i64 1
- %tmp9716 = getelementptr inbounds float* %tmp9715, i64 1
- %tmp9717 = getelementptr inbounds float* %tmp9716, i64 1
- %tmp9718 = getelementptr inbounds float* %tmp9717, i64 1
- %tmp9719 = getelementptr inbounds float* %tmp9718, i64 1
- %tmp9720 = getelementptr inbounds float* %tmp9719, i64 1
- %tmp9721 = getelementptr inbounds float* %tmp9720, i64 1
- %tmp9722 = getelementptr inbounds float* %tmp9721, i64 1
- %tmp9723 = getelementptr inbounds float* %tmp9722, i64 1
- %tmp9724 = getelementptr inbounds float* %tmp9723, i64 1
- %tmp9725 = getelementptr inbounds float* %tmp9724, i64 1
- %tmp9726 = getelementptr inbounds float* %tmp9725, i64 1
- %tmp9727 = getelementptr inbounds float* %tmp9726, i64 1
- %tmp9728 = getelementptr inbounds float* %tmp9727, i64 1
- %tmp9729 = getelementptr inbounds float* %tmp9728, i64 1
- %tmp9730 = getelementptr inbounds float* %tmp9729, i64 1
- %tmp9731 = getelementptr inbounds float* %tmp9730, i64 1
- %tmp9732 = getelementptr inbounds float* %tmp9731, i64 1
- %tmp9733 = getelementptr inbounds float* %tmp9732, i64 1
- %tmp9734 = getelementptr inbounds float* %tmp9733, i64 1
- %tmp9735 = getelementptr inbounds float* %tmp9734, i64 1
- %tmp9736 = getelementptr inbounds float* %tmp9735, i64 1
- %tmp9737 = getelementptr inbounds float* %tmp9736, i64 1
- %tmp9738 = getelementptr inbounds float* %tmp9737, i64 1
- %tmp9739 = getelementptr inbounds float* %tmp9738, i64 1
- %tmp9740 = getelementptr inbounds float* %tmp9739, i64 1
- %tmp9741 = getelementptr inbounds float* %tmp9740, i64 1
- %tmp9742 = getelementptr inbounds float* %tmp9741, i64 1
- %tmp9743 = getelementptr inbounds float* %tmp9742, i64 1
- %tmp9744 = getelementptr inbounds float* %tmp9743, i64 1
- %tmp9745 = getelementptr inbounds float* %tmp9744, i64 1
- %tmp9746 = getelementptr inbounds float* %tmp9745, i64 1
- %tmp9747 = getelementptr inbounds float* %tmp9746, i64 1
- %tmp9748 = getelementptr inbounds float* %tmp9747, i64 1
- %tmp9749 = getelementptr inbounds float* %tmp9748, i64 1
- %tmp9750 = getelementptr inbounds float* %tmp9749, i64 1
- %tmp9751 = getelementptr inbounds float* %tmp9750, i64 1
- %tmp9752 = getelementptr inbounds float* %tmp9751, i64 1
- %tmp9753 = getelementptr inbounds float* %tmp9752, i64 1
- %tmp9754 = getelementptr inbounds float* %tmp9753, i64 1
- %tmp9755 = getelementptr inbounds float* %tmp9754, i64 1
- %tmp9756 = getelementptr inbounds float* %tmp9755, i64 1
- %tmp9757 = getelementptr inbounds float* %tmp9756, i64 1
- %tmp9758 = getelementptr inbounds float* %tmp9757, i64 1
- %tmp9759 = getelementptr inbounds float* %tmp9758, i64 1
- %tmp9760 = getelementptr inbounds float* %tmp9759, i64 1
- %tmp9761 = getelementptr inbounds float* %tmp9760, i64 1
- %tmp9762 = getelementptr inbounds float* %tmp9761, i64 1
- %tmp9763 = getelementptr inbounds float* %tmp9762, i64 1
- %tmp9764 = getelementptr inbounds float* %tmp9763, i64 1
- %tmp9765 = getelementptr inbounds float* %tmp9764, i64 1
- %tmp9766 = getelementptr inbounds float* %tmp9765, i64 1
- %tmp9767 = getelementptr inbounds float* %tmp9766, i64 1
- %tmp9768 = getelementptr inbounds float* %tmp9767, i64 1
- %tmp9769 = getelementptr inbounds float* %tmp9768, i64 1
- %tmp9770 = getelementptr inbounds float* %tmp9769, i64 1
- %tmp9771 = getelementptr inbounds float* %tmp9770, i64 1
- %tmp9772 = getelementptr inbounds float* %tmp9771, i64 1
- %tmp9773 = getelementptr inbounds float* %tmp9772, i64 1
- %tmp9774 = getelementptr inbounds float* %tmp9773, i64 1
- %tmp9775 = getelementptr inbounds float* %tmp9774, i64 1
- %tmp9776 = getelementptr inbounds float* %tmp9775, i64 1
- %tmp9777 = getelementptr inbounds float* %tmp9776, i64 1
- %tmp9778 = getelementptr inbounds float* %tmp9777, i64 1
- %tmp9779 = getelementptr inbounds float* %tmp9778, i64 1
- %tmp9780 = getelementptr inbounds float* %tmp9779, i64 1
- %tmp9781 = getelementptr inbounds float* %tmp9780, i64 1
- %tmp9782 = getelementptr inbounds float* %tmp9781, i64 1
- %tmp9783 = getelementptr inbounds float* %tmp9782, i64 1
- %tmp9784 = getelementptr inbounds float* %tmp9783, i64 1
- %tmp9785 = getelementptr inbounds float* %tmp9784, i64 1
- %tmp9786 = getelementptr inbounds float* %tmp9785, i64 1
- %tmp9787 = getelementptr inbounds float* %tmp9786, i64 1
- %tmp9788 = getelementptr inbounds float* %tmp9787, i64 1
- %tmp9789 = getelementptr inbounds float* %tmp9788, i64 1
- %tmp9790 = getelementptr inbounds float* %tmp9789, i64 1
- %tmp9791 = getelementptr inbounds float* %tmp9790, i64 1
- %tmp9792 = getelementptr inbounds float* %tmp9791, i64 1
- %tmp9793 = getelementptr inbounds float* %tmp9792, i64 1
- %tmp9794 = getelementptr inbounds float* %tmp9793, i64 1
- %tmp9795 = getelementptr inbounds float* %tmp9794, i64 1
- %tmp9796 = getelementptr inbounds float* %tmp9795, i64 1
- %tmp9797 = getelementptr inbounds float* %tmp9796, i64 1
- %tmp9798 = getelementptr inbounds float* %tmp9797, i64 1
- %tmp9799 = getelementptr inbounds float* %tmp9798, i64 1
- %tmp9800 = getelementptr inbounds float* %tmp9799, i64 1
- %tmp9801 = getelementptr inbounds float* %tmp9800, i64 1
- %tmp9802 = getelementptr inbounds float* %tmp9801, i64 1
- %tmp9803 = getelementptr inbounds float* %tmp9802, i64 1
- %tmp9804 = getelementptr inbounds float* %tmp9803, i64 1
- %tmp9805 = getelementptr inbounds float* %tmp9804, i64 1
- %tmp9806 = getelementptr inbounds float* %tmp9805, i64 1
- %tmp9807 = getelementptr inbounds float* %tmp9806, i64 1
- %tmp9808 = getelementptr inbounds float* %tmp9807, i64 1
- %tmp9809 = getelementptr inbounds float* %tmp9808, i64 1
- %tmp9810 = getelementptr inbounds float* %tmp9809, i64 1
- %tmp9811 = getelementptr inbounds float* %tmp9810, i64 1
- %tmp9812 = getelementptr inbounds float* %tmp9811, i64 1
- %tmp9813 = getelementptr inbounds float* %tmp9812, i64 1
- %tmp9814 = getelementptr inbounds float* %tmp9813, i64 1
- %tmp9815 = getelementptr inbounds float* %tmp9814, i64 1
- %tmp9816 = getelementptr inbounds float* %tmp9815, i64 1
- %tmp9817 = getelementptr inbounds float* %tmp9816, i64 1
- %tmp9818 = getelementptr inbounds float* %tmp9817, i64 1
- %tmp9819 = getelementptr inbounds float* %tmp9818, i64 1
- %tmp9820 = getelementptr inbounds float* %tmp9819, i64 1
- %tmp9821 = getelementptr inbounds float* %tmp9820, i64 1
- %tmp9822 = getelementptr inbounds float* %tmp9821, i64 1
- %tmp9823 = getelementptr inbounds float* %tmp9822, i64 1
- %tmp9824 = getelementptr inbounds float* %tmp9823, i64 1
- %tmp9825 = getelementptr inbounds float* %tmp9824, i64 1
- %tmp9826 = getelementptr inbounds float* %tmp9825, i64 1
- %tmp9827 = getelementptr inbounds float* %tmp9826, i64 1
- %tmp9828 = getelementptr inbounds float* %tmp9827, i64 1
- %tmp9829 = getelementptr inbounds float* %tmp9828, i64 1
- %tmp9830 = getelementptr inbounds float* %tmp9829, i64 1
- %tmp9831 = getelementptr inbounds float* %tmp9830, i64 1
- %tmp9832 = getelementptr inbounds float* %tmp9831, i64 1
- %tmp9833 = getelementptr inbounds float* %tmp9832, i64 1
- %tmp9834 = getelementptr inbounds float* %tmp9833, i64 1
- %tmp9835 = getelementptr inbounds float* %tmp9834, i64 1
- %tmp9836 = getelementptr inbounds float* %tmp9835, i64 1
- %tmp9837 = getelementptr inbounds float* %tmp9836, i64 1
- %tmp9838 = getelementptr inbounds float* %tmp9837, i64 1
- %tmp9839 = getelementptr inbounds float* %tmp9838, i64 1
- %tmp9840 = getelementptr inbounds float* %tmp9839, i64 1
- %tmp9841 = getelementptr inbounds float* %tmp9840, i64 1
- %tmp9842 = getelementptr inbounds float* %tmp9841, i64 1
- %tmp9843 = getelementptr inbounds float* %tmp9842, i64 1
- %tmp9844 = getelementptr inbounds float* %tmp9843, i64 1
- %tmp9845 = getelementptr inbounds float* %tmp9844, i64 1
- %tmp9846 = getelementptr inbounds float* %tmp9845, i64 1
- %tmp9847 = getelementptr inbounds float* %tmp9846, i64 1
- %tmp9848 = getelementptr inbounds float* %tmp9847, i64 1
- %tmp9849 = getelementptr inbounds float* %tmp9848, i64 1
- %tmp9850 = getelementptr inbounds float* %tmp9849, i64 1
- %tmp9851 = getelementptr inbounds float* %tmp9850, i64 1
- %tmp9852 = getelementptr inbounds float* %tmp9851, i64 1
- %tmp9853 = getelementptr inbounds float* %tmp9852, i64 1
- %tmp9854 = getelementptr inbounds float* %tmp9853, i64 1
- %tmp9855 = getelementptr inbounds float* %tmp9854, i64 1
- %tmp9856 = getelementptr inbounds float* %tmp9855, i64 1
- %tmp9857 = getelementptr inbounds float* %tmp9856, i64 1
- %tmp9858 = getelementptr inbounds float* %tmp9857, i64 1
- %tmp9859 = getelementptr inbounds float* %tmp9858, i64 1
- %tmp9860 = getelementptr inbounds float* %tmp9859, i64 1
- %tmp9861 = getelementptr inbounds float* %tmp9860, i64 1
- %tmp9862 = getelementptr inbounds float* %tmp9861, i64 1
- %tmp9863 = getelementptr inbounds float* %tmp9862, i64 1
- %tmp9864 = getelementptr inbounds float* %tmp9863, i64 1
- %tmp9865 = getelementptr inbounds float* %tmp9864, i64 1
- %tmp9866 = getelementptr inbounds float* %tmp9865, i64 1
- %tmp9867 = getelementptr inbounds float* %tmp9866, i64 1
- %tmp9868 = getelementptr inbounds float* %tmp9867, i64 1
- %tmp9869 = getelementptr inbounds float* %tmp9868, i64 1
- %tmp9870 = getelementptr inbounds float* %tmp9869, i64 1
- %tmp9871 = getelementptr inbounds float* %tmp9870, i64 1
- %tmp9872 = getelementptr inbounds float* %tmp9871, i64 1
- %tmp9873 = getelementptr inbounds float* %tmp9872, i64 1
- %tmp9874 = getelementptr inbounds float* %tmp9873, i64 1
- %tmp9875 = getelementptr inbounds float* %tmp9874, i64 1
- %tmp9876 = getelementptr inbounds float* %tmp9875, i64 1
- %tmp9877 = getelementptr inbounds float* %tmp9876, i64 1
- %tmp9878 = getelementptr inbounds float* %tmp9877, i64 1
- %tmp9879 = getelementptr inbounds float* %tmp9878, i64 1
- %tmp9880 = getelementptr inbounds float* %tmp9879, i64 1
- %tmp9881 = getelementptr inbounds float* %tmp9880, i64 1
- %tmp9882 = getelementptr inbounds float* %tmp9881, i64 1
- %tmp9883 = getelementptr inbounds float* %tmp9882, i64 1
- %tmp9884 = getelementptr inbounds float* %tmp9883, i64 1
- %tmp9885 = getelementptr inbounds float* %tmp9884, i64 1
- %tmp9886 = getelementptr inbounds float* %tmp9885, i64 1
- %tmp9887 = getelementptr inbounds float* %tmp9886, i64 1
- %tmp9888 = getelementptr inbounds float* %tmp9887, i64 1
- %tmp9889 = getelementptr inbounds float* %tmp9888, i64 1
- %tmp9890 = getelementptr inbounds float* %tmp9889, i64 1
- %tmp9891 = getelementptr inbounds float* %tmp9890, i64 1
- %tmp9892 = getelementptr inbounds float* %tmp9891, i64 1
- %tmp9893 = getelementptr inbounds float* %tmp9892, i64 1
- %tmp9894 = getelementptr inbounds float* %tmp9893, i64 1
- %tmp9895 = getelementptr inbounds float* %tmp9894, i64 1
- %tmp9896 = getelementptr inbounds float* %tmp9895, i64 1
- %tmp9897 = getelementptr inbounds float* %tmp9896, i64 1
- %tmp9898 = getelementptr inbounds float* %tmp9897, i64 1
- %tmp9899 = getelementptr inbounds float* %tmp9898, i64 1
- %tmp9900 = getelementptr inbounds float* %tmp9899, i64 1
- %tmp9901 = getelementptr inbounds float* %tmp9900, i64 1
- %tmp9902 = getelementptr inbounds float* %tmp9901, i64 1
- %tmp9903 = getelementptr inbounds float* %tmp9902, i64 1
- %tmp9904 = getelementptr inbounds float* %tmp9903, i64 1
- %tmp9905 = getelementptr inbounds float* %tmp9904, i64 1
- %tmp9906 = getelementptr inbounds float* %tmp9905, i64 1
- %tmp9907 = getelementptr inbounds float* %tmp9906, i64 1
- %tmp9908 = getelementptr inbounds float* %tmp9907, i64 1
- %tmp9909 = getelementptr inbounds float* %tmp9908, i64 1
- %tmp9910 = getelementptr inbounds float* %tmp9909, i64 1
- %tmp9911 = getelementptr inbounds float* %tmp9910, i64 1
- %tmp9912 = getelementptr inbounds float* %tmp9911, i64 1
- %tmp9913 = getelementptr inbounds float* %tmp9912, i64 1
- %tmp9914 = getelementptr inbounds float* %tmp9913, i64 1
- %tmp9915 = getelementptr inbounds float* %tmp9914, i64 1
- %tmp9916 = getelementptr inbounds float* %tmp9915, i64 1
- %tmp9917 = getelementptr inbounds float* %tmp9916, i64 1
- %tmp9918 = getelementptr inbounds float* %tmp9917, i64 1
- %tmp9919 = getelementptr inbounds float* %tmp9918, i64 1
- %tmp9920 = getelementptr inbounds float* %tmp9919, i64 1
- %tmp9921 = getelementptr inbounds float* %tmp9920, i64 1
- %tmp9922 = getelementptr inbounds float* %tmp9921, i64 1
- %tmp9923 = getelementptr inbounds float* %tmp9922, i64 1
- %tmp9924 = getelementptr inbounds float* %tmp9923, i64 1
- %tmp9925 = getelementptr inbounds float* %tmp9924, i64 1
- %tmp9926 = getelementptr inbounds float* %tmp9925, i64 1
- %tmp9927 = getelementptr inbounds float* %tmp9926, i64 1
- %tmp9928 = getelementptr inbounds float* %tmp9927, i64 1
- %tmp9929 = getelementptr inbounds float* %tmp9928, i64 1
- %tmp9930 = getelementptr inbounds float* %tmp9929, i64 1
- %tmp9931 = getelementptr inbounds float* %tmp9930, i64 1
- %tmp9932 = getelementptr inbounds float* %tmp9931, i64 1
- %tmp9933 = getelementptr inbounds float* %tmp9932, i64 1
- %tmp9934 = getelementptr inbounds float* %tmp9933, i64 1
- %tmp9935 = getelementptr inbounds float* %tmp9934, i64 1
- %tmp9936 = getelementptr inbounds float* %tmp9935, i64 1
- %tmp9937 = getelementptr inbounds float* %tmp9936, i64 1
- %tmp9938 = getelementptr inbounds float* %tmp9937, i64 1
- %tmp9939 = getelementptr inbounds float* %tmp9938, i64 1
- %tmp9940 = getelementptr inbounds float* %tmp9939, i64 1
- %tmp9941 = getelementptr inbounds float* %tmp9940, i64 1
- %tmp9942 = getelementptr inbounds float* %tmp9941, i64 1
- %tmp9943 = getelementptr inbounds float* %tmp9942, i64 1
- %tmp9944 = getelementptr inbounds float* %tmp9943, i64 1
- %tmp9945 = getelementptr inbounds float* %tmp9944, i64 1
- %tmp9946 = getelementptr inbounds float* %tmp9945, i64 1
- %tmp9947 = getelementptr inbounds float* %tmp9946, i64 1
- %tmp9948 = getelementptr inbounds float* %tmp9947, i64 1
- %tmp9949 = getelementptr inbounds float* %tmp9948, i64 1
- %tmp9950 = getelementptr inbounds float* %tmp9949, i64 1
- %tmp9951 = getelementptr inbounds float* %tmp9950, i64 1
- %tmp9952 = getelementptr inbounds float* %tmp9951, i64 1
- %tmp9953 = getelementptr inbounds float* %tmp9952, i64 1
- %tmp9954 = getelementptr inbounds float* %tmp9953, i64 1
- %tmp9955 = getelementptr inbounds float* %tmp9954, i64 1
- %tmp9956 = getelementptr inbounds float* %tmp9955, i64 1
- %tmp9957 = getelementptr inbounds float* %tmp9956, i64 1
- %tmp9958 = getelementptr inbounds float* %tmp9957, i64 1
- %tmp9959 = getelementptr inbounds float* %tmp9958, i64 1
- %tmp9960 = getelementptr inbounds float* %tmp9959, i64 1
- %tmp9961 = getelementptr inbounds float* %tmp9960, i64 1
- %tmp9962 = getelementptr inbounds float* %tmp9961, i64 1
- %tmp9963 = getelementptr inbounds float* %tmp9962, i64 1
- %tmp9964 = getelementptr inbounds float* %tmp9963, i64 1
- %tmp9965 = getelementptr inbounds float* %tmp9964, i64 1
- %tmp9966 = getelementptr inbounds float* %tmp9965, i64 1
- %tmp9967 = getelementptr inbounds float* %tmp9966, i64 1
- %tmp9968 = getelementptr inbounds float* %tmp9967, i64 1
- %tmp9969 = getelementptr inbounds float* %tmp9968, i64 1
- %tmp9970 = getelementptr inbounds float* %tmp9969, i64 1
- %tmp9971 = getelementptr inbounds float* %tmp9970, i64 1
- %tmp9972 = getelementptr inbounds float* %tmp9971, i64 1
- %tmp9973 = getelementptr inbounds float* %tmp9972, i64 1
- %tmp9974 = getelementptr inbounds float* %tmp9973, i64 1
- %tmp9975 = getelementptr inbounds float* %tmp9974, i64 1
- %tmp9976 = getelementptr inbounds float* %tmp9975, i64 1
- %tmp9977 = getelementptr inbounds float* %tmp9976, i64 1
- %tmp9978 = getelementptr inbounds float* %tmp9977, i64 1
- %tmp9979 = getelementptr inbounds float* %tmp9978, i64 1
- %tmp9980 = getelementptr inbounds float* %tmp9979, i64 1
- %tmp9981 = getelementptr inbounds float* %tmp9980, i64 1
- %tmp9982 = getelementptr inbounds float* %tmp9981, i64 1
- %tmp9983 = getelementptr inbounds float* %tmp9982, i64 1
- %tmp9984 = getelementptr inbounds float* %tmp9983, i64 1
- %tmp9985 = getelementptr inbounds float* %tmp9984, i64 1
- %tmp9986 = getelementptr inbounds float* %tmp9985, i64 1
- %tmp9987 = getelementptr inbounds float* %tmp9986, i64 1
- %tmp9988 = getelementptr inbounds float* %tmp9987, i64 1
- %tmp9989 = getelementptr inbounds float* %tmp9988, i64 1
- %tmp9990 = getelementptr inbounds float* %tmp9989, i64 1
- %tmp9991 = getelementptr inbounds float* %tmp9990, i64 1
- %tmp9992 = getelementptr inbounds float* %tmp9991, i64 1
- %tmp9993 = getelementptr inbounds float* %tmp9992, i64 1
- %tmp9994 = getelementptr inbounds float* %tmp9993, i64 1
- %tmp9995 = getelementptr inbounds float* %tmp9994, i64 1
- %tmp9996 = getelementptr inbounds float* %tmp9995, i64 1
- %tmp9997 = getelementptr inbounds float* %tmp9996, i64 1
- %tmp9998 = getelementptr inbounds float* %tmp9997, i64 1
- %tmp9999 = getelementptr inbounds float* %tmp9998, i64 1
- %tmp10000 = getelementptr inbounds float* %tmp9999, i64 1
- %tmp10001 = getelementptr inbounds float* %tmp10000, i64 1
- %tmp10002 = getelementptr inbounds float* %tmp10001, i64 1
- %tmp10003 = getelementptr inbounds float* %tmp10002, i64 1
- %tmp10004 = getelementptr inbounds float* %tmp10003, i64 1
- %tmp10005 = getelementptr inbounds float* %tmp10004, i64 1
- %tmp10006 = getelementptr inbounds float* %tmp10005, i64 1
- %tmp10007 = getelementptr inbounds float* %tmp10006, i64 1
- %tmp10008 = getelementptr inbounds float* %tmp10007, i64 1
- %tmp10009 = getelementptr inbounds float* %tmp10008, i64 1
- %tmp10010 = getelementptr inbounds float* %tmp10009, i64 1
- %tmp10011 = getelementptr inbounds float* %tmp10010, i64 1
- %tmp10012 = getelementptr inbounds float* %tmp10011, i64 1
- %tmp10013 = getelementptr inbounds float* %tmp10012, i64 1
- %tmp10014 = getelementptr inbounds float* %tmp10013, i64 1
- %tmp10015 = getelementptr inbounds float* %tmp10014, i64 1
- %tmp10016 = getelementptr inbounds float* %tmp10015, i64 1
- %tmp10017 = getelementptr inbounds float* %tmp10016, i64 1
- %tmp10018 = getelementptr inbounds float* %tmp10017, i64 1
- %tmp10019 = getelementptr inbounds float* %tmp10018, i64 1
- %tmp10020 = getelementptr inbounds float* %tmp10019, i64 1
- %tmp10021 = getelementptr inbounds float* %tmp10020, i64 1
- %tmp10022 = getelementptr inbounds float* %tmp10021, i64 1
- %tmp10023 = getelementptr inbounds float* %tmp10022, i64 1
- %tmp10024 = getelementptr inbounds float* %tmp10023, i64 1
- %tmp10025 = getelementptr inbounds float* %tmp10024, i64 1
- %tmp10026 = getelementptr inbounds float* %tmp10025, i64 1
- %tmp10027 = getelementptr inbounds float* %tmp10026, i64 1
- %tmp10028 = getelementptr inbounds float* %tmp10027, i64 1
- %tmp10029 = getelementptr inbounds float* %tmp10028, i64 1
- %tmp10030 = getelementptr inbounds float* %tmp10029, i64 1
- %tmp10031 = getelementptr inbounds float* %tmp10030, i64 1
- %tmp10032 = getelementptr inbounds float* %tmp10031, i64 1
- %tmp10033 = getelementptr inbounds float* %tmp10032, i64 1
- %tmp10034 = getelementptr inbounds float* %tmp10033, i64 1
- %tmp10035 = getelementptr inbounds float* %tmp10034, i64 1
- %tmp10036 = getelementptr inbounds float* %tmp10035, i64 1
- %tmp10037 = getelementptr inbounds float* %tmp10036, i64 1
- %tmp10038 = getelementptr inbounds float* %tmp10037, i64 1
- %tmp10039 = getelementptr inbounds float* %tmp10038, i64 1
- %tmp10040 = getelementptr inbounds float* %tmp10039, i64 1
- %tmp10041 = getelementptr inbounds float* %tmp10040, i64 1
- %tmp10042 = getelementptr inbounds float* %tmp10041, i64 1
- %tmp10043 = getelementptr inbounds float* %tmp10042, i64 1
- %tmp10044 = getelementptr inbounds float* %tmp10043, i64 1
- %tmp10045 = getelementptr inbounds float* %tmp10044, i64 1
- %tmp10046 = getelementptr inbounds float* %tmp10045, i64 1
- %tmp10047 = getelementptr inbounds float* %tmp10046, i64 1
- %tmp10048 = getelementptr inbounds float* %tmp10047, i64 1
- %tmp10049 = getelementptr inbounds float* %tmp10048, i64 1
- %tmp10050 = getelementptr inbounds float* %tmp10049, i64 1
- %tmp10051 = getelementptr inbounds float* %tmp10050, i64 1
- %tmp10052 = getelementptr inbounds float* %tmp10051, i64 1
- %tmp10053 = getelementptr inbounds float* %tmp10052, i64 1
- %tmp10054 = getelementptr inbounds float* %tmp10053, i64 1
- %tmp10055 = getelementptr inbounds float* %tmp10054, i64 1
- %tmp10056 = getelementptr inbounds float* %tmp10055, i64 1
- %tmp10057 = getelementptr inbounds float* %tmp10056, i64 1
- %tmp10058 = getelementptr inbounds float* %tmp10057, i64 1
- %tmp10059 = getelementptr inbounds float* %tmp10058, i64 1
- %tmp10060 = getelementptr inbounds float* %tmp10059, i64 1
- %tmp10061 = getelementptr inbounds float* %tmp10060, i64 1
- %tmp10062 = getelementptr inbounds float* %tmp10061, i64 1
- %tmp10063 = getelementptr inbounds float* %tmp10062, i64 1
- %tmp10064 = getelementptr inbounds float* %tmp10063, i64 1
- %tmp10065 = getelementptr inbounds float* %tmp10064, i64 1
- %tmp10066 = getelementptr inbounds float* %tmp10065, i64 1
- %tmp10067 = getelementptr inbounds float* %tmp10066, i64 1
- %tmp10068 = getelementptr inbounds float* %tmp10067, i64 1
- %tmp10069 = getelementptr inbounds float* %tmp10068, i64 1
- %tmp10070 = getelementptr inbounds float* %tmp10069, i64 1
- %tmp10071 = getelementptr inbounds float* %tmp10070, i64 1
- %tmp10072 = getelementptr inbounds float* %tmp10071, i64 1
- %tmp10073 = getelementptr inbounds float* %tmp10072, i64 1
- %tmp10074 = getelementptr inbounds float* %tmp10073, i64 1
- %tmp10075 = getelementptr inbounds float* %tmp10074, i64 1
- %tmp10076 = getelementptr inbounds float* %tmp10075, i64 1
- %tmp10077 = getelementptr inbounds float* %tmp10076, i64 1
- %tmp10078 = getelementptr inbounds float* %tmp10077, i64 1
- %tmp10079 = getelementptr inbounds float* %tmp10078, i64 1
- %tmp10080 = getelementptr inbounds float* %tmp10079, i64 1
- %tmp10081 = getelementptr inbounds float* %tmp10080, i64 1
- %tmp10082 = getelementptr inbounds float* %tmp10081, i64 1
- %tmp10083 = getelementptr inbounds float* %tmp10082, i64 1
- %tmp10084 = getelementptr inbounds float* %tmp10083, i64 1
- %tmp10085 = getelementptr inbounds float* %tmp10084, i64 1
- %tmp10086 = getelementptr inbounds float* %tmp10085, i64 1
- %tmp10087 = getelementptr inbounds float* %tmp10086, i64 1
- %tmp10088 = getelementptr inbounds float* %tmp10087, i64 1
- %tmp10089 = getelementptr inbounds float* %tmp10088, i64 1
- %tmp10090 = getelementptr inbounds float* %tmp10089, i64 1
- %tmp10091 = getelementptr inbounds float* %tmp10090, i64 1
- %tmp10092 = getelementptr inbounds float* %tmp10091, i64 1
- %tmp10093 = getelementptr inbounds float* %tmp10092, i64 1
- %tmp10094 = getelementptr inbounds float* %tmp10093, i64 1
- %tmp10095 = getelementptr inbounds float* %tmp10094, i64 1
- %tmp10096 = getelementptr inbounds float* %tmp10095, i64 1
- %tmp10097 = getelementptr inbounds float* %tmp10096, i64 1
- %tmp10098 = getelementptr inbounds float* %tmp10097, i64 1
- %tmp10099 = getelementptr inbounds float* %tmp10098, i64 1
- %tmp10100 = getelementptr inbounds float* %tmp10099, i64 1
- %tmp10101 = getelementptr inbounds float* %tmp10100, i64 1
- %tmp10102 = getelementptr inbounds float* %tmp10101, i64 1
- %tmp10103 = getelementptr inbounds float* %tmp10102, i64 1
- %tmp10104 = getelementptr inbounds float* %tmp10103, i64 1
- %tmp10105 = getelementptr inbounds float* %tmp10104, i64 1
- %tmp10106 = getelementptr inbounds float* %tmp10105, i64 1
- %tmp10107 = getelementptr inbounds float* %tmp10106, i64 1
- %tmp10108 = getelementptr inbounds float* %tmp10107, i64 1
- %tmp10109 = getelementptr inbounds float* %tmp10108, i64 1
- %tmp10110 = getelementptr inbounds float* %tmp10109, i64 1
- %tmp10111 = getelementptr inbounds float* %tmp10110, i64 1
- %tmp10112 = getelementptr inbounds float* %tmp10111, i64 1
- %tmp10113 = getelementptr inbounds float* %tmp10112, i64 1
- %tmp10114 = getelementptr inbounds float* %tmp10113, i64 1
- %tmp10115 = getelementptr inbounds float* %tmp10114, i64 1
- %tmp10116 = getelementptr inbounds float* %tmp10115, i64 1
- %tmp10117 = getelementptr inbounds float* %tmp10116, i64 1
- %tmp10118 = getelementptr inbounds float* %tmp10117, i64 1
- %tmp10119 = getelementptr inbounds float* %tmp10118, i64 1
- %tmp10120 = getelementptr inbounds float* %tmp10119, i64 1
- %tmp10121 = getelementptr inbounds float* %tmp10120, i64 1
- %tmp10122 = getelementptr inbounds float* %tmp10121, i64 1
- %tmp10123 = getelementptr inbounds float* %tmp10122, i64 1
- %tmp10124 = getelementptr inbounds float* %tmp10123, i64 1
- %tmp10125 = getelementptr inbounds float* %tmp10124, i64 1
- %tmp10126 = getelementptr inbounds float* %tmp10125, i64 1
- %tmp10127 = getelementptr inbounds float* %tmp10126, i64 1
- %tmp10128 = getelementptr inbounds float* %tmp10127, i64 1
- %tmp10129 = getelementptr inbounds float* %tmp10128, i64 1
- %tmp10130 = getelementptr inbounds float* %tmp10129, i64 1
- %tmp10131 = getelementptr inbounds float* %tmp10130, i64 1
- %tmp10132 = getelementptr inbounds float* %tmp10131, i64 1
- %tmp10133 = getelementptr inbounds float* %tmp10132, i64 1
- %tmp10134 = getelementptr inbounds float* %tmp10133, i64 1
- %tmp10135 = getelementptr inbounds float* %tmp10134, i64 1
- %tmp10136 = getelementptr inbounds float* %tmp10135, i64 1
- %tmp10137 = getelementptr inbounds float* %tmp10136, i64 1
- %tmp10138 = getelementptr inbounds float* %tmp10137, i64 1
- %tmp10139 = getelementptr inbounds float* %tmp10138, i64 1
- %tmp10140 = getelementptr inbounds float* %tmp10139, i64 1
- %tmp10141 = getelementptr inbounds float* %tmp10140, i64 1
- %tmp10142 = getelementptr inbounds float* %tmp10141, i64 1
- %tmp10143 = getelementptr inbounds float* %tmp10142, i64 1
- %tmp10144 = getelementptr inbounds float* %tmp10143, i64 1
- %tmp10145 = getelementptr inbounds float* %tmp10144, i64 1
- %tmp10146 = getelementptr inbounds float* %tmp10145, i64 1
- %tmp10147 = getelementptr inbounds float* %tmp10146, i64 1
- %tmp10148 = getelementptr inbounds float* %tmp10147, i64 1
- %tmp10149 = getelementptr inbounds float* %tmp10148, i64 1
- %tmp10150 = getelementptr inbounds float* %tmp10149, i64 1
- %tmp10151 = getelementptr inbounds float* %tmp10150, i64 1
- %tmp10152 = getelementptr inbounds float* %tmp10151, i64 1
- %tmp10153 = getelementptr inbounds float* %tmp10152, i64 1
- %tmp10154 = getelementptr inbounds float* %tmp10153, i64 1
- %tmp10155 = getelementptr inbounds float* %tmp10154, i64 1
- %tmp10156 = getelementptr inbounds float* %tmp10155, i64 1
- %tmp10157 = getelementptr inbounds float* %tmp10156, i64 1
- %tmp10158 = getelementptr inbounds float* %tmp10157, i64 1
- %tmp10159 = getelementptr inbounds float* %tmp10158, i64 1
- %tmp10160 = getelementptr inbounds float* %tmp10159, i64 1
- %tmp10161 = getelementptr inbounds float* %tmp10160, i64 1
- %tmp10162 = getelementptr inbounds float* %tmp10161, i64 1
- %tmp10163 = getelementptr inbounds float* %tmp10162, i64 1
- %tmp10164 = getelementptr inbounds float* %tmp10163, i64 1
- %tmp10165 = getelementptr inbounds float* %tmp10164, i64 1
- %tmp10166 = getelementptr inbounds float* %tmp10165, i64 1
- %tmp10167 = getelementptr inbounds float* %tmp10166, i64 1
- %tmp10168 = getelementptr inbounds float* %tmp10167, i64 1
- %tmp10169 = getelementptr inbounds float* %tmp10168, i64 1
- %tmp10170 = getelementptr inbounds float* %tmp10169, i64 1
- %tmp10171 = getelementptr inbounds float* %tmp10170, i64 1
- %tmp10172 = getelementptr inbounds float* %tmp10171, i64 1
- %tmp10173 = getelementptr inbounds float* %tmp10172, i64 1
- %tmp10174 = getelementptr inbounds float* %tmp10173, i64 1
- %tmp10175 = getelementptr inbounds float* %tmp10174, i64 1
- %tmp10176 = getelementptr inbounds float* %tmp10175, i64 1
- %tmp10177 = getelementptr inbounds float* %tmp10176, i64 1
- %tmp10178 = getelementptr inbounds float* %tmp10177, i64 1
- %tmp10179 = getelementptr inbounds float* %tmp10178, i64 1
- %tmp10180 = getelementptr inbounds float* %tmp10179, i64 1
- %tmp10181 = getelementptr inbounds float* %tmp10180, i64 1
- %tmp10182 = getelementptr inbounds float* %tmp10181, i64 1
- %tmp10183 = getelementptr inbounds float* %tmp10182, i64 1
- %tmp10184 = getelementptr inbounds float* %tmp10183, i64 1
- %tmp10185 = getelementptr inbounds float* %tmp10184, i64 1
- %tmp10186 = getelementptr inbounds float* %tmp10185, i64 1
- %tmp10187 = getelementptr inbounds float* %tmp10186, i64 1
- %tmp10188 = getelementptr inbounds float* %tmp10187, i64 1
- %tmp10189 = getelementptr inbounds float* %tmp10188, i64 1
- %tmp10190 = getelementptr inbounds float* %tmp10189, i64 1
- %tmp10191 = getelementptr inbounds float* %tmp10190, i64 1
- %tmp10192 = getelementptr inbounds float* %tmp10191, i64 1
- %tmp10193 = getelementptr inbounds float* %tmp10192, i64 1
- %tmp10194 = getelementptr inbounds float* %tmp10193, i64 1
- %tmp10195 = getelementptr inbounds float* %tmp10194, i64 1
- %tmp10196 = getelementptr inbounds float* %tmp10195, i64 1
- %tmp10197 = getelementptr inbounds float* %tmp10196, i64 1
- %tmp10198 = getelementptr inbounds float* %tmp10197, i64 1
- %tmp10199 = getelementptr inbounds float* %tmp10198, i64 1
- %tmp10200 = getelementptr inbounds float* %tmp10199, i64 1
- %tmp10201 = getelementptr inbounds float* %tmp10200, i64 1
- %tmp10202 = getelementptr inbounds float* %tmp10201, i64 1
- %tmp10203 = getelementptr inbounds float* %tmp10202, i64 1
- %tmp10204 = getelementptr inbounds float* %tmp10203, i64 1
- %tmp10205 = getelementptr inbounds float* %tmp10204, i64 1
- %tmp10206 = getelementptr inbounds float* %tmp10205, i64 1
- %tmp10207 = getelementptr inbounds float* %tmp10206, i64 1
- %tmp10208 = getelementptr inbounds float* %tmp10207, i64 1
- %tmp10209 = getelementptr inbounds float* %tmp10208, i64 1
- %tmp10210 = getelementptr inbounds float* %tmp10209, i64 1
- %tmp10211 = getelementptr inbounds float* %tmp10210, i64 1
- %tmp10212 = getelementptr inbounds float* %tmp10211, i64 1
- %tmp10213 = getelementptr inbounds float* %tmp10212, i64 1
- %tmp10214 = getelementptr inbounds float* %tmp10213, i64 1
- %tmp10215 = getelementptr inbounds float* %tmp10214, i64 1
- %tmp10216 = getelementptr inbounds float* %tmp10215, i64 1
- %tmp10217 = getelementptr inbounds float* %tmp10216, i64 1
- %tmp10218 = getelementptr inbounds float* %tmp10217, i64 1
- %tmp10219 = getelementptr inbounds float* %tmp10218, i64 1
- %tmp10220 = getelementptr inbounds float* %tmp10219, i64 1
- %tmp10221 = getelementptr inbounds float* %tmp10220, i64 1
- %tmp10222 = getelementptr inbounds float* %tmp10221, i64 1
- %tmp10223 = getelementptr inbounds float* %tmp10222, i64 1
- %tmp10224 = getelementptr inbounds float* %tmp10223, i64 1
- %tmp10225 = getelementptr inbounds float* %tmp10224, i64 1
- %tmp10226 = getelementptr inbounds float* %tmp10225, i64 1
- %tmp10227 = getelementptr inbounds float* %tmp10226, i64 1
- %tmp10228 = getelementptr inbounds float* %tmp10227, i64 1
- %tmp10229 = getelementptr inbounds float* %tmp10228, i64 1
- %tmp10230 = getelementptr inbounds float* %tmp10229, i64 1
- %tmp10231 = getelementptr inbounds float* %tmp10230, i64 1
- %tmp10232 = getelementptr inbounds float* %tmp10231, i64 1
- %tmp10233 = getelementptr inbounds float* %tmp10232, i64 1
- %tmp10234 = getelementptr inbounds float* %tmp10233, i64 1
- %tmp10235 = getelementptr inbounds float* %tmp10234, i64 1
- %tmp10236 = getelementptr inbounds float* %tmp10235, i64 1
- %tmp10237 = getelementptr inbounds float* %tmp10236, i64 1
- %tmp10238 = getelementptr inbounds float* %tmp10237, i64 1
- %tmp10239 = getelementptr inbounds float* %tmp10238, i64 1
- %tmp10240 = getelementptr inbounds float* %tmp10239, i64 1
- %tmp10241 = getelementptr inbounds float* %tmp10240, i64 1
- %tmp10242 = getelementptr inbounds float* %tmp10241, i64 1
- %tmp10243 = getelementptr inbounds float* %tmp10242, i64 1
- %tmp10244 = getelementptr inbounds float* %tmp10243, i64 1
- %tmp10245 = getelementptr inbounds float* %tmp10244, i64 1
- %tmp10246 = getelementptr inbounds float* %tmp10245, i64 1
- %tmp10247 = getelementptr inbounds float* %tmp10246, i64 1
- %tmp10248 = getelementptr inbounds float* %tmp10247, i64 1
- %tmp10249 = getelementptr inbounds float* %tmp10248, i64 1
- %tmp10250 = getelementptr inbounds float* %tmp10249, i64 1
- %tmp10251 = getelementptr inbounds float* %tmp10250, i64 1
- %tmp10252 = getelementptr inbounds float* %tmp10251, i64 1
- %tmp10253 = getelementptr inbounds float* %tmp10252, i64 1
- %tmp10254 = getelementptr inbounds float* %tmp10253, i64 1
- %tmp10255 = getelementptr inbounds float* %tmp10254, i64 1
- %tmp10256 = getelementptr inbounds float* %tmp10255, i64 1
- %tmp10257 = getelementptr inbounds float* %tmp10256, i64 1
- %tmp10258 = getelementptr inbounds float* %tmp10257, i64 1
- %tmp10259 = getelementptr inbounds float* %tmp10258, i64 1
- %tmp10260 = getelementptr inbounds float* %tmp10259, i64 1
- %tmp10261 = getelementptr inbounds float* %tmp10260, i64 1
- %tmp10262 = getelementptr inbounds float* %tmp10261, i64 1
- %tmp10263 = getelementptr inbounds float* %tmp10262, i64 1
- %tmp10264 = getelementptr inbounds float* %tmp10263, i64 1
- %tmp10265 = getelementptr inbounds float* %tmp10264, i64 1
- %tmp10266 = getelementptr inbounds float* %tmp10265, i64 1
- %tmp10267 = getelementptr inbounds float* %tmp10266, i64 1
- %tmp10268 = getelementptr inbounds float* %tmp10267, i64 1
- %tmp10269 = getelementptr inbounds float* %tmp10268, i64 1
- %tmp10270 = getelementptr inbounds float* %tmp10269, i64 1
- %tmp10271 = getelementptr inbounds float* %tmp10270, i64 1
- %tmp10272 = getelementptr inbounds float* %tmp10271, i64 1
- %tmp10273 = getelementptr inbounds float* %tmp10272, i64 1
- %tmp10274 = getelementptr inbounds float* %tmp10273, i64 1
- %tmp10275 = getelementptr inbounds float* %tmp10274, i64 1
- %tmp10276 = getelementptr inbounds float* %tmp10275, i64 1
- %tmp10277 = getelementptr inbounds float* %tmp10276, i64 1
- %tmp10278 = getelementptr inbounds float* %tmp10277, i64 1
- %tmp10279 = getelementptr inbounds float* %tmp10278, i64 1
- %tmp10280 = getelementptr inbounds float* %tmp10279, i64 1
- %tmp10281 = getelementptr inbounds float* %tmp10280, i64 1
- %tmp10282 = getelementptr inbounds float* %tmp10281, i64 1
- %tmp10283 = getelementptr inbounds float* %tmp10282, i64 1
- %tmp10284 = getelementptr inbounds float* %tmp10283, i64 1
- %tmp10285 = getelementptr inbounds float* %tmp10284, i64 1
- %tmp10286 = getelementptr inbounds float* %tmp10285, i64 1
- %tmp10287 = getelementptr inbounds float* %tmp10286, i64 1
- %tmp10288 = getelementptr inbounds float* %tmp10287, i64 1
- %tmp10289 = getelementptr inbounds float* %tmp10288, i64 1
- %tmp10290 = getelementptr inbounds float* %tmp10289, i64 1
- %tmp10291 = getelementptr inbounds float* %tmp10290, i64 1
- %tmp10292 = getelementptr inbounds float* %tmp10291, i64 1
- %tmp10293 = getelementptr inbounds float* %tmp10292, i64 1
- %tmp10294 = getelementptr inbounds float* %tmp10293, i64 1
- %tmp10295 = getelementptr inbounds float* %tmp10294, i64 1
- %tmp10296 = getelementptr inbounds float* %tmp10295, i64 1
- %tmp10297 = getelementptr inbounds float* %tmp10296, i64 1
- %tmp10298 = getelementptr inbounds float* %tmp10297, i64 1
- %tmp10299 = getelementptr inbounds float* %tmp10298, i64 1
- %tmp10300 = getelementptr inbounds float* %tmp10299, i64 1
- %tmp10301 = getelementptr inbounds float* %tmp10300, i64 1
- %tmp10302 = getelementptr inbounds float* %tmp10301, i64 1
- %tmp10303 = getelementptr inbounds float* %tmp10302, i64 1
- %tmp10304 = getelementptr inbounds float* %tmp10303, i64 1
- %tmp10305 = getelementptr inbounds float* %tmp10304, i64 1
- %tmp10306 = getelementptr inbounds float* %tmp10305, i64 1
- %tmp10307 = getelementptr inbounds float* %tmp10306, i64 1
- %tmp10308 = getelementptr inbounds float* %tmp10307, i64 1
- %tmp10309 = getelementptr inbounds float* %tmp10308, i64 1
- %tmp10310 = getelementptr inbounds float* %tmp10309, i64 1
- %tmp10311 = getelementptr inbounds float* %tmp10310, i64 1
- %tmp10312 = getelementptr inbounds float* %tmp10311, i64 1
- %tmp10313 = getelementptr inbounds float* %tmp10312, i64 1
- %tmp10314 = getelementptr inbounds float* %tmp10313, i64 1
- %tmp10315 = getelementptr inbounds float* %tmp10314, i64 1
- %tmp10316 = getelementptr inbounds float* %tmp10315, i64 1
- %tmp10317 = getelementptr inbounds float* %tmp10316, i64 1
- %tmp10318 = getelementptr inbounds float* %tmp10317, i64 1
- %tmp10319 = getelementptr inbounds float* %tmp10318, i64 1
- %tmp10320 = getelementptr inbounds float* %tmp10319, i64 1
- %tmp10321 = getelementptr inbounds float* %tmp10320, i64 1
- %tmp10322 = getelementptr inbounds float* %tmp10321, i64 1
- %tmp10323 = getelementptr inbounds float* %tmp10322, i64 1
- %tmp10324 = getelementptr inbounds float* %tmp10323, i64 1
- %tmp10325 = getelementptr inbounds float* %tmp10324, i64 1
- %tmp10326 = getelementptr inbounds float* %tmp10325, i64 1
- %tmp10327 = getelementptr inbounds float* %tmp10326, i64 1
- %tmp10328 = getelementptr inbounds float* %tmp10327, i64 1
- %tmp10329 = getelementptr inbounds float* %tmp10328, i64 1
- %tmp10330 = getelementptr inbounds float* %tmp10329, i64 1
- %tmp10331 = getelementptr inbounds float* %tmp10330, i64 1
- %tmp10332 = getelementptr inbounds float* %tmp10331, i64 1
- %tmp10333 = getelementptr inbounds float* %tmp10332, i64 1
- %tmp10334 = getelementptr inbounds float* %tmp10333, i64 1
- %tmp10335 = getelementptr inbounds float* %tmp10334, i64 1
- %tmp10336 = getelementptr inbounds float* %tmp10335, i64 1
- %tmp10337 = getelementptr inbounds float* %tmp10336, i64 1
- %tmp10338 = getelementptr inbounds float* %tmp10337, i64 1
- %tmp10339 = getelementptr inbounds float* %tmp10338, i64 1
- %tmp10340 = getelementptr inbounds float* %tmp10339, i64 1
- %tmp10341 = getelementptr inbounds float* %tmp10340, i64 1
- %tmp10342 = getelementptr inbounds float* %tmp10341, i64 1
- %tmp10343 = getelementptr inbounds float* %tmp10342, i64 1
- %tmp10344 = getelementptr inbounds float* %tmp10343, i64 1
- %tmp10345 = getelementptr inbounds float* %tmp10344, i64 1
- %tmp10346 = getelementptr inbounds float* %tmp10345, i64 1
- %tmp10347 = getelementptr inbounds float* %tmp10346, i64 1
- %tmp10348 = getelementptr inbounds float* %tmp10347, i64 1
- %tmp10349 = getelementptr inbounds float* %tmp10348, i64 1
- %tmp10350 = getelementptr inbounds float* %tmp10349, i64 1
- %tmp10351 = getelementptr inbounds float* %tmp10350, i64 1
- %tmp10352 = getelementptr inbounds float* %tmp10351, i64 1
- %tmp10353 = getelementptr inbounds float* %tmp10352, i64 1
- %tmp10354 = getelementptr inbounds float* %tmp10353, i64 1
- %tmp10355 = getelementptr inbounds float* %tmp10354, i64 1
- %tmp10356 = getelementptr inbounds float* %tmp10355, i64 1
- %tmp10357 = getelementptr inbounds float* %tmp10356, i64 1
- %tmp10358 = getelementptr inbounds float* %tmp10357, i64 1
- %tmp10359 = getelementptr inbounds float* %tmp10358, i64 1
- %tmp10360 = getelementptr inbounds float* %tmp10359, i64 1
- %tmp10361 = getelementptr inbounds float* %tmp10360, i64 1
- %tmp10362 = getelementptr inbounds float* %tmp10361, i64 1
- %tmp10363 = getelementptr inbounds float* %tmp10362, i64 1
- %tmp10364 = getelementptr inbounds float* %tmp10363, i64 1
- %tmp10365 = getelementptr inbounds float* %tmp10364, i64 1
- %tmp10366 = getelementptr inbounds float* %tmp10365, i64 1
- %tmp10367 = getelementptr inbounds float* %tmp10366, i64 1
- %tmp10368 = getelementptr inbounds float* %tmp10367, i64 1
- %tmp10369 = getelementptr inbounds float* %tmp10368, i64 1
- %tmp10370 = getelementptr inbounds float* %tmp10369, i64 1
- %tmp10371 = getelementptr inbounds float* %tmp10370, i64 1
- %tmp10372 = getelementptr inbounds float* %tmp10371, i64 1
- %tmp10373 = getelementptr inbounds float* %tmp10372, i64 1
- %tmp10374 = getelementptr inbounds float* %tmp10373, i64 1
- %tmp10375 = getelementptr inbounds float* %tmp10374, i64 1
- %tmp10376 = getelementptr inbounds float* %tmp10375, i64 1
- %tmp10377 = getelementptr inbounds float* %tmp10376, i64 1
- %tmp10378 = getelementptr inbounds float* %tmp10377, i64 1
- %tmp10379 = getelementptr inbounds float* %tmp10378, i64 1
- %tmp10380 = getelementptr inbounds float* %tmp10379, i64 1
- %tmp10381 = getelementptr inbounds float* %tmp10380, i64 1
- %tmp10382 = getelementptr inbounds float* %tmp10381, i64 1
- %tmp10383 = getelementptr inbounds float* %tmp10382, i64 1
- %tmp10384 = getelementptr inbounds float* %tmp10383, i64 1
- %tmp10385 = getelementptr inbounds float* %tmp10384, i64 1
- %tmp10386 = getelementptr inbounds float* %tmp10385, i64 1
- %tmp10387 = getelementptr inbounds float* %tmp10386, i64 1
- %tmp10388 = getelementptr inbounds float* %tmp10387, i64 1
- %tmp10389 = getelementptr inbounds float* %tmp10388, i64 1
- %tmp10390 = getelementptr inbounds float* %tmp10389, i64 1
- %tmp10391 = getelementptr inbounds float* %tmp10390, i64 1
- %tmp10392 = getelementptr inbounds float* %tmp10391, i64 1
- %tmp10393 = getelementptr inbounds float* %tmp10392, i64 1
- %tmp10394 = getelementptr inbounds float* %tmp10393, i64 1
- %tmp10395 = getelementptr inbounds float* %tmp10394, i64 1
- %tmp10396 = getelementptr inbounds float* %tmp10395, i64 1
- %tmp10397 = getelementptr inbounds float* %tmp10396, i64 1
- %tmp10398 = getelementptr inbounds float* %tmp10397, i64 1
- %tmp10399 = getelementptr inbounds float* %tmp10398, i64 1
- %tmp10400 = getelementptr inbounds float* %tmp10399, i64 1
- %tmp10401 = getelementptr inbounds float* %tmp10400, i64 1
- %tmp10402 = getelementptr inbounds float* %tmp10401, i64 1
- %tmp10403 = getelementptr inbounds float* %tmp10402, i64 1
- %tmp10404 = getelementptr inbounds float* %tmp10403, i64 1
- %tmp10405 = getelementptr inbounds float* %tmp10404, i64 1
- %tmp10406 = getelementptr inbounds float* %tmp10405, i64 1
- %tmp10407 = getelementptr inbounds float* %tmp10406, i64 1
- %tmp10408 = getelementptr inbounds float* %tmp10407, i64 1
- %tmp10409 = getelementptr inbounds float* %tmp10408, i64 1
- %tmp10410 = getelementptr inbounds float* %tmp10409, i64 1
- %tmp10411 = getelementptr inbounds float* %tmp10410, i64 1
- %tmp10412 = getelementptr inbounds float* %tmp10411, i64 1
- %tmp10413 = getelementptr inbounds float* %tmp10412, i64 1
- %tmp10414 = getelementptr inbounds float* %tmp10413, i64 1
- %tmp10415 = getelementptr inbounds float* %tmp10414, i64 1
- %tmp10416 = getelementptr inbounds float* %tmp10415, i64 1
- %tmp10417 = getelementptr inbounds float* %tmp10416, i64 1
- %tmp10418 = getelementptr inbounds float* %tmp10417, i64 1
- %tmp10419 = getelementptr inbounds float* %tmp10418, i64 1
- %tmp10420 = getelementptr inbounds float* %tmp10419, i64 1
- %tmp10421 = getelementptr inbounds float* %tmp10420, i64 1
- %tmp10422 = getelementptr inbounds float* %tmp10421, i64 1
- %tmp10423 = getelementptr inbounds float* %tmp10422, i64 1
- %tmp10424 = getelementptr inbounds float* %tmp10423, i64 1
- %tmp10425 = getelementptr inbounds float* %tmp10424, i64 1
- %tmp10426 = getelementptr inbounds float* %tmp10425, i64 1
- %tmp10427 = getelementptr inbounds float* %tmp10426, i64 1
- %tmp10428 = getelementptr inbounds float* %tmp10427, i64 1
- %tmp10429 = getelementptr inbounds float* %tmp10428, i64 1
- %tmp10430 = getelementptr inbounds float* %tmp10429, i64 1
- %tmp10431 = getelementptr inbounds float* %tmp10430, i64 1
- %tmp10432 = getelementptr inbounds float* %tmp10431, i64 1
- %tmp10433 = getelementptr inbounds float* %tmp10432, i64 1
- %tmp10434 = getelementptr inbounds float* %tmp10433, i64 1
- %tmp10435 = getelementptr inbounds float* %tmp10434, i64 1
- %tmp10436 = getelementptr inbounds float* %tmp10435, i64 1
- %tmp10437 = getelementptr inbounds float* %tmp10436, i64 1
- %tmp10438 = getelementptr inbounds float* %tmp10437, i64 1
- %tmp10439 = getelementptr inbounds float* %tmp10438, i64 1
- %tmp10440 = getelementptr inbounds float* %tmp10439, i64 1
- %tmp10441 = getelementptr inbounds float* %tmp10440, i64 1
- %tmp10442 = getelementptr inbounds float* %tmp10441, i64 1
- %tmp10443 = getelementptr inbounds float* %tmp10442, i64 1
- %tmp10444 = getelementptr inbounds float* %tmp10443, i64 1
- %tmp10445 = getelementptr inbounds float* %tmp10444, i64 1
- %tmp10446 = getelementptr inbounds float* %tmp10445, i64 1
- %tmp10447 = getelementptr inbounds float* %tmp10446, i64 1
- %tmp10448 = getelementptr inbounds float* %tmp10447, i64 1
- %tmp10449 = getelementptr inbounds float* %tmp10448, i64 1
- %tmp10450 = getelementptr inbounds float* %tmp10449, i64 1
- %tmp10451 = getelementptr inbounds float* %tmp10450, i64 1
- %tmp10452 = getelementptr inbounds float* %tmp10451, i64 1
- %tmp10453 = getelementptr inbounds float* %tmp10452, i64 1
- %tmp10454 = getelementptr inbounds float* %tmp10453, i64 1
- %tmp10455 = getelementptr inbounds float* %tmp10454, i64 1
- %tmp10456 = getelementptr inbounds float* %tmp10455, i64 1
- %tmp10457 = getelementptr inbounds float* %tmp10456, i64 1
- %tmp10458 = getelementptr inbounds float* %tmp10457, i64 1
- %tmp10459 = getelementptr inbounds float* %tmp10458, i64 1
- %tmp10460 = getelementptr inbounds float* %tmp10459, i64 1
- %tmp10461 = getelementptr inbounds float* %tmp10460, i64 1
- %tmp10462 = getelementptr inbounds float* %tmp10461, i64 1
- %tmp10463 = getelementptr inbounds float* %tmp10462, i64 1
- %tmp10464 = getelementptr inbounds float* %tmp10463, i64 1
- %tmp10465 = getelementptr inbounds float* %tmp10464, i64 1
- %tmp10466 = getelementptr inbounds float* %tmp10465, i64 1
- %tmp10467 = getelementptr inbounds float* %tmp10466, i64 1
- %tmp10468 = getelementptr inbounds float* %tmp10467, i64 1
- %tmp10469 = getelementptr inbounds float* %tmp10468, i64 1
- %tmp10470 = getelementptr inbounds float* %tmp10469, i64 1
- %tmp10471 = getelementptr inbounds float* %tmp10470, i64 1
- %tmp10472 = getelementptr inbounds float* %tmp10471, i64 1
- %tmp10473 = getelementptr inbounds float* %tmp10472, i64 1
- %tmp10474 = getelementptr inbounds float* %tmp10473, i64 1
- %tmp10475 = getelementptr inbounds float* %tmp10474, i64 1
- %tmp10476 = getelementptr inbounds float* %tmp10475, i64 1
- %tmp10477 = getelementptr inbounds float* %tmp10476, i64 1
- %tmp10478 = getelementptr inbounds float* %tmp10477, i64 1
- %tmp10479 = getelementptr inbounds float* %tmp10478, i64 1
- %tmp10480 = getelementptr inbounds float* %tmp10479, i64 1
- %tmp10481 = getelementptr inbounds float* %tmp10480, i64 1
- %tmp10482 = getelementptr inbounds float* %tmp10481, i64 1
- %tmp10483 = getelementptr inbounds float* %tmp10482, i64 1
- %tmp10484 = getelementptr inbounds float* %tmp10483, i64 1
- %tmp10485 = getelementptr inbounds float* %tmp10484, i64 1
- %tmp10486 = getelementptr inbounds float* %tmp10485, i64 1
- %tmp10487 = getelementptr inbounds float* %tmp10486, i64 1
- %tmp10488 = getelementptr inbounds float* %tmp10487, i64 1
- %tmp10489 = getelementptr inbounds float* %tmp10488, i64 1
- %tmp10490 = getelementptr inbounds float* %tmp10489, i64 1
- %tmp10491 = getelementptr inbounds float* %tmp10490, i64 1
- %tmp10492 = getelementptr inbounds float* %tmp10491, i64 1
- %tmp10493 = getelementptr inbounds float* %tmp10492, i64 1
- %tmp10494 = getelementptr inbounds float* %tmp10493, i64 1
- %tmp10495 = getelementptr inbounds float* %tmp10494, i64 1
- %tmp10496 = getelementptr inbounds float* %tmp10495, i64 1
- %tmp10497 = getelementptr inbounds float* %tmp10496, i64 1
- %tmp10498 = getelementptr inbounds float* %tmp10497, i64 1
- %tmp10499 = getelementptr inbounds float* %tmp10498, i64 1
- %tmp10500 = getelementptr inbounds float* %tmp10499, i64 1
- %tmp10501 = getelementptr inbounds float* %tmp10500, i64 1
- %tmp10502 = getelementptr inbounds float* %tmp10501, i64 1
- %tmp10503 = getelementptr inbounds float* %tmp10502, i64 1
- %tmp10504 = getelementptr inbounds float* %tmp10503, i64 1
- %tmp10505 = getelementptr inbounds float* %tmp10504, i64 1
- %tmp10506 = getelementptr inbounds float* %tmp10505, i64 1
- %tmp10507 = getelementptr inbounds float* %tmp10506, i64 1
- %tmp10508 = getelementptr inbounds float* %tmp10507, i64 1
- %tmp10509 = getelementptr inbounds float* %tmp10508, i64 1
- %tmp10510 = getelementptr inbounds float* %tmp10509, i64 1
- %tmp10511 = getelementptr inbounds float* %tmp10510, i64 1
- %tmp10512 = getelementptr inbounds float* %tmp10511, i64 1
- %tmp10513 = getelementptr inbounds float* %tmp10512, i64 1
- %tmp10514 = getelementptr inbounds float* %tmp10513, i64 1
- %tmp10515 = getelementptr inbounds float* %tmp10514, i64 1
- %tmp10516 = getelementptr inbounds float* %tmp10515, i64 1
- %tmp10517 = getelementptr inbounds float* %tmp10516, i64 1
- %tmp10518 = getelementptr inbounds float* %tmp10517, i64 1
- %tmp10519 = getelementptr inbounds float* %tmp10518, i64 1
- %tmp10520 = getelementptr inbounds float* %tmp10519, i64 1
- %tmp10521 = getelementptr inbounds float* %tmp10520, i64 1
- %tmp10522 = getelementptr inbounds float* %tmp10521, i64 1
- %tmp10523 = getelementptr inbounds float* %tmp10522, i64 1
- %tmp10524 = getelementptr inbounds float* %tmp10523, i64 1
- %tmp10525 = getelementptr inbounds float* %tmp10524, i64 1
- %tmp10526 = getelementptr inbounds float* %tmp10525, i64 1
- %tmp10527 = getelementptr inbounds float* %tmp10526, i64 1
- %tmp10528 = getelementptr inbounds float* %tmp10527, i64 1
- %tmp10529 = getelementptr inbounds float* %tmp10528, i64 1
- %tmp10530 = getelementptr inbounds float* %tmp10529, i64 1
- %tmp10531 = getelementptr inbounds float* %tmp10530, i64 1
- %tmp10532 = getelementptr inbounds float* %tmp10531, i64 1
- %tmp10533 = getelementptr inbounds float* %tmp10532, i64 1
- %tmp10534 = getelementptr inbounds float* %tmp10533, i64 1
- %tmp10535 = getelementptr inbounds float* %tmp10534, i64 1
- %tmp10536 = getelementptr inbounds float* %tmp10535, i64 1
- %tmp10537 = getelementptr inbounds float* %tmp10536, i64 1
- %tmp10538 = getelementptr inbounds float* %tmp10537, i64 1
- %tmp10539 = getelementptr inbounds float* %tmp10538, i64 1
- %tmp10540 = getelementptr inbounds float* %tmp10539, i64 1
- %tmp10541 = getelementptr inbounds float* %tmp10540, i64 1
- %tmp10542 = getelementptr inbounds float* %tmp10541, i64 1
- %tmp10543 = getelementptr inbounds float* %tmp10542, i64 1
- %tmp10544 = getelementptr inbounds float* %tmp10543, i64 1
- %tmp10545 = getelementptr inbounds float* %tmp10544, i64 1
- %tmp10546 = getelementptr inbounds float* %tmp10545, i64 1
- %tmp10547 = getelementptr inbounds float* %tmp10546, i64 1
- %tmp10548 = getelementptr inbounds float* %tmp10547, i64 1
- %tmp10549 = getelementptr inbounds float* %tmp10548, i64 1
- %tmp10550 = getelementptr inbounds float* %tmp10549, i64 1
- %tmp10551 = getelementptr inbounds float* %tmp10550, i64 1
- %tmp10552 = getelementptr inbounds float* %tmp10551, i64 1
- %tmp10553 = getelementptr inbounds float* %tmp10552, i64 1
- %tmp10554 = getelementptr inbounds float* %tmp10553, i64 1
- %tmp10555 = getelementptr inbounds float* %tmp10554, i64 1
- %tmp10556 = getelementptr inbounds float* %tmp10555, i64 1
- %tmp10557 = getelementptr inbounds float* %tmp10556, i64 1
- %tmp10558 = getelementptr inbounds float* %tmp10557, i64 1
- %tmp10559 = getelementptr inbounds float* %tmp10558, i64 1
- %tmp10560 = getelementptr inbounds float* %tmp10559, i64 1
- %tmp10561 = getelementptr inbounds float* %tmp10560, i64 1
- %tmp10562 = getelementptr inbounds float* %tmp10561, i64 1
- %tmp10563 = getelementptr inbounds float* %tmp10562, i64 1
- %tmp10564 = getelementptr inbounds float* %tmp10563, i64 1
- %tmp10565 = getelementptr inbounds float* %tmp10564, i64 1
- %tmp10566 = getelementptr inbounds float* %tmp10565, i64 1
- %tmp10567 = getelementptr inbounds float* %tmp10566, i64 1
- %tmp10568 = getelementptr inbounds float* %tmp10567, i64 1
- %tmp10569 = getelementptr inbounds float* %tmp10568, i64 1
- %tmp10570 = getelementptr inbounds float* %tmp10569, i64 1
- %tmp10571 = getelementptr inbounds float* %tmp10570, i64 1
- %tmp10572 = getelementptr inbounds float* %tmp10571, i64 1
- %tmp10573 = getelementptr inbounds float* %tmp10572, i64 1
- %tmp10574 = getelementptr inbounds float* %tmp10573, i64 1
- %tmp10575 = getelementptr inbounds float* %tmp10574, i64 1
- %tmp10576 = getelementptr inbounds float* %tmp10575, i64 1
- %tmp10577 = getelementptr inbounds float* %tmp10576, i64 1
- %tmp10578 = getelementptr inbounds float* %tmp10577, i64 1
- %tmp10579 = getelementptr inbounds float* %tmp10578, i64 1
- %tmp10580 = getelementptr inbounds float* %tmp10579, i64 1
- %tmp10581 = getelementptr inbounds float* %tmp10580, i64 1
- %tmp10582 = getelementptr inbounds float* %tmp10581, i64 1
- %tmp10583 = getelementptr inbounds float* %tmp10582, i64 1
- %tmp10584 = getelementptr inbounds float* %tmp10583, i64 1
- %tmp10585 = getelementptr inbounds float* %tmp10584, i64 1
- %tmp10586 = getelementptr inbounds float* %tmp10585, i64 1
- %tmp10587 = getelementptr inbounds float* %tmp10586, i64 1
- %tmp10588 = getelementptr inbounds float* %tmp10587, i64 1
- %tmp10589 = getelementptr inbounds float* %tmp10588, i64 1
- %tmp10590 = getelementptr inbounds float* %tmp10589, i64 1
- %tmp10591 = getelementptr inbounds float* %tmp10590, i64 1
- %tmp10592 = getelementptr inbounds float* %tmp10591, i64 1
- %tmp10593 = getelementptr inbounds float* %tmp10592, i64 1
- %tmp10594 = getelementptr inbounds float* %tmp10593, i64 1
- %tmp10595 = getelementptr inbounds float* %tmp10594, i64 1
- %tmp10596 = getelementptr inbounds float* %tmp10595, i64 1
- %tmp10597 = getelementptr inbounds float* %tmp10596, i64 1
- %tmp10598 = getelementptr inbounds float* %tmp10597, i64 1
- %tmp10599 = getelementptr inbounds float* %tmp10598, i64 1
- %tmp10600 = getelementptr inbounds float* %tmp10599, i64 1
- %tmp10601 = getelementptr inbounds float* %tmp10600, i64 1
- %tmp10602 = getelementptr inbounds float* %tmp10601, i64 1
- %tmp10603 = getelementptr inbounds float* %tmp10602, i64 1
- %tmp10604 = getelementptr inbounds float* %tmp10603, i64 1
- %tmp10605 = getelementptr inbounds float* %tmp10604, i64 1
- %tmp10606 = getelementptr inbounds float* %tmp10605, i64 1
- %tmp10607 = getelementptr inbounds float* %tmp10606, i64 1
- %tmp10608 = getelementptr inbounds float* %tmp10607, i64 1
- %tmp10609 = getelementptr inbounds float* %tmp10608, i64 1
- %tmp10610 = getelementptr inbounds float* %tmp10609, i64 1
- %tmp10611 = getelementptr inbounds float* %tmp10610, i64 1
- %tmp10612 = getelementptr inbounds float* %tmp10611, i64 1
- %tmp10613 = getelementptr inbounds float* %tmp10612, i64 1
- %tmp10614 = getelementptr inbounds float* %tmp10613, i64 1
- %tmp10615 = getelementptr inbounds float* %tmp10614, i64 1
- %tmp10616 = getelementptr inbounds float* %tmp10615, i64 1
- %tmp10617 = getelementptr inbounds float* %tmp10616, i64 1
- %tmp10618 = getelementptr inbounds float* %tmp10617, i64 1
- %tmp10619 = getelementptr inbounds float* %tmp10618, i64 1
- %tmp10620 = getelementptr inbounds float* %tmp10619, i64 1
- %tmp10621 = getelementptr inbounds float* %tmp10620, i64 1
- %tmp10622 = getelementptr inbounds float* %tmp10621, i64 1
- %tmp10623 = getelementptr inbounds float* %tmp10622, i64 1
- %tmp10624 = getelementptr inbounds float* %tmp10623, i64 1
- %tmp10625 = getelementptr inbounds float* %tmp10624, i64 1
- %tmp10626 = getelementptr inbounds float* %tmp10625, i64 1
- %tmp10627 = getelementptr inbounds float* %tmp10626, i64 1
- %tmp10628 = getelementptr inbounds float* %tmp10627, i64 1
- %tmp10629 = getelementptr inbounds float* %tmp10628, i64 1
- %tmp10630 = getelementptr inbounds float* %tmp10629, i64 1
- %tmp10631 = getelementptr inbounds float* %tmp10630, i64 1
- %tmp10632 = getelementptr inbounds float* %tmp10631, i64 1
- %tmp10633 = getelementptr inbounds float* %tmp10632, i64 1
- %tmp10634 = getelementptr inbounds float* %tmp10633, i64 1
- %tmp10635 = getelementptr inbounds float* %tmp10634, i64 1
- %tmp10636 = getelementptr inbounds float* %tmp10635, i64 1
- %tmp10637 = getelementptr inbounds float* %tmp10636, i64 1
- %tmp10638 = getelementptr inbounds float* %tmp10637, i64 1
- %tmp10639 = getelementptr inbounds float* %tmp10638, i64 1
- %tmp10640 = getelementptr inbounds float* %tmp10639, i64 1
- %tmp10641 = getelementptr inbounds float* %tmp10640, i64 1
- %tmp10642 = getelementptr inbounds float* %tmp10641, i64 1
- %tmp10643 = getelementptr inbounds float* %tmp10642, i64 1
- %tmp10644 = getelementptr inbounds float* %tmp10643, i64 1
- %tmp10645 = getelementptr inbounds float* %tmp10644, i64 1
- %tmp10646 = getelementptr inbounds float* %tmp10645, i64 1
- %tmp10647 = getelementptr inbounds float* %tmp10646, i64 1
- %tmp10648 = getelementptr inbounds float* %tmp10647, i64 1
- %tmp10649 = getelementptr inbounds float* %tmp10648, i64 1
- %tmp10650 = getelementptr inbounds float* %tmp10649, i64 1
- %tmp10651 = getelementptr inbounds float* %tmp10650, i64 1
- %tmp10652 = getelementptr inbounds float* %tmp10651, i64 1
- %tmp10653 = getelementptr inbounds float* %tmp10652, i64 1
- %tmp10654 = getelementptr inbounds float* %tmp10653, i64 1
- %tmp10655 = getelementptr inbounds float* %tmp10654, i64 1
- %tmp10656 = getelementptr inbounds float* %tmp10655, i64 1
- %tmp10657 = getelementptr inbounds float* %tmp10656, i64 1
- %tmp10658 = getelementptr inbounds float* %tmp10657, i64 1
- %tmp10659 = getelementptr inbounds float* %tmp10658, i64 1
- %tmp10660 = getelementptr inbounds float* %tmp10659, i64 1
- %tmp10661 = getelementptr inbounds float* %tmp10660, i64 1
- %tmp10662 = getelementptr inbounds float* %tmp10661, i64 1
- %tmp10663 = getelementptr inbounds float* %tmp10662, i64 1
- %tmp10664 = getelementptr inbounds float* %tmp10663, i64 1
- %tmp10665 = getelementptr inbounds float* %tmp10664, i64 1
- %tmp10666 = getelementptr inbounds float* %tmp10665, i64 1
- %tmp10667 = getelementptr inbounds float* %tmp10666, i64 1
- %tmp10668 = getelementptr inbounds float* %tmp10667, i64 1
- %tmp10669 = getelementptr inbounds float* %tmp10668, i64 1
- %tmp10670 = getelementptr inbounds float* %tmp10669, i64 1
- %tmp10671 = getelementptr inbounds float* %tmp10670, i64 1
- %tmp10672 = getelementptr inbounds float* %tmp10671, i64 1
- %tmp10673 = getelementptr inbounds float* %tmp10672, i64 1
- %tmp10674 = getelementptr inbounds float* %tmp10673, i64 1
- %tmp10675 = getelementptr inbounds float* %tmp10674, i64 1
- %tmp10676 = getelementptr inbounds float* %tmp10675, i64 1
- %tmp10677 = getelementptr inbounds float* %tmp10676, i64 1
- %tmp10678 = getelementptr inbounds float* %tmp10677, i64 1
- %tmp10679 = getelementptr inbounds float* %tmp10678, i64 1
- %tmp10680 = getelementptr inbounds float* %tmp10679, i64 1
- %tmp10681 = getelementptr inbounds float* %tmp10680, i64 1
- %tmp10682 = getelementptr inbounds float* %tmp10681, i64 1
- %tmp10683 = getelementptr inbounds float* %tmp10682, i64 1
- %tmp10684 = getelementptr inbounds float* %tmp10683, i64 1
- %tmp10685 = getelementptr inbounds float* %tmp10684, i64 1
- %tmp10686 = getelementptr inbounds float* %tmp10685, i64 1
- %tmp10687 = getelementptr inbounds float* %tmp10686, i64 1
- %tmp10688 = getelementptr inbounds float* %tmp10687, i64 1
- %tmp10689 = getelementptr inbounds float* %tmp10688, i64 1
- %tmp10690 = getelementptr inbounds float* %tmp10689, i64 1
- %tmp10691 = getelementptr inbounds float* %tmp10690, i64 1
- %tmp10692 = getelementptr inbounds float* %tmp10691, i64 1
- %tmp10693 = getelementptr inbounds float* %tmp10692, i64 1
- %tmp10694 = getelementptr inbounds float* %tmp10693, i64 1
- %tmp10695 = getelementptr inbounds float* %tmp10694, i64 1
- %tmp10696 = getelementptr inbounds float* %tmp10695, i64 1
- %tmp10697 = getelementptr inbounds float* %tmp10696, i64 1
- %tmp10698 = getelementptr inbounds float* %tmp10697, i64 1
- %tmp10699 = getelementptr inbounds float* %tmp10698, i64 1
- %tmp10700 = getelementptr inbounds float* %tmp10699, i64 1
- %tmp10701 = getelementptr inbounds float* %tmp10700, i64 1
- %tmp10702 = getelementptr inbounds float* %tmp10701, i64 1
- %tmp10703 = getelementptr inbounds float* %tmp10702, i64 1
- %tmp10704 = getelementptr inbounds float* %tmp10703, i64 1
- %tmp10705 = getelementptr inbounds float* %tmp10704, i64 1
- %tmp10706 = getelementptr inbounds float* %tmp10705, i64 1
- %tmp10707 = getelementptr inbounds float* %tmp10706, i64 1
- %tmp10708 = getelementptr inbounds float* %tmp10707, i64 1
- %tmp10709 = getelementptr inbounds float* %tmp10708, i64 1
- %tmp10710 = getelementptr inbounds float* %tmp10709, i64 1
- %tmp10711 = getelementptr inbounds float* %tmp10710, i64 1
- %tmp10712 = getelementptr inbounds float* %tmp10711, i64 1
- %tmp10713 = getelementptr inbounds float* %tmp10712, i64 1
- %tmp10714 = getelementptr inbounds float* %tmp10713, i64 1
- %tmp10715 = getelementptr inbounds float* %tmp10714, i64 1
- %tmp10716 = getelementptr inbounds float* %tmp10715, i64 1
- %tmp10717 = getelementptr inbounds float* %tmp10716, i64 1
- %tmp10718 = getelementptr inbounds float* %tmp10717, i64 1
- %tmp10719 = getelementptr inbounds float* %tmp10718, i64 1
- %tmp10720 = getelementptr inbounds float* %tmp10719, i64 1
- %tmp10721 = getelementptr inbounds float* %tmp10720, i64 1
- %tmp10722 = getelementptr inbounds float* %tmp10721, i64 1
- %tmp10723 = getelementptr inbounds float* %tmp10722, i64 1
- %tmp10724 = getelementptr inbounds float* %tmp10723, i64 1
- %tmp10725 = getelementptr inbounds float* %tmp10724, i64 1
- %tmp10726 = getelementptr inbounds float* %tmp10725, i64 1
- %tmp10727 = getelementptr inbounds float* %tmp10726, i64 1
- %tmp10728 = getelementptr inbounds float* %tmp10727, i64 1
- %tmp10729 = getelementptr inbounds float* %tmp10728, i64 1
- %tmp10730 = getelementptr inbounds float* %tmp10729, i64 1
- %tmp10731 = getelementptr inbounds float* %tmp10730, i64 1
- %tmp10732 = getelementptr inbounds float* %tmp10731, i64 1
- %tmp10733 = getelementptr inbounds float* %tmp10732, i64 1
- %tmp10734 = getelementptr inbounds float* %tmp10733, i64 1
- %tmp10735 = getelementptr inbounds float* %tmp10734, i64 1
- %tmp10736 = getelementptr inbounds float* %tmp10735, i64 1
- %tmp10737 = getelementptr inbounds float* %tmp10736, i64 1
- %tmp10738 = getelementptr inbounds float* %tmp10737, i64 1
- %tmp10739 = getelementptr inbounds float* %tmp10738, i64 1
- %tmp10740 = getelementptr inbounds float* %tmp10739, i64 1
- %tmp10741 = getelementptr inbounds float* %tmp10740, i64 1
- %tmp10742 = getelementptr inbounds float* %tmp10741, i64 1
- %tmp10743 = getelementptr inbounds float* %tmp10742, i64 1
- %tmp10744 = getelementptr inbounds float* %tmp10743, i64 1
- %tmp10745 = getelementptr inbounds float* %tmp10744, i64 1
- %tmp10746 = getelementptr inbounds float* %tmp10745, i64 1
- %tmp10747 = getelementptr inbounds float* %tmp10746, i64 1
- %tmp10748 = getelementptr inbounds float* %tmp10747, i64 1
- %tmp10749 = getelementptr inbounds float* %tmp10748, i64 1
- %tmp10750 = getelementptr inbounds float* %tmp10749, i64 1
- %tmp10751 = getelementptr inbounds float* %tmp10750, i64 1
- %tmp10752 = getelementptr inbounds float* %tmp10751, i64 1
- %tmp10753 = getelementptr inbounds float* %tmp10752, i64 1
- %tmp10754 = getelementptr inbounds float* %tmp10753, i64 1
- %tmp10755 = getelementptr inbounds float* %tmp10754, i64 1
- %tmp10756 = getelementptr inbounds float* %tmp10755, i64 1
- %tmp10757 = getelementptr inbounds float* %tmp10756, i64 1
- %tmp10758 = getelementptr inbounds float* %tmp10757, i64 1
- %tmp10759 = getelementptr inbounds float* %tmp10758, i64 1
- %tmp10760 = getelementptr inbounds float* %tmp10759, i64 1
- %tmp10761 = getelementptr inbounds float* %tmp10760, i64 1
- %tmp10762 = getelementptr inbounds float* %tmp10761, i64 1
- %tmp10763 = getelementptr inbounds float* %tmp10762, i64 1
- %tmp10764 = getelementptr inbounds float* %tmp10763, i64 1
- %tmp10765 = getelementptr inbounds float* %tmp10764, i64 1
- %tmp10766 = getelementptr inbounds float* %tmp10765, i64 1
- %tmp10767 = getelementptr inbounds float* %tmp10766, i64 1
- %tmp10768 = getelementptr inbounds float* %tmp10767, i64 1
- %tmp10769 = getelementptr inbounds float* %tmp10768, i64 1
- %tmp10770 = getelementptr inbounds float* %tmp10769, i64 1
- %tmp10771 = getelementptr inbounds float* %tmp10770, i64 1
- %tmp10772 = getelementptr inbounds float* %tmp10771, i64 1
- %tmp10773 = getelementptr inbounds float* %tmp10772, i64 1
- %tmp10774 = getelementptr inbounds float* %tmp10773, i64 1
- %tmp10775 = getelementptr inbounds float* %tmp10774, i64 1
- %tmp10776 = getelementptr inbounds float* %tmp10775, i64 1
- %tmp10777 = getelementptr inbounds float* %tmp10776, i64 1
- %tmp10778 = getelementptr inbounds float* %tmp10777, i64 1
- %tmp10779 = getelementptr inbounds float* %tmp10778, i64 1
- %tmp10780 = getelementptr inbounds float* %tmp10779, i64 1
- %tmp10781 = getelementptr inbounds float* %tmp10780, i64 1
- %tmp10782 = getelementptr inbounds float* %tmp10781, i64 1
- %tmp10783 = getelementptr inbounds float* %tmp10782, i64 1
- %tmp10784 = getelementptr inbounds float* %tmp10783, i64 1
- %tmp10785 = getelementptr inbounds float* %tmp10784, i64 1
- %tmp10786 = getelementptr inbounds float* %tmp10785, i64 1
- %tmp10787 = getelementptr inbounds float* %tmp10786, i64 1
- %tmp10788 = getelementptr inbounds float* %tmp10787, i64 1
- %tmp10789 = getelementptr inbounds float* %tmp10788, i64 1
- %tmp10790 = getelementptr inbounds float* %tmp10789, i64 1
- %tmp10791 = getelementptr inbounds float* %tmp10790, i64 1
- %tmp10792 = getelementptr inbounds float* %tmp10791, i64 1
- %tmp10793 = getelementptr inbounds float* %tmp10792, i64 1
- %tmp10794 = getelementptr inbounds float* %tmp10793, i64 1
- %tmp10795 = getelementptr inbounds float* %tmp10794, i64 1
- %tmp10796 = getelementptr inbounds float* %tmp10795, i64 1
- %tmp10797 = getelementptr inbounds float* %tmp10796, i64 1
- %tmp10798 = getelementptr inbounds float* %tmp10797, i64 1
- %tmp10799 = getelementptr inbounds float* %tmp10798, i64 1
- %tmp10800 = getelementptr inbounds float* %tmp10799, i64 1
- %tmp10801 = getelementptr inbounds float* %tmp10800, i64 1
- %tmp10802 = getelementptr inbounds float* %tmp10801, i64 1
- %tmp10803 = getelementptr inbounds float* %tmp10802, i64 1
- %tmp10804 = getelementptr inbounds float* %tmp10803, i64 1
- %tmp10805 = getelementptr inbounds float* %tmp10804, i64 1
- %tmp10806 = getelementptr inbounds float* %tmp10805, i64 1
- %tmp10807 = getelementptr inbounds float* %tmp10806, i64 1
- %tmp10808 = getelementptr inbounds float* %tmp10807, i64 1
- %tmp10809 = getelementptr inbounds float* %tmp10808, i64 1
- %tmp10810 = getelementptr inbounds float* %tmp10809, i64 1
- %tmp10811 = getelementptr inbounds float* %tmp10810, i64 1
- %tmp10812 = getelementptr inbounds float* %tmp10811, i64 1
- %tmp10813 = getelementptr inbounds float* %tmp10812, i64 1
- %tmp10814 = getelementptr inbounds float* %tmp10813, i64 1
- %tmp10815 = getelementptr inbounds float* %tmp10814, i64 1
- %tmp10816 = getelementptr inbounds float* %tmp10815, i64 1
- %tmp10817 = getelementptr inbounds float* %tmp10816, i64 1
- %tmp10818 = getelementptr inbounds float* %tmp10817, i64 1
- %tmp10819 = getelementptr inbounds float* %tmp10818, i64 1
- %tmp10820 = getelementptr inbounds float* %tmp10819, i64 1
- %tmp10821 = getelementptr inbounds float* %tmp10820, i64 1
- %tmp10822 = getelementptr inbounds float* %tmp10821, i64 1
- %tmp10823 = getelementptr inbounds float* %tmp10822, i64 1
- %tmp10824 = getelementptr inbounds float* %tmp10823, i64 1
- %tmp10825 = getelementptr inbounds float* %tmp10824, i64 1
- %tmp10826 = getelementptr inbounds float* %tmp10825, i64 1
- %tmp10827 = getelementptr inbounds float* %tmp10826, i64 1
- %tmp10828 = getelementptr inbounds float* %tmp10827, i64 1
- %tmp10829 = getelementptr inbounds float* %tmp10828, i64 1
- %tmp10830 = getelementptr inbounds float* %tmp10829, i64 1
- %tmp10831 = getelementptr inbounds float* %tmp10830, i64 1
- %tmp10832 = getelementptr inbounds float* %tmp10831, i64 1
- %tmp10833 = getelementptr inbounds float* %tmp10832, i64 1
- %tmp10834 = getelementptr inbounds float* %tmp10833, i64 1
- %tmp10835 = getelementptr inbounds float* %tmp10834, i64 1
- %tmp10836 = getelementptr inbounds float* %tmp10835, i64 1
- %tmp10837 = getelementptr inbounds float* %tmp10836, i64 1
- %tmp10838 = getelementptr inbounds float* %tmp10837, i64 1
- %tmp10839 = getelementptr inbounds float* %tmp10838, i64 1
- %tmp10840 = getelementptr inbounds float* %tmp10839, i64 1
- %tmp10841 = getelementptr inbounds float* %tmp10840, i64 1
- %tmp10842 = getelementptr inbounds float* %tmp10841, i64 1
- %tmp10843 = getelementptr inbounds float* %tmp10842, i64 1
- %tmp10844 = getelementptr inbounds float* %tmp10843, i64 1
- %tmp10845 = getelementptr inbounds float* %tmp10844, i64 1
- %tmp10846 = getelementptr inbounds float* %tmp10845, i64 1
- %tmp10847 = getelementptr inbounds float* %tmp10846, i64 1
- %tmp10848 = getelementptr inbounds float* %tmp10847, i64 1
- %tmp10849 = getelementptr inbounds float* %tmp10848, i64 1
- %tmp10850 = getelementptr inbounds float* %tmp10849, i64 1
- %tmp10851 = getelementptr inbounds float* %tmp10850, i64 1
- %tmp10852 = getelementptr inbounds float* %tmp10851, i64 1
- %tmp10853 = getelementptr inbounds float* %tmp10852, i64 1
- %tmp10854 = getelementptr inbounds float* %tmp10853, i64 1
- %tmp10855 = getelementptr inbounds float* %tmp10854, i64 1
- %tmp10856 = getelementptr inbounds float* %tmp10855, i64 1
- %tmp10857 = getelementptr inbounds float* %tmp10856, i64 1
- %tmp10858 = getelementptr inbounds float* %tmp10857, i64 1
- %tmp10859 = getelementptr inbounds float* %tmp10858, i64 1
- %tmp10860 = getelementptr inbounds float* %tmp10859, i64 1
- %tmp10861 = getelementptr inbounds float* %tmp10860, i64 1
- %tmp10862 = getelementptr inbounds float* %tmp10861, i64 1
- %tmp10863 = getelementptr inbounds float* %tmp10862, i64 1
- %tmp10864 = getelementptr inbounds float* %tmp10863, i64 1
- %tmp10865 = getelementptr inbounds float* %tmp10864, i64 1
- %tmp10866 = getelementptr inbounds float* %tmp10865, i64 1
- %tmp10867 = getelementptr inbounds float* %tmp10866, i64 1
- %tmp10868 = getelementptr inbounds float* %tmp10867, i64 1
- %tmp10869 = getelementptr inbounds float* %tmp10868, i64 1
- %tmp10870 = getelementptr inbounds float* %tmp10869, i64 1
- %tmp10871 = getelementptr inbounds float* %tmp10870, i64 1
- %tmp10872 = getelementptr inbounds float* %tmp10871, i64 1
- %tmp10873 = getelementptr inbounds float* %tmp10872, i64 1
- %tmp10874 = getelementptr inbounds float* %tmp10873, i64 1
- %tmp10875 = getelementptr inbounds float* %tmp10874, i64 1
- %tmp10876 = getelementptr inbounds float* %tmp10875, i64 1
- %tmp10877 = getelementptr inbounds float* %tmp10876, i64 1
- %tmp10878 = getelementptr inbounds float* %tmp10877, i64 1
- %tmp10879 = getelementptr inbounds float* %tmp10878, i64 1
- %tmp10880 = getelementptr inbounds float* %tmp10879, i64 1
- %tmp10881 = getelementptr inbounds float* %tmp10880, i64 1
- %tmp10882 = getelementptr inbounds float* %tmp10881, i64 1
- %tmp10883 = getelementptr inbounds float* %tmp10882, i64 1
- %tmp10884 = getelementptr inbounds float* %tmp10883, i64 1
- %tmp10885 = getelementptr inbounds float* %tmp10884, i64 1
- %tmp10886 = getelementptr inbounds float* %tmp10885, i64 1
- %tmp10887 = getelementptr inbounds float* %tmp10886, i64 1
- %tmp10888 = getelementptr inbounds float* %tmp10887, i64 1
- %tmp10889 = getelementptr inbounds float* %tmp10888, i64 1
- %tmp10890 = getelementptr inbounds float* %tmp10889, i64 1
- %tmp10891 = getelementptr inbounds float* %tmp10890, i64 1
- %tmp10892 = getelementptr inbounds float* %tmp10891, i64 1
- %tmp10893 = getelementptr inbounds float* %tmp10892, i64 1
- %tmp10894 = getelementptr inbounds float* %tmp10893, i64 1
- %tmp10895 = getelementptr inbounds float* %tmp10894, i64 1
- %tmp10896 = getelementptr inbounds float* %tmp10895, i64 1
- %tmp10897 = getelementptr inbounds float* %tmp10896, i64 1
- %tmp10898 = getelementptr inbounds float* %tmp10897, i64 1
- %tmp10899 = getelementptr inbounds float* %tmp10898, i64 1
- %tmp10900 = getelementptr inbounds float* %tmp10899, i64 1
- %tmp10901 = getelementptr inbounds float* %tmp10900, i64 1
- %tmp10902 = getelementptr inbounds float* %tmp10901, i64 1
- %tmp10903 = getelementptr inbounds float* %tmp10902, i64 1
- %tmp10904 = getelementptr inbounds float* %tmp10903, i64 1
- %tmp10905 = getelementptr inbounds float* %tmp10904, i64 1
- %tmp10906 = getelementptr inbounds float* %tmp10905, i64 1
- %tmp10907 = getelementptr inbounds float* %tmp10906, i64 1
- %tmp10908 = getelementptr inbounds float* %tmp10907, i64 1
- %tmp10909 = getelementptr inbounds float* %tmp10908, i64 1
- %tmp10910 = getelementptr inbounds float* %tmp10909, i64 1
- %tmp10911 = getelementptr inbounds float* %tmp10910, i64 1
- %tmp10912 = getelementptr inbounds float* %tmp10911, i64 1
- %tmp10913 = getelementptr inbounds float* %tmp10912, i64 1
- %tmp10914 = getelementptr inbounds float* %tmp10913, i64 1
- %tmp10915 = getelementptr inbounds float* %tmp10914, i64 1
- %tmp10916 = getelementptr inbounds float* %tmp10915, i64 1
- %tmp10917 = getelementptr inbounds float* %tmp10916, i64 1
- %tmp10918 = getelementptr inbounds float* %tmp10917, i64 1
- %tmp10919 = getelementptr inbounds float* %tmp10918, i64 1
- %tmp10920 = getelementptr inbounds float* %tmp10919, i64 1
- %tmp10921 = getelementptr inbounds float* %tmp10920, i64 1
- %tmp10922 = getelementptr inbounds float* %tmp10921, i64 1
- %tmp10923 = getelementptr inbounds float* %tmp10922, i64 1
- %tmp10924 = getelementptr inbounds float* %tmp10923, i64 1
- %tmp10925 = getelementptr inbounds float* %tmp10924, i64 1
- %tmp10926 = getelementptr inbounds float* %tmp10925, i64 1
- %tmp10927 = getelementptr inbounds float* %tmp10926, i64 1
- %tmp10928 = getelementptr inbounds float* %tmp10927, i64 1
- %tmp10929 = getelementptr inbounds float* %tmp10928, i64 1
- %tmp10930 = getelementptr inbounds float* %tmp10929, i64 1
- %tmp10931 = getelementptr inbounds float* %tmp10930, i64 1
- %tmp10932 = getelementptr inbounds float* %tmp10931, i64 1
- %tmp10933 = getelementptr inbounds float* %tmp10932, i64 1
- %tmp10934 = getelementptr inbounds float* %tmp10933, i64 1
- %tmp10935 = getelementptr inbounds float* %tmp10934, i64 1
- %tmp10936 = getelementptr inbounds float* %tmp10935, i64 1
- %tmp10937 = getelementptr inbounds float* %tmp10936, i64 1
- %tmp10938 = getelementptr inbounds float* %tmp10937, i64 1
- %tmp10939 = getelementptr inbounds float* %tmp10938, i64 1
- %tmp10940 = getelementptr inbounds float* %tmp10939, i64 1
- %tmp10941 = getelementptr inbounds float* %tmp10940, i64 1
- %tmp10942 = getelementptr inbounds float* %tmp10941, i64 1
- %tmp10943 = getelementptr inbounds float* %tmp10942, i64 1
- %tmp10944 = getelementptr inbounds float* %tmp10943, i64 1
- %tmp10945 = getelementptr inbounds float* %tmp10944, i64 1
- %tmp10946 = getelementptr inbounds float* %tmp10945, i64 1
- %tmp10947 = getelementptr inbounds float* %tmp10946, i64 1
- %tmp10948 = getelementptr inbounds float* %tmp10947, i64 1
- %tmp10949 = getelementptr inbounds float* %tmp10948, i64 1
- %tmp10950 = getelementptr inbounds float* %tmp10949, i64 1
- %tmp10951 = getelementptr inbounds float* %tmp10950, i64 1
- %tmp10952 = getelementptr inbounds float* %tmp10951, i64 1
- %tmp10953 = getelementptr inbounds float* %tmp10952, i64 1
- %tmp10954 = getelementptr inbounds float* %tmp10953, i64 1
- %tmp10955 = getelementptr inbounds float* %tmp10954, i64 1
- %tmp10956 = getelementptr inbounds float* %tmp10955, i64 1
- %tmp10957 = getelementptr inbounds float* %tmp10956, i64 1
- %tmp10958 = getelementptr inbounds float* %tmp10957, i64 1
- %tmp10959 = getelementptr inbounds float* %tmp10958, i64 1
- %tmp10960 = getelementptr inbounds float* %tmp10959, i64 1
- %tmp10961 = getelementptr inbounds float* %tmp10960, i64 1
- %tmp10962 = getelementptr inbounds float* %tmp10961, i64 1
- %tmp10963 = getelementptr inbounds float* %tmp10962, i64 1
- %tmp10964 = getelementptr inbounds float* %tmp10963, i64 1
- %tmp10965 = getelementptr inbounds float* %tmp10964, i64 1
- %tmp10966 = getelementptr inbounds float* %tmp10965, i64 1
- %tmp10967 = getelementptr inbounds float* %tmp10966, i64 1
- %tmp10968 = getelementptr inbounds float* %tmp10967, i64 1
- %tmp10969 = getelementptr inbounds float* %tmp10968, i64 1
- %tmp10970 = getelementptr inbounds float* %tmp10969, i64 1
- %tmp10971 = getelementptr inbounds float* %tmp10970, i64 1
- %tmp10972 = getelementptr inbounds float* %tmp10971, i64 1
- %tmp10973 = getelementptr inbounds float* %tmp10972, i64 1
- %tmp10974 = getelementptr inbounds float* %tmp10973, i64 1
- %tmp10975 = getelementptr inbounds float* %tmp10974, i64 1
- %tmp10976 = getelementptr inbounds float* %tmp10975, i64 1
- %tmp10977 = getelementptr inbounds float* %tmp10976, i64 1
- %tmp10978 = getelementptr inbounds float* %tmp10977, i64 1
- %tmp10979 = getelementptr inbounds float* %tmp10978, i64 1
- %tmp10980 = getelementptr inbounds float* %tmp10979, i64 1
- %tmp10981 = getelementptr inbounds float* %tmp10980, i64 1
- %tmp10982 = getelementptr inbounds float* %tmp10981, i64 1
- %tmp10983 = getelementptr inbounds float* %tmp10982, i64 1
- %tmp10984 = getelementptr inbounds float* %tmp10983, i64 1
- %tmp10985 = getelementptr inbounds float* %tmp10984, i64 1
- %tmp10986 = getelementptr inbounds float* %tmp10985, i64 1
- %tmp10987 = getelementptr inbounds float* %tmp10986, i64 1
- %tmp10988 = getelementptr inbounds float* %tmp10987, i64 1
- %tmp10989 = getelementptr inbounds float* %tmp10988, i64 1
- %tmp10990 = getelementptr inbounds float* %tmp10989, i64 1
- %tmp10991 = getelementptr inbounds float* %tmp10990, i64 1
- %tmp10992 = getelementptr inbounds float* %tmp10991, i64 1
- %tmp10993 = getelementptr inbounds float* %tmp10992, i64 1
- %tmp10994 = getelementptr inbounds float* %tmp10993, i64 1
- %tmp10995 = getelementptr inbounds float* %tmp10994, i64 1
- %tmp10996 = getelementptr inbounds float* %tmp10995, i64 1
- %tmp10997 = getelementptr inbounds float* %tmp10996, i64 1
- %tmp10998 = getelementptr inbounds float* %tmp10997, i64 1
- %tmp10999 = getelementptr inbounds float* %tmp10998, i64 1
- %tmp11000 = getelementptr inbounds float* %tmp10999, i64 1
- %tmp11001 = getelementptr inbounds float* %tmp11000, i64 1
- %tmp11002 = getelementptr inbounds float* %tmp11001, i64 1
- %tmp11003 = getelementptr inbounds float* %tmp11002, i64 1
- %tmp11004 = getelementptr inbounds float* %tmp11003, i64 1
- %tmp11005 = getelementptr inbounds float* %tmp11004, i64 1
- %tmp11006 = getelementptr inbounds float* %tmp11005, i64 1
- %tmp11007 = getelementptr inbounds float* %tmp11006, i64 1
- %tmp11008 = getelementptr inbounds float* %tmp11007, i64 1
- %tmp11009 = getelementptr inbounds float* %tmp11008, i64 1
- %tmp11010 = getelementptr inbounds float* %tmp11009, i64 1
- %tmp11011 = getelementptr inbounds float* %tmp11010, i64 1
- %tmp11012 = getelementptr inbounds float* %tmp11011, i64 1
- %tmp11013 = getelementptr inbounds float* %tmp11012, i64 1
- %tmp11014 = getelementptr inbounds float* %tmp11013, i64 1
- %tmp11015 = getelementptr inbounds float* %tmp11014, i64 1
- %tmp11016 = getelementptr inbounds float* %tmp11015, i64 1
- %tmp11017 = getelementptr inbounds float* %tmp11016, i64 1
- %tmp11018 = getelementptr inbounds float* %tmp11017, i64 1
- %tmp11019 = getelementptr inbounds float* %tmp11018, i64 1
- %tmp11020 = getelementptr inbounds float* %tmp11019, i64 1
- %tmp11021 = getelementptr inbounds float* %tmp11020, i64 1
- %tmp11022 = getelementptr inbounds float* %tmp11021, i64 1
- %tmp11023 = getelementptr inbounds float* %tmp11022, i64 1
- %tmp11024 = getelementptr inbounds float* %tmp11023, i64 1
- %tmp11025 = getelementptr inbounds float* %tmp11024, i64 1
- %tmp11026 = getelementptr inbounds float* %tmp11025, i64 1
- %tmp11027 = getelementptr inbounds float* %tmp11026, i64 1
- %tmp11028 = getelementptr inbounds float* %tmp11027, i64 1
- %tmp11029 = getelementptr inbounds float* %tmp11028, i64 1
- %tmp11030 = getelementptr inbounds float* %tmp11029, i64 1
- %tmp11031 = getelementptr inbounds float* %tmp11030, i64 1
- %tmp11032 = getelementptr inbounds float* %tmp11031, i64 1
- %tmp11033 = getelementptr inbounds float* %tmp11032, i64 1
- %tmp11034 = getelementptr inbounds float* %tmp11033, i64 1
- %tmp11035 = getelementptr inbounds float* %tmp11034, i64 1
- %tmp11036 = getelementptr inbounds float* %tmp11035, i64 1
- %tmp11037 = getelementptr inbounds float* %tmp11036, i64 1
- %tmp11038 = getelementptr inbounds float* %tmp11037, i64 1
- %tmp11039 = getelementptr inbounds float* %tmp11038, i64 1
- %tmp11040 = getelementptr inbounds float* %tmp11039, i64 1
- %tmp11041 = getelementptr inbounds float* %tmp11040, i64 1
- %tmp11042 = getelementptr inbounds float* %tmp11041, i64 1
- %tmp11043 = getelementptr inbounds float* %tmp11042, i64 1
- %tmp11044 = getelementptr inbounds float* %tmp11043, i64 1
- %tmp11045 = getelementptr inbounds float* %tmp11044, i64 1
- %tmp11046 = getelementptr inbounds float* %tmp11045, i64 1
- %tmp11047 = getelementptr inbounds float* %tmp11046, i64 1
- %tmp11048 = getelementptr inbounds float* %tmp11047, i64 1
- %tmp11049 = getelementptr inbounds float* %tmp11048, i64 1
- %tmp11050 = getelementptr inbounds float* %tmp11049, i64 1
- %tmp11051 = getelementptr inbounds float* %tmp11050, i64 1
- %tmp11052 = getelementptr inbounds float* %tmp11051, i64 1
- %tmp11053 = getelementptr inbounds float* %tmp11052, i64 1
- %tmp11054 = getelementptr inbounds float* %tmp11053, i64 1
- %tmp11055 = getelementptr inbounds float* %tmp11054, i64 1
- %tmp11056 = getelementptr inbounds float* %tmp11055, i64 1
- %tmp11057 = getelementptr inbounds float* %tmp11056, i64 1
- %tmp11058 = getelementptr inbounds float* %tmp11057, i64 1
- %tmp11059 = getelementptr inbounds float* %tmp11058, i64 1
- %tmp11060 = getelementptr inbounds float* %tmp11059, i64 1
- %tmp11061 = getelementptr inbounds float* %tmp11060, i64 1
- %tmp11062 = getelementptr inbounds float* %tmp11061, i64 1
- %tmp11063 = getelementptr inbounds float* %tmp11062, i64 1
- %tmp11064 = getelementptr inbounds float* %tmp11063, i64 1
- %tmp11065 = getelementptr inbounds float* %tmp11064, i64 1
- %tmp11066 = getelementptr inbounds float* %tmp11065, i64 1
- %tmp11067 = getelementptr inbounds float* %tmp11066, i64 1
- %tmp11068 = getelementptr inbounds float* %tmp11067, i64 1
- %tmp11069 = getelementptr inbounds float* %tmp11068, i64 1
- %tmp11070 = getelementptr inbounds float* %tmp11069, i64 1
- %tmp11071 = getelementptr inbounds float* %tmp11070, i64 1
- %tmp11072 = getelementptr inbounds float* %tmp11071, i64 1
- %tmp11073 = getelementptr inbounds float* %tmp11072, i64 1
- %tmp11074 = getelementptr inbounds float* %tmp11073, i64 1
- %tmp11075 = getelementptr inbounds float* %tmp11074, i64 1
- %tmp11076 = getelementptr inbounds float* %tmp11075, i64 1
- %tmp11077 = getelementptr inbounds float* %tmp11076, i64 1
- %tmp11078 = getelementptr inbounds float* %tmp11077, i64 1
- %tmp11079 = getelementptr inbounds float* %tmp11078, i64 1
- %tmp11080 = getelementptr inbounds float* %tmp11079, i64 1
- %tmp11081 = getelementptr inbounds float* %tmp11080, i64 1
- %tmp11082 = getelementptr inbounds float* %tmp11081, i64 1
- %tmp11083 = getelementptr inbounds float* %tmp11082, i64 1
- %tmp11084 = getelementptr inbounds float* %tmp11083, i64 1
- %tmp11085 = getelementptr inbounds float* %tmp11084, i64 1
- %tmp11086 = getelementptr inbounds float* %tmp11085, i64 1
- %tmp11087 = getelementptr inbounds float* %tmp11086, i64 1
- %tmp11088 = getelementptr inbounds float* %tmp11087, i64 1
- %tmp11089 = getelementptr inbounds float* %tmp11088, i64 1
- %tmp11090 = getelementptr inbounds float* %tmp11089, i64 1
- %tmp11091 = getelementptr inbounds float* %tmp11090, i64 1
- %tmp11092 = getelementptr inbounds float* %tmp11091, i64 1
- %tmp11093 = getelementptr inbounds float* %tmp11092, i64 1
- %tmp11094 = getelementptr inbounds float* %tmp11093, i64 1
- %tmp11095 = getelementptr inbounds float* %tmp11094, i64 1
- %tmp11096 = getelementptr inbounds float* %tmp11095, i64 1
- %tmp11097 = getelementptr inbounds float* %tmp11096, i64 1
- %tmp11098 = getelementptr inbounds float* %tmp11097, i64 1
- %tmp11099 = getelementptr inbounds float* %tmp11098, i64 1
- %tmp11100 = getelementptr inbounds float* %tmp11099, i64 1
- %tmp11101 = getelementptr inbounds float* %tmp11100, i64 1
- %tmp11102 = getelementptr inbounds float* %tmp11101, i64 1
- %tmp11103 = getelementptr inbounds float* %tmp11102, i64 1
- %tmp11104 = getelementptr inbounds float* %tmp11103, i64 1
- %tmp11105 = getelementptr inbounds float* %tmp11104, i64 1
- %tmp11106 = getelementptr inbounds float* %tmp11105, i64 1
- %tmp11107 = getelementptr inbounds float* %tmp11106, i64 1
- %tmp11108 = getelementptr inbounds float* %tmp11107, i64 1
- %tmp11109 = getelementptr inbounds float* %tmp11108, i64 1
- %tmp11110 = getelementptr inbounds float* %tmp11109, i64 1
- %tmp11111 = getelementptr inbounds float* %tmp11110, i64 1
- %tmp11112 = getelementptr inbounds float* %tmp11111, i64 1
- %tmp11113 = getelementptr inbounds float* %tmp11112, i64 1
- %tmp11114 = getelementptr inbounds float* %tmp11113, i64 1
- %tmp11115 = getelementptr inbounds float* %tmp11114, i64 1
- %tmp11116 = getelementptr inbounds float* %tmp11115, i64 1
- %tmp11117 = getelementptr inbounds float* %tmp11116, i64 1
- %tmp11118 = getelementptr inbounds float* %tmp11117, i64 1
- %tmp11119 = getelementptr inbounds float* %tmp11118, i64 1
- %tmp11120 = getelementptr inbounds float* %tmp11119, i64 1
- %tmp11121 = getelementptr inbounds float* %tmp11120, i64 1
- %tmp11122 = getelementptr inbounds float* %tmp11121, i64 1
- %tmp11123 = getelementptr inbounds float* %tmp11122, i64 1
- %tmp11124 = getelementptr inbounds float* %tmp11123, i64 1
- %tmp11125 = getelementptr inbounds float* %tmp11124, i64 1
- %tmp11126 = getelementptr inbounds float* %tmp11125, i64 1
- %tmp11127 = getelementptr inbounds float* %tmp11126, i64 1
- %tmp11128 = getelementptr inbounds float* %tmp11127, i64 1
- %tmp11129 = getelementptr inbounds float* %tmp11128, i64 1
- %tmp11130 = getelementptr inbounds float* %tmp11129, i64 1
- %tmp11131 = getelementptr inbounds float* %tmp11130, i64 1
- %tmp11132 = getelementptr inbounds float* %tmp11131, i64 1
- %tmp11133 = getelementptr inbounds float* %tmp11132, i64 1
- %tmp11134 = getelementptr inbounds float* %tmp11133, i64 1
- %tmp11135 = getelementptr inbounds float* %tmp11134, i64 1
- %tmp11136 = getelementptr inbounds float* %tmp11135, i64 1
- %tmp11137 = getelementptr inbounds float* %tmp11136, i64 1
- %tmp11138 = getelementptr inbounds float* %tmp11137, i64 1
- %tmp11139 = getelementptr inbounds float* %tmp11138, i64 1
- %tmp11140 = getelementptr inbounds float* %tmp11139, i64 1
- %tmp11141 = getelementptr inbounds float* %tmp11140, i64 1
- %tmp11142 = getelementptr inbounds float* %tmp11141, i64 1
- %tmp11143 = getelementptr inbounds float* %tmp11142, i64 1
- %tmp11144 = getelementptr inbounds float* %tmp11143, i64 1
- %tmp11145 = getelementptr inbounds float* %tmp11144, i64 1
- %tmp11146 = getelementptr inbounds float* %tmp11145, i64 1
- %tmp11147 = getelementptr inbounds float* %tmp11146, i64 1
- %tmp11148 = getelementptr inbounds float* %tmp11147, i64 1
- %tmp11149 = getelementptr inbounds float* %tmp11148, i64 1
- %tmp11150 = getelementptr inbounds float* %tmp11149, i64 1
- %tmp11151 = getelementptr inbounds float* %tmp11150, i64 1
- %tmp11152 = getelementptr inbounds float* %tmp11151, i64 1
- %tmp11153 = getelementptr inbounds float* %tmp11152, i64 1
- %tmp11154 = getelementptr inbounds float* %tmp11153, i64 1
- %tmp11155 = getelementptr inbounds float* %tmp11154, i64 1
- %tmp11156 = getelementptr inbounds float* %tmp11155, i64 1
- %tmp11157 = getelementptr inbounds float* %tmp11156, i64 1
- %tmp11158 = getelementptr inbounds float* %tmp11157, i64 1
- %tmp11159 = getelementptr inbounds float* %tmp11158, i64 1
- %tmp11160 = getelementptr inbounds float* %tmp11159, i64 1
- %tmp11161 = getelementptr inbounds float* %tmp11160, i64 1
- %tmp11162 = getelementptr inbounds float* %tmp11161, i64 1
- %tmp11163 = getelementptr inbounds float* %tmp11162, i64 1
- %tmp11164 = getelementptr inbounds float* %tmp11163, i64 1
- %tmp11165 = getelementptr inbounds float* %tmp11164, i64 1
- %tmp11166 = getelementptr inbounds float* %tmp11165, i64 1
- %tmp11167 = getelementptr inbounds float* %tmp11166, i64 1
- %tmp11168 = getelementptr inbounds float* %tmp11167, i64 1
- %tmp11169 = getelementptr inbounds float* %tmp11168, i64 1
- %tmp11170 = getelementptr inbounds float* %tmp11169, i64 1
- %tmp11171 = getelementptr inbounds float* %tmp11170, i64 1
- %tmp11172 = getelementptr inbounds float* %tmp11171, i64 1
- %tmp11173 = getelementptr inbounds float* %tmp11172, i64 1
- %tmp11174 = getelementptr inbounds float* %tmp11173, i64 1
- %tmp11175 = getelementptr inbounds float* %tmp11174, i64 1
- %tmp11176 = getelementptr inbounds float* %tmp11175, i64 1
- %tmp11177 = getelementptr inbounds float* %tmp11176, i64 1
- %tmp11178 = getelementptr inbounds float* %tmp11177, i64 1
- %tmp11179 = getelementptr inbounds float* %tmp11178, i64 1
- %tmp11180 = getelementptr inbounds float* %tmp11179, i64 1
- %tmp11181 = getelementptr inbounds float* %tmp11180, i64 1
- %tmp11182 = getelementptr inbounds float* %tmp11181, i64 1
- %tmp11183 = getelementptr inbounds float* %tmp11182, i64 1
- %tmp11184 = getelementptr inbounds float* %tmp11183, i64 1
- %tmp11185 = getelementptr inbounds float* %tmp11184, i64 1
- %tmp11186 = getelementptr inbounds float* %tmp11185, i64 1
- %tmp11187 = getelementptr inbounds float* %tmp11186, i64 1
- %tmp11188 = getelementptr inbounds float* %tmp11187, i64 1
- %tmp11189 = getelementptr inbounds float* %tmp11188, i64 1
- %tmp11190 = getelementptr inbounds float* %tmp11189, i64 1
- %tmp11191 = getelementptr inbounds float* %tmp11190, i64 1
- %tmp11192 = getelementptr inbounds float* %tmp11191, i64 1
- %tmp11193 = getelementptr inbounds float* %tmp11192, i64 1
- %tmp11194 = getelementptr inbounds float* %tmp11193, i64 1
- %tmp11195 = getelementptr inbounds float* %tmp11194, i64 1
- %tmp11196 = getelementptr inbounds float* %tmp11195, i64 1
- %tmp11197 = getelementptr inbounds float* %tmp11196, i64 1
- %tmp11198 = getelementptr inbounds float* %tmp11197, i64 1
- %tmp11199 = getelementptr inbounds float* %tmp11198, i64 1
- %tmp11200 = getelementptr inbounds float* %tmp11199, i64 1
- %tmp11201 = getelementptr inbounds float* %tmp11200, i64 1
- %tmp11202 = getelementptr inbounds float* %tmp11201, i64 1
- %tmp11203 = getelementptr inbounds float* %tmp11202, i64 1
- %tmp11204 = getelementptr inbounds float* %tmp11203, i64 1
- %tmp11205 = getelementptr inbounds float* %tmp11204, i64 1
- %tmp11206 = getelementptr inbounds float* %tmp11205, i64 1
- %tmp11207 = getelementptr inbounds float* %tmp11206, i64 1
- %tmp11208 = getelementptr inbounds float* %tmp11207, i64 1
- %tmp11209 = getelementptr inbounds float* %tmp11208, i64 1
- %tmp11210 = getelementptr inbounds float* %tmp11209, i64 1
- %tmp11211 = getelementptr inbounds float* %tmp11210, i64 1
- %tmp11212 = getelementptr inbounds float* %tmp11211, i64 1
- %tmp11213 = getelementptr inbounds float* %tmp11212, i64 1
- %tmp11214 = getelementptr inbounds float* %tmp11213, i64 1
- %tmp11215 = getelementptr inbounds float* %tmp11214, i64 1
- %tmp11216 = getelementptr inbounds float* %tmp11215, i64 1
- %tmp11217 = getelementptr inbounds float* %tmp11216, i64 1
- %tmp11218 = getelementptr inbounds float* %tmp11217, i64 1
- %tmp11219 = getelementptr inbounds float* %tmp11218, i64 1
- %tmp11220 = getelementptr inbounds float* %tmp11219, i64 1
- %tmp11221 = getelementptr inbounds float* %tmp11220, i64 1
- %tmp11222 = getelementptr inbounds float* %tmp11221, i64 1
- %tmp11223 = getelementptr inbounds float* %tmp11222, i64 1
- %tmp11224 = getelementptr inbounds float* %tmp11223, i64 1
- %tmp11225 = getelementptr inbounds float* %tmp11224, i64 1
- %tmp11226 = getelementptr inbounds float* %tmp11225, i64 1
- %tmp11227 = getelementptr inbounds float* %tmp11226, i64 1
- %tmp11228 = getelementptr inbounds float* %tmp11227, i64 1
- %tmp11229 = getelementptr inbounds float* %tmp11228, i64 1
- %tmp11230 = getelementptr inbounds float* %tmp11229, i64 1
- %tmp11231 = getelementptr inbounds float* %tmp11230, i64 1
- %tmp11232 = getelementptr inbounds float* %tmp11231, i64 1
- %tmp11233 = getelementptr inbounds float* %tmp11232, i64 1
- %tmp11234 = getelementptr inbounds float* %tmp11233, i64 1
- %tmp11235 = getelementptr inbounds float* %tmp11234, i64 1
- %tmp11236 = getelementptr inbounds float* %tmp11235, i64 1
- %tmp11237 = getelementptr inbounds float* %tmp11236, i64 1
- %tmp11238 = getelementptr inbounds float* %tmp11237, i64 1
- %tmp11239 = getelementptr inbounds float* %tmp11238, i64 1
- %tmp11240 = getelementptr inbounds float* %tmp11239, i64 1
- %tmp11241 = getelementptr inbounds float* %tmp11240, i64 1
- %tmp11242 = getelementptr inbounds float* %tmp11241, i64 1
- %tmp11243 = getelementptr inbounds float* %tmp11242, i64 1
- %tmp11244 = getelementptr inbounds float* %tmp11243, i64 1
- %tmp11245 = getelementptr inbounds float* %tmp11244, i64 1
- %tmp11246 = getelementptr inbounds float* %tmp11245, i64 1
- %tmp11247 = getelementptr inbounds float* %tmp11246, i64 1
- %tmp11248 = getelementptr inbounds float* %tmp11247, i64 1
- %tmp11249 = getelementptr inbounds float* %tmp11248, i64 1
- %tmp11250 = getelementptr inbounds float* %tmp11249, i64 1
- %tmp11251 = getelementptr inbounds float* %tmp11250, i64 1
- %tmp11252 = getelementptr inbounds float* %tmp11251, i64 1
- %tmp11253 = getelementptr inbounds float* %tmp11252, i64 1
- %tmp11254 = getelementptr inbounds float* %tmp11253, i64 1
- %tmp11255 = getelementptr inbounds float* %tmp11254, i64 1
- %tmp11256 = getelementptr inbounds float* %tmp11255, i64 1
- %tmp11257 = getelementptr inbounds float* %tmp11256, i64 1
- %tmp11258 = getelementptr inbounds float* %tmp11257, i64 1
- %tmp11259 = getelementptr inbounds float* %tmp11258, i64 1
- %tmp11260 = getelementptr inbounds float* %tmp11259, i64 1
- %tmp11261 = getelementptr inbounds float* %tmp11260, i64 1
- %tmp11262 = getelementptr inbounds float* %tmp11261, i64 1
- %tmp11263 = getelementptr inbounds float* %tmp11262, i64 1
- %tmp11264 = getelementptr inbounds float* %tmp11263, i64 1
- %tmp11265 = getelementptr inbounds float* %tmp11264, i64 1
- %tmp11266 = getelementptr inbounds float* %tmp11265, i64 1
- %tmp11267 = getelementptr inbounds float* %tmp11266, i64 1
- %tmp11268 = getelementptr inbounds float* %tmp11267, i64 1
- %tmp11269 = getelementptr inbounds float* %tmp11268, i64 1
- %tmp11270 = getelementptr inbounds float* %tmp11269, i64 1
- %tmp11271 = getelementptr inbounds float* %tmp11270, i64 1
- %tmp11272 = getelementptr inbounds float* %tmp11271, i64 1
- %tmp11273 = getelementptr inbounds float* %tmp11272, i64 1
- %tmp11274 = getelementptr inbounds float* %tmp11273, i64 1
- %tmp11275 = getelementptr inbounds float* %tmp11274, i64 1
- %tmp11276 = getelementptr inbounds float* %tmp11275, i64 1
- %tmp11277 = getelementptr inbounds float* %tmp11276, i64 1
- %tmp11278 = getelementptr inbounds float* %tmp11277, i64 1
- %tmp11279 = getelementptr inbounds float* %tmp11278, i64 1
- %tmp11280 = getelementptr inbounds float* %tmp11279, i64 1
- %tmp11281 = getelementptr inbounds float* %tmp11280, i64 1
- %tmp11282 = getelementptr inbounds float* %tmp11281, i64 1
- %tmp11283 = getelementptr inbounds float* %tmp11282, i64 1
- %tmp11284 = getelementptr inbounds float* %tmp11283, i64 1
- %tmp11285 = getelementptr inbounds float* %tmp11284, i64 1
- %tmp11286 = getelementptr inbounds float* %tmp11285, i64 1
- %tmp11287 = getelementptr inbounds float* %tmp11286, i64 1
- %tmp11288 = getelementptr inbounds float* %tmp11287, i64 1
- %tmp11289 = getelementptr inbounds float* %tmp11288, i64 1
- %tmp11290 = getelementptr inbounds float* %tmp11289, i64 1
- %tmp11291 = getelementptr inbounds float* %tmp11290, i64 1
- %tmp11292 = getelementptr inbounds float* %tmp11291, i64 1
- %tmp11293 = getelementptr inbounds float* %tmp11292, i64 1
- %tmp11294 = getelementptr inbounds float* %tmp11293, i64 1
- %tmp11295 = getelementptr inbounds float* %tmp11294, i64 1
- %tmp11296 = getelementptr inbounds float* %tmp11295, i64 1
- %tmp11297 = getelementptr inbounds float* %tmp11296, i64 1
- %tmp11298 = getelementptr inbounds float* %tmp11297, i64 1
- %tmp11299 = getelementptr inbounds float* %tmp11298, i64 1
- %tmp11300 = getelementptr inbounds float* %tmp11299, i64 1
- %tmp11301 = getelementptr inbounds float* %tmp11300, i64 1
- %tmp11302 = getelementptr inbounds float* %tmp11301, i64 1
- %tmp11303 = getelementptr inbounds float* %tmp11302, i64 1
- %tmp11304 = getelementptr inbounds float* %tmp11303, i64 1
- %tmp11305 = getelementptr inbounds float* %tmp11304, i64 1
- %tmp11306 = getelementptr inbounds float* %tmp11305, i64 1
- %tmp11307 = getelementptr inbounds float* %tmp11306, i64 1
- %tmp11308 = getelementptr inbounds float* %tmp11307, i64 1
- %tmp11309 = getelementptr inbounds float* %tmp11308, i64 1
- %tmp11310 = getelementptr inbounds float* %tmp11309, i64 1
- %tmp11311 = getelementptr inbounds float* %tmp11310, i64 1
- %tmp11312 = getelementptr inbounds float* %tmp11311, i64 1
- %tmp11313 = getelementptr inbounds float* %tmp11312, i64 1
- %tmp11314 = getelementptr inbounds float* %tmp11313, i64 1
- %tmp11315 = getelementptr inbounds float* %tmp11314, i64 1
- %tmp11316 = getelementptr inbounds float* %tmp11315, i64 1
- %tmp11317 = getelementptr inbounds float* %tmp11316, i64 1
- %tmp11318 = getelementptr inbounds float* %tmp11317, i64 1
- %tmp11319 = getelementptr inbounds float* %tmp11318, i64 1
- %tmp11320 = getelementptr inbounds float* %tmp11319, i64 1
- %tmp11321 = getelementptr inbounds float* %tmp11320, i64 1
- %tmp11322 = getelementptr inbounds float* %tmp11321, i64 1
- %tmp11323 = getelementptr inbounds float* %tmp11322, i64 1
- %tmp11324 = getelementptr inbounds float* %tmp11323, i64 1
- %tmp11325 = getelementptr inbounds float* %tmp11324, i64 1
- %tmp11326 = getelementptr inbounds float* %tmp11325, i64 1
- %tmp11327 = getelementptr inbounds float* %tmp11326, i64 1
- %tmp11328 = getelementptr inbounds float* %tmp11327, i64 1
- %tmp11329 = getelementptr inbounds float* %tmp11328, i64 1
- %tmp11330 = getelementptr inbounds float* %tmp11329, i64 1
- %tmp11331 = getelementptr inbounds float* %tmp11330, i64 1
- %tmp11332 = getelementptr inbounds float* %tmp11331, i64 1
- %tmp11333 = getelementptr inbounds float* %tmp11332, i64 1
- %tmp11334 = getelementptr inbounds float* %tmp11333, i64 1
- %tmp11335 = getelementptr inbounds float* %tmp11334, i64 1
- %tmp11336 = getelementptr inbounds float* %tmp11335, i64 1
- %tmp11337 = getelementptr inbounds float* %tmp11336, i64 1
- %tmp11338 = getelementptr inbounds float* %tmp11337, i64 1
- %tmp11339 = getelementptr inbounds float* %tmp11338, i64 1
- %tmp11340 = getelementptr inbounds float* %tmp11339, i64 1
- %tmp11341 = getelementptr inbounds float* %tmp11340, i64 1
- %tmp11342 = getelementptr inbounds float* %tmp11341, i64 1
- %tmp11343 = getelementptr inbounds float* %tmp11342, i64 1
- %tmp11344 = getelementptr inbounds float* %tmp11343, i64 1
- %tmp11345 = getelementptr inbounds float* %tmp11344, i64 1
- %tmp11346 = getelementptr inbounds float* %tmp11345, i64 1
- %tmp11347 = getelementptr inbounds float* %tmp11346, i64 1
- %tmp11348 = getelementptr inbounds float* %tmp11347, i64 1
- %tmp11349 = getelementptr inbounds float* %tmp11348, i64 1
- %tmp11350 = getelementptr inbounds float* %tmp11349, i64 1
- %tmp11351 = getelementptr inbounds float* %tmp11350, i64 1
- %tmp11352 = getelementptr inbounds float* %tmp11351, i64 1
- %tmp11353 = getelementptr inbounds float* %tmp11352, i64 1
- %tmp11354 = getelementptr inbounds float* %tmp11353, i64 1
- %tmp11355 = getelementptr inbounds float* %tmp11354, i64 1
- %tmp11356 = getelementptr inbounds float* %tmp11355, i64 1
- %tmp11357 = getelementptr inbounds float* %tmp11356, i64 1
- %tmp11358 = getelementptr inbounds float* %tmp11357, i64 1
- %tmp11359 = getelementptr inbounds float* %tmp11358, i64 1
- %tmp11360 = getelementptr inbounds float* %tmp11359, i64 1
- %tmp11361 = getelementptr inbounds float* %tmp11360, i64 1
- %tmp11362 = getelementptr inbounds float* %tmp11361, i64 1
- %tmp11363 = getelementptr inbounds float* %tmp11362, i64 1
- %tmp11364 = getelementptr inbounds float* %tmp11363, i64 1
- %tmp11365 = getelementptr inbounds float* %tmp11364, i64 1
- %tmp11366 = getelementptr inbounds float* %tmp11365, i64 1
- %tmp11367 = getelementptr inbounds float* %tmp11366, i64 1
- %tmp11368 = getelementptr inbounds float* %tmp11367, i64 1
- %tmp11369 = getelementptr inbounds float* %tmp11368, i64 1
- %tmp11370 = getelementptr inbounds float* %tmp11369, i64 1
- %tmp11371 = getelementptr inbounds float* %tmp11370, i64 1
- %tmp11372 = getelementptr inbounds float* %tmp11371, i64 1
- %tmp11373 = getelementptr inbounds float* %tmp11372, i64 1
- %tmp11374 = getelementptr inbounds float* %tmp11373, i64 1
- %tmp11375 = getelementptr inbounds float* %tmp11374, i64 1
- %tmp11376 = getelementptr inbounds float* %tmp11375, i64 1
- %tmp11377 = getelementptr inbounds float* %tmp11376, i64 1
- %tmp11378 = getelementptr inbounds float* %tmp11377, i64 1
- %tmp11379 = getelementptr inbounds float* %tmp11378, i64 1
- %tmp11380 = getelementptr inbounds float* %tmp11379, i64 1
- %tmp11381 = getelementptr inbounds float* %tmp11380, i64 1
- %tmp11382 = getelementptr inbounds float* %tmp11381, i64 1
- %tmp11383 = getelementptr inbounds float* %tmp11382, i64 1
- %tmp11384 = getelementptr inbounds float* %tmp11383, i64 1
- %tmp11385 = getelementptr inbounds float* %tmp11384, i64 1
- %tmp11386 = getelementptr inbounds float* %tmp11385, i64 1
- %tmp11387 = getelementptr inbounds float* %tmp11386, i64 1
- %tmp11388 = getelementptr inbounds float* %tmp11387, i64 1
- %tmp11389 = getelementptr inbounds float* %tmp11388, i64 1
- %tmp11390 = getelementptr inbounds float* %tmp11389, i64 1
- %tmp11391 = getelementptr inbounds float* %tmp11390, i64 1
- %tmp11392 = getelementptr inbounds float* %tmp11391, i64 1
- %tmp11393 = getelementptr inbounds float* %tmp11392, i64 1
- %tmp11394 = getelementptr inbounds float* %tmp11393, i64 1
- %tmp11395 = getelementptr inbounds float* %tmp11394, i64 1
- %tmp11396 = getelementptr inbounds float* %tmp11395, i64 1
- %tmp11397 = getelementptr inbounds float* %tmp11396, i64 1
- %tmp11398 = getelementptr inbounds float* %tmp11397, i64 1
- %tmp11399 = getelementptr inbounds float* %tmp11398, i64 1
- %tmp11400 = getelementptr inbounds float* %tmp11399, i64 1
- %tmp11401 = getelementptr inbounds float* %tmp11400, i64 1
- %tmp11402 = getelementptr inbounds float* %tmp11401, i64 1
- %tmp11403 = getelementptr inbounds float* %tmp11402, i64 1
- %tmp11404 = getelementptr inbounds float* %tmp11403, i64 1
- %tmp11405 = getelementptr inbounds float* %tmp11404, i64 1
- %tmp11406 = getelementptr inbounds float* %tmp11405, i64 1
- %tmp11407 = getelementptr inbounds float* %tmp11406, i64 1
- %tmp11408 = getelementptr inbounds float* %tmp11407, i64 1
- %tmp11409 = getelementptr inbounds float* %tmp11408, i64 1
- %tmp11410 = getelementptr inbounds float* %tmp11409, i64 1
- %tmp11411 = getelementptr inbounds float* %tmp11410, i64 1
- %tmp11412 = getelementptr inbounds float* %tmp11411, i64 1
- %tmp11413 = getelementptr inbounds float* %tmp11412, i64 1
- %tmp11414 = getelementptr inbounds float* %tmp11413, i64 1
- %tmp11415 = getelementptr inbounds float* %tmp11414, i64 1
- %tmp11416 = getelementptr inbounds float* %tmp11415, i64 1
- %tmp11417 = getelementptr inbounds float* %tmp11416, i64 1
- %tmp11418 = getelementptr inbounds float* %tmp11417, i64 1
- %tmp11419 = getelementptr inbounds float* %tmp11418, i64 1
- %tmp11420 = getelementptr inbounds float* %tmp11419, i64 1
- %tmp11421 = getelementptr inbounds float* %tmp11420, i64 1
- %tmp11422 = getelementptr inbounds float* %tmp11421, i64 1
- %tmp11423 = getelementptr inbounds float* %tmp11422, i64 1
- %tmp11424 = getelementptr inbounds float* %tmp11423, i64 1
- %tmp11425 = getelementptr inbounds float* %tmp11424, i64 1
- %tmp11426 = getelementptr inbounds float* %tmp11425, i64 1
- %tmp11427 = getelementptr inbounds float* %tmp11426, i64 1
- %tmp11428 = getelementptr inbounds float* %tmp11427, i64 1
- %tmp11429 = getelementptr inbounds float* %tmp11428, i64 1
- %tmp11430 = getelementptr inbounds float* %tmp11429, i64 1
- %tmp11431 = getelementptr inbounds float* %tmp11430, i64 1
- %tmp11432 = getelementptr inbounds float* %tmp11431, i64 1
- %tmp11433 = getelementptr inbounds float* %tmp11432, i64 1
- %tmp11434 = getelementptr inbounds float* %tmp11433, i64 1
- %tmp11435 = getelementptr inbounds float* %tmp11434, i64 1
- %tmp11436 = getelementptr inbounds float* %tmp11435, i64 1
- %tmp11437 = getelementptr inbounds float* %tmp11436, i64 1
- %tmp11438 = getelementptr inbounds float* %tmp11437, i64 1
- %tmp11439 = getelementptr inbounds float* %tmp11438, i64 1
- %tmp11440 = getelementptr inbounds float* %tmp11439, i64 1
- %tmp11441 = getelementptr inbounds float* %tmp11440, i64 1
- %tmp11442 = getelementptr inbounds float* %tmp11441, i64 1
- %tmp11443 = getelementptr inbounds float* %tmp11442, i64 1
- %tmp11444 = getelementptr inbounds float* %tmp11443, i64 1
- %tmp11445 = getelementptr inbounds float* %tmp11444, i64 1
- %tmp11446 = getelementptr inbounds float* %tmp11445, i64 1
- %tmp11447 = getelementptr inbounds float* %tmp11446, i64 1
- %tmp11448 = getelementptr inbounds float* %tmp11447, i64 1
- %tmp11449 = getelementptr inbounds float* %tmp11448, i64 1
- %tmp11450 = getelementptr inbounds float* %tmp11449, i64 1
- %tmp11451 = getelementptr inbounds float* %tmp11450, i64 1
- %tmp11452 = getelementptr inbounds float* %tmp11451, i64 1
- %tmp11453 = getelementptr inbounds float* %tmp11452, i64 1
- %tmp11454 = getelementptr inbounds float* %tmp11453, i64 1
- %tmp11455 = getelementptr inbounds float* %tmp11454, i64 1
- %tmp11456 = getelementptr inbounds float* %tmp11455, i64 1
- %tmp11457 = getelementptr inbounds float* %tmp11456, i64 1
- %tmp11458 = getelementptr inbounds float* %tmp11457, i64 1
- %tmp11459 = getelementptr inbounds float* %tmp11458, i64 1
- %tmp11460 = getelementptr inbounds float* %tmp11459, i64 1
- %tmp11461 = getelementptr inbounds float* %tmp11460, i64 1
- %tmp11462 = getelementptr inbounds float* %tmp11461, i64 1
- %tmp11463 = getelementptr inbounds float* %tmp11462, i64 1
- %tmp11464 = getelementptr inbounds float* %tmp11463, i64 1
- %tmp11465 = getelementptr inbounds float* %tmp11464, i64 1
- %tmp11466 = getelementptr inbounds float* %tmp11465, i64 1
- %tmp11467 = getelementptr inbounds float* %tmp11466, i64 1
- %tmp11468 = getelementptr inbounds float* %tmp11467, i64 1
- %tmp11469 = getelementptr inbounds float* %tmp11468, i64 1
- %tmp11470 = getelementptr inbounds float* %tmp11469, i64 1
- %tmp11471 = getelementptr inbounds float* %tmp11470, i64 1
- %tmp11472 = getelementptr inbounds float* %tmp11471, i64 1
- %tmp11473 = getelementptr inbounds float* %tmp11472, i64 1
- %tmp11474 = getelementptr inbounds float* %tmp11473, i64 1
- %tmp11475 = getelementptr inbounds float* %tmp11474, i64 1
- %tmp11476 = getelementptr inbounds float* %tmp11475, i64 1
- %tmp11477 = getelementptr inbounds float* %tmp11476, i64 1
- %tmp11478 = getelementptr inbounds float* %tmp11477, i64 1
- %tmp11479 = getelementptr inbounds float* %tmp11478, i64 1
- %tmp11480 = getelementptr inbounds float* %tmp11479, i64 1
- %tmp11481 = getelementptr inbounds float* %tmp11480, i64 1
- %tmp11482 = getelementptr inbounds float* %tmp11481, i64 1
- %tmp11483 = getelementptr inbounds float* %tmp11482, i64 1
- %tmp11484 = getelementptr inbounds float* %tmp11483, i64 1
- %tmp11485 = getelementptr inbounds float* %tmp11484, i64 1
- %tmp11486 = getelementptr inbounds float* %tmp11485, i64 1
- %tmp11487 = getelementptr inbounds float* %tmp11486, i64 1
- %tmp11488 = getelementptr inbounds float* %tmp11487, i64 1
- %tmp11489 = getelementptr inbounds float* %tmp11488, i64 1
- %tmp11490 = getelementptr inbounds float* %tmp11489, i64 1
- %tmp11491 = getelementptr inbounds float* %tmp11490, i64 1
- %tmp11492 = getelementptr inbounds float* %tmp11491, i64 1
- %tmp11493 = getelementptr inbounds float* %tmp11492, i64 1
- %tmp11494 = getelementptr inbounds float* %tmp11493, i64 1
- %tmp11495 = getelementptr inbounds float* %tmp11494, i64 1
- %tmp11496 = getelementptr inbounds float* %tmp11495, i64 1
- %tmp11497 = getelementptr inbounds float* %tmp11496, i64 1
- %tmp11498 = getelementptr inbounds float* %tmp11497, i64 1
- %tmp11499 = getelementptr inbounds float* %tmp11498, i64 1
- %tmp11500 = getelementptr inbounds float* %tmp11499, i64 1
- %tmp11501 = getelementptr inbounds float* %tmp11500, i64 1
- %tmp11502 = getelementptr inbounds float* %tmp11501, i64 1
- %tmp11503 = getelementptr inbounds float* %tmp11502, i64 1
- %tmp11504 = getelementptr inbounds float* %tmp11503, i64 1
- %tmp11505 = getelementptr inbounds float* %tmp11504, i64 1
- %tmp11506 = getelementptr inbounds float* %tmp11505, i64 1
- %tmp11507 = getelementptr inbounds float* %tmp11506, i64 1
- %tmp11508 = getelementptr inbounds float* %tmp11507, i64 1
- %tmp11509 = getelementptr inbounds float* %tmp11508, i64 1
- %tmp11510 = getelementptr inbounds float* %tmp11509, i64 1
- %tmp11511 = getelementptr inbounds float* %tmp11510, i64 1
- %tmp11512 = getelementptr inbounds float* %tmp11511, i64 1
- %tmp11513 = getelementptr inbounds float* %tmp11512, i64 1
- %tmp11514 = getelementptr inbounds float* %tmp11513, i64 1
- %tmp11515 = getelementptr inbounds float* %tmp11514, i64 1
- %tmp11516 = getelementptr inbounds float* %tmp11515, i64 1
- %tmp11517 = getelementptr inbounds float* %tmp11516, i64 1
- %tmp11518 = getelementptr inbounds float* %tmp11517, i64 1
- %tmp11519 = getelementptr inbounds float* %tmp11518, i64 1
- %tmp11520 = getelementptr inbounds float* %tmp11519, i64 1
- %tmp11521 = getelementptr inbounds float* %tmp11520, i64 1
- %tmp11522 = getelementptr inbounds float* %tmp11521, i64 1
- %tmp11523 = getelementptr inbounds float* %tmp11522, i64 1
- %tmp11524 = getelementptr inbounds float* %tmp11523, i64 1
- %tmp11525 = getelementptr inbounds float* %tmp11524, i64 1
- %tmp11526 = getelementptr inbounds float* %tmp11525, i64 1
- %tmp11527 = getelementptr inbounds float* %tmp11526, i64 1
- %tmp11528 = getelementptr inbounds float* %tmp11527, i64 1
- %tmp11529 = getelementptr inbounds float* %tmp11528, i64 1
- %tmp11530 = getelementptr inbounds float* %tmp11529, i64 1
- %tmp11531 = getelementptr inbounds float* %tmp11530, i64 1
- %tmp11532 = getelementptr inbounds float* %tmp11531, i64 1
- %tmp11533 = getelementptr inbounds float* %tmp11532, i64 1
- %tmp11534 = getelementptr inbounds float* %tmp11533, i64 1
- %tmp11535 = getelementptr inbounds float* %tmp11534, i64 1
- %tmp11536 = getelementptr inbounds float* %tmp11535, i64 1
- %tmp11537 = getelementptr inbounds float* %tmp11536, i64 1
- %tmp11538 = getelementptr inbounds float* %tmp11537, i64 1
- %tmp11539 = getelementptr inbounds float* %tmp11538, i64 1
- %tmp11540 = getelementptr inbounds float* %tmp11539, i64 1
- %tmp11541 = getelementptr inbounds float* %tmp11540, i64 1
- %tmp11542 = getelementptr inbounds float* %tmp11541, i64 1
- %tmp11543 = getelementptr inbounds float* %tmp11542, i64 1
- %tmp11544 = getelementptr inbounds float* %tmp11543, i64 1
- %tmp11545 = getelementptr inbounds float* %tmp11544, i64 1
- %tmp11546 = getelementptr inbounds float* %tmp11545, i64 1
- %tmp11547 = getelementptr inbounds float* %tmp11546, i64 1
- %tmp11548 = getelementptr inbounds float* %tmp11547, i64 1
- %tmp11549 = getelementptr inbounds float* %tmp11548, i64 1
- %tmp11550 = getelementptr inbounds float* %tmp11549, i64 1
- %tmp11551 = getelementptr inbounds float* %tmp11550, i64 1
- %tmp11552 = getelementptr inbounds float* %tmp11551, i64 1
- %tmp11553 = getelementptr inbounds float* %tmp11552, i64 1
- %tmp11554 = getelementptr inbounds float* %tmp11553, i64 1
- %tmp11555 = getelementptr inbounds float* %tmp11554, i64 1
- %tmp11556 = getelementptr inbounds float* %tmp11555, i64 1
- %tmp11557 = getelementptr inbounds float* %tmp11556, i64 1
- %tmp11558 = getelementptr inbounds float* %tmp11557, i64 1
- %tmp11559 = getelementptr inbounds float* %tmp11558, i64 1
- %tmp11560 = getelementptr inbounds float* %tmp11559, i64 1
- %tmp11561 = getelementptr inbounds float* %tmp11560, i64 1
- %tmp11562 = getelementptr inbounds float* %tmp11561, i64 1
- %tmp11563 = getelementptr inbounds float* %tmp11562, i64 1
- %tmp11564 = getelementptr inbounds float* %tmp11563, i64 1
- %tmp11565 = getelementptr inbounds float* %tmp11564, i64 1
- %tmp11566 = getelementptr inbounds float* %tmp11565, i64 1
- %tmp11567 = getelementptr inbounds float* %tmp11566, i64 1
- %tmp11568 = getelementptr inbounds float* %tmp11567, i64 1
- %tmp11569 = getelementptr inbounds float* %tmp11568, i64 1
- %tmp11570 = getelementptr inbounds float* %tmp11569, i64 1
- %tmp11571 = getelementptr inbounds float* %tmp11570, i64 1
- %tmp11572 = getelementptr inbounds float* %tmp11571, i64 1
- %tmp11573 = getelementptr inbounds float* %tmp11572, i64 1
- %tmp11574 = getelementptr inbounds float* %tmp11573, i64 1
- %tmp11575 = getelementptr inbounds float* %tmp11574, i64 1
- %tmp11576 = getelementptr inbounds float* %tmp11575, i64 1
- %tmp11577 = getelementptr inbounds float* %tmp11576, i64 1
- %tmp11578 = getelementptr inbounds float* %tmp11577, i64 1
- %tmp11579 = getelementptr inbounds float* %tmp11578, i64 1
- %tmp11580 = getelementptr inbounds float* %tmp11579, i64 1
- %tmp11581 = getelementptr inbounds float* %tmp11580, i64 1
- %tmp11582 = getelementptr inbounds float* %tmp11581, i64 1
- %tmp11583 = getelementptr inbounds float* %tmp11582, i64 1
- %tmp11584 = getelementptr inbounds float* %tmp11583, i64 1
- %tmp11585 = getelementptr inbounds float* %tmp11584, i64 1
- %tmp11586 = getelementptr inbounds float* %tmp11585, i64 1
- %tmp11587 = getelementptr inbounds float* %tmp11586, i64 1
- %tmp11588 = getelementptr inbounds float* %tmp11587, i64 1
- %tmp11589 = getelementptr inbounds float* %tmp11588, i64 1
- %tmp11590 = getelementptr inbounds float* %tmp11589, i64 1
- %tmp11591 = getelementptr inbounds float* %tmp11590, i64 1
- %tmp11592 = getelementptr inbounds float* %tmp11591, i64 1
- %tmp11593 = getelementptr inbounds float* %tmp11592, i64 1
- %tmp11594 = getelementptr inbounds float* %tmp11593, i64 1
- %tmp11595 = getelementptr inbounds float* %tmp11594, i64 1
- %tmp11596 = getelementptr inbounds float* %tmp11595, i64 1
- %tmp11597 = getelementptr inbounds float* %tmp11596, i64 1
- %tmp11598 = getelementptr inbounds float* %tmp11597, i64 1
- %tmp11599 = getelementptr inbounds float* %tmp11598, i64 1
- %tmp11600 = getelementptr inbounds float* %tmp11599, i64 1
- %tmp11601 = getelementptr inbounds float* %tmp11600, i64 1
- %tmp11602 = getelementptr inbounds float* %tmp11601, i64 1
- %tmp11603 = getelementptr inbounds float* %tmp11602, i64 1
- %tmp11604 = getelementptr inbounds float* %tmp11603, i64 1
- %tmp11605 = getelementptr inbounds float* %tmp11604, i64 1
- %tmp11606 = getelementptr inbounds float* %tmp11605, i64 1
- %tmp11607 = getelementptr inbounds float* %tmp11606, i64 1
- %tmp11608 = getelementptr inbounds float* %tmp11607, i64 1
- %tmp11609 = getelementptr inbounds float* %tmp11608, i64 1
- %tmp11610 = getelementptr inbounds float* %tmp11609, i64 1
- %tmp11611 = getelementptr inbounds float* %tmp11610, i64 1
- %tmp11612 = getelementptr inbounds float* %tmp11611, i64 1
- %tmp11613 = getelementptr inbounds float* %tmp11612, i64 1
- %tmp11614 = getelementptr inbounds float* %tmp11613, i64 1
- %tmp11615 = getelementptr inbounds float* %tmp11614, i64 1
- %tmp11616 = getelementptr inbounds float* %tmp11615, i64 1
- %tmp11617 = getelementptr inbounds float* %tmp11616, i64 1
- %tmp11618 = getelementptr inbounds float* %tmp11617, i64 1
- %tmp11619 = getelementptr inbounds float* %tmp11618, i64 1
- %tmp11620 = getelementptr inbounds float* %tmp11619, i64 1
- %tmp11621 = getelementptr inbounds float* %tmp11620, i64 1
- %tmp11622 = getelementptr inbounds float* %tmp11621, i64 1
- %tmp11623 = getelementptr inbounds float* %tmp11622, i64 1
- %tmp11624 = getelementptr inbounds float* %tmp11623, i64 1
- %tmp11625 = getelementptr inbounds float* %tmp11624, i64 1
- %tmp11626 = getelementptr inbounds float* %tmp11625, i64 1
- %tmp11627 = getelementptr inbounds float* %tmp11626, i64 1
- %tmp11628 = getelementptr inbounds float* %tmp11627, i64 1
- %tmp11629 = getelementptr inbounds float* %tmp11628, i64 1
- %tmp11630 = getelementptr inbounds float* %tmp11629, i64 1
- %tmp11631 = getelementptr inbounds float* %tmp11630, i64 1
- %tmp11632 = getelementptr inbounds float* %tmp11631, i64 1
- %tmp11633 = getelementptr inbounds float* %tmp11632, i64 1
- %tmp11634 = getelementptr inbounds float* %tmp11633, i64 1
- %tmp11635 = getelementptr inbounds float* %tmp11634, i64 1
- %tmp11636 = getelementptr inbounds float* %tmp11635, i64 1
- %tmp11637 = getelementptr inbounds float* %tmp11636, i64 1
- %tmp11638 = getelementptr inbounds float* %tmp11637, i64 1
- %tmp11639 = getelementptr inbounds float* %tmp11638, i64 1
- %tmp11640 = getelementptr inbounds float* %tmp11639, i64 1
- %tmp11641 = getelementptr inbounds float* %tmp11640, i64 1
- %tmp11642 = getelementptr inbounds float* %tmp11641, i64 1
- %tmp11643 = getelementptr inbounds float* %tmp11642, i64 1
- %tmp11644 = getelementptr inbounds float* %tmp11643, i64 1
- %tmp11645 = getelementptr inbounds float* %tmp11644, i64 1
- %tmp11646 = getelementptr inbounds float* %tmp11645, i64 1
- %tmp11647 = getelementptr inbounds float* %tmp11646, i64 1
- %tmp11648 = getelementptr inbounds float* %tmp11647, i64 1
- %tmp11649 = getelementptr inbounds float* %tmp11648, i64 1
- %tmp11650 = getelementptr inbounds float* %tmp11649, i64 1
- %tmp11651 = getelementptr inbounds float* %tmp11650, i64 1
- %tmp11652 = getelementptr inbounds float* %tmp11651, i64 1
- %tmp11653 = getelementptr inbounds float* %tmp11652, i64 1
- %tmp11654 = getelementptr inbounds float* %tmp11653, i64 1
- %tmp11655 = getelementptr inbounds float* %tmp11654, i64 1
- %tmp11656 = getelementptr inbounds float* %tmp11655, i64 1
- %tmp11657 = getelementptr inbounds float* %tmp11656, i64 1
- %tmp11658 = getelementptr inbounds float* %tmp11657, i64 1
- %tmp11659 = getelementptr inbounds float* %tmp11658, i64 1
- %tmp11660 = getelementptr inbounds float* %tmp11659, i64 1
- %tmp11661 = getelementptr inbounds float* %tmp11660, i64 1
- %tmp11662 = getelementptr inbounds float* %tmp11661, i64 1
- %tmp11663 = getelementptr inbounds float* %tmp11662, i64 1
- %tmp11664 = getelementptr inbounds float* %tmp11663, i64 1
- %tmp11665 = getelementptr inbounds float* %tmp11664, i64 1
- %tmp11666 = getelementptr inbounds float* %tmp11665, i64 1
- %tmp11667 = getelementptr inbounds float* %tmp11666, i64 1
- %tmp11668 = getelementptr inbounds float* %tmp11667, i64 1
- %tmp11669 = getelementptr inbounds float* %tmp11668, i64 1
- %tmp11670 = getelementptr inbounds float* %tmp11669, i64 1
- %tmp11671 = getelementptr inbounds float* %tmp11670, i64 1
- %tmp11672 = getelementptr inbounds float* %tmp11671, i64 1
- %tmp11673 = getelementptr inbounds float* %tmp11672, i64 1
- %tmp11674 = getelementptr inbounds float* %tmp11673, i64 1
- %tmp11675 = getelementptr inbounds float* %tmp11674, i64 1
- %tmp11676 = getelementptr inbounds float* %tmp11675, i64 1
- %tmp11677 = getelementptr inbounds float* %tmp11676, i64 1
- %tmp11678 = getelementptr inbounds float* %tmp11677, i64 1
- %tmp11679 = getelementptr inbounds float* %tmp11678, i64 1
- %tmp11680 = getelementptr inbounds float* %tmp11679, i64 1
- %tmp11681 = getelementptr inbounds float* %tmp11680, i64 1
- %tmp11682 = getelementptr inbounds float* %tmp11681, i64 1
- %tmp11683 = getelementptr inbounds float* %tmp11682, i64 1
- %tmp11684 = getelementptr inbounds float* %tmp11683, i64 1
- %tmp11685 = getelementptr inbounds float* %tmp11684, i64 1
- %tmp11686 = getelementptr inbounds float* %tmp11685, i64 1
- %tmp11687 = getelementptr inbounds float* %tmp11686, i64 1
- %tmp11688 = getelementptr inbounds float* %tmp11687, i64 1
- %tmp11689 = getelementptr inbounds float* %tmp11688, i64 1
- %tmp11690 = getelementptr inbounds float* %tmp11689, i64 1
- %tmp11691 = getelementptr inbounds float* %tmp11690, i64 1
- %tmp11692 = getelementptr inbounds float* %tmp11691, i64 1
- %tmp11693 = getelementptr inbounds float* %tmp11692, i64 1
- %tmp11694 = getelementptr inbounds float* %tmp11693, i64 1
- %tmp11695 = getelementptr inbounds float* %tmp11694, i64 1
- %tmp11696 = getelementptr inbounds float* %tmp11695, i64 1
- %tmp11697 = getelementptr inbounds float* %tmp11696, i64 1
- %tmp11698 = getelementptr inbounds float* %tmp11697, i64 1
- %tmp11699 = getelementptr inbounds float* %tmp11698, i64 1
- %tmp11700 = getelementptr inbounds float* %tmp11699, i64 1
- %tmp11701 = getelementptr inbounds float* %tmp11700, i64 1
- %tmp11702 = getelementptr inbounds float* %tmp11701, i64 1
- %tmp11703 = getelementptr inbounds float* %tmp11702, i64 1
- %tmp11704 = getelementptr inbounds float* %tmp11703, i64 1
- %tmp11705 = getelementptr inbounds float* %tmp11704, i64 1
- %tmp11706 = getelementptr inbounds float* %tmp11705, i64 1
- %tmp11707 = getelementptr inbounds float* %tmp11706, i64 1
- %tmp11708 = getelementptr inbounds float* %tmp11707, i64 1
- %tmp11709 = getelementptr inbounds float* %tmp11708, i64 1
- %tmp11710 = getelementptr inbounds float* %tmp11709, i64 1
- %tmp11711 = getelementptr inbounds float* %tmp11710, i64 1
- %tmp11712 = getelementptr inbounds float* %tmp11711, i64 1
- %tmp11713 = getelementptr inbounds float* %tmp11712, i64 1
- %tmp11714 = getelementptr inbounds float* %tmp11713, i64 1
- %tmp11715 = getelementptr inbounds float* %tmp11714, i64 1
- %tmp11716 = getelementptr inbounds float* %tmp11715, i64 1
- %tmp11717 = getelementptr inbounds float* %tmp11716, i64 1
- %tmp11718 = getelementptr inbounds float* %tmp11717, i64 1
- %tmp11719 = getelementptr inbounds float* %tmp11718, i64 1
- %tmp11720 = getelementptr inbounds float* %tmp11719, i64 1
- %tmp11721 = getelementptr inbounds float* %tmp11720, i64 1
- %tmp11722 = getelementptr inbounds float* %tmp11721, i64 1
- %tmp11723 = getelementptr inbounds float* %tmp11722, i64 1
- %tmp11724 = getelementptr inbounds float* %tmp11723, i64 1
- %tmp11725 = getelementptr inbounds float* %tmp11724, i64 1
- %tmp11726 = getelementptr inbounds float* %tmp11725, i64 1
- %tmp11727 = getelementptr inbounds float* %tmp11726, i64 1
- %tmp11728 = getelementptr inbounds float* %tmp11727, i64 1
- %tmp11729 = getelementptr inbounds float* %tmp11728, i64 1
- %tmp11730 = getelementptr inbounds float* %tmp11729, i64 1
- %tmp11731 = getelementptr inbounds float* %tmp11730, i64 1
- %tmp11732 = getelementptr inbounds float* %tmp11731, i64 1
- %tmp11733 = getelementptr inbounds float* %tmp11732, i64 1
- %tmp11734 = getelementptr inbounds float* %tmp11733, i64 1
- %tmp11735 = getelementptr inbounds float* %tmp11734, i64 1
- %tmp11736 = getelementptr inbounds float* %tmp11735, i64 1
- %tmp11737 = getelementptr inbounds float* %tmp11736, i64 1
- %tmp11738 = getelementptr inbounds float* %tmp11737, i64 1
- %tmp11739 = getelementptr inbounds float* %tmp11738, i64 1
- %tmp11740 = getelementptr inbounds float* %tmp11739, i64 1
- %tmp11741 = getelementptr inbounds float* %tmp11740, i64 1
- %tmp11742 = getelementptr inbounds float* %tmp11741, i64 1
- %tmp11743 = getelementptr inbounds float* %tmp11742, i64 1
- %tmp11744 = getelementptr inbounds float* %tmp11743, i64 1
- %tmp11745 = getelementptr inbounds float* %tmp11744, i64 1
- %tmp11746 = getelementptr inbounds float* %tmp11745, i64 1
- %tmp11747 = getelementptr inbounds float* %tmp11746, i64 1
- %tmp11748 = getelementptr inbounds float* %tmp11747, i64 1
- %tmp11749 = getelementptr inbounds float* %tmp11748, i64 1
- %tmp11750 = getelementptr inbounds float* %tmp11749, i64 1
- %tmp11751 = getelementptr inbounds float* %tmp11750, i64 1
- %tmp11752 = getelementptr inbounds float* %tmp11751, i64 1
- %tmp11753 = getelementptr inbounds float* %tmp11752, i64 1
- %tmp11754 = getelementptr inbounds float* %tmp11753, i64 1
- %tmp11755 = getelementptr inbounds float* %tmp11754, i64 1
- %tmp11756 = getelementptr inbounds float* %tmp11755, i64 1
- %tmp11757 = getelementptr inbounds float* %tmp11756, i64 1
- %tmp11758 = getelementptr inbounds float* %tmp11757, i64 1
- %tmp11759 = getelementptr inbounds float* %tmp11758, i64 1
- %tmp11760 = getelementptr inbounds float* %tmp11759, i64 1
- %tmp11761 = getelementptr inbounds float* %tmp11760, i64 1
- %tmp11762 = getelementptr inbounds float* %tmp11761, i64 1
- %tmp11763 = getelementptr inbounds float* %tmp11762, i64 1
- %tmp11764 = getelementptr inbounds float* %tmp11763, i64 1
- %tmp11765 = getelementptr inbounds float* %tmp11764, i64 1
- %tmp11766 = getelementptr inbounds float* %tmp11765, i64 1
- %tmp11767 = getelementptr inbounds float* %tmp11766, i64 1
- %tmp11768 = getelementptr inbounds float* %tmp11767, i64 1
- %tmp11769 = getelementptr inbounds float* %tmp11768, i64 1
- %tmp11770 = getelementptr inbounds float* %tmp11769, i64 1
- %tmp11771 = getelementptr inbounds float* %tmp11770, i64 1
- %tmp11772 = getelementptr inbounds float* %tmp11771, i64 1
- %tmp11773 = getelementptr inbounds float* %tmp11772, i64 1
- %tmp11774 = getelementptr inbounds float* %tmp11773, i64 1
- %tmp11775 = getelementptr inbounds float* %tmp11774, i64 1
- %tmp11776 = getelementptr inbounds float* %tmp11775, i64 1
- %tmp11777 = getelementptr inbounds float* %tmp11776, i64 1
- %tmp11778 = getelementptr inbounds float* %tmp11777, i64 1
- %tmp11779 = getelementptr inbounds float* %tmp11778, i64 1
- %tmp11780 = getelementptr inbounds float* %tmp11779, i64 1
- %tmp11781 = getelementptr inbounds float* %tmp11780, i64 1
- %tmp11782 = getelementptr inbounds float* %tmp11781, i64 1
- %tmp11783 = getelementptr inbounds float* %tmp11782, i64 1
- %tmp11784 = getelementptr inbounds float* %tmp11783, i64 1
- %tmp11785 = getelementptr inbounds float* %tmp11784, i64 1
- %tmp11786 = getelementptr inbounds float* %tmp11785, i64 1
- %tmp11787 = getelementptr inbounds float* %tmp11786, i64 1
- %tmp11788 = getelementptr inbounds float* %tmp11787, i64 1
- %tmp11789 = getelementptr inbounds float* %tmp11788, i64 1
- %tmp11790 = getelementptr inbounds float* %tmp11789, i64 1
- %tmp11791 = getelementptr inbounds float* %tmp11790, i64 1
- %tmp11792 = getelementptr inbounds float* %tmp11791, i64 1
- %tmp11793 = getelementptr inbounds float* %tmp11792, i64 1
- %tmp11794 = getelementptr inbounds float* %tmp11793, i64 1
- %tmp11795 = getelementptr inbounds float* %tmp11794, i64 1
- %tmp11796 = getelementptr inbounds float* %tmp11795, i64 1
- %tmp11797 = getelementptr inbounds float* %tmp11796, i64 1
- %tmp11798 = getelementptr inbounds float* %tmp11797, i64 1
- %tmp11799 = getelementptr inbounds float* %tmp11798, i64 1
- %tmp11800 = getelementptr inbounds float* %tmp11799, i64 1
- %tmp11801 = getelementptr inbounds float* %tmp11800, i64 1
- %tmp11802 = getelementptr inbounds float* %tmp11801, i64 1
- %tmp11803 = getelementptr inbounds float* %tmp11802, i64 1
- %tmp11804 = getelementptr inbounds float* %tmp11803, i64 1
- %tmp11805 = getelementptr inbounds float* %tmp11804, i64 1
- %tmp11806 = getelementptr inbounds float* %tmp11805, i64 1
- %tmp11807 = getelementptr inbounds float* %tmp11806, i64 1
- %tmp11808 = getelementptr inbounds float* %tmp11807, i64 1
- %tmp11809 = getelementptr inbounds float* %tmp11808, i64 1
- %tmp11810 = getelementptr inbounds float* %tmp11809, i64 1
- %tmp11811 = getelementptr inbounds float* %tmp11810, i64 1
- %tmp11812 = getelementptr inbounds float* %tmp11811, i64 1
- %tmp11813 = getelementptr inbounds float* %tmp11812, i64 1
- %tmp11814 = getelementptr inbounds float* %tmp11813, i64 1
- %tmp11815 = getelementptr inbounds float* %tmp11814, i64 1
- %tmp11816 = getelementptr inbounds float* %tmp11815, i64 1
- %tmp11817 = getelementptr inbounds float* %tmp11816, i64 1
- %tmp11818 = getelementptr inbounds float* %tmp11817, i64 1
- %tmp11819 = getelementptr inbounds float* %tmp11818, i64 1
- %tmp11820 = getelementptr inbounds float* %tmp11819, i64 1
- %tmp11821 = getelementptr inbounds float* %tmp11820, i64 1
- %tmp11822 = getelementptr inbounds float* %tmp11821, i64 1
- %tmp11823 = getelementptr inbounds float* %tmp11822, i64 1
- %tmp11824 = getelementptr inbounds float* %tmp11823, i64 1
- %tmp11825 = getelementptr inbounds float* %tmp11824, i64 1
- %tmp11826 = getelementptr inbounds float* %tmp11825, i64 1
- %tmp11827 = getelementptr inbounds float* %tmp11826, i64 1
- %tmp11828 = getelementptr inbounds float* %tmp11827, i64 1
- %tmp11829 = getelementptr inbounds float* %tmp11828, i64 1
- %tmp11830 = getelementptr inbounds float* %tmp11829, i64 1
- %tmp11831 = getelementptr inbounds float* %tmp11830, i64 1
- %tmp11832 = getelementptr inbounds float* %tmp11831, i64 1
- %tmp11833 = getelementptr inbounds float* %tmp11832, i64 1
- %tmp11834 = getelementptr inbounds float* %tmp11833, i64 1
- %tmp11835 = getelementptr inbounds float* %tmp11834, i64 1
- %tmp11836 = getelementptr inbounds float* %tmp11835, i64 1
- %tmp11837 = getelementptr inbounds float* %tmp11836, i64 1
- %tmp11838 = getelementptr inbounds float* %tmp11837, i64 1
- %tmp11839 = getelementptr inbounds float* %tmp11838, i64 1
- %tmp11840 = getelementptr inbounds float* %tmp11839, i64 1
- %tmp11841 = getelementptr inbounds float* %tmp11840, i64 1
- %tmp11842 = getelementptr inbounds float* %tmp11841, i64 1
- %tmp11843 = getelementptr inbounds float* %tmp11842, i64 1
- %tmp11844 = getelementptr inbounds float* %tmp11843, i64 1
- %tmp11845 = getelementptr inbounds float* %tmp11844, i64 1
- %tmp11846 = getelementptr inbounds float* %tmp11845, i64 1
- %tmp11847 = getelementptr inbounds float* %tmp11846, i64 1
- %tmp11848 = getelementptr inbounds float* %tmp11847, i64 1
- %tmp11849 = getelementptr inbounds float* %tmp11848, i64 1
- %tmp11850 = getelementptr inbounds float* %tmp11849, i64 1
- %tmp11851 = getelementptr inbounds float* %tmp11850, i64 1
- %tmp11852 = getelementptr inbounds float* %tmp11851, i64 1
- %tmp11853 = getelementptr inbounds float* %tmp11852, i64 1
- %tmp11854 = getelementptr inbounds float* %tmp11853, i64 1
- %tmp11855 = getelementptr inbounds float* %tmp11854, i64 1
- %tmp11856 = getelementptr inbounds float* %tmp11855, i64 1
- %tmp11857 = getelementptr inbounds float* %tmp11856, i64 1
- %tmp11858 = getelementptr inbounds float* %tmp11857, i64 1
- %tmp11859 = getelementptr inbounds float* %tmp11858, i64 1
- %tmp11860 = getelementptr inbounds float* %tmp11859, i64 1
- %tmp11861 = getelementptr inbounds float* %tmp11860, i64 1
- %tmp11862 = getelementptr inbounds float* %tmp11861, i64 1
- %tmp11863 = getelementptr inbounds float* %tmp11862, i64 1
- %tmp11864 = getelementptr inbounds float* %tmp11863, i64 1
- %tmp11865 = getelementptr inbounds float* %tmp11864, i64 1
- %tmp11866 = getelementptr inbounds float* %tmp11865, i64 1
- %tmp11867 = getelementptr inbounds float* %tmp11866, i64 1
- %tmp11868 = getelementptr inbounds float* %tmp11867, i64 1
- %tmp11869 = getelementptr inbounds float* %tmp11868, i64 1
- %tmp11870 = getelementptr inbounds float* %tmp11869, i64 1
- %tmp11871 = getelementptr inbounds float* %tmp11870, i64 1
- %tmp11872 = getelementptr inbounds float* %tmp11871, i64 1
- %tmp11873 = getelementptr inbounds float* %tmp11872, i64 1
- %tmp11874 = getelementptr inbounds float* %tmp11873, i64 1
- %tmp11875 = getelementptr inbounds float* %tmp11874, i64 1
- %tmp11876 = getelementptr inbounds float* %tmp11875, i64 1
- %tmp11877 = getelementptr inbounds float* %tmp11876, i64 1
- %tmp11878 = getelementptr inbounds float* %tmp11877, i64 1
- %tmp11879 = getelementptr inbounds float* %tmp11878, i64 1
- %tmp11880 = getelementptr inbounds float* %tmp11879, i64 1
- %tmp11881 = getelementptr inbounds float* %tmp11880, i64 1
- %tmp11882 = getelementptr inbounds float* %tmp11881, i64 1
- %tmp11883 = getelementptr inbounds float* %tmp11882, i64 1
- %tmp11884 = getelementptr inbounds float* %tmp11883, i64 1
- %tmp11885 = getelementptr inbounds float* %tmp11884, i64 1
- %tmp11886 = getelementptr inbounds float* %tmp11885, i64 1
- %tmp11887 = getelementptr inbounds float* %tmp11886, i64 1
- %tmp11888 = getelementptr inbounds float* %tmp11887, i64 1
- %tmp11889 = getelementptr inbounds float* %tmp11888, i64 1
- %tmp11890 = getelementptr inbounds float* %tmp11889, i64 1
- %tmp11891 = getelementptr inbounds float* %tmp11890, i64 1
- %tmp11892 = getelementptr inbounds float* %tmp11891, i64 1
- %tmp11893 = getelementptr inbounds float* %tmp11892, i64 1
- %tmp11894 = getelementptr inbounds float* %tmp11893, i64 1
- %tmp11895 = getelementptr inbounds float* %tmp11894, i64 1
- %tmp11896 = getelementptr inbounds float* %tmp11895, i64 1
- %tmp11897 = getelementptr inbounds float* %tmp11896, i64 1
- %tmp11898 = getelementptr inbounds float* %tmp11897, i64 1
- %tmp11899 = getelementptr inbounds float* %tmp11898, i64 1
- %tmp11900 = getelementptr inbounds float* %tmp11899, i64 1
- %tmp11901 = getelementptr inbounds float* %tmp11900, i64 1
- %tmp11902 = getelementptr inbounds float* %tmp11901, i64 1
- %tmp11903 = getelementptr inbounds float* %tmp11902, i64 1
- %tmp11904 = getelementptr inbounds float* %tmp11903, i64 1
- %tmp11905 = getelementptr inbounds float* %tmp11904, i64 1
- %tmp11906 = getelementptr inbounds float* %tmp11905, i64 1
- %tmp11907 = getelementptr inbounds float* %tmp11906, i64 1
- %tmp11908 = getelementptr inbounds float* %tmp11907, i64 1
- %tmp11909 = getelementptr inbounds float* %tmp11908, i64 1
- %tmp11910 = getelementptr inbounds float* %tmp11909, i64 1
- %tmp11911 = getelementptr inbounds float* %tmp11910, i64 1
- %tmp11912 = getelementptr inbounds float* %tmp11911, i64 1
- %tmp11913 = getelementptr inbounds float* %tmp11912, i64 1
- %tmp11914 = getelementptr inbounds float* %tmp11913, i64 1
- %tmp11915 = getelementptr inbounds float* %tmp11914, i64 1
- %tmp11916 = getelementptr inbounds float* %tmp11915, i64 1
- %tmp11917 = getelementptr inbounds float* %tmp11916, i64 1
- %tmp11918 = getelementptr inbounds float* %tmp11917, i64 1
- %tmp11919 = getelementptr inbounds float* %tmp11918, i64 1
- %tmp11920 = getelementptr inbounds float* %tmp11919, i64 1
- %tmp11921 = getelementptr inbounds float* %tmp11920, i64 1
- %tmp11922 = getelementptr inbounds float* %tmp11921, i64 1
- %tmp11923 = getelementptr inbounds float* %tmp11922, i64 1
- %tmp11924 = getelementptr inbounds float* %tmp11923, i64 1
- %tmp11925 = getelementptr inbounds float* %tmp11924, i64 1
- %tmp11926 = getelementptr inbounds float* %tmp11925, i64 1
- %tmp11927 = getelementptr inbounds float* %tmp11926, i64 1
- %tmp11928 = getelementptr inbounds float* %tmp11927, i64 1
- %tmp11929 = getelementptr inbounds float* %tmp11928, i64 1
- %tmp11930 = getelementptr inbounds float* %tmp11929, i64 1
- %tmp11931 = getelementptr inbounds float* %tmp11930, i64 1
- %tmp11932 = getelementptr inbounds float* %tmp11931, i64 1
- %tmp11933 = getelementptr inbounds float* %tmp11932, i64 1
- %tmp11934 = getelementptr inbounds float* %tmp11933, i64 1
- %tmp11935 = getelementptr inbounds float* %tmp11934, i64 1
- %tmp11936 = getelementptr inbounds float* %tmp11935, i64 1
- %tmp11937 = getelementptr inbounds float* %tmp11936, i64 1
- %tmp11938 = getelementptr inbounds float* %tmp11937, i64 1
- %tmp11939 = getelementptr inbounds float* %tmp11938, i64 1
- %tmp11940 = getelementptr inbounds float* %tmp11939, i64 1
- %tmp11941 = getelementptr inbounds float* %tmp11940, i64 1
- %tmp11942 = getelementptr inbounds float* %tmp11941, i64 1
- %tmp11943 = getelementptr inbounds float* %tmp11942, i64 1
- %tmp11944 = getelementptr inbounds float* %tmp11943, i64 1
- %tmp11945 = getelementptr inbounds float* %tmp11944, i64 1
- %tmp11946 = getelementptr inbounds float* %tmp11945, i64 1
- %tmp11947 = getelementptr inbounds float* %tmp11946, i64 1
- %tmp11948 = getelementptr inbounds float* %tmp11947, i64 1
- %tmp11949 = getelementptr inbounds float* %tmp11948, i64 1
- %tmp11950 = getelementptr inbounds float* %tmp11949, i64 1
- %tmp11951 = getelementptr inbounds float* %tmp11950, i64 1
- %tmp11952 = getelementptr inbounds float* %tmp11951, i64 1
- %tmp11953 = getelementptr inbounds float* %tmp11952, i64 1
- %tmp11954 = getelementptr inbounds float* %tmp11953, i64 1
- %tmp11955 = getelementptr inbounds float* %tmp11954, i64 1
- %tmp11956 = getelementptr inbounds float* %tmp11955, i64 1
- %tmp11957 = getelementptr inbounds float* %tmp11956, i64 1
- %tmp11958 = getelementptr inbounds float* %tmp11957, i64 1
- %tmp11959 = getelementptr inbounds float* %tmp11958, i64 1
- %tmp11960 = getelementptr inbounds float* %tmp11959, i64 1
- %tmp11961 = getelementptr inbounds float* %tmp11960, i64 1
- %tmp11962 = getelementptr inbounds float* %tmp11961, i64 1
- %tmp11963 = getelementptr inbounds float* %tmp11962, i64 1
- %tmp11964 = getelementptr inbounds float* %tmp11963, i64 1
- %tmp11965 = getelementptr inbounds float* %tmp11964, i64 1
- %tmp11966 = getelementptr inbounds float* %tmp11965, i64 1
- %tmp11967 = getelementptr inbounds float* %tmp11966, i64 1
- %tmp11968 = getelementptr inbounds float* %tmp11967, i64 1
- %tmp11969 = getelementptr inbounds float* %tmp11968, i64 1
- %tmp11970 = getelementptr inbounds float* %tmp11969, i64 1
- %tmp11971 = getelementptr inbounds float* %tmp11970, i64 1
- %tmp11972 = getelementptr inbounds float* %tmp11971, i64 1
- %tmp11973 = getelementptr inbounds float* %tmp11972, i64 1
- %tmp11974 = getelementptr inbounds float* %tmp11973, i64 1
- %tmp11975 = getelementptr inbounds float* %tmp11974, i64 1
- %tmp11976 = getelementptr inbounds float* %tmp11975, i64 1
- %tmp11977 = getelementptr inbounds float* %tmp11976, i64 1
- %tmp11978 = getelementptr inbounds float* %tmp11977, i64 1
- %tmp11979 = getelementptr inbounds float* %tmp11978, i64 1
- %tmp11980 = getelementptr inbounds float* %tmp11979, i64 1
- %tmp11981 = getelementptr inbounds float* %tmp11980, i64 1
- %tmp11982 = getelementptr inbounds float* %tmp11981, i64 1
- %tmp11983 = getelementptr inbounds float* %tmp11982, i64 1
- %tmp11984 = getelementptr inbounds float* %tmp11983, i64 1
- %tmp11985 = getelementptr inbounds float* %tmp11984, i64 1
- %tmp11986 = getelementptr inbounds float* %tmp11985, i64 1
- %tmp11987 = getelementptr inbounds float* %tmp11986, i64 1
- %tmp11988 = getelementptr inbounds float* %tmp11987, i64 1
- %tmp11989 = getelementptr inbounds float* %tmp11988, i64 1
- %tmp11990 = getelementptr inbounds float* %tmp11989, i64 1
- %tmp11991 = getelementptr inbounds float* %tmp11990, i64 1
- %tmp11992 = getelementptr inbounds float* %tmp11991, i64 1
- %tmp11993 = getelementptr inbounds float* %tmp11992, i64 1
- %tmp11994 = getelementptr inbounds float* %tmp11993, i64 1
- %tmp11995 = getelementptr inbounds float* %tmp11994, i64 1
- %tmp11996 = getelementptr inbounds float* %tmp11995, i64 1
- %tmp11997 = getelementptr inbounds float* %tmp11996, i64 1
- %tmp11998 = getelementptr inbounds float* %tmp11997, i64 1
- %tmp11999 = getelementptr inbounds float* %tmp11998, i64 1
- %tmp12000 = getelementptr inbounds float* %tmp11999, i64 1
- %tmp12001 = getelementptr inbounds float* %tmp12000, i64 1
- %tmp12002 = getelementptr inbounds float* %tmp12001, i64 1
- %tmp12003 = getelementptr inbounds float* %tmp12002, i64 1
- %tmp12004 = getelementptr inbounds float* %tmp12003, i64 1
- %tmp12005 = getelementptr inbounds float* %tmp12004, i64 1
- %tmp12006 = getelementptr inbounds float* %tmp12005, i64 1
- %tmp12007 = getelementptr inbounds float* %tmp12006, i64 1
- %tmp12008 = getelementptr inbounds float* %tmp12007, i64 1
- %tmp12009 = getelementptr inbounds float* %tmp12008, i64 1
- %tmp12010 = getelementptr inbounds float* %tmp12009, i64 1
- %tmp12011 = getelementptr inbounds float* %tmp12010, i64 1
- %tmp12012 = getelementptr inbounds float* %tmp12011, i64 1
- %tmp12013 = getelementptr inbounds float* %tmp12012, i64 1
- %tmp12014 = getelementptr inbounds float* %tmp12013, i64 1
- %tmp12015 = getelementptr inbounds float* %tmp12014, i64 1
- %tmp12016 = getelementptr inbounds float* %tmp12015, i64 1
- %tmp12017 = getelementptr inbounds float* %tmp12016, i64 1
- %tmp12018 = getelementptr inbounds float* %tmp12017, i64 1
- %tmp12019 = getelementptr inbounds float* %tmp12018, i64 1
- %tmp12020 = getelementptr inbounds float* %tmp12019, i64 1
- %tmp12021 = getelementptr inbounds float* %tmp12020, i64 1
- %tmp12022 = getelementptr inbounds float* %tmp12021, i64 1
- %tmp12023 = getelementptr inbounds float* %tmp12022, i64 1
- %tmp12024 = getelementptr inbounds float* %tmp12023, i64 1
- %tmp12025 = getelementptr inbounds float* %tmp12024, i64 1
- %tmp12026 = getelementptr inbounds float* %tmp12025, i64 1
- %tmp12027 = getelementptr inbounds float* %tmp12026, i64 1
- %tmp12028 = getelementptr inbounds float* %tmp12027, i64 1
- %tmp12029 = getelementptr inbounds float* %tmp12028, i64 1
- %tmp12030 = getelementptr inbounds float* %tmp12029, i64 1
- %tmp12031 = getelementptr inbounds float* %tmp12030, i64 1
- %tmp12032 = getelementptr inbounds float* %tmp12031, i64 1
- %tmp12033 = getelementptr inbounds float* %tmp12032, i64 1
- %tmp12034 = getelementptr inbounds float* %tmp12033, i64 1
- %tmp12035 = getelementptr inbounds float* %tmp12034, i64 1
- %tmp12036 = getelementptr inbounds float* %tmp12035, i64 1
- %tmp12037 = getelementptr inbounds float* %tmp12036, i64 1
- %tmp12038 = getelementptr inbounds float* %tmp12037, i64 1
- %tmp12039 = getelementptr inbounds float* %tmp12038, i64 1
- %tmp12040 = getelementptr inbounds float* %tmp12039, i64 1
- %tmp12041 = getelementptr inbounds float* %tmp12040, i64 1
- %tmp12042 = getelementptr inbounds float* %tmp12041, i64 1
- %tmp12043 = getelementptr inbounds float* %tmp12042, i64 1
- %tmp12044 = getelementptr inbounds float* %tmp12043, i64 1
- %tmp12045 = getelementptr inbounds float* %tmp12044, i64 1
- %tmp12046 = getelementptr inbounds float* %tmp12045, i64 1
- %tmp12047 = getelementptr inbounds float* %tmp12046, i64 1
- %tmp12048 = getelementptr inbounds float* %tmp12047, i64 1
- %tmp12049 = getelementptr inbounds float* %tmp12048, i64 1
- %tmp12050 = getelementptr inbounds float* %tmp12049, i64 1
- %tmp12051 = getelementptr inbounds float* %tmp12050, i64 1
- %tmp12052 = getelementptr inbounds float* %tmp12051, i64 1
- %tmp12053 = getelementptr inbounds float* %tmp12052, i64 1
- %tmp12054 = getelementptr inbounds float* %tmp12053, i64 1
- %tmp12055 = getelementptr inbounds float* %tmp12054, i64 1
- %tmp12056 = getelementptr inbounds float* %tmp12055, i64 1
- %tmp12057 = getelementptr inbounds float* %tmp12056, i64 1
- %tmp12058 = getelementptr inbounds float* %tmp12057, i64 1
- %tmp12059 = getelementptr inbounds float* %tmp12058, i64 1
- %tmp12060 = getelementptr inbounds float* %tmp12059, i64 1
- %tmp12061 = getelementptr inbounds float* %tmp12060, i64 1
- %tmp12062 = getelementptr inbounds float* %tmp12061, i64 1
- %tmp12063 = getelementptr inbounds float* %tmp12062, i64 1
- %tmp12064 = getelementptr inbounds float* %tmp12063, i64 1
- %tmp12065 = getelementptr inbounds float* %tmp12064, i64 1
- %tmp12066 = getelementptr inbounds float* %tmp12065, i64 1
- %tmp12067 = getelementptr inbounds float* %tmp12066, i64 1
- %tmp12068 = getelementptr inbounds float* %tmp12067, i64 1
- %tmp12069 = getelementptr inbounds float* %tmp12068, i64 1
- %tmp12070 = getelementptr inbounds float* %tmp12069, i64 1
- %tmp12071 = getelementptr inbounds float* %tmp12070, i64 1
- %tmp12072 = getelementptr inbounds float* %tmp12071, i64 1
- %tmp12073 = getelementptr inbounds float* %tmp12072, i64 1
- %tmp12074 = getelementptr inbounds float* %tmp12073, i64 1
- %tmp12075 = getelementptr inbounds float* %tmp12074, i64 1
- %tmp12076 = getelementptr inbounds float* %tmp12075, i64 1
- %tmp12077 = getelementptr inbounds float* %tmp12076, i64 1
- %tmp12078 = getelementptr inbounds float* %tmp12077, i64 1
- %tmp12079 = getelementptr inbounds float* %tmp12078, i64 1
- %tmp12080 = getelementptr inbounds float* %tmp12079, i64 1
- %tmp12081 = getelementptr inbounds float* %tmp12080, i64 1
- %tmp12082 = getelementptr inbounds float* %tmp12081, i64 1
- %tmp12083 = getelementptr inbounds float* %tmp12082, i64 1
- %tmp12084 = getelementptr inbounds float* %tmp12083, i64 1
- %tmp12085 = getelementptr inbounds float* %tmp12084, i64 1
- %tmp12086 = getelementptr inbounds float* %tmp12085, i64 1
- %tmp12087 = getelementptr inbounds float* %tmp12086, i64 1
- %tmp12088 = getelementptr inbounds float* %tmp12087, i64 1
- %tmp12089 = getelementptr inbounds float* %tmp12088, i64 1
- %tmp12090 = getelementptr inbounds float* %tmp12089, i64 1
- %tmp12091 = getelementptr inbounds float* %tmp12090, i64 1
- %tmp12092 = getelementptr inbounds float* %tmp12091, i64 1
- %tmp12093 = getelementptr inbounds float* %tmp12092, i64 1
- %tmp12094 = getelementptr inbounds float* %tmp12093, i64 1
- %tmp12095 = getelementptr inbounds float* %tmp12094, i64 1
- %tmp12096 = getelementptr inbounds float* %tmp12095, i64 1
- %tmp12097 = getelementptr inbounds float* %tmp12096, i64 1
- %tmp12098 = getelementptr inbounds float* %tmp12097, i64 1
- %tmp12099 = getelementptr inbounds float* %tmp12098, i64 1
- %tmp12100 = getelementptr inbounds float* %tmp12099, i64 1
- %tmp12101 = getelementptr inbounds float* %tmp12100, i64 1
- %tmp12102 = getelementptr inbounds float* %tmp12101, i64 1
- %tmp12103 = getelementptr inbounds float* %tmp12102, i64 1
- %tmp12104 = getelementptr inbounds float* %tmp12103, i64 1
- %tmp12105 = getelementptr inbounds float* %tmp12104, i64 1
- %tmp12106 = getelementptr inbounds float* %tmp12105, i64 1
- %tmp12107 = getelementptr inbounds float* %tmp12106, i64 1
- %tmp12108 = getelementptr inbounds float* %tmp12107, i64 1
- %tmp12109 = getelementptr inbounds float* %tmp12108, i64 1
- %tmp12110 = getelementptr inbounds float* %tmp12109, i64 1
- %tmp12111 = getelementptr inbounds float* %tmp12110, i64 1
- %tmp12112 = getelementptr inbounds float* %tmp12111, i64 1
- %tmp12113 = getelementptr inbounds float* %tmp12112, i64 1
- %tmp12114 = getelementptr inbounds float* %tmp12113, i64 1
- %tmp12115 = getelementptr inbounds float* %tmp12114, i64 1
- %tmp12116 = getelementptr inbounds float* %tmp12115, i64 1
- %tmp12117 = getelementptr inbounds float* %tmp12116, i64 1
- %tmp12118 = getelementptr inbounds float* %tmp12117, i64 1
- %tmp12119 = getelementptr inbounds float* %tmp12118, i64 1
- %tmp12120 = getelementptr inbounds float* %tmp12119, i64 1
- %tmp12121 = getelementptr inbounds float* %tmp12120, i64 1
- %tmp12122 = getelementptr inbounds float* %tmp12121, i64 1
- %tmp12123 = getelementptr inbounds float* %tmp12122, i64 1
- %tmp12124 = getelementptr inbounds float* %tmp12123, i64 1
- %tmp12125 = getelementptr inbounds float* %tmp12124, i64 1
- %tmp12126 = getelementptr inbounds float* %tmp12125, i64 1
- %tmp12127 = getelementptr inbounds float* %tmp12126, i64 1
- %tmp12128 = getelementptr inbounds float* %tmp12127, i64 1
- %tmp12129 = getelementptr inbounds float* %tmp12128, i64 1
- %tmp12130 = getelementptr inbounds float* %tmp12129, i64 1
- %tmp12131 = getelementptr inbounds float* %tmp12130, i64 1
- %tmp12132 = getelementptr inbounds float* %tmp12131, i64 1
- %tmp12133 = getelementptr inbounds float* %tmp12132, i64 1
- %tmp12134 = getelementptr inbounds float* %tmp12133, i64 1
- %tmp12135 = getelementptr inbounds float* %tmp12134, i64 1
- %tmp12136 = getelementptr inbounds float* %tmp12135, i64 1
- %tmp12137 = getelementptr inbounds float* %tmp12136, i64 1
- %tmp12138 = getelementptr inbounds float* %tmp12137, i64 1
- %tmp12139 = getelementptr inbounds float* %tmp12138, i64 1
- %tmp12140 = getelementptr inbounds float* %tmp12139, i64 1
- %tmp12141 = getelementptr inbounds float* %tmp12140, i64 1
- %tmp12142 = getelementptr inbounds float* %tmp12141, i64 1
- %tmp12143 = getelementptr inbounds float* %tmp12142, i64 1
- %tmp12144 = getelementptr inbounds float* %tmp12143, i64 1
- %tmp12145 = getelementptr inbounds float* %tmp12144, i64 1
- %tmp12146 = getelementptr inbounds float* %tmp12145, i64 1
- %tmp12147 = getelementptr inbounds float* %tmp12146, i64 1
- %tmp12148 = getelementptr inbounds float* %tmp12147, i64 1
- %tmp12149 = getelementptr inbounds float* %tmp12148, i64 1
- %tmp12150 = getelementptr inbounds float* %tmp12149, i64 1
- %tmp12151 = getelementptr inbounds float* %tmp12150, i64 1
- %tmp12152 = getelementptr inbounds float* %tmp12151, i64 1
- %tmp12153 = getelementptr inbounds float* %tmp12152, i64 1
- %tmp12154 = getelementptr inbounds float* %tmp12153, i64 1
- %tmp12155 = getelementptr inbounds float* %tmp12154, i64 1
- %tmp12156 = getelementptr inbounds float* %tmp12155, i64 1
- %tmp12157 = getelementptr inbounds float* %tmp12156, i64 1
- %tmp12158 = getelementptr inbounds float* %tmp12157, i64 1
- %tmp12159 = getelementptr inbounds float* %tmp12158, i64 1
- %tmp12160 = getelementptr inbounds float* %tmp12159, i64 1
- %tmp12161 = getelementptr inbounds float* %tmp12160, i64 1
- %tmp12162 = getelementptr inbounds float* %tmp12161, i64 1
- %tmp12163 = getelementptr inbounds float* %tmp12162, i64 1
- %tmp12164 = getelementptr inbounds float* %tmp12163, i64 1
- %tmp12165 = getelementptr inbounds float* %tmp12164, i64 1
- %tmp12166 = getelementptr inbounds float* %tmp12165, i64 1
- %tmp12167 = getelementptr inbounds float* %tmp12166, i64 1
- %tmp12168 = getelementptr inbounds float* %tmp12167, i64 1
- %tmp12169 = getelementptr inbounds float* %tmp12168, i64 1
- %tmp12170 = getelementptr inbounds float* %tmp12169, i64 1
- %tmp12171 = getelementptr inbounds float* %tmp12170, i64 1
- %tmp12172 = getelementptr inbounds float* %tmp12171, i64 1
- %tmp12173 = getelementptr inbounds float* %tmp12172, i64 1
- %tmp12174 = getelementptr inbounds float* %tmp12173, i64 1
- %tmp12175 = getelementptr inbounds float* %tmp12174, i64 1
- %tmp12176 = getelementptr inbounds float* %tmp12175, i64 1
- %tmp12177 = getelementptr inbounds float* %tmp12176, i64 1
- %tmp12178 = getelementptr inbounds float* %tmp12177, i64 1
- %tmp12179 = getelementptr inbounds float* %tmp12178, i64 1
- %tmp12180 = getelementptr inbounds float* %tmp12179, i64 1
- %tmp12181 = getelementptr inbounds float* %tmp12180, i64 1
- %tmp12182 = getelementptr inbounds float* %tmp12181, i64 1
- %tmp12183 = getelementptr inbounds float* %tmp12182, i64 1
- %tmp12184 = getelementptr inbounds float* %tmp12183, i64 1
- %tmp12185 = getelementptr inbounds float* %tmp12184, i64 1
- %tmp12186 = getelementptr inbounds float* %tmp12185, i64 1
- %tmp12187 = getelementptr inbounds float* %tmp12186, i64 1
- %tmp12188 = getelementptr inbounds float* %tmp12187, i64 1
- %tmp12189 = getelementptr inbounds float* %tmp12188, i64 1
- %tmp12190 = getelementptr inbounds float* %tmp12189, i64 1
- %tmp12191 = getelementptr inbounds float* %tmp12190, i64 1
- %tmp12192 = getelementptr inbounds float* %tmp12191, i64 1
- %tmp12193 = getelementptr inbounds float* %tmp12192, i64 1
- %tmp12194 = getelementptr inbounds float* %tmp12193, i64 1
- %tmp12195 = getelementptr inbounds float* %tmp12194, i64 1
- %tmp12196 = getelementptr inbounds float* %tmp12195, i64 1
- %tmp12197 = getelementptr inbounds float* %tmp12196, i64 1
- %tmp12198 = getelementptr inbounds float* %tmp12197, i64 1
- %tmp12199 = getelementptr inbounds float* %tmp12198, i64 1
- %tmp12200 = getelementptr inbounds float* %tmp12199, i64 1
- %tmp12201 = getelementptr inbounds float* %tmp12200, i64 1
- %tmp12202 = getelementptr inbounds float* %tmp12201, i64 1
- %tmp12203 = getelementptr inbounds float* %tmp12202, i64 1
- %tmp12204 = getelementptr inbounds float* %tmp12203, i64 1
- %tmp12205 = getelementptr inbounds float* %tmp12204, i64 1
- %tmp12206 = getelementptr inbounds float* %tmp12205, i64 1
- %tmp12207 = getelementptr inbounds float* %tmp12206, i64 1
- %tmp12208 = getelementptr inbounds float* %tmp12207, i64 1
- %tmp12209 = getelementptr inbounds float* %tmp12208, i64 1
- %tmp12210 = getelementptr inbounds float* %tmp12209, i64 1
- %tmp12211 = getelementptr inbounds float* %tmp12210, i64 1
- %tmp12212 = getelementptr inbounds float* %tmp12211, i64 1
- %tmp12213 = getelementptr inbounds float* %tmp12212, i64 1
- %tmp12214 = getelementptr inbounds float* %tmp12213, i64 1
- %tmp12215 = getelementptr inbounds float* %tmp12214, i64 1
- %tmp12216 = getelementptr inbounds float* %tmp12215, i64 1
- %tmp12217 = getelementptr inbounds float* %tmp12216, i64 1
- %tmp12218 = getelementptr inbounds float* %tmp12217, i64 1
- %tmp12219 = getelementptr inbounds float* %tmp12218, i64 1
- %tmp12220 = getelementptr inbounds float* %tmp12219, i64 1
- %tmp12221 = getelementptr inbounds float* %tmp12220, i64 1
- %tmp12222 = getelementptr inbounds float* %tmp12221, i64 1
- %tmp12223 = getelementptr inbounds float* %tmp12222, i64 1
- %tmp12224 = getelementptr inbounds float* %tmp12223, i64 1
- %tmp12225 = getelementptr inbounds float* %tmp12224, i64 1
- %tmp12226 = getelementptr inbounds float* %tmp12225, i64 1
- %tmp12227 = getelementptr inbounds float* %tmp12226, i64 1
- %tmp12228 = getelementptr inbounds float* %tmp12227, i64 1
- %tmp12229 = getelementptr inbounds float* %tmp12228, i64 1
- %tmp12230 = getelementptr inbounds float* %tmp12229, i64 1
- %tmp12231 = getelementptr inbounds float* %tmp12230, i64 1
- %tmp12232 = getelementptr inbounds float* %tmp12231, i64 1
- %tmp12233 = getelementptr inbounds float* %tmp12232, i64 1
- %tmp12234 = getelementptr inbounds float* %tmp12233, i64 1
- %tmp12235 = getelementptr inbounds float* %tmp12234, i64 1
- %tmp12236 = getelementptr inbounds float* %tmp12235, i64 1
- %tmp12237 = getelementptr inbounds float* %tmp12236, i64 1
- %tmp12238 = getelementptr inbounds float* %tmp12237, i64 1
- %tmp12239 = getelementptr inbounds float* %tmp12238, i64 1
- %tmp12240 = getelementptr inbounds float* %tmp12239, i64 1
- %tmp12241 = getelementptr inbounds float* %tmp12240, i64 1
- %tmp12242 = getelementptr inbounds float* %tmp12241, i64 1
- %tmp12243 = getelementptr inbounds float* %tmp12242, i64 1
- %tmp12244 = getelementptr inbounds float* %tmp12243, i64 1
- %tmp12245 = getelementptr inbounds float* %tmp12244, i64 1
- %tmp12246 = getelementptr inbounds float* %tmp12245, i64 1
- %tmp12247 = getelementptr inbounds float* %tmp12246, i64 1
- %tmp12248 = getelementptr inbounds float* %tmp12247, i64 1
- %tmp12249 = getelementptr inbounds float* %tmp12248, i64 1
- %tmp12250 = getelementptr inbounds float* %tmp12249, i64 1
- %tmp12251 = getelementptr inbounds float* %tmp12250, i64 1
- %tmp12252 = getelementptr inbounds float* %tmp12251, i64 1
- %tmp12253 = getelementptr inbounds float* %tmp12252, i64 1
- %tmp12254 = getelementptr inbounds float* %tmp12253, i64 1
- %tmp12255 = getelementptr inbounds float* %tmp12254, i64 1
- %tmp12256 = getelementptr inbounds float* %tmp12255, i64 1
- %tmp12257 = getelementptr inbounds float* %tmp12256, i64 1
- %tmp12258 = getelementptr inbounds float* %tmp12257, i64 1
- %tmp12259 = getelementptr inbounds float* %tmp12258, i64 1
- %tmp12260 = getelementptr inbounds float* %tmp12259, i64 1
- %tmp12261 = getelementptr inbounds float* %tmp12260, i64 1
- %tmp12262 = getelementptr inbounds float* %tmp12261, i64 1
- %tmp12263 = getelementptr inbounds float* %tmp12262, i64 1
- %tmp12264 = getelementptr inbounds float* %tmp12263, i64 1
- %tmp12265 = getelementptr inbounds float* %tmp12264, i64 1
- %tmp12266 = getelementptr inbounds float* %tmp12265, i64 1
- %tmp12267 = getelementptr inbounds float* %tmp12266, i64 1
- %tmp12268 = getelementptr inbounds float* %tmp12267, i64 1
- %tmp12269 = getelementptr inbounds float* %tmp12268, i64 1
- %tmp12270 = getelementptr inbounds float* %tmp12269, i64 1
- %tmp12271 = getelementptr inbounds float* %tmp12270, i64 1
- %tmp12272 = getelementptr inbounds float* %tmp12271, i64 1
- %tmp12273 = getelementptr inbounds float* %tmp12272, i64 1
- %tmp12274 = getelementptr inbounds float* %tmp12273, i64 1
- %tmp12275 = getelementptr inbounds float* %tmp12274, i64 1
- %tmp12276 = getelementptr inbounds float* %tmp12275, i64 1
- %tmp12277 = getelementptr inbounds float* %tmp12276, i64 1
- %tmp12278 = getelementptr inbounds float* %tmp12277, i64 1
- %tmp12279 = getelementptr inbounds float* %tmp12278, i64 1
- %tmp12280 = getelementptr inbounds float* %tmp12279, i64 1
- %tmp12281 = getelementptr inbounds float* %tmp12280, i64 1
- %tmp12282 = getelementptr inbounds float* %tmp12281, i64 1
- %tmp12283 = getelementptr inbounds float* %tmp12282, i64 1
- %tmp12284 = getelementptr inbounds float* %tmp12283, i64 1
- %tmp12285 = getelementptr inbounds float* %tmp12284, i64 1
- %tmp12286 = getelementptr inbounds float* %tmp12285, i64 1
- %tmp12287 = getelementptr inbounds float* %tmp12286, i64 1
- %tmp12288 = getelementptr inbounds float* %tmp12287, i64 1
- %tmp12289 = getelementptr inbounds float* %tmp12288, i64 1
- %tmp12290 = getelementptr inbounds float* %tmp12289, i64 1
- %tmp12291 = getelementptr inbounds float* %tmp12290, i64 1
- %tmp12292 = getelementptr inbounds float* %tmp12291, i64 1
- %tmp12293 = getelementptr inbounds float* %tmp12292, i64 1
- %tmp12294 = getelementptr inbounds float* %tmp12293, i64 1
- %tmp12295 = getelementptr inbounds float* %tmp12294, i64 1
- %tmp12296 = getelementptr inbounds float* %tmp12295, i64 1
- %tmp12297 = getelementptr inbounds float* %tmp12296, i64 1
- %tmp12298 = getelementptr inbounds float* %tmp12297, i64 1
- %tmp12299 = getelementptr inbounds float* %tmp12298, i64 1
- %tmp12300 = getelementptr inbounds float* %tmp12299, i64 1
- %tmp12301 = getelementptr inbounds float* %tmp12300, i64 1
- %tmp12302 = getelementptr inbounds float* %tmp12301, i64 1
- %tmp12303 = getelementptr inbounds float* %tmp12302, i64 1
- %tmp12304 = getelementptr inbounds float* %tmp12303, i64 1
- %tmp12305 = getelementptr inbounds float* %tmp12304, i64 1
- %tmp12306 = getelementptr inbounds float* %tmp12305, i64 1
- %tmp12307 = getelementptr inbounds float* %tmp12306, i64 1
- %tmp12308 = getelementptr inbounds float* %tmp12307, i64 1
- %tmp12309 = getelementptr inbounds float* %tmp12308, i64 1
- %tmp12310 = getelementptr inbounds float* %tmp12309, i64 1
- %tmp12311 = getelementptr inbounds float* %tmp12310, i64 1
- %tmp12312 = getelementptr inbounds float* %tmp12311, i64 1
- %tmp12313 = getelementptr inbounds float* %tmp12312, i64 1
- %tmp12314 = getelementptr inbounds float* %tmp12313, i64 1
- %tmp12315 = getelementptr inbounds float* %tmp12314, i64 1
- %tmp12316 = getelementptr inbounds float* %tmp12315, i64 1
- %tmp12317 = getelementptr inbounds float* %tmp12316, i64 1
- %tmp12318 = getelementptr inbounds float* %tmp12317, i64 1
- %tmp12319 = getelementptr inbounds float* %tmp12318, i64 1
- %tmp12320 = getelementptr inbounds float* %tmp12319, i64 1
- %tmp12321 = getelementptr inbounds float* %tmp12320, i64 1
- %tmp12322 = getelementptr inbounds float* %tmp12321, i64 1
- %tmp12323 = getelementptr inbounds float* %tmp12322, i64 1
- %tmp12324 = getelementptr inbounds float* %tmp12323, i64 1
- %tmp12325 = getelementptr inbounds float* %tmp12324, i64 1
- %tmp12326 = getelementptr inbounds float* %tmp12325, i64 1
- %tmp12327 = getelementptr inbounds float* %tmp12326, i64 1
- %tmp12328 = getelementptr inbounds float* %tmp12327, i64 1
- %tmp12329 = getelementptr inbounds float* %tmp12328, i64 1
- %tmp12330 = getelementptr inbounds float* %tmp12329, i64 1
- %tmp12331 = getelementptr inbounds float* %tmp12330, i64 1
- %tmp12332 = getelementptr inbounds float* %tmp12331, i64 1
- %tmp12333 = getelementptr inbounds float* %tmp12332, i64 1
- %tmp12334 = getelementptr inbounds float* %tmp12333, i64 1
- %tmp12335 = getelementptr inbounds float* %tmp12334, i64 1
- %tmp12336 = getelementptr inbounds float* %tmp12335, i64 1
- %tmp12337 = getelementptr inbounds float* %tmp12336, i64 1
- %tmp12338 = getelementptr inbounds float* %tmp12337, i64 1
- %tmp12339 = getelementptr inbounds float* %tmp12338, i64 1
- %tmp12340 = getelementptr inbounds float* %tmp12339, i64 1
- %tmp12341 = getelementptr inbounds float* %tmp12340, i64 1
- %tmp12342 = getelementptr inbounds float* %tmp12341, i64 1
- %tmp12343 = getelementptr inbounds float* %tmp12342, i64 1
- %tmp12344 = getelementptr inbounds float* %tmp12343, i64 1
- %tmp12345 = getelementptr inbounds float* %tmp12344, i64 1
- %tmp12346 = getelementptr inbounds float* %tmp12345, i64 1
- %tmp12347 = getelementptr inbounds float* %tmp12346, i64 1
- %tmp12348 = getelementptr inbounds float* %tmp12347, i64 1
- %tmp12349 = getelementptr inbounds float* %tmp12348, i64 1
- %tmp12350 = getelementptr inbounds float* %tmp12349, i64 1
- %tmp12351 = getelementptr inbounds float* %tmp12350, i64 1
- %tmp12352 = getelementptr inbounds float* %tmp12351, i64 1
- %tmp12353 = getelementptr inbounds float* %tmp12352, i64 1
- %tmp12354 = getelementptr inbounds float* %tmp12353, i64 1
- %tmp12355 = getelementptr inbounds float* %tmp12354, i64 1
- %tmp12356 = getelementptr inbounds float* %tmp12355, i64 1
- %tmp12357 = getelementptr inbounds float* %tmp12356, i64 1
- %tmp12358 = getelementptr inbounds float* %tmp12357, i64 1
- %tmp12359 = getelementptr inbounds float* %tmp12358, i64 1
- %tmp12360 = getelementptr inbounds float* %tmp12359, i64 1
- %tmp12361 = getelementptr inbounds float* %tmp12360, i64 1
- %tmp12362 = getelementptr inbounds float* %tmp12361, i64 1
- %tmp12363 = getelementptr inbounds float* %tmp12362, i64 1
- %tmp12364 = getelementptr inbounds float* %tmp12363, i64 1
- %tmp12365 = getelementptr inbounds float* %tmp12364, i64 1
- %tmp12366 = getelementptr inbounds float* %tmp12365, i64 1
- %tmp12367 = getelementptr inbounds float* %tmp12366, i64 1
- %tmp12368 = getelementptr inbounds float* %tmp12367, i64 1
- %tmp12369 = getelementptr inbounds float* %tmp12368, i64 1
- %tmp12370 = getelementptr inbounds float* %tmp12369, i64 1
- %tmp12371 = getelementptr inbounds float* %tmp12370, i64 1
- %tmp12372 = getelementptr inbounds float* %tmp12371, i64 1
- %tmp12373 = getelementptr inbounds float* %tmp12372, i64 1
- %tmp12374 = getelementptr inbounds float* %tmp12373, i64 1
- %tmp12375 = getelementptr inbounds float* %tmp12374, i64 1
- %tmp12376 = getelementptr inbounds float* %tmp12375, i64 1
- %tmp12377 = getelementptr inbounds float* %tmp12376, i64 1
- %tmp12378 = getelementptr inbounds float* %tmp12377, i64 1
- %tmp12379 = getelementptr inbounds float* %tmp12378, i64 1
- %tmp12380 = getelementptr inbounds float* %tmp12379, i64 1
- %tmp12381 = getelementptr inbounds float* %tmp12380, i64 1
- %tmp12382 = getelementptr inbounds float* %tmp12381, i64 1
- %tmp12383 = getelementptr inbounds float* %tmp12382, i64 1
- %tmp12384 = getelementptr inbounds float* %tmp12383, i64 1
- %tmp12385 = getelementptr inbounds float* %tmp12384, i64 1
- %tmp12386 = getelementptr inbounds float* %tmp12385, i64 1
- %tmp12387 = getelementptr inbounds float* %tmp12386, i64 1
- %tmp12388 = getelementptr inbounds float* %tmp12387, i64 1
- %tmp12389 = getelementptr inbounds float* %tmp12388, i64 1
- %tmp12390 = getelementptr inbounds float* %tmp12389, i64 1
- %tmp12391 = getelementptr inbounds float* %tmp12390, i64 1
- %tmp12392 = getelementptr inbounds float* %tmp12391, i64 1
- %tmp12393 = getelementptr inbounds float* %tmp12392, i64 1
- %tmp12394 = getelementptr inbounds float* %tmp12393, i64 1
- %tmp12395 = getelementptr inbounds float* %tmp12394, i64 1
- %tmp12396 = getelementptr inbounds float* %tmp12395, i64 1
- %tmp12397 = getelementptr inbounds float* %tmp12396, i64 1
- %tmp12398 = getelementptr inbounds float* %tmp12397, i64 1
- %tmp12399 = getelementptr inbounds float* %tmp12398, i64 1
- %tmp12400 = getelementptr inbounds float* %tmp12399, i64 1
- %tmp12401 = getelementptr inbounds float* %tmp12400, i64 1
- %tmp12402 = getelementptr inbounds float* %tmp12401, i64 1
- %tmp12403 = getelementptr inbounds float* %tmp12402, i64 1
- %tmp12404 = getelementptr inbounds float* %tmp12403, i64 1
- %tmp12405 = getelementptr inbounds float* %tmp12404, i64 1
- %tmp12406 = getelementptr inbounds float* %tmp12405, i64 1
- %tmp12407 = getelementptr inbounds float* %tmp12406, i64 1
- %tmp12408 = getelementptr inbounds float* %tmp12407, i64 1
- %tmp12409 = getelementptr inbounds float* %tmp12408, i64 1
- %tmp12410 = getelementptr inbounds float* %tmp12409, i64 1
- %tmp12411 = getelementptr inbounds float* %tmp12410, i64 1
- %tmp12412 = getelementptr inbounds float* %tmp12411, i64 1
- %tmp12413 = getelementptr inbounds float* %tmp12412, i64 1
- %tmp12414 = getelementptr inbounds float* %tmp12413, i64 1
- %tmp12415 = getelementptr inbounds float* %tmp12414, i64 1
- %tmp12416 = getelementptr inbounds float* %tmp12415, i64 1
- %tmp12417 = getelementptr inbounds float* %tmp12416, i64 1
- %tmp12418 = getelementptr inbounds float* %tmp12417, i64 1
- %tmp12419 = getelementptr inbounds float* %tmp12418, i64 1
- %tmp12420 = getelementptr inbounds float* %tmp12419, i64 1
- %tmp12421 = getelementptr inbounds float* %tmp12420, i64 1
- %tmp12422 = getelementptr inbounds float* %tmp12421, i64 1
- %tmp12423 = getelementptr inbounds float* %tmp12422, i64 1
- %tmp12424 = getelementptr inbounds float* %tmp12423, i64 1
- %tmp12425 = getelementptr inbounds float* %tmp12424, i64 1
- %tmp12426 = getelementptr inbounds float* %tmp12425, i64 1
- %tmp12427 = getelementptr inbounds float* %tmp12426, i64 1
- %tmp12428 = getelementptr inbounds float* %tmp12427, i64 1
- %tmp12429 = getelementptr inbounds float* %tmp12428, i64 1
- %tmp12430 = getelementptr inbounds float* %tmp12429, i64 1
- %tmp12431 = getelementptr inbounds float* %tmp12430, i64 1
- %tmp12432 = getelementptr inbounds float* %tmp12431, i64 1
- %tmp12433 = getelementptr inbounds float* %tmp12432, i64 1
- %tmp12434 = getelementptr inbounds float* %tmp12433, i64 1
- %tmp12435 = getelementptr inbounds float* %tmp12434, i64 1
- %tmp12436 = getelementptr inbounds float* %tmp12435, i64 1
- %tmp12437 = getelementptr inbounds float* %tmp12436, i64 1
- %tmp12438 = getelementptr inbounds float* %tmp12437, i64 1
- %tmp12439 = getelementptr inbounds float* %tmp12438, i64 1
- %tmp12440 = getelementptr inbounds float* %tmp12439, i64 1
- %tmp12441 = getelementptr inbounds float* %tmp12440, i64 1
- %tmp12442 = getelementptr inbounds float* %tmp12441, i64 1
- %tmp12443 = getelementptr inbounds float* %tmp12442, i64 1
- %tmp12444 = getelementptr inbounds float* %tmp12443, i64 1
- %tmp12445 = getelementptr inbounds float* %tmp12444, i64 1
- %tmp12446 = getelementptr inbounds float* %tmp12445, i64 1
- %tmp12447 = getelementptr inbounds float* %tmp12446, i64 1
- %tmp12448 = getelementptr inbounds float* %tmp12447, i64 1
- %tmp12449 = getelementptr inbounds float* %tmp12448, i64 1
- %tmp12450 = getelementptr inbounds float* %tmp12449, i64 1
- %tmp12451 = getelementptr inbounds float* %tmp12450, i64 1
- %tmp12452 = getelementptr inbounds float* %tmp12451, i64 1
- %tmp12453 = getelementptr inbounds float* %tmp12452, i64 1
- %tmp12454 = getelementptr inbounds float* %tmp12453, i64 1
- %tmp12455 = getelementptr inbounds float* %tmp12454, i64 1
- %tmp12456 = getelementptr inbounds float* %tmp12455, i64 1
- %tmp12457 = getelementptr inbounds float* %tmp12456, i64 1
- %tmp12458 = getelementptr inbounds float* %tmp12457, i64 1
- %tmp12459 = getelementptr inbounds float* %tmp12458, i64 1
- %tmp12460 = getelementptr inbounds float* %tmp12459, i64 1
- %tmp12461 = getelementptr inbounds float* %tmp12460, i64 1
- %tmp12462 = getelementptr inbounds float* %tmp12461, i64 1
- %tmp12463 = getelementptr inbounds float* %tmp12462, i64 1
- %tmp12464 = getelementptr inbounds float* %tmp12463, i64 1
- %tmp12465 = getelementptr inbounds float* %tmp12464, i64 1
- %tmp12466 = getelementptr inbounds float* %tmp12465, i64 1
- %tmp12467 = getelementptr inbounds float* %tmp12466, i64 1
- %tmp12468 = getelementptr inbounds float* %tmp12467, i64 1
- %tmp12469 = getelementptr inbounds float* %tmp12468, i64 1
- %tmp12470 = getelementptr inbounds float* %tmp12469, i64 1
- %tmp12471 = getelementptr inbounds float* %tmp12470, i64 1
- %tmp12472 = getelementptr inbounds float* %tmp12471, i64 1
- %tmp12473 = getelementptr inbounds float* %tmp12472, i64 1
- %tmp12474 = getelementptr inbounds float* %tmp12473, i64 1
- %tmp12475 = getelementptr inbounds float* %tmp12474, i64 1
- %tmp12476 = getelementptr inbounds float* %tmp12475, i64 1
- %tmp12477 = getelementptr inbounds float* %tmp12476, i64 1
- %tmp12478 = getelementptr inbounds float* %tmp12477, i64 1
- %tmp12479 = getelementptr inbounds float* %tmp12478, i64 1
- %tmp12480 = getelementptr inbounds float* %tmp12479, i64 1
- %tmp12481 = getelementptr inbounds float* %tmp12480, i64 1
- %tmp12482 = getelementptr inbounds float* %tmp12481, i64 1
- %tmp12483 = getelementptr inbounds float* %tmp12482, i64 1
- %tmp12484 = getelementptr inbounds float* %tmp12483, i64 1
- %tmp12485 = getelementptr inbounds float* %tmp12484, i64 1
- %tmp12486 = getelementptr inbounds float* %tmp12485, i64 1
- %tmp12487 = getelementptr inbounds float* %tmp12486, i64 1
- %tmp12488 = getelementptr inbounds float* %tmp12487, i64 1
- %tmp12489 = getelementptr inbounds float* %tmp12488, i64 1
- %tmp12490 = getelementptr inbounds float* %tmp12489, i64 1
- %tmp12491 = getelementptr inbounds float* %tmp12490, i64 1
- %tmp12492 = getelementptr inbounds float* %tmp12491, i64 1
- %tmp12493 = getelementptr inbounds float* %tmp12492, i64 1
- %tmp12494 = getelementptr inbounds float* %tmp12493, i64 1
- %tmp12495 = getelementptr inbounds float* %tmp12494, i64 1
- %tmp12496 = getelementptr inbounds float* %tmp12495, i64 1
- %tmp12497 = getelementptr inbounds float* %tmp12496, i64 1
- %tmp12498 = getelementptr inbounds float* %tmp12497, i64 1
- %tmp12499 = getelementptr inbounds float* %tmp12498, i64 1
- %tmp12500 = getelementptr inbounds float* %tmp12499, i64 1
- %tmp12501 = getelementptr inbounds float* %tmp12500, i64 1
- %tmp12502 = getelementptr inbounds float* %tmp12501, i64 1
- %tmp12503 = getelementptr inbounds float* %tmp12502, i64 1
- %tmp12504 = getelementptr inbounds float* %tmp12503, i64 1
- %tmp12505 = getelementptr inbounds float* %tmp12504, i64 1
- %tmp12506 = getelementptr inbounds float* %tmp12505, i64 1
- %tmp12507 = getelementptr inbounds float* %tmp12506, i64 1
- %tmp12508 = getelementptr inbounds float* %tmp12507, i64 1
- %tmp12509 = getelementptr inbounds float* %tmp12508, i64 1
- %tmp12510 = getelementptr inbounds float* %tmp12509, i64 1
- %tmp12511 = getelementptr inbounds float* %tmp12510, i64 1
- %tmp12512 = getelementptr inbounds float* %tmp12511, i64 1
- %tmp12513 = getelementptr inbounds float* %tmp12512, i64 1
- %tmp12514 = getelementptr inbounds float* %tmp12513, i64 1
- %tmp12515 = getelementptr inbounds float* %tmp12514, i64 1
- %tmp12516 = getelementptr inbounds float* %tmp12515, i64 1
- %tmp12517 = getelementptr inbounds float* %tmp12516, i64 1
- %tmp12518 = getelementptr inbounds float* %tmp12517, i64 1
- %tmp12519 = getelementptr inbounds float* %tmp12518, i64 1
- %tmp12520 = getelementptr inbounds float* %tmp12519, i64 1
- %tmp12521 = getelementptr inbounds float* %tmp12520, i64 1
- %tmp12522 = getelementptr inbounds float* %tmp12521, i64 1
- %tmp12523 = getelementptr inbounds float* %tmp12522, i64 1
- %tmp12524 = getelementptr inbounds float* %tmp12523, i64 1
- %tmp12525 = getelementptr inbounds float* %tmp12524, i64 1
- %tmp12526 = getelementptr inbounds float* %tmp12525, i64 1
- %tmp12527 = getelementptr inbounds float* %tmp12526, i64 1
- %tmp12528 = getelementptr inbounds float* %tmp12527, i64 1
- %tmp12529 = getelementptr inbounds float* %tmp12528, i64 1
- %tmp12530 = getelementptr inbounds float* %tmp12529, i64 1
- %tmp12531 = getelementptr inbounds float* %tmp12530, i64 1
- %tmp12532 = getelementptr inbounds float* %tmp12531, i64 1
- %tmp12533 = getelementptr inbounds float* %tmp12532, i64 1
- %tmp12534 = getelementptr inbounds float* %tmp12533, i64 1
- %tmp12535 = getelementptr inbounds float* %tmp12534, i64 1
- %tmp12536 = getelementptr inbounds float* %tmp12535, i64 1
- %tmp12537 = getelementptr inbounds float* %tmp12536, i64 1
- %tmp12538 = getelementptr inbounds float* %tmp12537, i64 1
- %tmp12539 = getelementptr inbounds float* %tmp12538, i64 1
- %tmp12540 = getelementptr inbounds float* %tmp12539, i64 1
- %tmp12541 = getelementptr inbounds float* %tmp12540, i64 1
- %tmp12542 = getelementptr inbounds float* %tmp12541, i64 1
- %tmp12543 = getelementptr inbounds float* %tmp12542, i64 1
- %tmp12544 = getelementptr inbounds float* %tmp12543, i64 1
- %tmp12545 = getelementptr inbounds float* %tmp12544, i64 1
- %tmp12546 = getelementptr inbounds float* %tmp12545, i64 1
- %tmp12547 = getelementptr inbounds float* %tmp12546, i64 1
- %tmp12548 = getelementptr inbounds float* %tmp12547, i64 1
- %tmp12549 = getelementptr inbounds float* %tmp12548, i64 1
- %tmp12550 = getelementptr inbounds float* %tmp12549, i64 1
- %tmp12551 = getelementptr inbounds float* %tmp12550, i64 1
- %tmp12552 = getelementptr inbounds float* %tmp12551, i64 1
- %tmp12553 = getelementptr inbounds float* %tmp12552, i64 1
- %tmp12554 = getelementptr inbounds float* %tmp12553, i64 1
- %tmp12555 = getelementptr inbounds float* %tmp12554, i64 1
- %tmp12556 = getelementptr inbounds float* %tmp12555, i64 1
- %tmp12557 = getelementptr inbounds float* %tmp12556, i64 1
- %tmp12558 = getelementptr inbounds float* %tmp12557, i64 1
- %tmp12559 = getelementptr inbounds float* %tmp12558, i64 1
- %tmp12560 = getelementptr inbounds float* %tmp12559, i64 1
- %tmp12561 = getelementptr inbounds float* %tmp12560, i64 1
- %tmp12562 = getelementptr inbounds float* %tmp12561, i64 1
- %tmp12563 = getelementptr inbounds float* %tmp12562, i64 1
- %tmp12564 = getelementptr inbounds float* %tmp12563, i64 1
- %tmp12565 = getelementptr inbounds float* %tmp12564, i64 1
- %tmp12566 = getelementptr inbounds float* %tmp12565, i64 1
- %tmp12567 = getelementptr inbounds float* %tmp12566, i64 1
- %tmp12568 = getelementptr inbounds float* %tmp12567, i64 1
- %tmp12569 = getelementptr inbounds float* %tmp12568, i64 1
- %tmp12570 = getelementptr inbounds float* %tmp12569, i64 1
- %tmp12571 = getelementptr inbounds float* %tmp12570, i64 1
- %tmp12572 = getelementptr inbounds float* %tmp12571, i64 1
- %tmp12573 = getelementptr inbounds float* %tmp12572, i64 1
- %tmp12574 = getelementptr inbounds float* %tmp12573, i64 1
- %tmp12575 = getelementptr inbounds float* %tmp12574, i64 1
- %tmp12576 = getelementptr inbounds float* %tmp12575, i64 1
- %tmp12577 = getelementptr inbounds float* %tmp12576, i64 1
- %tmp12578 = getelementptr inbounds float* %tmp12577, i64 1
- %tmp12579 = getelementptr inbounds float* %tmp12578, i64 1
- %tmp12580 = getelementptr inbounds float* %tmp12579, i64 1
- %tmp12581 = getelementptr inbounds float* %tmp12580, i64 1
- %tmp12582 = getelementptr inbounds float* %tmp12581, i64 1
- %tmp12583 = getelementptr inbounds float* %tmp12582, i64 1
- %tmp12584 = getelementptr inbounds float* %tmp12583, i64 1
- %tmp12585 = getelementptr inbounds float* %tmp12584, i64 1
- %tmp12586 = getelementptr inbounds float* %tmp12585, i64 1
- %tmp12587 = getelementptr inbounds float* %tmp12586, i64 1
- %tmp12588 = getelementptr inbounds float* %tmp12587, i64 1
- %tmp12589 = getelementptr inbounds float* %tmp12588, i64 1
- %tmp12590 = getelementptr inbounds float* %tmp12589, i64 1
- %tmp12591 = getelementptr inbounds float* %tmp12590, i64 1
- %tmp12592 = getelementptr inbounds float* %tmp12591, i64 1
- %tmp12593 = getelementptr inbounds float* %tmp12592, i64 1
- %tmp12594 = getelementptr inbounds float* %tmp12593, i64 1
- %tmp12595 = getelementptr inbounds float* %tmp12594, i64 1
- %tmp12596 = getelementptr inbounds float* %tmp12595, i64 1
- %tmp12597 = getelementptr inbounds float* %tmp12596, i64 1
- %tmp12598 = getelementptr inbounds float* %tmp12597, i64 1
- %tmp12599 = getelementptr inbounds float* %tmp12598, i64 1
- %tmp12600 = getelementptr inbounds float* %tmp12599, i64 1
- %tmp12601 = getelementptr inbounds float* %tmp12600, i64 1
- %tmp12602 = getelementptr inbounds float* %tmp12601, i64 1
- %tmp12603 = getelementptr inbounds float* %tmp12602, i64 1
- %tmp12604 = getelementptr inbounds float* %tmp12603, i64 1
- %tmp12605 = getelementptr inbounds float* %tmp12604, i64 1
- %tmp12606 = getelementptr inbounds float* %tmp12605, i64 1
- %tmp12607 = getelementptr inbounds float* %tmp12606, i64 1
- %tmp12608 = getelementptr inbounds float* %tmp12607, i64 1
- %tmp12609 = getelementptr inbounds float* %tmp12608, i64 1
- %tmp12610 = getelementptr inbounds float* %tmp12609, i64 1
- %tmp12611 = getelementptr inbounds float* %tmp12610, i64 1
- %tmp12612 = getelementptr inbounds float* %tmp12611, i64 1
- %tmp12613 = getelementptr inbounds float* %tmp12612, i64 1
- %tmp12614 = getelementptr inbounds float* %tmp12613, i64 1
- %tmp12615 = getelementptr inbounds float* %tmp12614, i64 1
- %tmp12616 = getelementptr inbounds float* %tmp12615, i64 1
- %tmp12617 = getelementptr inbounds float* %tmp12616, i64 1
- %tmp12618 = getelementptr inbounds float* %tmp12617, i64 1
- %tmp12619 = getelementptr inbounds float* %tmp12618, i64 1
- %tmp12620 = getelementptr inbounds float* %tmp12619, i64 1
- %tmp12621 = getelementptr inbounds float* %tmp12620, i64 1
- %tmp12622 = getelementptr inbounds float* %tmp12621, i64 1
- %tmp12623 = getelementptr inbounds float* %tmp12622, i64 1
- %tmp12624 = getelementptr inbounds float* %tmp12623, i64 1
- %tmp12625 = getelementptr inbounds float* %tmp12624, i64 1
- %tmp12626 = getelementptr inbounds float* %tmp12625, i64 1
- %tmp12627 = getelementptr inbounds float* %tmp12626, i64 1
- %tmp12628 = getelementptr inbounds float* %tmp12627, i64 1
- %tmp12629 = getelementptr inbounds float* %tmp12628, i64 1
- %tmp12630 = getelementptr inbounds float* %tmp12629, i64 1
- %tmp12631 = getelementptr inbounds float* %tmp12630, i64 1
- %tmp12632 = getelementptr inbounds float* %tmp12631, i64 1
- %tmp12633 = getelementptr inbounds float* %tmp12632, i64 1
- %tmp12634 = getelementptr inbounds float* %tmp12633, i64 1
- %tmp12635 = getelementptr inbounds float* %tmp12634, i64 1
- %tmp12636 = getelementptr inbounds float* %tmp12635, i64 1
- %tmp12637 = getelementptr inbounds float* %tmp12636, i64 1
- %tmp12638 = getelementptr inbounds float* %tmp12637, i64 1
- %tmp12639 = getelementptr inbounds float* %tmp12638, i64 1
- %tmp12640 = getelementptr inbounds float* %tmp12639, i64 1
- %tmp12641 = getelementptr inbounds float* %tmp12640, i64 1
- %tmp12642 = getelementptr inbounds float* %tmp12641, i64 1
- %tmp12643 = getelementptr inbounds float* %tmp12642, i64 1
- %tmp12644 = getelementptr inbounds float* %tmp12643, i64 1
- %tmp12645 = getelementptr inbounds float* %tmp12644, i64 1
- %tmp12646 = getelementptr inbounds float* %tmp12645, i64 1
- %tmp12647 = getelementptr inbounds float* %tmp12646, i64 1
- %tmp12648 = getelementptr inbounds float* %tmp12647, i64 1
- %tmp12649 = getelementptr inbounds float* %tmp12648, i64 1
- %tmp12650 = getelementptr inbounds float* %tmp12649, i64 1
- %tmp12651 = getelementptr inbounds float* %tmp12650, i64 1
- %tmp12652 = getelementptr inbounds float* %tmp12651, i64 1
- %tmp12653 = getelementptr inbounds float* %tmp12652, i64 1
- %tmp12654 = getelementptr inbounds float* %tmp12653, i64 1
- %tmp12655 = getelementptr inbounds float* %tmp12654, i64 1
- %tmp12656 = getelementptr inbounds float* %tmp12655, i64 1
- %tmp12657 = getelementptr inbounds float* %tmp12656, i64 1
- %tmp12658 = getelementptr inbounds float* %tmp12657, i64 1
- %tmp12659 = getelementptr inbounds float* %tmp12658, i64 1
- %tmp12660 = getelementptr inbounds float* %tmp12659, i64 1
- %tmp12661 = getelementptr inbounds float* %tmp12660, i64 1
- %tmp12662 = getelementptr inbounds float* %tmp12661, i64 1
- %tmp12663 = getelementptr inbounds float* %tmp12662, i64 1
- %tmp12664 = getelementptr inbounds float* %tmp12663, i64 1
- %tmp12665 = getelementptr inbounds float* %tmp12664, i64 1
- %tmp12666 = getelementptr inbounds float* %tmp12665, i64 1
- %tmp12667 = getelementptr inbounds float* %tmp12666, i64 1
- %tmp12668 = getelementptr inbounds float* %tmp12667, i64 1
- %tmp12669 = getelementptr inbounds float* %tmp12668, i64 1
- %tmp12670 = getelementptr inbounds float* %tmp12669, i64 1
- %tmp12671 = getelementptr inbounds float* %tmp12670, i64 1
- %tmp12672 = getelementptr inbounds float* %tmp12671, i64 1
- %tmp12673 = getelementptr inbounds float* %tmp12672, i64 1
- %tmp12674 = getelementptr inbounds float* %tmp12673, i64 1
- %tmp12675 = getelementptr inbounds float* %tmp12674, i64 1
- %tmp12676 = getelementptr inbounds float* %tmp12675, i64 1
- %tmp12677 = getelementptr inbounds float* %tmp12676, i64 1
- %tmp12678 = getelementptr inbounds float* %tmp12677, i64 1
- %tmp12679 = getelementptr inbounds float* %tmp12678, i64 1
- %tmp12680 = getelementptr inbounds float* %tmp12679, i64 1
- %tmp12681 = getelementptr inbounds float* %tmp12680, i64 1
- %tmp12682 = getelementptr inbounds float* %tmp12681, i64 1
- %tmp12683 = getelementptr inbounds float* %tmp12682, i64 1
- %tmp12684 = getelementptr inbounds float* %tmp12683, i64 1
- %tmp12685 = getelementptr inbounds float* %tmp12684, i64 1
- %tmp12686 = getelementptr inbounds float* %tmp12685, i64 1
- %tmp12687 = getelementptr inbounds float* %tmp12686, i64 1
- %tmp12688 = getelementptr inbounds float* %tmp12687, i64 1
- %tmp12689 = getelementptr inbounds float* %tmp12688, i64 1
- %tmp12690 = getelementptr inbounds float* %tmp12689, i64 1
- %tmp12691 = getelementptr inbounds float* %tmp12690, i64 1
- %tmp12692 = getelementptr inbounds float* %tmp12691, i64 1
- %tmp12693 = getelementptr inbounds float* %tmp12692, i64 1
- %tmp12694 = getelementptr inbounds float* %tmp12693, i64 1
- %tmp12695 = getelementptr inbounds float* %tmp12694, i64 1
- %tmp12696 = getelementptr inbounds float* %tmp12695, i64 1
- %tmp12697 = getelementptr inbounds float* %tmp12696, i64 1
- %tmp12698 = getelementptr inbounds float* %tmp12697, i64 1
- %tmp12699 = getelementptr inbounds float* %tmp12698, i64 1
- %tmp12700 = getelementptr inbounds float* %tmp12699, i64 1
- %tmp12701 = getelementptr inbounds float* %tmp12700, i64 1
- %tmp12702 = getelementptr inbounds float* %tmp12701, i64 1
- %tmp12703 = getelementptr inbounds float* %tmp12702, i64 1
- %tmp12704 = getelementptr inbounds float* %tmp12703, i64 1
- %tmp12705 = getelementptr inbounds float* %tmp12704, i64 1
- %tmp12706 = getelementptr inbounds float* %tmp12705, i64 1
- %tmp12707 = getelementptr inbounds float* %tmp12706, i64 1
- %tmp12708 = getelementptr inbounds float* %tmp12707, i64 1
- %tmp12709 = getelementptr inbounds float* %tmp12708, i64 1
- %tmp12710 = getelementptr inbounds float* %tmp12709, i64 1
- %tmp12711 = getelementptr inbounds float* %tmp12710, i64 1
- %tmp12712 = getelementptr inbounds float* %tmp12711, i64 1
- %tmp12713 = getelementptr inbounds float* %tmp12712, i64 1
- %tmp12714 = getelementptr inbounds float* %tmp12713, i64 1
- %tmp12715 = getelementptr inbounds float* %tmp12714, i64 1
- %tmp12716 = getelementptr inbounds float* %tmp12715, i64 1
- %tmp12717 = getelementptr inbounds float* %tmp12716, i64 1
- %tmp12718 = getelementptr inbounds float* %tmp12717, i64 1
- %tmp12719 = getelementptr inbounds float* %tmp12718, i64 1
- %tmp12720 = getelementptr inbounds float* %tmp12719, i64 1
- %tmp12721 = getelementptr inbounds float* %tmp12720, i64 1
- %tmp12722 = getelementptr inbounds float* %tmp12721, i64 1
- %tmp12723 = getelementptr inbounds float* %tmp12722, i64 1
- %tmp12724 = getelementptr inbounds float* %tmp12723, i64 1
- %tmp12725 = getelementptr inbounds float* %tmp12724, i64 1
- %tmp12726 = getelementptr inbounds float* %tmp12725, i64 1
- %tmp12727 = getelementptr inbounds float* %tmp12726, i64 1
- %tmp12728 = getelementptr inbounds float* %tmp12727, i64 1
- %tmp12729 = getelementptr inbounds float* %tmp12728, i64 1
- %tmp12730 = getelementptr inbounds float* %tmp12729, i64 1
- %tmp12731 = getelementptr inbounds float* %tmp12730, i64 1
- %tmp12732 = getelementptr inbounds float* %tmp12731, i64 1
- %tmp12733 = getelementptr inbounds float* %tmp12732, i64 1
- %tmp12734 = getelementptr inbounds float* %tmp12733, i64 1
- %tmp12735 = getelementptr inbounds float* %tmp12734, i64 1
- %tmp12736 = getelementptr inbounds float* %tmp12735, i64 1
- %tmp12737 = getelementptr inbounds float* %tmp12736, i64 1
- %tmp12738 = getelementptr inbounds float* %tmp12737, i64 1
- %tmp12739 = getelementptr inbounds float* %tmp12738, i64 1
- %tmp12740 = getelementptr inbounds float* %tmp12739, i64 1
- %tmp12741 = getelementptr inbounds float* %tmp12740, i64 1
- %tmp12742 = getelementptr inbounds float* %tmp12741, i64 1
- %tmp12743 = getelementptr inbounds float* %tmp12742, i64 1
- %tmp12744 = getelementptr inbounds float* %tmp12743, i64 1
- %tmp12745 = getelementptr inbounds float* %tmp12744, i64 1
- %tmp12746 = getelementptr inbounds float* %tmp12745, i64 1
- %tmp12747 = getelementptr inbounds float* %tmp12746, i64 1
- %tmp12748 = getelementptr inbounds float* %tmp12747, i64 1
- %tmp12749 = getelementptr inbounds float* %tmp12748, i64 1
- %tmp12750 = getelementptr inbounds float* %tmp12749, i64 1
- %tmp12751 = getelementptr inbounds float* %tmp12750, i64 1
- %tmp12752 = getelementptr inbounds float* %tmp12751, i64 1
- %tmp12753 = getelementptr inbounds float* %tmp12752, i64 1
- %tmp12754 = getelementptr inbounds float* %tmp12753, i64 1
- %tmp12755 = getelementptr inbounds float* %tmp12754, i64 1
- %tmp12756 = getelementptr inbounds float* %tmp12755, i64 1
- %tmp12757 = getelementptr inbounds float* %tmp12756, i64 1
- %tmp12758 = getelementptr inbounds float* %tmp12757, i64 1
- %tmp12759 = getelementptr inbounds float* %tmp12758, i64 1
- %tmp12760 = getelementptr inbounds float* %tmp12759, i64 1
- %tmp12761 = getelementptr inbounds float* %tmp12760, i64 1
- %tmp12762 = getelementptr inbounds float* %tmp12761, i64 1
- %tmp12763 = getelementptr inbounds float* %tmp12762, i64 1
- %tmp12764 = getelementptr inbounds float* %tmp12763, i64 1
- %tmp12765 = getelementptr inbounds float* %tmp12764, i64 1
- %tmp12766 = getelementptr inbounds float* %tmp12765, i64 1
- %tmp12767 = getelementptr inbounds float* %tmp12766, i64 1
- %tmp12768 = getelementptr inbounds float* %tmp12767, i64 1
- %tmp12769 = getelementptr inbounds float* %tmp12768, i64 1
- %tmp12770 = getelementptr inbounds float* %tmp12769, i64 1
- %tmp12771 = getelementptr inbounds float* %tmp12770, i64 1
- %tmp12772 = getelementptr inbounds float* %tmp12771, i64 1
- %tmp12773 = getelementptr inbounds float* %tmp12772, i64 1
- %tmp12774 = getelementptr inbounds float* %tmp12773, i64 1
- %tmp12775 = getelementptr inbounds float* %tmp12774, i64 1
- %tmp12776 = getelementptr inbounds float* %tmp12775, i64 1
- %tmp12777 = getelementptr inbounds float* %tmp12776, i64 1
- %tmp12778 = getelementptr inbounds float* %tmp12777, i64 1
- %tmp12779 = getelementptr inbounds float* %tmp12778, i64 1
- %tmp12780 = getelementptr inbounds float* %tmp12779, i64 1
- %tmp12781 = getelementptr inbounds float* %tmp12780, i64 1
- %tmp12782 = getelementptr inbounds float* %tmp12781, i64 1
- %tmp12783 = getelementptr inbounds float* %tmp12782, i64 1
- %tmp12784 = getelementptr inbounds float* %tmp12783, i64 1
- %tmp12785 = getelementptr inbounds float* %tmp12784, i64 1
- %tmp12786 = getelementptr inbounds float* %tmp12785, i64 1
- %tmp12787 = getelementptr inbounds float* %tmp12786, i64 1
- %tmp12788 = getelementptr inbounds float* %tmp12787, i64 1
- %tmp12789 = getelementptr inbounds float* %tmp12788, i64 1
- %tmp12790 = getelementptr inbounds float* %tmp12789, i64 1
- %tmp12791 = getelementptr inbounds float* %tmp12790, i64 1
- %tmp12792 = getelementptr inbounds float* %tmp12791, i64 1
- %tmp12793 = getelementptr inbounds float* %tmp12792, i64 1
- %tmp12794 = getelementptr inbounds float* %tmp12793, i64 1
- %tmp12795 = getelementptr inbounds float* %tmp12794, i64 1
- %tmp12796 = getelementptr inbounds float* %tmp12795, i64 1
- %tmp12797 = getelementptr inbounds float* %tmp12796, i64 1
- %tmp12798 = getelementptr inbounds float* %tmp12797, i64 1
- %tmp12799 = getelementptr inbounds float* %tmp12798, i64 1
- %tmp12800 = getelementptr inbounds float* %tmp12799, i64 1
- %tmp12801 = getelementptr inbounds float* %tmp12800, i64 1
- %tmp12802 = getelementptr inbounds float* %tmp12801, i64 1
- %tmp12803 = getelementptr inbounds float* %tmp12802, i64 1
- %tmp12804 = getelementptr inbounds float* %tmp12803, i64 1
- %tmp12805 = getelementptr inbounds float* %tmp12804, i64 1
- %tmp12806 = getelementptr inbounds float* %tmp12805, i64 1
- %tmp12807 = getelementptr inbounds float* %tmp12806, i64 1
- %tmp12808 = getelementptr inbounds float* %tmp12807, i64 1
- %tmp12809 = getelementptr inbounds float* %tmp12808, i64 1
- %tmp12810 = getelementptr inbounds float* %tmp12809, i64 1
- %tmp12811 = getelementptr inbounds float* %tmp12810, i64 1
- %tmp12812 = getelementptr inbounds float* %tmp12811, i64 1
- %tmp12813 = getelementptr inbounds float* %tmp12812, i64 1
- %tmp12814 = getelementptr inbounds float* %tmp12813, i64 1
- %tmp12815 = getelementptr inbounds float* %tmp12814, i64 1
- %tmp12816 = getelementptr inbounds float* %tmp12815, i64 1
- %tmp12817 = getelementptr inbounds float* %tmp12816, i64 1
- %tmp12818 = getelementptr inbounds float* %tmp12817, i64 1
- %tmp12819 = getelementptr inbounds float* %tmp12818, i64 1
- %tmp12820 = getelementptr inbounds float* %tmp12819, i64 1
- %tmp12821 = getelementptr inbounds float* %tmp12820, i64 1
- %tmp12822 = getelementptr inbounds float* %tmp12821, i64 1
- %tmp12823 = getelementptr inbounds float* %tmp12822, i64 1
- %tmp12824 = getelementptr inbounds float* %tmp12823, i64 1
- %tmp12825 = getelementptr inbounds float* %tmp12824, i64 1
- %tmp12826 = getelementptr inbounds float* %tmp12825, i64 1
- %tmp12827 = getelementptr inbounds float* %tmp12826, i64 1
- %tmp12828 = getelementptr inbounds float* %tmp12827, i64 1
- %tmp12829 = getelementptr inbounds float* %tmp12828, i64 1
- %tmp12830 = getelementptr inbounds float* %tmp12829, i64 1
- %tmp12831 = getelementptr inbounds float* %tmp12830, i64 1
- %tmp12832 = getelementptr inbounds float* %tmp12831, i64 1
- %tmp12833 = getelementptr inbounds float* %tmp12832, i64 1
- %tmp12834 = getelementptr inbounds float* %tmp12833, i64 1
- %tmp12835 = getelementptr inbounds float* %tmp12834, i64 1
- %tmp12836 = getelementptr inbounds float* %tmp12835, i64 1
- %tmp12837 = getelementptr inbounds float* %tmp12836, i64 1
- %tmp12838 = getelementptr inbounds float* %tmp12837, i64 1
- %tmp12839 = getelementptr inbounds float* %tmp12838, i64 1
- %tmp12840 = getelementptr inbounds float* %tmp12839, i64 1
- %tmp12841 = getelementptr inbounds float* %tmp12840, i64 1
- %tmp12842 = getelementptr inbounds float* %tmp12841, i64 1
- %tmp12843 = getelementptr inbounds float* %tmp12842, i64 1
- %tmp12844 = getelementptr inbounds float* %tmp12843, i64 1
- %tmp12845 = getelementptr inbounds float* %tmp12844, i64 1
- %tmp12846 = getelementptr inbounds float* %tmp12845, i64 1
- %tmp12847 = getelementptr inbounds float* %tmp12846, i64 1
- %tmp12848 = getelementptr inbounds float* %tmp12847, i64 1
- %tmp12849 = getelementptr inbounds float* %tmp12848, i64 1
- %tmp12850 = getelementptr inbounds float* %tmp12849, i64 1
- %tmp12851 = getelementptr inbounds float* %tmp12850, i64 1
- %tmp12852 = getelementptr inbounds float* %tmp12851, i64 1
- %tmp12853 = getelementptr inbounds float* %tmp12852, i64 1
- %tmp12854 = getelementptr inbounds float* %tmp12853, i64 1
- %tmp12855 = getelementptr inbounds float* %tmp12854, i64 1
- %tmp12856 = getelementptr inbounds float* %tmp12855, i64 1
- %tmp12857 = getelementptr inbounds float* %tmp12856, i64 1
- %tmp12858 = getelementptr inbounds float* %tmp12857, i64 1
- %tmp12859 = getelementptr inbounds float* %tmp12858, i64 1
- %tmp12860 = getelementptr inbounds float* %tmp12859, i64 1
- %tmp12861 = getelementptr inbounds float* %tmp12860, i64 1
- %tmp12862 = getelementptr inbounds float* %tmp12861, i64 1
- %tmp12863 = getelementptr inbounds float* %tmp12862, i64 1
- %tmp12864 = getelementptr inbounds float* %tmp12863, i64 1
- %tmp12865 = getelementptr inbounds float* %tmp12864, i64 1
- %tmp12866 = getelementptr inbounds float* %tmp12865, i64 1
- %tmp12867 = getelementptr inbounds float* %tmp12866, i64 1
- %tmp12868 = getelementptr inbounds float* %tmp12867, i64 1
- %tmp12869 = getelementptr inbounds float* %tmp12868, i64 1
- %tmp12870 = getelementptr inbounds float* %tmp12869, i64 1
- %tmp12871 = getelementptr inbounds float* %tmp12870, i64 1
- %tmp12872 = getelementptr inbounds float* %tmp12871, i64 1
- %tmp12873 = getelementptr inbounds float* %tmp12872, i64 1
- %tmp12874 = getelementptr inbounds float* %tmp12873, i64 1
- %tmp12875 = getelementptr inbounds float* %tmp12874, i64 1
- %tmp12876 = getelementptr inbounds float* %tmp12875, i64 1
- %tmp12877 = getelementptr inbounds float* %tmp12876, i64 1
- %tmp12878 = getelementptr inbounds float* %tmp12877, i64 1
- %tmp12879 = getelementptr inbounds float* %tmp12878, i64 1
- %tmp12880 = getelementptr inbounds float* %tmp12879, i64 1
- %tmp12881 = getelementptr inbounds float* %tmp12880, i64 1
- %tmp12882 = getelementptr inbounds float* %tmp12881, i64 1
- %tmp12883 = getelementptr inbounds float* %tmp12882, i64 1
- %tmp12884 = getelementptr inbounds float* %tmp12883, i64 1
- %tmp12885 = getelementptr inbounds float* %tmp12884, i64 1
- %tmp12886 = getelementptr inbounds float* %tmp12885, i64 1
- %tmp12887 = getelementptr inbounds float* %tmp12886, i64 1
- %tmp12888 = getelementptr inbounds float* %tmp12887, i64 1
- %tmp12889 = getelementptr inbounds float* %tmp12888, i64 1
- %tmp12890 = getelementptr inbounds float* %tmp12889, i64 1
- %tmp12891 = getelementptr inbounds float* %tmp12890, i64 1
- %tmp12892 = getelementptr inbounds float* %tmp12891, i64 1
- %tmp12893 = getelementptr inbounds float* %tmp12892, i64 1
- %tmp12894 = getelementptr inbounds float* %tmp12893, i64 1
- %tmp12895 = getelementptr inbounds float* %tmp12894, i64 1
- %tmp12896 = getelementptr inbounds float* %tmp12895, i64 1
- %tmp12897 = getelementptr inbounds float* %tmp12896, i64 1
- %tmp12898 = getelementptr inbounds float* %tmp12897, i64 1
- %tmp12899 = getelementptr inbounds float* %tmp12898, i64 1
- %tmp12900 = getelementptr inbounds float* %tmp12899, i64 1
- %tmp12901 = getelementptr inbounds float* %tmp12900, i64 1
- %tmp12902 = getelementptr inbounds float* %tmp12901, i64 1
- %tmp12903 = getelementptr inbounds float* %tmp12902, i64 1
- %tmp12904 = getelementptr inbounds float* %tmp12903, i64 1
- %tmp12905 = getelementptr inbounds float* %tmp12904, i64 1
- %tmp12906 = getelementptr inbounds float* %tmp12905, i64 1
- %tmp12907 = getelementptr inbounds float* %tmp12906, i64 1
- %tmp12908 = getelementptr inbounds float* %tmp12907, i64 1
- %tmp12909 = getelementptr inbounds float* %tmp12908, i64 1
- %tmp12910 = getelementptr inbounds float* %tmp12909, i64 1
- %tmp12911 = getelementptr inbounds float* %tmp12910, i64 1
- %tmp12912 = getelementptr inbounds float* %tmp12911, i64 1
- %tmp12913 = getelementptr inbounds float* %tmp12912, i64 1
- %tmp12914 = getelementptr inbounds float* %tmp12913, i64 1
- %tmp12915 = getelementptr inbounds float* %tmp12914, i64 1
- %tmp12916 = getelementptr inbounds float* %tmp12915, i64 1
- %tmp12917 = getelementptr inbounds float* %tmp12916, i64 1
- %tmp12918 = getelementptr inbounds float* %tmp12917, i64 1
- %tmp12919 = getelementptr inbounds float* %tmp12918, i64 1
- %tmp12920 = getelementptr inbounds float* %tmp12919, i64 1
- %tmp12921 = getelementptr inbounds float* %tmp12920, i64 1
- %tmp12922 = getelementptr inbounds float* %tmp12921, i64 1
- %tmp12923 = getelementptr inbounds float* %tmp12922, i64 1
- %tmp12924 = getelementptr inbounds float* %tmp12923, i64 1
- %tmp12925 = getelementptr inbounds float* %tmp12924, i64 1
- %tmp12926 = getelementptr inbounds float* %tmp12925, i64 1
- %tmp12927 = getelementptr inbounds float* %tmp12926, i64 1
- %tmp12928 = getelementptr inbounds float* %tmp12927, i64 1
- %tmp12929 = getelementptr inbounds float* %tmp12928, i64 1
- %tmp12930 = getelementptr inbounds float* %tmp12929, i64 1
- %tmp12931 = getelementptr inbounds float* %tmp12930, i64 1
- %tmp12932 = getelementptr inbounds float* %tmp12931, i64 1
- %tmp12933 = getelementptr inbounds float* %tmp12932, i64 1
- %tmp12934 = getelementptr inbounds float* %tmp12933, i64 1
- %tmp12935 = getelementptr inbounds float* %tmp12934, i64 1
- %tmp12936 = getelementptr inbounds float* %tmp12935, i64 1
- %tmp12937 = getelementptr inbounds float* %tmp12936, i64 1
- %tmp12938 = getelementptr inbounds float* %tmp12937, i64 1
- %tmp12939 = getelementptr inbounds float* %tmp12938, i64 1
- %tmp12940 = getelementptr inbounds float* %tmp12939, i64 1
- %tmp12941 = getelementptr inbounds float* %tmp12940, i64 1
- %tmp12942 = getelementptr inbounds float* %tmp12941, i64 1
- %tmp12943 = getelementptr inbounds float* %tmp12942, i64 1
- %tmp12944 = getelementptr inbounds float* %tmp12943, i64 1
- %tmp12945 = getelementptr inbounds float* %tmp12944, i64 1
- %tmp12946 = getelementptr inbounds float* %tmp12945, i64 1
- %tmp12947 = getelementptr inbounds float* %tmp12946, i64 1
- %tmp12948 = getelementptr inbounds float* %tmp12947, i64 1
- %tmp12949 = getelementptr inbounds float* %tmp12948, i64 1
- %tmp12950 = getelementptr inbounds float* %tmp12949, i64 1
- %tmp12951 = getelementptr inbounds float* %tmp12950, i64 1
- %tmp12952 = getelementptr inbounds float* %tmp12951, i64 1
- %tmp12953 = getelementptr inbounds float* %tmp12952, i64 1
- %tmp12954 = getelementptr inbounds float* %tmp12953, i64 1
- %tmp12955 = getelementptr inbounds float* %tmp12954, i64 1
- %tmp12956 = getelementptr inbounds float* %tmp12955, i64 1
- %tmp12957 = getelementptr inbounds float* %tmp12956, i64 1
- %tmp12958 = getelementptr inbounds float* %tmp12957, i64 1
- %tmp12959 = getelementptr inbounds float* %tmp12958, i64 1
- %tmp12960 = getelementptr inbounds float* %tmp12959, i64 1
- %tmp12961 = getelementptr inbounds float* %tmp12960, i64 1
- %tmp12962 = getelementptr inbounds float* %tmp12961, i64 1
- %tmp12963 = getelementptr inbounds float* %tmp12962, i64 1
- %tmp12964 = getelementptr inbounds float* %tmp12963, i64 1
- %tmp12965 = getelementptr inbounds float* %tmp12964, i64 1
- %tmp12966 = getelementptr inbounds float* %tmp12965, i64 1
- %tmp12967 = getelementptr inbounds float* %tmp12966, i64 1
- %tmp12968 = getelementptr inbounds float* %tmp12967, i64 1
- %tmp12969 = getelementptr inbounds float* %tmp12968, i64 1
- %tmp12970 = getelementptr inbounds float* %tmp12969, i64 1
- %tmp12971 = getelementptr inbounds float* %tmp12970, i64 1
- %tmp12972 = getelementptr inbounds float* %tmp12971, i64 1
- %tmp12973 = getelementptr inbounds float* %tmp12972, i64 1
- %tmp12974 = getelementptr inbounds float* %tmp12973, i64 1
- %tmp12975 = getelementptr inbounds float* %tmp12974, i64 1
- %tmp12976 = getelementptr inbounds float* %tmp12975, i64 1
- %tmp12977 = getelementptr inbounds float* %tmp12976, i64 1
- %tmp12978 = getelementptr inbounds float* %tmp12977, i64 1
- %tmp12979 = getelementptr inbounds float* %tmp12978, i64 1
- %tmp12980 = getelementptr inbounds float* %tmp12979, i64 1
- %tmp12981 = getelementptr inbounds float* %tmp12980, i64 1
- %tmp12982 = getelementptr inbounds float* %tmp12981, i64 1
- %tmp12983 = getelementptr inbounds float* %tmp12982, i64 1
- %tmp12984 = getelementptr inbounds float* %tmp12983, i64 1
- %tmp12985 = getelementptr inbounds float* %tmp12984, i64 1
- %tmp12986 = getelementptr inbounds float* %tmp12985, i64 1
- %tmp12987 = getelementptr inbounds float* %tmp12986, i64 1
- %tmp12988 = getelementptr inbounds float* %tmp12987, i64 1
- %tmp12989 = getelementptr inbounds float* %tmp12988, i64 1
- %tmp12990 = getelementptr inbounds float* %tmp12989, i64 1
- %tmp12991 = getelementptr inbounds float* %tmp12990, i64 1
- %tmp12992 = getelementptr inbounds float* %tmp12991, i64 1
- %tmp12993 = getelementptr inbounds float* %tmp12992, i64 1
- %tmp12994 = getelementptr inbounds float* %tmp12993, i64 1
- %tmp12995 = getelementptr inbounds float* %tmp12994, i64 1
- %tmp12996 = getelementptr inbounds float* %tmp12995, i64 1
- %tmp12997 = getelementptr inbounds float* %tmp12996, i64 1
- %tmp12998 = getelementptr inbounds float* %tmp12997, i64 1
- %tmp12999 = getelementptr inbounds float* %tmp12998, i64 1
- %tmp13000 = getelementptr inbounds float* %tmp12999, i64 1
- %tmp13001 = getelementptr inbounds float* %tmp13000, i64 1
- %tmp13002 = getelementptr inbounds float* %tmp13001, i64 1
- %tmp13003 = getelementptr inbounds float* %tmp13002, i64 1
- %tmp13004 = getelementptr inbounds float* %tmp13003, i64 1
- %tmp13005 = getelementptr inbounds float* %tmp13004, i64 1
- %tmp13006 = getelementptr inbounds float* %tmp13005, i64 1
- %tmp13007 = getelementptr inbounds float* %tmp13006, i64 1
- %tmp13008 = getelementptr inbounds float* %tmp13007, i64 1
- %tmp13009 = getelementptr inbounds float* %tmp13008, i64 1
- %tmp13010 = getelementptr inbounds float* %tmp13009, i64 1
- %tmp13011 = getelementptr inbounds float* %tmp13010, i64 1
- %tmp13012 = getelementptr inbounds float* %tmp13011, i64 1
- %tmp13013 = getelementptr inbounds float* %tmp13012, i64 1
- %tmp13014 = getelementptr inbounds float* %tmp13013, i64 1
- %tmp13015 = getelementptr inbounds float* %tmp13014, i64 1
- %tmp13016 = getelementptr inbounds float* %tmp13015, i64 1
- %tmp13017 = getelementptr inbounds float* %tmp13016, i64 1
- %tmp13018 = getelementptr inbounds float* %tmp13017, i64 1
- %tmp13019 = getelementptr inbounds float* %tmp13018, i64 1
- %tmp13020 = getelementptr inbounds float* %tmp13019, i64 1
- %tmp13021 = getelementptr inbounds float* %tmp13020, i64 1
- %tmp13022 = getelementptr inbounds float* %tmp13021, i64 1
- %tmp13023 = getelementptr inbounds float* %tmp13022, i64 1
- %tmp13024 = getelementptr inbounds float* %tmp13023, i64 1
- %tmp13025 = getelementptr inbounds float* %tmp13024, i64 1
- %tmp13026 = getelementptr inbounds float* %tmp13025, i64 1
- %tmp13027 = getelementptr inbounds float* %tmp13026, i64 1
- %tmp13028 = getelementptr inbounds float* %tmp13027, i64 1
- %tmp13029 = getelementptr inbounds float* %tmp13028, i64 1
- %tmp13030 = getelementptr inbounds float* %tmp13029, i64 1
- %tmp13031 = getelementptr inbounds float* %tmp13030, i64 1
- %tmp13032 = getelementptr inbounds float* %tmp13031, i64 1
- %tmp13033 = getelementptr inbounds float* %tmp13032, i64 1
- %tmp13034 = getelementptr inbounds float* %tmp13033, i64 1
- %tmp13035 = getelementptr inbounds float* %tmp13034, i64 1
- %tmp13036 = getelementptr inbounds float* %tmp13035, i64 1
- %tmp13037 = getelementptr inbounds float* %tmp13036, i64 1
- %tmp13038 = getelementptr inbounds float* %tmp13037, i64 1
- %tmp13039 = getelementptr inbounds float* %tmp13038, i64 1
- %tmp13040 = getelementptr inbounds float* %tmp13039, i64 1
- %tmp13041 = getelementptr inbounds float* %tmp13040, i64 1
- %tmp13042 = getelementptr inbounds float* %tmp13041, i64 1
- %tmp13043 = getelementptr inbounds float* %tmp13042, i64 1
- %tmp13044 = getelementptr inbounds float* %tmp13043, i64 1
- %tmp13045 = getelementptr inbounds float* %tmp13044, i64 1
- %tmp13046 = getelementptr inbounds float* %tmp13045, i64 1
- %tmp13047 = getelementptr inbounds float* %tmp13046, i64 1
- %tmp13048 = getelementptr inbounds float* %tmp13047, i64 1
- %tmp13049 = getelementptr inbounds float* %tmp13048, i64 1
- %tmp13050 = getelementptr inbounds float* %tmp13049, i64 1
- %tmp13051 = getelementptr inbounds float* %tmp13050, i64 1
- %tmp13052 = getelementptr inbounds float* %tmp13051, i64 1
- %tmp13053 = getelementptr inbounds float* %tmp13052, i64 1
- %tmp13054 = getelementptr inbounds float* %tmp13053, i64 1
- %tmp13055 = getelementptr inbounds float* %tmp13054, i64 1
- %tmp13056 = getelementptr inbounds float* %tmp13055, i64 1
- %tmp13057 = getelementptr inbounds float* %tmp13056, i64 1
- %tmp13058 = getelementptr inbounds float* %tmp13057, i64 1
- %tmp13059 = getelementptr inbounds float* %tmp13058, i64 1
- %tmp13060 = getelementptr inbounds float* %tmp13059, i64 1
- %tmp13061 = getelementptr inbounds float* %tmp13060, i64 1
- %tmp13062 = getelementptr inbounds float* %tmp13061, i64 1
- %tmp13063 = getelementptr inbounds float* %tmp13062, i64 1
- %tmp13064 = getelementptr inbounds float* %tmp13063, i64 1
- %tmp13065 = getelementptr inbounds float* %tmp13064, i64 1
- %tmp13066 = getelementptr inbounds float* %tmp13065, i64 1
- %tmp13067 = getelementptr inbounds float* %tmp13066, i64 1
- %tmp13068 = getelementptr inbounds float* %tmp13067, i64 1
- %tmp13069 = getelementptr inbounds float* %tmp13068, i64 1
- %tmp13070 = getelementptr inbounds float* %tmp13069, i64 1
- %tmp13071 = getelementptr inbounds float* %tmp13070, i64 1
- %tmp13072 = getelementptr inbounds float* %tmp13071, i64 1
- %tmp13073 = getelementptr inbounds float* %tmp13072, i64 1
- %tmp13074 = getelementptr inbounds float* %tmp13073, i64 1
- %tmp13075 = getelementptr inbounds float* %tmp13074, i64 1
- %tmp13076 = getelementptr inbounds float* %tmp13075, i64 1
- %tmp13077 = getelementptr inbounds float* %tmp13076, i64 1
- %tmp13078 = getelementptr inbounds float* %tmp13077, i64 1
- %tmp13079 = getelementptr inbounds float* %tmp13078, i64 1
- %tmp13080 = getelementptr inbounds float* %tmp13079, i64 1
- %tmp13081 = getelementptr inbounds float* %tmp13080, i64 1
- %tmp13082 = getelementptr inbounds float* %tmp13081, i64 1
- %tmp13083 = getelementptr inbounds float* %tmp13082, i64 1
- %tmp13084 = getelementptr inbounds float* %tmp13083, i64 1
- %tmp13085 = getelementptr inbounds float* %tmp13084, i64 1
- %tmp13086 = getelementptr inbounds float* %tmp13085, i64 1
- %tmp13087 = getelementptr inbounds float* %tmp13086, i64 1
- %tmp13088 = getelementptr inbounds float* %tmp13087, i64 1
- %tmp13089 = getelementptr inbounds float* %tmp13088, i64 1
- %tmp13090 = getelementptr inbounds float* %tmp13089, i64 1
- %tmp13091 = getelementptr inbounds float* %tmp13090, i64 1
- %tmp13092 = getelementptr inbounds float* %tmp13091, i64 1
- %tmp13093 = getelementptr inbounds float* %tmp13092, i64 1
- %tmp13094 = getelementptr inbounds float* %tmp13093, i64 1
- %tmp13095 = getelementptr inbounds float* %tmp13094, i64 1
- %tmp13096 = getelementptr inbounds float* %tmp13095, i64 1
- %tmp13097 = getelementptr inbounds float* %tmp13096, i64 1
- %tmp13098 = getelementptr inbounds float* %tmp13097, i64 1
- %tmp13099 = getelementptr inbounds float* %tmp13098, i64 1
- %tmp13100 = getelementptr inbounds float* %tmp13099, i64 1
- %tmp13101 = getelementptr inbounds float* %tmp13100, i64 1
- %tmp13102 = getelementptr inbounds float* %tmp13101, i64 1
- %tmp13103 = getelementptr inbounds float* %tmp13102, i64 1
- %tmp13104 = getelementptr inbounds float* %tmp13103, i64 1
- %tmp13105 = getelementptr inbounds float* %tmp13104, i64 1
- %tmp13106 = getelementptr inbounds float* %tmp13105, i64 1
- %tmp13107 = getelementptr inbounds float* %tmp13106, i64 1
- %tmp13108 = getelementptr inbounds float* %tmp13107, i64 1
- %tmp13109 = getelementptr inbounds float* %tmp13108, i64 1
- %tmp13110 = getelementptr inbounds float* %tmp13109, i64 1
- %tmp13111 = getelementptr inbounds float* %tmp13110, i64 1
- %tmp13112 = getelementptr inbounds float* %tmp13111, i64 1
- %tmp13113 = getelementptr inbounds float* %tmp13112, i64 1
- %tmp13114 = getelementptr inbounds float* %tmp13113, i64 1
- %tmp13115 = getelementptr inbounds float* %tmp13114, i64 1
- %tmp13116 = getelementptr inbounds float* %tmp13115, i64 1
- %tmp13117 = getelementptr inbounds float* %tmp13116, i64 1
- %tmp13118 = getelementptr inbounds float* %tmp13117, i64 1
- %tmp13119 = getelementptr inbounds float* %tmp13118, i64 1
- %tmp13120 = getelementptr inbounds float* %tmp13119, i64 1
- %tmp13121 = getelementptr inbounds float* %tmp13120, i64 1
- %tmp13122 = getelementptr inbounds float* %tmp13121, i64 1
- %tmp13123 = getelementptr inbounds float* %tmp13122, i64 1
- %tmp13124 = getelementptr inbounds float* %tmp13123, i64 1
- %tmp13125 = getelementptr inbounds float* %tmp13124, i64 1
- %tmp13126 = getelementptr inbounds float* %tmp13125, i64 1
- %tmp13127 = getelementptr inbounds float* %tmp13126, i64 1
- %tmp13128 = getelementptr inbounds float* %tmp13127, i64 1
- %tmp13129 = getelementptr inbounds float* %tmp13128, i64 1
- %tmp13130 = getelementptr inbounds float* %tmp13129, i64 1
- %tmp13131 = getelementptr inbounds float* %tmp13130, i64 1
- %tmp13132 = getelementptr inbounds float* %tmp13131, i64 1
- %tmp13133 = getelementptr inbounds float* %tmp13132, i64 1
- %tmp13134 = getelementptr inbounds float* %tmp13133, i64 1
- %tmp13135 = getelementptr inbounds float* %tmp13134, i64 1
- %tmp13136 = getelementptr inbounds float* %tmp13135, i64 1
- %tmp13137 = getelementptr inbounds float* %tmp13136, i64 1
- %tmp13138 = getelementptr inbounds float* %tmp13137, i64 1
- %tmp13139 = getelementptr inbounds float* %tmp13138, i64 1
- %tmp13140 = getelementptr inbounds float* %tmp13139, i64 1
- %tmp13141 = getelementptr inbounds float* %tmp13140, i64 1
- %tmp13142 = getelementptr inbounds float* %tmp13141, i64 1
- %tmp13143 = getelementptr inbounds float* %tmp13142, i64 1
- %tmp13144 = getelementptr inbounds float* %tmp13143, i64 1
- %tmp13145 = getelementptr inbounds float* %tmp13144, i64 1
- %tmp13146 = getelementptr inbounds float* %tmp13145, i64 1
- %tmp13147 = getelementptr inbounds float* %tmp13146, i64 1
- %tmp13148 = getelementptr inbounds float* %tmp13147, i64 1
- %tmp13149 = getelementptr inbounds float* %tmp13148, i64 1
- %tmp13150 = getelementptr inbounds float* %tmp13149, i64 1
- %tmp13151 = getelementptr inbounds float* %tmp13150, i64 1
- %tmp13152 = getelementptr inbounds float* %tmp13151, i64 1
- %tmp13153 = getelementptr inbounds float* %tmp13152, i64 1
- %tmp13154 = getelementptr inbounds float* %tmp13153, i64 1
- %tmp13155 = getelementptr inbounds float* %tmp13154, i64 1
- %tmp13156 = getelementptr inbounds float* %tmp13155, i64 1
- %tmp13157 = getelementptr inbounds float* %tmp13156, i64 1
- %tmp13158 = getelementptr inbounds float* %tmp13157, i64 1
- %tmp13159 = getelementptr inbounds float* %tmp13158, i64 1
- %tmp13160 = getelementptr inbounds float* %tmp13159, i64 1
- %tmp13161 = getelementptr inbounds float* %tmp13160, i64 1
- %tmp13162 = getelementptr inbounds float* %tmp13161, i64 1
- %tmp13163 = getelementptr inbounds float* %tmp13162, i64 1
- %tmp13164 = getelementptr inbounds float* %tmp13163, i64 1
- %tmp13165 = getelementptr inbounds float* %tmp13164, i64 1
- %tmp13166 = getelementptr inbounds float* %tmp13165, i64 1
- %tmp13167 = getelementptr inbounds float* %tmp13166, i64 1
- %tmp13168 = getelementptr inbounds float* %tmp13167, i64 1
- %tmp13169 = getelementptr inbounds float* %tmp13168, i64 1
- %tmp13170 = getelementptr inbounds float* %tmp13169, i64 1
- %tmp13171 = getelementptr inbounds float* %tmp13170, i64 1
- %tmp13172 = getelementptr inbounds float* %tmp13171, i64 1
- %tmp13173 = getelementptr inbounds float* %tmp13172, i64 1
- %tmp13174 = getelementptr inbounds float* %tmp13173, i64 1
- %tmp13175 = getelementptr inbounds float* %tmp13174, i64 1
- %tmp13176 = getelementptr inbounds float* %tmp13175, i64 1
- %tmp13177 = getelementptr inbounds float* %tmp13176, i64 1
- %tmp13178 = getelementptr inbounds float* %tmp13177, i64 1
- %tmp13179 = getelementptr inbounds float* %tmp13178, i64 1
- %tmp13180 = getelementptr inbounds float* %tmp13179, i64 1
- %tmp13181 = getelementptr inbounds float* %tmp13180, i64 1
- %tmp13182 = getelementptr inbounds float* %tmp13181, i64 1
- %tmp13183 = getelementptr inbounds float* %tmp13182, i64 1
- %tmp13184 = getelementptr inbounds float* %tmp13183, i64 1
- %tmp13185 = getelementptr inbounds float* %tmp13184, i64 1
- %tmp13186 = getelementptr inbounds float* %tmp13185, i64 1
- %tmp13187 = getelementptr inbounds float* %tmp13186, i64 1
- %tmp13188 = getelementptr inbounds float* %tmp13187, i64 1
- %tmp13189 = getelementptr inbounds float* %tmp13188, i64 1
- %tmp13190 = getelementptr inbounds float* %tmp13189, i64 1
- %tmp13191 = getelementptr inbounds float* %tmp13190, i64 1
- %tmp13192 = getelementptr inbounds float* %tmp13191, i64 1
- %tmp13193 = getelementptr inbounds float* %tmp13192, i64 1
- %tmp13194 = getelementptr inbounds float* %tmp13193, i64 1
- %tmp13195 = getelementptr inbounds float* %tmp13194, i64 1
- %tmp13196 = getelementptr inbounds float* %tmp13195, i64 1
- %tmp13197 = getelementptr inbounds float* %tmp13196, i64 1
- %tmp13198 = getelementptr inbounds float* %tmp13197, i64 1
- %tmp13199 = getelementptr inbounds float* %tmp13198, i64 1
- %tmp13200 = getelementptr inbounds float* %tmp13199, i64 1
- %tmp13201 = getelementptr inbounds float* %tmp13200, i64 1
- %tmp13202 = getelementptr inbounds float* %tmp13201, i64 1
- %tmp13203 = getelementptr inbounds float* %tmp13202, i64 1
- %tmp13204 = getelementptr inbounds float* %tmp13203, i64 1
- %tmp13205 = getelementptr inbounds float* %tmp13204, i64 1
- %tmp13206 = getelementptr inbounds float* %tmp13205, i64 1
- %tmp13207 = getelementptr inbounds float* %tmp13206, i64 1
- %tmp13208 = getelementptr inbounds float* %tmp13207, i64 1
- %tmp13209 = getelementptr inbounds float* %tmp13208, i64 1
- %tmp13210 = getelementptr inbounds float* %tmp13209, i64 1
- %tmp13211 = getelementptr inbounds float* %tmp13210, i64 1
- %tmp13212 = getelementptr inbounds float* %tmp13211, i64 1
- %tmp13213 = getelementptr inbounds float* %tmp13212, i64 1
- %tmp13214 = getelementptr inbounds float* %tmp13213, i64 1
- %tmp13215 = getelementptr inbounds float* %tmp13214, i64 1
- %tmp13216 = getelementptr inbounds float* %tmp13215, i64 1
- %tmp13217 = getelementptr inbounds float* %tmp13216, i64 1
- %tmp13218 = getelementptr inbounds float* %tmp13217, i64 1
- %tmp13219 = getelementptr inbounds float* %tmp13218, i64 1
- %tmp13220 = getelementptr inbounds float* %tmp13219, i64 1
- %tmp13221 = getelementptr inbounds float* %tmp13220, i64 1
- %tmp13222 = getelementptr inbounds float* %tmp13221, i64 1
- %tmp13223 = getelementptr inbounds float* %tmp13222, i64 1
- %tmp13224 = getelementptr inbounds float* %tmp13223, i64 1
- %tmp13225 = getelementptr inbounds float* %tmp13224, i64 1
- %tmp13226 = getelementptr inbounds float* %tmp13225, i64 1
- %tmp13227 = getelementptr inbounds float* %tmp13226, i64 1
- %tmp13228 = getelementptr inbounds float* %tmp13227, i64 1
- %tmp13229 = getelementptr inbounds float* %tmp13228, i64 1
- %tmp13230 = getelementptr inbounds float* %tmp13229, i64 1
- %tmp13231 = getelementptr inbounds float* %tmp13230, i64 1
- %tmp13232 = getelementptr inbounds float* %tmp13231, i64 1
- %tmp13233 = getelementptr inbounds float* %tmp13232, i64 1
- %tmp13234 = getelementptr inbounds float* %tmp13233, i64 1
- %tmp13235 = getelementptr inbounds float* %tmp13234, i64 1
- %tmp13236 = getelementptr inbounds float* %tmp13235, i64 1
- %tmp13237 = getelementptr inbounds float* %tmp13236, i64 1
- %tmp13238 = getelementptr inbounds float* %tmp13237, i64 1
- %tmp13239 = getelementptr inbounds float* %tmp13238, i64 1
- %tmp13240 = getelementptr inbounds float* %tmp13239, i64 1
- %tmp13241 = getelementptr inbounds float* %tmp13240, i64 1
- %tmp13242 = getelementptr inbounds float* %tmp13241, i64 1
- %tmp13243 = getelementptr inbounds float* %tmp13242, i64 1
- %tmp13244 = getelementptr inbounds float* %tmp13243, i64 1
- %tmp13245 = getelementptr inbounds float* %tmp13244, i64 1
- %tmp13246 = getelementptr inbounds float* %tmp13245, i64 1
- %tmp13247 = getelementptr inbounds float* %tmp13246, i64 1
- %tmp13248 = getelementptr inbounds float* %tmp13247, i64 1
- %tmp13249 = getelementptr inbounds float* %tmp13248, i64 1
- %tmp13250 = getelementptr inbounds float* %tmp13249, i64 1
- %tmp13251 = getelementptr inbounds float* %tmp13250, i64 1
- %tmp13252 = getelementptr inbounds float* %tmp13251, i64 1
- %tmp13253 = getelementptr inbounds float* %tmp13252, i64 1
- %tmp13254 = getelementptr inbounds float* %tmp13253, i64 1
- %tmp13255 = getelementptr inbounds float* %tmp13254, i64 1
- %tmp13256 = getelementptr inbounds float* %tmp13255, i64 1
- %tmp13257 = getelementptr inbounds float* %tmp13256, i64 1
- %tmp13258 = getelementptr inbounds float* %tmp13257, i64 1
- %tmp13259 = getelementptr inbounds float* %tmp13258, i64 1
- %tmp13260 = getelementptr inbounds float* %tmp13259, i64 1
- %tmp13261 = getelementptr inbounds float* %tmp13260, i64 1
- %tmp13262 = getelementptr inbounds float* %tmp13261, i64 1
- %tmp13263 = getelementptr inbounds float* %tmp13262, i64 1
- %tmp13264 = getelementptr inbounds float* %tmp13263, i64 1
- %tmp13265 = getelementptr inbounds float* %tmp13264, i64 1
- %tmp13266 = getelementptr inbounds float* %tmp13265, i64 1
- %tmp13267 = getelementptr inbounds float* %tmp13266, i64 1
- %tmp13268 = getelementptr inbounds float* %tmp13267, i64 1
- %tmp13269 = getelementptr inbounds float* %tmp13268, i64 1
- %tmp13270 = getelementptr inbounds float* %tmp13269, i64 1
- %tmp13271 = getelementptr inbounds float* %tmp13270, i64 1
- %tmp13272 = getelementptr inbounds float* %tmp13271, i64 1
- %tmp13273 = getelementptr inbounds float* %tmp13272, i64 1
- %tmp13274 = getelementptr inbounds float* %tmp13273, i64 1
- %tmp13275 = getelementptr inbounds float* %tmp13274, i64 1
- %tmp13276 = getelementptr inbounds float* %tmp13275, i64 1
- %tmp13277 = getelementptr inbounds float* %tmp13276, i64 1
- %tmp13278 = getelementptr inbounds float* %tmp13277, i64 1
- %tmp13279 = getelementptr inbounds float* %tmp13278, i64 1
- %tmp13280 = getelementptr inbounds float* %tmp13279, i64 1
- %tmp13281 = getelementptr inbounds float* %tmp13280, i64 1
- %tmp13282 = getelementptr inbounds float* %tmp13281, i64 1
- %tmp13283 = getelementptr inbounds float* %tmp13282, i64 1
- %tmp13284 = getelementptr inbounds float* %tmp13283, i64 1
- %tmp13285 = getelementptr inbounds float* %tmp13284, i64 1
- %tmp13286 = getelementptr inbounds float* %tmp13285, i64 1
- %tmp13287 = getelementptr inbounds float* %tmp13286, i64 1
- %tmp13288 = getelementptr inbounds float* %tmp13287, i64 1
- %tmp13289 = getelementptr inbounds float* %tmp13288, i64 1
- %tmp13290 = getelementptr inbounds float* %tmp13289, i64 1
- %tmp13291 = getelementptr inbounds float* %tmp13290, i64 1
- %tmp13292 = getelementptr inbounds float* %tmp13291, i64 1
- %tmp13293 = getelementptr inbounds float* %tmp13292, i64 1
- %tmp13294 = getelementptr inbounds float* %tmp13293, i64 1
- %tmp13295 = getelementptr inbounds float* %tmp13294, i64 1
- %tmp13296 = getelementptr inbounds float* %tmp13295, i64 1
- %tmp13297 = getelementptr inbounds float* %tmp13296, i64 1
- %tmp13298 = getelementptr inbounds float* %tmp13297, i64 1
- %tmp13299 = getelementptr inbounds float* %tmp13298, i64 1
- %tmp13300 = getelementptr inbounds float* %tmp13299, i64 1
- %tmp13301 = getelementptr inbounds float* %tmp13300, i64 1
- %tmp13302 = getelementptr inbounds float* %tmp13301, i64 1
- %tmp13303 = getelementptr inbounds float* %tmp13302, i64 1
- %tmp13304 = getelementptr inbounds float* %tmp13303, i64 1
- %tmp13305 = getelementptr inbounds float* %tmp13304, i64 1
- %tmp13306 = getelementptr inbounds float* %tmp13305, i64 1
- %tmp13307 = getelementptr inbounds float* %tmp13306, i64 1
- %tmp13308 = getelementptr inbounds float* %tmp13307, i64 1
- %tmp13309 = getelementptr inbounds float* %tmp13308, i64 1
- %tmp13310 = getelementptr inbounds float* %tmp13309, i64 1
- %tmp13311 = getelementptr inbounds float* %tmp13310, i64 1
- %tmp13312 = getelementptr inbounds float* %tmp13311, i64 1
- %tmp13313 = getelementptr inbounds float* %tmp13312, i64 1
- %tmp13314 = getelementptr inbounds float* %tmp13313, i64 1
- %tmp13315 = getelementptr inbounds float* %tmp13314, i64 1
- %tmp13316 = getelementptr inbounds float* %tmp13315, i64 1
- %tmp13317 = getelementptr inbounds float* %tmp13316, i64 1
- %tmp13318 = getelementptr inbounds float* %tmp13317, i64 1
- %tmp13319 = getelementptr inbounds float* %tmp13318, i64 1
- %tmp13320 = getelementptr inbounds float* %tmp13319, i64 1
- %tmp13321 = getelementptr inbounds float* %tmp13320, i64 1
- %tmp13322 = getelementptr inbounds float* %tmp13321, i64 1
- %tmp13323 = getelementptr inbounds float* %tmp13322, i64 1
- %tmp13324 = getelementptr inbounds float* %tmp13323, i64 1
- %tmp13325 = getelementptr inbounds float* %tmp13324, i64 1
- %tmp13326 = getelementptr inbounds float* %tmp13325, i64 1
- %tmp13327 = getelementptr inbounds float* %tmp13326, i64 1
- %tmp13328 = getelementptr inbounds float* %tmp13327, i64 1
- %tmp13329 = getelementptr inbounds float* %tmp13328, i64 1
- %tmp13330 = getelementptr inbounds float* %tmp13329, i64 1
- %tmp13331 = getelementptr inbounds float* %tmp13330, i64 1
- %tmp13332 = getelementptr inbounds float* %tmp13331, i64 1
- %tmp13333 = getelementptr inbounds float* %tmp13332, i64 1
- %tmp13334 = getelementptr inbounds float* %tmp13333, i64 1
- %tmp13335 = getelementptr inbounds float* %tmp13334, i64 1
- %tmp13336 = getelementptr inbounds float* %tmp13335, i64 1
- %tmp13337 = getelementptr inbounds float* %tmp13336, i64 1
- %tmp13338 = getelementptr inbounds float* %tmp13337, i64 1
- %tmp13339 = getelementptr inbounds float* %tmp13338, i64 1
- %tmp13340 = getelementptr inbounds float* %tmp13339, i64 1
- %tmp13341 = getelementptr inbounds float* %tmp13340, i64 1
- %tmp13342 = getelementptr inbounds float* %tmp13341, i64 1
- %tmp13343 = getelementptr inbounds float* %tmp13342, i64 1
- %tmp13344 = getelementptr inbounds float* %tmp13343, i64 1
- %tmp13345 = getelementptr inbounds float* %tmp13344, i64 1
- %tmp13346 = getelementptr inbounds float* %tmp13345, i64 1
- %tmp13347 = getelementptr inbounds float* %tmp13346, i64 1
- %tmp13348 = getelementptr inbounds float* %tmp13347, i64 1
- %tmp13349 = getelementptr inbounds float* %tmp13348, i64 1
- %tmp13350 = getelementptr inbounds float* %tmp13349, i64 1
- %tmp13351 = getelementptr inbounds float* %tmp13350, i64 1
- %tmp13352 = getelementptr inbounds float* %tmp13351, i64 1
- %tmp13353 = getelementptr inbounds float* %tmp13352, i64 1
- %tmp13354 = getelementptr inbounds float* %tmp13353, i64 1
- %tmp13355 = getelementptr inbounds float* %tmp13354, i64 1
- %tmp13356 = getelementptr inbounds float* %tmp13355, i64 1
- %tmp13357 = getelementptr inbounds float* %tmp13356, i64 1
- %tmp13358 = getelementptr inbounds float* %tmp13357, i64 1
- %tmp13359 = getelementptr inbounds float* %tmp13358, i64 1
- %tmp13360 = getelementptr inbounds float* %tmp13359, i64 1
- %tmp13361 = getelementptr inbounds float* %tmp13360, i64 1
- %tmp13362 = getelementptr inbounds float* %tmp13361, i64 1
- %tmp13363 = getelementptr inbounds float* %tmp13362, i64 1
- %tmp13364 = getelementptr inbounds float* %tmp13363, i64 1
- %tmp13365 = getelementptr inbounds float* %tmp13364, i64 1
- %tmp13366 = getelementptr inbounds float* %tmp13365, i64 1
- %tmp13367 = getelementptr inbounds float* %tmp13366, i64 1
- %tmp13368 = getelementptr inbounds float* %tmp13367, i64 1
- %tmp13369 = getelementptr inbounds float* %tmp13368, i64 1
- %tmp13370 = getelementptr inbounds float* %tmp13369, i64 1
- %tmp13371 = getelementptr inbounds float* %tmp13370, i64 1
- %tmp13372 = getelementptr inbounds float* %tmp13371, i64 1
- %tmp13373 = getelementptr inbounds float* %tmp13372, i64 1
- %tmp13374 = getelementptr inbounds float* %tmp13373, i64 1
- %tmp13375 = getelementptr inbounds float* %tmp13374, i64 1
- %tmp13376 = getelementptr inbounds float* %tmp13375, i64 1
- %tmp13377 = getelementptr inbounds float* %tmp13376, i64 1
- %tmp13378 = getelementptr inbounds float* %tmp13377, i64 1
- %tmp13379 = getelementptr inbounds float* %tmp13378, i64 1
- %tmp13380 = getelementptr inbounds float* %tmp13379, i64 1
- %tmp13381 = getelementptr inbounds float* %tmp13380, i64 1
- %tmp13382 = getelementptr inbounds float* %tmp13381, i64 1
- %tmp13383 = getelementptr inbounds float* %tmp13382, i64 1
- %tmp13384 = getelementptr inbounds float* %tmp13383, i64 1
- %tmp13385 = getelementptr inbounds float* %tmp13384, i64 1
- %tmp13386 = getelementptr inbounds float* %tmp13385, i64 1
- %tmp13387 = getelementptr inbounds float* %tmp13386, i64 1
- %tmp13388 = getelementptr inbounds float* %tmp13387, i64 1
- %tmp13389 = getelementptr inbounds float* %tmp13388, i64 1
- %tmp13390 = getelementptr inbounds float* %tmp13389, i64 1
- %tmp13391 = getelementptr inbounds float* %tmp13390, i64 1
- %tmp13392 = getelementptr inbounds float* %tmp13391, i64 1
- %tmp13393 = getelementptr inbounds float* %tmp13392, i64 1
- %tmp13394 = getelementptr inbounds float* %tmp13393, i64 1
- %tmp13395 = getelementptr inbounds float* %tmp13394, i64 1
- %tmp13396 = getelementptr inbounds float* %tmp13395, i64 1
- %tmp13397 = getelementptr inbounds float* %tmp13396, i64 1
- %tmp13398 = getelementptr inbounds float* %tmp13397, i64 1
- %tmp13399 = getelementptr inbounds float* %tmp13398, i64 1
- %tmp13400 = getelementptr inbounds float* %tmp13399, i64 1
- %tmp13401 = getelementptr inbounds float* %tmp13400, i64 1
- %tmp13402 = getelementptr inbounds float* %tmp13401, i64 1
- %tmp13403 = getelementptr inbounds float* %tmp13402, i64 1
- %tmp13404 = getelementptr inbounds float* %tmp13403, i64 1
- %tmp13405 = getelementptr inbounds float* %tmp13404, i64 1
- %tmp13406 = getelementptr inbounds float* %tmp13405, i64 1
- %tmp13407 = getelementptr inbounds float* %tmp13406, i64 1
- %tmp13408 = getelementptr inbounds float* %tmp13407, i64 1
- %tmp13409 = getelementptr inbounds float* %tmp13408, i64 1
- %tmp13410 = getelementptr inbounds float* %tmp13409, i64 1
- %tmp13411 = getelementptr inbounds float* %tmp13410, i64 1
- %tmp13412 = getelementptr inbounds float* %tmp13411, i64 1
- %tmp13413 = getelementptr inbounds float* %tmp13412, i64 1
- %tmp13414 = getelementptr inbounds float* %tmp13413, i64 1
- %tmp13415 = getelementptr inbounds float* %tmp13414, i64 1
- %tmp13416 = getelementptr inbounds float* %tmp13415, i64 1
- %tmp13417 = getelementptr inbounds float* %tmp13416, i64 1
- %tmp13418 = getelementptr inbounds float* %tmp13417, i64 1
- %tmp13419 = getelementptr inbounds float* %tmp13418, i64 1
- %tmp13420 = getelementptr inbounds float* %tmp13419, i64 1
- %tmp13421 = getelementptr inbounds float* %tmp13420, i64 1
- %tmp13422 = getelementptr inbounds float* %tmp13421, i64 1
- %tmp13423 = getelementptr inbounds float* %tmp13422, i64 1
- %tmp13424 = getelementptr inbounds float* %tmp13423, i64 1
- %tmp13425 = getelementptr inbounds float* %tmp13424, i64 1
- %tmp13426 = getelementptr inbounds float* %tmp13425, i64 1
- %tmp13427 = getelementptr inbounds float* %tmp13426, i64 1
- %tmp13428 = getelementptr inbounds float* %tmp13427, i64 1
- %tmp13429 = getelementptr inbounds float* %tmp13428, i64 1
- %tmp13430 = getelementptr inbounds float* %tmp13429, i64 1
- %tmp13431 = getelementptr inbounds float* %tmp13430, i64 1
- %tmp13432 = getelementptr inbounds float* %tmp13431, i64 1
- %tmp13433 = getelementptr inbounds float* %tmp13432, i64 1
- %tmp13434 = getelementptr inbounds float* %tmp13433, i64 1
- %tmp13435 = getelementptr inbounds float* %tmp13434, i64 1
- %tmp13436 = getelementptr inbounds float* %tmp13435, i64 1
- %tmp13437 = getelementptr inbounds float* %tmp13436, i64 1
- %tmp13438 = getelementptr inbounds float* %tmp13437, i64 1
- %tmp13439 = getelementptr inbounds float* %tmp13438, i64 1
- %tmp13440 = getelementptr inbounds float* %tmp13439, i64 1
- %tmp13441 = getelementptr inbounds float* %tmp13440, i64 1
- %tmp13442 = getelementptr inbounds float* %tmp13441, i64 1
- %tmp13443 = getelementptr inbounds float* %tmp13442, i64 1
- %tmp13444 = getelementptr inbounds float* %tmp13443, i64 1
- %tmp13445 = getelementptr inbounds float* %tmp13444, i64 1
- %tmp13446 = getelementptr inbounds float* %tmp13445, i64 1
- %tmp13447 = getelementptr inbounds float* %tmp13446, i64 1
- %tmp13448 = getelementptr inbounds float* %tmp13447, i64 1
- %tmp13449 = getelementptr inbounds float* %tmp13448, i64 1
- %tmp13450 = getelementptr inbounds float* %tmp13449, i64 1
- %tmp13451 = getelementptr inbounds float* %tmp13450, i64 1
- %tmp13452 = getelementptr inbounds float* %tmp13451, i64 1
- %tmp13453 = getelementptr inbounds float* %tmp13452, i64 1
- %tmp13454 = getelementptr inbounds float* %tmp13453, i64 1
- %tmp13455 = getelementptr inbounds float* %tmp13454, i64 1
- %tmp13456 = getelementptr inbounds float* %tmp13455, i64 1
- %tmp13457 = getelementptr inbounds float* %tmp13456, i64 1
- %tmp13458 = getelementptr inbounds float* %tmp13457, i64 1
- %tmp13459 = getelementptr inbounds float* %tmp13458, i64 1
- %tmp13460 = getelementptr inbounds float* %tmp13459, i64 1
- %tmp13461 = getelementptr inbounds float* %tmp13460, i64 1
- %tmp13462 = getelementptr inbounds float* %tmp13461, i64 1
- %tmp13463 = getelementptr inbounds float* %tmp13462, i64 1
- %tmp13464 = getelementptr inbounds float* %tmp13463, i64 1
- %tmp13465 = getelementptr inbounds float* %tmp13464, i64 1
- %tmp13466 = getelementptr inbounds float* %tmp13465, i64 1
- %tmp13467 = getelementptr inbounds float* %tmp13466, i64 1
- %tmp13468 = getelementptr inbounds float* %tmp13467, i64 1
- %tmp13469 = getelementptr inbounds float* %tmp13468, i64 1
- %tmp13470 = getelementptr inbounds float* %tmp13469, i64 1
- %tmp13471 = getelementptr inbounds float* %tmp13470, i64 1
- %tmp13472 = getelementptr inbounds float* %tmp13471, i64 1
- %tmp13473 = getelementptr inbounds float* %tmp13472, i64 1
- %tmp13474 = getelementptr inbounds float* %tmp13473, i64 1
- %tmp13475 = getelementptr inbounds float* %tmp13474, i64 1
- %tmp13476 = getelementptr inbounds float* %tmp13475, i64 1
- %tmp13477 = getelementptr inbounds float* %tmp13476, i64 1
- %tmp13478 = getelementptr inbounds float* %tmp13477, i64 1
- %tmp13479 = getelementptr inbounds float* %tmp13478, i64 1
- %tmp13480 = getelementptr inbounds float* %tmp13479, i64 1
- %tmp13481 = getelementptr inbounds float* %tmp13480, i64 1
- %tmp13482 = getelementptr inbounds float* %tmp13481, i64 1
- %tmp13483 = getelementptr inbounds float* %tmp13482, i64 1
- %tmp13484 = getelementptr inbounds float* %tmp13483, i64 1
- %tmp13485 = getelementptr inbounds float* %tmp13484, i64 1
- %tmp13486 = getelementptr inbounds float* %tmp13485, i64 1
- %tmp13487 = getelementptr inbounds float* %tmp13486, i64 1
- %tmp13488 = getelementptr inbounds float* %tmp13487, i64 1
- %tmp13489 = getelementptr inbounds float* %tmp13488, i64 1
- %tmp13490 = getelementptr inbounds float* %tmp13489, i64 1
- %tmp13491 = getelementptr inbounds float* %tmp13490, i64 1
- %tmp13492 = getelementptr inbounds float* %tmp13491, i64 1
- %tmp13493 = getelementptr inbounds float* %tmp13492, i64 1
- %tmp13494 = getelementptr inbounds float* %tmp13493, i64 1
- %tmp13495 = getelementptr inbounds float* %tmp13494, i64 1
- %tmp13496 = getelementptr inbounds float* %tmp13495, i64 1
- %tmp13497 = getelementptr inbounds float* %tmp13496, i64 1
- %tmp13498 = getelementptr inbounds float* %tmp13497, i64 1
- %tmp13499 = getelementptr inbounds float* %tmp13498, i64 1
- %tmp13500 = getelementptr inbounds float* %tmp13499, i64 1
- %tmp13501 = getelementptr inbounds float* %tmp13500, i64 1
- %tmp13502 = getelementptr inbounds float* %tmp13501, i64 1
- %tmp13503 = getelementptr inbounds float* %tmp13502, i64 1
- %tmp13504 = getelementptr inbounds float* %tmp13503, i64 1
- %tmp13505 = getelementptr inbounds float* %tmp13504, i64 1
- %tmp13506 = getelementptr inbounds float* %tmp13505, i64 1
- %tmp13507 = getelementptr inbounds float* %tmp13506, i64 1
- %tmp13508 = getelementptr inbounds float* %tmp13507, i64 1
- %tmp13509 = getelementptr inbounds float* %tmp13508, i64 1
- %tmp13510 = getelementptr inbounds float* %tmp13509, i64 1
- %tmp13511 = getelementptr inbounds float* %tmp13510, i64 1
- %tmp13512 = getelementptr inbounds float* %tmp13511, i64 1
- %tmp13513 = getelementptr inbounds float* %tmp13512, i64 1
- %tmp13514 = getelementptr inbounds float* %tmp13513, i64 1
- %tmp13515 = getelementptr inbounds float* %tmp13514, i64 1
- %tmp13516 = getelementptr inbounds float* %tmp13515, i64 1
- %tmp13517 = getelementptr inbounds float* %tmp13516, i64 1
- %tmp13518 = getelementptr inbounds float* %tmp13517, i64 1
- %tmp13519 = getelementptr inbounds float* %tmp13518, i64 1
- %tmp13520 = getelementptr inbounds float* %tmp13519, i64 1
- %tmp13521 = getelementptr inbounds float* %tmp13520, i64 1
- %tmp13522 = getelementptr inbounds float* %tmp13521, i64 1
- %tmp13523 = getelementptr inbounds float* %tmp13522, i64 1
- %tmp13524 = getelementptr inbounds float* %tmp13523, i64 1
- %tmp13525 = getelementptr inbounds float* %tmp13524, i64 1
- %tmp13526 = getelementptr inbounds float* %tmp13525, i64 1
- %tmp13527 = getelementptr inbounds float* %tmp13526, i64 1
- %tmp13528 = getelementptr inbounds float* %tmp13527, i64 1
- %tmp13529 = getelementptr inbounds float* %tmp13528, i64 1
- %tmp13530 = getelementptr inbounds float* %tmp13529, i64 1
- %tmp13531 = getelementptr inbounds float* %tmp13530, i64 1
- %tmp13532 = getelementptr inbounds float* %tmp13531, i64 1
- %tmp13533 = getelementptr inbounds float* %tmp13532, i64 1
- %tmp13534 = getelementptr inbounds float* %tmp13533, i64 1
- %tmp13535 = getelementptr inbounds float* %tmp13534, i64 1
- %tmp13536 = getelementptr inbounds float* %tmp13535, i64 1
- %tmp13537 = getelementptr inbounds float* %tmp13536, i64 1
- %tmp13538 = getelementptr inbounds float* %tmp13537, i64 1
- %tmp13539 = getelementptr inbounds float* %tmp13538, i64 1
- %tmp13540 = getelementptr inbounds float* %tmp13539, i64 1
- %tmp13541 = getelementptr inbounds float* %tmp13540, i64 1
- %tmp13542 = getelementptr inbounds float* %tmp13541, i64 1
- %tmp13543 = getelementptr inbounds float* %tmp13542, i64 1
- %tmp13544 = getelementptr inbounds float* %tmp13543, i64 1
- %tmp13545 = getelementptr inbounds float* %tmp13544, i64 1
- %tmp13546 = getelementptr inbounds float* %tmp13545, i64 1
- %tmp13547 = getelementptr inbounds float* %tmp13546, i64 1
- %tmp13548 = getelementptr inbounds float* %tmp13547, i64 1
- %tmp13549 = getelementptr inbounds float* %tmp13548, i64 1
- %tmp13550 = getelementptr inbounds float* %tmp13549, i64 1
- %tmp13551 = getelementptr inbounds float* %tmp13550, i64 1
- %tmp13552 = getelementptr inbounds float* %tmp13551, i64 1
- %tmp13553 = getelementptr inbounds float* %tmp13552, i64 1
- %tmp13554 = getelementptr inbounds float* %tmp13553, i64 1
- %tmp13555 = getelementptr inbounds float* %tmp13554, i64 1
- %tmp13556 = getelementptr inbounds float* %tmp13555, i64 1
- %tmp13557 = getelementptr inbounds float* %tmp13556, i64 1
- %tmp13558 = getelementptr inbounds float* %tmp13557, i64 1
- %tmp13559 = getelementptr inbounds float* %tmp13558, i64 1
- %tmp13560 = getelementptr inbounds float* %tmp13559, i64 1
- %tmp13561 = getelementptr inbounds float* %tmp13560, i64 1
- %tmp13562 = getelementptr inbounds float* %tmp13561, i64 1
- %tmp13563 = getelementptr inbounds float* %tmp13562, i64 1
- %tmp13564 = getelementptr inbounds float* %tmp13563, i64 1
- %tmp13565 = getelementptr inbounds float* %tmp13564, i64 1
- %tmp13566 = getelementptr inbounds float* %tmp13565, i64 1
- %tmp13567 = getelementptr inbounds float* %tmp13566, i64 1
- %tmp13568 = getelementptr inbounds float* %tmp13567, i64 1
- %tmp13569 = getelementptr inbounds float* %tmp13568, i64 1
- %tmp13570 = getelementptr inbounds float* %tmp13569, i64 1
- %tmp13571 = getelementptr inbounds float* %tmp13570, i64 1
- %tmp13572 = getelementptr inbounds float* %tmp13571, i64 1
- %tmp13573 = getelementptr inbounds float* %tmp13572, i64 1
- %tmp13574 = getelementptr inbounds float* %tmp13573, i64 1
- %tmp13575 = getelementptr inbounds float* %tmp13574, i64 1
- %tmp13576 = getelementptr inbounds float* %tmp13575, i64 1
- %tmp13577 = getelementptr inbounds float* %tmp13576, i64 1
- %tmp13578 = getelementptr inbounds float* %tmp13577, i64 1
- %tmp13579 = getelementptr inbounds float* %tmp13578, i64 1
- %tmp13580 = getelementptr inbounds float* %tmp13579, i64 1
- %tmp13581 = getelementptr inbounds float* %tmp13580, i64 1
- %tmp13582 = getelementptr inbounds float* %tmp13581, i64 1
- %tmp13583 = getelementptr inbounds float* %tmp13582, i64 1
- %tmp13584 = getelementptr inbounds float* %tmp13583, i64 1
- %tmp13585 = getelementptr inbounds float* %tmp13584, i64 1
- %tmp13586 = getelementptr inbounds float* %tmp13585, i64 1
- %tmp13587 = getelementptr inbounds float* %tmp13586, i64 1
- %tmp13588 = getelementptr inbounds float* %tmp13587, i64 1
- %tmp13589 = getelementptr inbounds float* %tmp13588, i64 1
- %tmp13590 = getelementptr inbounds float* %tmp13589, i64 1
- %tmp13591 = getelementptr inbounds float* %tmp13590, i64 1
- %tmp13592 = getelementptr inbounds float* %tmp13591, i64 1
- %tmp13593 = getelementptr inbounds float* %tmp13592, i64 1
- %tmp13594 = getelementptr inbounds float* %tmp13593, i64 1
- %tmp13595 = getelementptr inbounds float* %tmp13594, i64 1
- %tmp13596 = getelementptr inbounds float* %tmp13595, i64 1
- %tmp13597 = getelementptr inbounds float* %tmp13596, i64 1
- %tmp13598 = getelementptr inbounds float* %tmp13597, i64 1
- %tmp13599 = getelementptr inbounds float* %tmp13598, i64 1
- %tmp13600 = getelementptr inbounds float* %tmp13599, i64 1
- %tmp13601 = getelementptr inbounds float* %tmp13600, i64 1
- %tmp13602 = getelementptr inbounds float* %tmp13601, i64 1
- %tmp13603 = getelementptr inbounds float* %tmp13602, i64 1
- %tmp13604 = getelementptr inbounds float* %tmp13603, i64 1
- %tmp13605 = getelementptr inbounds float* %tmp13604, i64 1
- %tmp13606 = getelementptr inbounds float* %tmp13605, i64 1
- %tmp13607 = getelementptr inbounds float* %tmp13606, i64 1
- %tmp13608 = getelementptr inbounds float* %tmp13607, i64 1
- %tmp13609 = getelementptr inbounds float* %tmp13608, i64 1
- %tmp13610 = getelementptr inbounds float* %tmp13609, i64 1
- %tmp13611 = getelementptr inbounds float* %tmp13610, i64 1
- %tmp13612 = getelementptr inbounds float* %tmp13611, i64 1
- %tmp13613 = getelementptr inbounds float* %tmp13612, i64 1
- %tmp13614 = getelementptr inbounds float* %tmp13613, i64 1
- %tmp13615 = getelementptr inbounds float* %tmp13614, i64 1
- %tmp13616 = getelementptr inbounds float* %tmp13615, i64 1
- %tmp13617 = getelementptr inbounds float* %tmp13616, i64 1
- %tmp13618 = getelementptr inbounds float* %tmp13617, i64 1
- %tmp13619 = getelementptr inbounds float* %tmp13618, i64 1
- %tmp13620 = getelementptr inbounds float* %tmp13619, i64 1
- %tmp13621 = getelementptr inbounds float* %tmp13620, i64 1
- %tmp13622 = getelementptr inbounds float* %tmp13621, i64 1
- %tmp13623 = getelementptr inbounds float* %tmp13622, i64 1
- %tmp13624 = getelementptr inbounds float* %tmp13623, i64 1
- %tmp13625 = getelementptr inbounds float* %tmp13624, i64 1
- %tmp13626 = getelementptr inbounds float* %tmp13625, i64 1
- %tmp13627 = getelementptr inbounds float* %tmp13626, i64 1
- %tmp13628 = getelementptr inbounds float* %tmp13627, i64 1
- %tmp13629 = getelementptr inbounds float* %tmp13628, i64 1
- %tmp13630 = getelementptr inbounds float* %tmp13629, i64 1
- %tmp13631 = getelementptr inbounds float* %tmp13630, i64 1
- %tmp13632 = getelementptr inbounds float* %tmp13631, i64 1
- %tmp13633 = getelementptr inbounds float* %tmp13632, i64 1
- %tmp13634 = getelementptr inbounds float* %tmp13633, i64 1
- %tmp13635 = getelementptr inbounds float* %tmp13634, i64 1
- %tmp13636 = getelementptr inbounds float* %tmp13635, i64 1
- %tmp13637 = getelementptr inbounds float* %tmp13636, i64 1
- %tmp13638 = getelementptr inbounds float* %tmp13637, i64 1
- %tmp13639 = getelementptr inbounds float* %tmp13638, i64 1
- %tmp13640 = getelementptr inbounds float* %tmp13639, i64 1
- %tmp13641 = getelementptr inbounds float* %tmp13640, i64 1
- %tmp13642 = getelementptr inbounds float* %tmp13641, i64 1
- %tmp13643 = getelementptr inbounds float* %tmp13642, i64 1
- %tmp13644 = getelementptr inbounds float* %tmp13643, i64 1
- %tmp13645 = getelementptr inbounds float* %tmp13644, i64 1
- %tmp13646 = getelementptr inbounds float* %tmp13645, i64 1
- %tmp13647 = getelementptr inbounds float* %tmp13646, i64 1
- %tmp13648 = getelementptr inbounds float* %tmp13647, i64 1
- %tmp13649 = getelementptr inbounds float* %tmp13648, i64 1
- %tmp13650 = getelementptr inbounds float* %tmp13649, i64 1
- %tmp13651 = getelementptr inbounds float* %tmp13650, i64 1
- %tmp13652 = getelementptr inbounds float* %tmp13651, i64 1
- %tmp13653 = getelementptr inbounds float* %tmp13652, i64 1
- %tmp13654 = getelementptr inbounds float* %tmp13653, i64 1
- %tmp13655 = getelementptr inbounds float* %tmp13654, i64 1
- %tmp13656 = getelementptr inbounds float* %tmp13655, i64 1
- %tmp13657 = getelementptr inbounds float* %tmp13656, i64 1
- %tmp13658 = getelementptr inbounds float* %tmp13657, i64 1
- %tmp13659 = getelementptr inbounds float* %tmp13658, i64 1
- %tmp13660 = getelementptr inbounds float* %tmp13659, i64 1
- %tmp13661 = getelementptr inbounds float* %tmp13660, i64 1
- %tmp13662 = getelementptr inbounds float* %tmp13661, i64 1
- %tmp13663 = getelementptr inbounds float* %tmp13662, i64 1
- %tmp13664 = getelementptr inbounds float* %tmp13663, i64 1
- %tmp13665 = getelementptr inbounds float* %tmp13664, i64 1
- %tmp13666 = getelementptr inbounds float* %tmp13665, i64 1
- %tmp13667 = getelementptr inbounds float* %tmp13666, i64 1
- %tmp13668 = getelementptr inbounds float* %tmp13667, i64 1
- %tmp13669 = getelementptr inbounds float* %tmp13668, i64 1
- %tmp13670 = getelementptr inbounds float* %tmp13669, i64 1
- %tmp13671 = getelementptr inbounds float* %tmp13670, i64 1
- %tmp13672 = getelementptr inbounds float* %tmp13671, i64 1
- %tmp13673 = getelementptr inbounds float* %tmp13672, i64 1
- %tmp13674 = getelementptr inbounds float* %tmp13673, i64 1
- %tmp13675 = getelementptr inbounds float* %tmp13674, i64 1
- %tmp13676 = getelementptr inbounds float* %tmp13675, i64 1
- %tmp13677 = getelementptr inbounds float* %tmp13676, i64 1
- %tmp13678 = getelementptr inbounds float* %tmp13677, i64 1
- %tmp13679 = getelementptr inbounds float* %tmp13678, i64 1
- %tmp13680 = getelementptr inbounds float* %tmp13679, i64 1
- %tmp13681 = getelementptr inbounds float* %tmp13680, i64 1
- %tmp13682 = getelementptr inbounds float* %tmp13681, i64 1
- %tmp13683 = getelementptr inbounds float* %tmp13682, i64 1
- %tmp13684 = getelementptr inbounds float* %tmp13683, i64 1
- %tmp13685 = getelementptr inbounds float* %tmp13684, i64 1
- %tmp13686 = getelementptr inbounds float* %tmp13685, i64 1
- %tmp13687 = getelementptr inbounds float* %tmp13686, i64 1
- %tmp13688 = getelementptr inbounds float* %tmp13687, i64 1
- %tmp13689 = getelementptr inbounds float* %tmp13688, i64 1
- %tmp13690 = getelementptr inbounds float* %tmp13689, i64 1
- %tmp13691 = getelementptr inbounds float* %tmp13690, i64 1
- %tmp13692 = getelementptr inbounds float* %tmp13691, i64 1
- %tmp13693 = getelementptr inbounds float* %tmp13692, i64 1
- %tmp13694 = getelementptr inbounds float* %tmp13693, i64 1
- %tmp13695 = getelementptr inbounds float* %tmp13694, i64 1
- %tmp13696 = getelementptr inbounds float* %tmp13695, i64 1
- %tmp13697 = getelementptr inbounds float* %tmp13696, i64 1
- %tmp13698 = getelementptr inbounds float* %tmp13697, i64 1
- %tmp13699 = getelementptr inbounds float* %tmp13698, i64 1
- %tmp13700 = getelementptr inbounds float* %tmp13699, i64 1
- %tmp13701 = getelementptr inbounds float* %tmp13700, i64 1
- %tmp13702 = getelementptr inbounds float* %tmp13701, i64 1
- %tmp13703 = getelementptr inbounds float* %tmp13702, i64 1
- %tmp13704 = getelementptr inbounds float* %tmp13703, i64 1
- %tmp13705 = getelementptr inbounds float* %tmp13704, i64 1
- %tmp13706 = getelementptr inbounds float* %tmp13705, i64 1
- %tmp13707 = getelementptr inbounds float* %tmp13706, i64 1
- %tmp13708 = getelementptr inbounds float* %tmp13707, i64 1
- %tmp13709 = getelementptr inbounds float* %tmp13708, i64 1
- %tmp13710 = getelementptr inbounds float* %tmp13709, i64 1
- %tmp13711 = getelementptr inbounds float* %tmp13710, i64 1
- %tmp13712 = getelementptr inbounds float* %tmp13711, i64 1
- %tmp13713 = getelementptr inbounds float* %tmp13712, i64 1
- %tmp13714 = getelementptr inbounds float* %tmp13713, i64 1
- %tmp13715 = getelementptr inbounds float* %tmp13714, i64 1
- %tmp13716 = getelementptr inbounds float* %tmp13715, i64 1
- %tmp13717 = getelementptr inbounds float* %tmp13716, i64 1
- %tmp13718 = getelementptr inbounds float* %tmp13717, i64 1
- %tmp13719 = getelementptr inbounds float* %tmp13718, i64 1
- %tmp13720 = getelementptr inbounds float* %tmp13719, i64 1
- %tmp13721 = getelementptr inbounds float* %tmp13720, i64 1
- %tmp13722 = getelementptr inbounds float* %tmp13721, i64 1
- %tmp13723 = getelementptr inbounds float* %tmp13722, i64 1
- %tmp13724 = getelementptr inbounds float* %tmp13723, i64 1
- %tmp13725 = getelementptr inbounds float* %tmp13724, i64 1
- %tmp13726 = getelementptr inbounds float* %tmp13725, i64 1
- %tmp13727 = getelementptr inbounds float* %tmp13726, i64 1
- %tmp13728 = getelementptr inbounds float* %tmp13727, i64 1
- %tmp13729 = getelementptr inbounds float* %tmp13728, i64 1
- %tmp13730 = getelementptr inbounds float* %tmp13729, i64 1
- %tmp13731 = getelementptr inbounds float* %tmp13730, i64 1
- %tmp13732 = getelementptr inbounds float* %tmp13731, i64 1
- %tmp13733 = getelementptr inbounds float* %tmp13732, i64 1
- %tmp13734 = getelementptr inbounds float* %tmp13733, i64 1
- %tmp13735 = getelementptr inbounds float* %tmp13734, i64 1
- %tmp13736 = getelementptr inbounds float* %tmp13735, i64 1
- %tmp13737 = getelementptr inbounds float* %tmp13736, i64 1
- %tmp13738 = getelementptr inbounds float* %tmp13737, i64 1
- %tmp13739 = getelementptr inbounds float* %tmp13738, i64 1
- %tmp13740 = getelementptr inbounds float* %tmp13739, i64 1
- %tmp13741 = getelementptr inbounds float* %tmp13740, i64 1
- %tmp13742 = getelementptr inbounds float* %tmp13741, i64 1
- %tmp13743 = getelementptr inbounds float* %tmp13742, i64 1
- %tmp13744 = getelementptr inbounds float* %tmp13743, i64 1
- %tmp13745 = getelementptr inbounds float* %tmp13744, i64 1
- %tmp13746 = getelementptr inbounds float* %tmp13745, i64 1
- %tmp13747 = getelementptr inbounds float* %tmp13746, i64 1
- %tmp13748 = getelementptr inbounds float* %tmp13747, i64 1
- %tmp13749 = getelementptr inbounds float* %tmp13748, i64 1
- %tmp13750 = getelementptr inbounds float* %tmp13749, i64 1
- %tmp13751 = getelementptr inbounds float* %tmp13750, i64 1
- %tmp13752 = getelementptr inbounds float* %tmp13751, i64 1
- %tmp13753 = getelementptr inbounds float* %tmp13752, i64 1
- %tmp13754 = getelementptr inbounds float* %tmp13753, i64 1
- %tmp13755 = getelementptr inbounds float* %tmp13754, i64 1
- %tmp13756 = getelementptr inbounds float* %tmp13755, i64 1
- %tmp13757 = getelementptr inbounds float* %tmp13756, i64 1
- %tmp13758 = getelementptr inbounds float* %tmp13757, i64 1
- %tmp13759 = getelementptr inbounds float* %tmp13758, i64 1
- %tmp13760 = getelementptr inbounds float* %tmp13759, i64 1
- %tmp13761 = getelementptr inbounds float* %tmp13760, i64 1
- %tmp13762 = getelementptr inbounds float* %tmp13761, i64 1
- %tmp13763 = getelementptr inbounds float* %tmp13762, i64 1
- %tmp13764 = getelementptr inbounds float* %tmp13763, i64 1
- %tmp13765 = getelementptr inbounds float* %tmp13764, i64 1
- %tmp13766 = getelementptr inbounds float* %tmp13765, i64 1
- %tmp13767 = getelementptr inbounds float* %tmp13766, i64 1
- %tmp13768 = getelementptr inbounds float* %tmp13767, i64 1
- %tmp13769 = getelementptr inbounds float* %tmp13768, i64 1
- %tmp13770 = getelementptr inbounds float* %tmp13769, i64 1
- %tmp13771 = getelementptr inbounds float* %tmp13770, i64 1
- %tmp13772 = getelementptr inbounds float* %tmp13771, i64 1
- %tmp13773 = getelementptr inbounds float* %tmp13772, i64 1
- %tmp13774 = getelementptr inbounds float* %tmp13773, i64 1
- %tmp13775 = getelementptr inbounds float* %tmp13774, i64 1
- %tmp13776 = getelementptr inbounds float* %tmp13775, i64 1
- %tmp13777 = getelementptr inbounds float* %tmp13776, i64 1
- %tmp13778 = getelementptr inbounds float* %tmp13777, i64 1
- %tmp13779 = getelementptr inbounds float* %tmp13778, i64 1
- %tmp13780 = getelementptr inbounds float* %tmp13779, i64 1
- %tmp13781 = getelementptr inbounds float* %tmp13780, i64 1
- %tmp13782 = getelementptr inbounds float* %tmp13781, i64 1
- %tmp13783 = getelementptr inbounds float* %tmp13782, i64 1
- %tmp13784 = getelementptr inbounds float* %tmp13783, i64 1
- %tmp13785 = getelementptr inbounds float* %tmp13784, i64 1
- %tmp13786 = getelementptr inbounds float* %tmp13785, i64 1
- %tmp13787 = getelementptr inbounds float* %tmp13786, i64 1
- %tmp13788 = getelementptr inbounds float* %tmp13787, i64 1
- %tmp13789 = getelementptr inbounds float* %tmp13788, i64 1
- %tmp13790 = getelementptr inbounds float* %tmp13789, i64 1
- %tmp13791 = getelementptr inbounds float* %tmp13790, i64 1
- %tmp13792 = getelementptr inbounds float* %tmp13791, i64 1
- %tmp13793 = getelementptr inbounds float* %tmp13792, i64 1
- %tmp13794 = getelementptr inbounds float* %tmp13793, i64 1
- %tmp13795 = getelementptr inbounds float* %tmp13794, i64 1
- %tmp13796 = getelementptr inbounds float* %tmp13795, i64 1
- %tmp13797 = getelementptr inbounds float* %tmp13796, i64 1
- %tmp13798 = getelementptr inbounds float* %tmp13797, i64 1
- %tmp13799 = getelementptr inbounds float* %tmp13798, i64 1
- %tmp13800 = getelementptr inbounds float* %tmp13799, i64 1
- %tmp13801 = getelementptr inbounds float* %tmp13800, i64 1
- %tmp13802 = getelementptr inbounds float* %tmp13801, i64 1
- %tmp13803 = getelementptr inbounds float* %tmp13802, i64 1
- %tmp13804 = getelementptr inbounds float* %tmp13803, i64 1
- %tmp13805 = getelementptr inbounds float* %tmp13804, i64 1
- %tmp13806 = getelementptr inbounds float* %tmp13805, i64 1
- %tmp13807 = getelementptr inbounds float* %tmp13806, i64 1
- %tmp13808 = getelementptr inbounds float* %tmp13807, i64 1
- %tmp13809 = getelementptr inbounds float* %tmp13808, i64 1
- %tmp13810 = getelementptr inbounds float* %tmp13809, i64 1
- %tmp13811 = getelementptr inbounds float* %tmp13810, i64 1
- %tmp13812 = getelementptr inbounds float* %tmp13811, i64 1
- %tmp13813 = getelementptr inbounds float* %tmp13812, i64 1
- %tmp13814 = getelementptr inbounds float* %tmp13813, i64 1
- %tmp13815 = getelementptr inbounds float* %tmp13814, i64 1
- %tmp13816 = getelementptr inbounds float* %tmp13815, i64 1
- %tmp13817 = getelementptr inbounds float* %tmp13816, i64 1
- %tmp13818 = getelementptr inbounds float* %tmp13817, i64 1
- %tmp13819 = getelementptr inbounds float* %tmp13818, i64 1
- %tmp13820 = getelementptr inbounds float* %tmp13819, i64 1
- %tmp13821 = getelementptr inbounds float* %tmp13820, i64 1
- %tmp13822 = getelementptr inbounds float* %tmp13821, i64 1
- %tmp13823 = getelementptr inbounds float* %tmp13822, i64 1
- %tmp13824 = getelementptr inbounds float* %tmp13823, i64 1
- %tmp13825 = getelementptr inbounds float* %tmp13824, i64 1
- %tmp13826 = getelementptr inbounds float* %tmp13825, i64 1
- %tmp13827 = getelementptr inbounds float* %tmp13826, i64 1
- %tmp13828 = getelementptr inbounds float* %tmp13827, i64 1
- %tmp13829 = getelementptr inbounds float* %tmp13828, i64 1
- %tmp13830 = getelementptr inbounds float* %tmp13829, i64 1
- %tmp13831 = getelementptr inbounds float* %tmp13830, i64 1
- %tmp13832 = getelementptr inbounds float* %tmp13831, i64 1
- %tmp13833 = getelementptr inbounds float* %tmp13832, i64 1
- %tmp13834 = getelementptr inbounds float* %tmp13833, i64 1
- %tmp13835 = getelementptr inbounds float* %tmp13834, i64 1
- %tmp13836 = getelementptr inbounds float* %tmp13835, i64 1
- %tmp13837 = getelementptr inbounds float* %tmp13836, i64 1
- %tmp13838 = getelementptr inbounds float* %tmp13837, i64 1
- %tmp13839 = getelementptr inbounds float* %tmp13838, i64 1
- %tmp13840 = getelementptr inbounds float* %tmp13839, i64 1
- %tmp13841 = getelementptr inbounds float* %tmp13840, i64 1
- %tmp13842 = getelementptr inbounds float* %tmp13841, i64 1
- %tmp13843 = getelementptr inbounds float* %tmp13842, i64 1
- %tmp13844 = getelementptr inbounds float* %tmp13843, i64 1
- %tmp13845 = getelementptr inbounds float* %tmp13844, i64 1
- %tmp13846 = getelementptr inbounds float* %tmp13845, i64 1
- %tmp13847 = getelementptr inbounds float* %tmp13846, i64 1
- %tmp13848 = getelementptr inbounds float* %tmp13847, i64 1
- %tmp13849 = getelementptr inbounds float* %tmp13848, i64 1
- %tmp13850 = getelementptr inbounds float* %tmp13849, i64 1
- %tmp13851 = getelementptr inbounds float* %tmp13850, i64 1
- %tmp13852 = getelementptr inbounds float* %tmp13851, i64 1
- %tmp13853 = getelementptr inbounds float* %tmp13852, i64 1
- %tmp13854 = getelementptr inbounds float* %tmp13853, i64 1
- %tmp13855 = getelementptr inbounds float* %tmp13854, i64 1
- %tmp13856 = getelementptr inbounds float* %tmp13855, i64 1
- %tmp13857 = getelementptr inbounds float* %tmp13856, i64 1
- %tmp13858 = getelementptr inbounds float* %tmp13857, i64 1
- %tmp13859 = getelementptr inbounds float* %tmp13858, i64 1
- %tmp13860 = getelementptr inbounds float* %tmp13859, i64 1
- %tmp13861 = getelementptr inbounds float* %tmp13860, i64 1
- %tmp13862 = getelementptr inbounds float* %tmp13861, i64 1
- %tmp13863 = getelementptr inbounds float* %tmp13862, i64 1
- %tmp13864 = getelementptr inbounds float* %tmp13863, i64 1
- %tmp13865 = getelementptr inbounds float* %tmp13864, i64 1
- %tmp13866 = getelementptr inbounds float* %tmp13865, i64 1
- %tmp13867 = getelementptr inbounds float* %tmp13866, i64 1
- %tmp13868 = getelementptr inbounds float* %tmp13867, i64 1
- %tmp13869 = getelementptr inbounds float* %tmp13868, i64 1
- %tmp13870 = getelementptr inbounds float* %tmp13869, i64 1
- %tmp13871 = getelementptr inbounds float* %tmp13870, i64 1
- %tmp13872 = getelementptr inbounds float* %tmp13871, i64 1
- %tmp13873 = getelementptr inbounds float* %tmp13872, i64 1
- %tmp13874 = getelementptr inbounds float* %tmp13873, i64 1
- %tmp13875 = getelementptr inbounds float* %tmp13874, i64 1
- %tmp13876 = getelementptr inbounds float* %tmp13875, i64 1
- %tmp13877 = getelementptr inbounds float* %tmp13876, i64 1
- %tmp13878 = getelementptr inbounds float* %tmp13877, i64 1
- %tmp13879 = getelementptr inbounds float* %tmp13878, i64 1
- %tmp13880 = getelementptr inbounds float* %tmp13879, i64 1
- %tmp13881 = getelementptr inbounds float* %tmp13880, i64 1
- %tmp13882 = getelementptr inbounds float* %tmp13881, i64 1
- %tmp13883 = getelementptr inbounds float* %tmp13882, i64 1
- %tmp13884 = getelementptr inbounds float* %tmp13883, i64 1
- %tmp13885 = getelementptr inbounds float* %tmp13884, i64 1
- %tmp13886 = getelementptr inbounds float* %tmp13885, i64 1
- %tmp13887 = getelementptr inbounds float* %tmp13886, i64 1
- %tmp13888 = getelementptr inbounds float* %tmp13887, i64 1
- %tmp13889 = getelementptr inbounds float* %tmp13888, i64 1
- %tmp13890 = getelementptr inbounds float* %tmp13889, i64 1
- %tmp13891 = getelementptr inbounds float* %tmp13890, i64 1
- %tmp13892 = getelementptr inbounds float* %tmp13891, i64 1
- %tmp13893 = getelementptr inbounds float* %tmp13892, i64 1
- %tmp13894 = getelementptr inbounds float* %tmp13893, i64 1
- %tmp13895 = getelementptr inbounds float* %tmp13894, i64 1
- %tmp13896 = getelementptr inbounds float* %tmp13895, i64 1
- %tmp13897 = getelementptr inbounds float* %tmp13896, i64 1
- %tmp13898 = getelementptr inbounds float* %tmp13897, i64 1
- %tmp13899 = getelementptr inbounds float* %tmp13898, i64 1
- %tmp13900 = getelementptr inbounds float* %tmp13899, i64 1
- %tmp13901 = getelementptr inbounds float* %tmp13900, i64 1
- %tmp13902 = getelementptr inbounds float* %tmp13901, i64 1
- %tmp13903 = getelementptr inbounds float* %tmp13902, i64 1
- %tmp13904 = getelementptr inbounds float* %tmp13903, i64 1
- %tmp13905 = getelementptr inbounds float* %tmp13904, i64 1
- %tmp13906 = getelementptr inbounds float* %tmp13905, i64 1
- %tmp13907 = getelementptr inbounds float* %tmp13906, i64 1
- %tmp13908 = getelementptr inbounds float* %tmp13907, i64 1
- %tmp13909 = getelementptr inbounds float* %tmp13908, i64 1
- %tmp13910 = getelementptr inbounds float* %tmp13909, i64 1
- %tmp13911 = getelementptr inbounds float* %tmp13910, i64 1
- %tmp13912 = getelementptr inbounds float* %tmp13911, i64 1
- %tmp13913 = getelementptr inbounds float* %tmp13912, i64 1
- %tmp13914 = getelementptr inbounds float* %tmp13913, i64 1
- %tmp13915 = getelementptr inbounds float* %tmp13914, i64 1
- %tmp13916 = getelementptr inbounds float* %tmp13915, i64 1
- %tmp13917 = getelementptr inbounds float* %tmp13916, i64 1
- %tmp13918 = getelementptr inbounds float* %tmp13917, i64 1
- %tmp13919 = getelementptr inbounds float* %tmp13918, i64 1
- %tmp13920 = getelementptr inbounds float* %tmp13919, i64 1
- %tmp13921 = getelementptr inbounds float* %tmp13920, i64 1
- %tmp13922 = getelementptr inbounds float* %tmp13921, i64 1
- %tmp13923 = getelementptr inbounds float* %tmp13922, i64 1
- %tmp13924 = getelementptr inbounds float* %tmp13923, i64 1
- %tmp13925 = getelementptr inbounds float* %tmp13924, i64 1
- %tmp13926 = getelementptr inbounds float* %tmp13925, i64 1
- %tmp13927 = getelementptr inbounds float* %tmp13926, i64 1
- %tmp13928 = getelementptr inbounds float* %tmp13927, i64 1
- %tmp13929 = getelementptr inbounds float* %tmp13928, i64 1
- %tmp13930 = getelementptr inbounds float* %tmp13929, i64 1
- %tmp13931 = getelementptr inbounds float* %tmp13930, i64 1
- %tmp13932 = getelementptr inbounds float* %tmp13931, i64 1
- %tmp13933 = getelementptr inbounds float* %tmp13932, i64 1
- %tmp13934 = getelementptr inbounds float* %tmp13933, i64 1
- %tmp13935 = getelementptr inbounds float* %tmp13934, i64 1
- %tmp13936 = getelementptr inbounds float* %tmp13935, i64 1
- %tmp13937 = getelementptr inbounds float* %tmp13936, i64 1
- %tmp13938 = getelementptr inbounds float* %tmp13937, i64 1
- %tmp13939 = getelementptr inbounds float* %tmp13938, i64 1
- %tmp13940 = getelementptr inbounds float* %tmp13939, i64 1
- %tmp13941 = getelementptr inbounds float* %tmp13940, i64 1
- %tmp13942 = getelementptr inbounds float* %tmp13941, i64 1
- %tmp13943 = getelementptr inbounds float* %tmp13942, i64 1
- %tmp13944 = getelementptr inbounds float* %tmp13943, i64 1
- %tmp13945 = getelementptr inbounds float* %tmp13944, i64 1
- %tmp13946 = getelementptr inbounds float* %tmp13945, i64 1
- %tmp13947 = getelementptr inbounds float* %tmp13946, i64 1
- %tmp13948 = getelementptr inbounds float* %tmp13947, i64 1
- %tmp13949 = getelementptr inbounds float* %tmp13948, i64 1
- %tmp13950 = getelementptr inbounds float* %tmp13949, i64 1
- %tmp13951 = getelementptr inbounds float* %tmp13950, i64 1
- %tmp13952 = getelementptr inbounds float* %tmp13951, i64 1
- %tmp13953 = getelementptr inbounds float* %tmp13952, i64 1
- %tmp13954 = getelementptr inbounds float* %tmp13953, i64 1
- %tmp13955 = getelementptr inbounds float* %tmp13954, i64 1
- %tmp13956 = getelementptr inbounds float* %tmp13955, i64 1
- %tmp13957 = getelementptr inbounds float* %tmp13956, i64 1
- %tmp13958 = getelementptr inbounds float* %tmp13957, i64 1
- %tmp13959 = getelementptr inbounds float* %tmp13958, i64 1
- %tmp13960 = getelementptr inbounds float* %tmp13959, i64 1
- %tmp13961 = getelementptr inbounds float* %tmp13960, i64 1
- %tmp13962 = getelementptr inbounds float* %tmp13961, i64 1
- %tmp13963 = getelementptr inbounds float* %tmp13962, i64 1
- %tmp13964 = getelementptr inbounds float* %tmp13963, i64 1
- %tmp13965 = getelementptr inbounds float* %tmp13964, i64 1
- %tmp13966 = getelementptr inbounds float* %tmp13965, i64 1
- %tmp13967 = getelementptr inbounds float* %tmp13966, i64 1
- %tmp13968 = getelementptr inbounds float* %tmp13967, i64 1
- %tmp13969 = getelementptr inbounds float* %tmp13968, i64 1
- %tmp13970 = getelementptr inbounds float* %tmp13969, i64 1
- %tmp13971 = getelementptr inbounds float* %tmp13970, i64 1
- %tmp13972 = getelementptr inbounds float* %tmp13971, i64 1
- %tmp13973 = getelementptr inbounds float* %tmp13972, i64 1
- %tmp13974 = getelementptr inbounds float* %tmp13973, i64 1
- %tmp13975 = getelementptr inbounds float* %tmp13974, i64 1
- %tmp13976 = getelementptr inbounds float* %tmp13975, i64 1
- %tmp13977 = getelementptr inbounds float* %tmp13976, i64 1
- %tmp13978 = getelementptr inbounds float* %tmp13977, i64 1
- %tmp13979 = getelementptr inbounds float* %tmp13978, i64 1
- %tmp13980 = getelementptr inbounds float* %tmp13979, i64 1
- %tmp13981 = getelementptr inbounds float* %tmp13980, i64 1
- %tmp13982 = getelementptr inbounds float* %tmp13981, i64 1
- %tmp13983 = getelementptr inbounds float* %tmp13982, i64 1
- %tmp13984 = getelementptr inbounds float* %tmp13983, i64 1
- %tmp13985 = getelementptr inbounds float* %tmp13984, i64 1
- %tmp13986 = getelementptr inbounds float* %tmp13985, i64 1
- %tmp13987 = getelementptr inbounds float* %tmp13986, i64 1
- %tmp13988 = getelementptr inbounds float* %tmp13987, i64 1
- %tmp13989 = getelementptr inbounds float* %tmp13988, i64 1
- %tmp13990 = getelementptr inbounds float* %tmp13989, i64 1
- %tmp13991 = getelementptr inbounds float* %tmp13990, i64 1
- %tmp13992 = getelementptr inbounds float* %tmp13991, i64 1
- %tmp13993 = getelementptr inbounds float* %tmp13992, i64 1
- %tmp13994 = getelementptr inbounds float* %tmp13993, i64 1
- %tmp13995 = getelementptr inbounds float* %tmp13994, i64 1
- %tmp13996 = getelementptr inbounds float* %tmp13995, i64 1
- %tmp13997 = getelementptr inbounds float* %tmp13996, i64 1
- %tmp13998 = getelementptr inbounds float* %tmp13997, i64 1
- %tmp13999 = getelementptr inbounds float* %tmp13998, i64 1
- %tmp14000 = getelementptr inbounds float* %tmp13999, i64 1
- %tmp14001 = getelementptr inbounds float* %tmp14000, i64 1
- %tmp14002 = getelementptr inbounds float* %tmp14001, i64 1
- %tmp14003 = getelementptr inbounds float* %tmp14002, i64 1
- %tmp14004 = getelementptr inbounds float* %tmp14003, i64 1
- %tmp14005 = getelementptr inbounds float* %tmp14004, i64 1
- %tmp14006 = getelementptr inbounds float* %tmp14005, i64 1
- %tmp14007 = getelementptr inbounds float* %tmp14006, i64 1
- %tmp14008 = getelementptr inbounds float* %tmp14007, i64 1
- %tmp14009 = getelementptr inbounds float* %tmp14008, i64 1
- %tmp14010 = getelementptr inbounds float* %tmp14009, i64 1
- %tmp14011 = getelementptr inbounds float* %tmp14010, i64 1
- %tmp14012 = getelementptr inbounds float* %tmp14011, i64 1
- %tmp14013 = getelementptr inbounds float* %tmp14012, i64 1
- %tmp14014 = getelementptr inbounds float* %tmp14013, i64 1
- %tmp14015 = getelementptr inbounds float* %tmp14014, i64 1
- %tmp14016 = getelementptr inbounds float* %tmp14015, i64 1
- %tmp14017 = getelementptr inbounds float* %tmp14016, i64 1
- %tmp14018 = getelementptr inbounds float* %tmp14017, i64 1
- %tmp14019 = getelementptr inbounds float* %tmp14018, i64 1
- %tmp14020 = getelementptr inbounds float* %tmp14019, i64 1
- %tmp14021 = getelementptr inbounds float* %tmp14020, i64 1
- %tmp14022 = getelementptr inbounds float* %tmp14021, i64 1
- %tmp14023 = getelementptr inbounds float* %tmp14022, i64 1
- %tmp14024 = getelementptr inbounds float* %tmp14023, i64 1
- %tmp14025 = getelementptr inbounds float* %tmp14024, i64 1
- %tmp14026 = getelementptr inbounds float* %tmp14025, i64 1
- %tmp14027 = getelementptr inbounds float* %tmp14026, i64 1
- %tmp14028 = getelementptr inbounds float* %tmp14027, i64 1
- %tmp14029 = getelementptr inbounds float* %tmp14028, i64 1
- %tmp14030 = getelementptr inbounds float* %tmp14029, i64 1
- %tmp14031 = getelementptr inbounds float* %tmp14030, i64 1
- %tmp14032 = getelementptr inbounds float* %tmp14031, i64 1
- %tmp14033 = getelementptr inbounds float* %tmp14032, i64 1
- %tmp14034 = getelementptr inbounds float* %tmp14033, i64 1
- %tmp14035 = getelementptr inbounds float* %tmp14034, i64 1
- %tmp14036 = getelementptr inbounds float* %tmp14035, i64 1
- %tmp14037 = getelementptr inbounds float* %tmp14036, i64 1
- %tmp14038 = getelementptr inbounds float* %tmp14037, i64 1
- %tmp14039 = getelementptr inbounds float* %tmp14038, i64 1
- %tmp14040 = getelementptr inbounds float* %tmp14039, i64 1
- %tmp14041 = getelementptr inbounds float* %tmp14040, i64 1
- %tmp14042 = getelementptr inbounds float* %tmp14041, i64 1
- %tmp14043 = getelementptr inbounds float* %tmp14042, i64 1
- %tmp14044 = getelementptr inbounds float* %tmp14043, i64 1
- %tmp14045 = getelementptr inbounds float* %tmp14044, i64 1
- %tmp14046 = getelementptr inbounds float* %tmp14045, i64 1
- %tmp14047 = getelementptr inbounds float* %tmp14046, i64 1
- %tmp14048 = getelementptr inbounds float* %tmp14047, i64 1
- %tmp14049 = getelementptr inbounds float* %tmp14048, i64 1
- %tmp14050 = getelementptr inbounds float* %tmp14049, i64 1
- %tmp14051 = getelementptr inbounds float* %tmp14050, i64 1
- %tmp14052 = getelementptr inbounds float* %tmp14051, i64 1
- %tmp14053 = getelementptr inbounds float* %tmp14052, i64 1
- %tmp14054 = getelementptr inbounds float* %tmp14053, i64 1
- %tmp14055 = getelementptr inbounds float* %tmp14054, i64 1
- %tmp14056 = getelementptr inbounds float* %tmp14055, i64 1
- %tmp14057 = getelementptr inbounds float* %tmp14056, i64 1
- %tmp14058 = getelementptr inbounds float* %tmp14057, i64 1
- %tmp14059 = getelementptr inbounds float* %tmp14058, i64 1
- %tmp14060 = getelementptr inbounds float* %tmp14059, i64 1
- %tmp14061 = getelementptr inbounds float* %tmp14060, i64 1
- %tmp14062 = getelementptr inbounds float* %tmp14061, i64 1
- %tmp14063 = getelementptr inbounds float* %tmp14062, i64 1
- %tmp14064 = getelementptr inbounds float* %tmp14063, i64 1
- %tmp14065 = getelementptr inbounds float* %tmp14064, i64 1
- %tmp14066 = getelementptr inbounds float* %tmp14065, i64 1
- %tmp14067 = getelementptr inbounds float* %tmp14066, i64 1
- %tmp14068 = getelementptr inbounds float* %tmp14067, i64 1
- %tmp14069 = getelementptr inbounds float* %tmp14068, i64 1
- %tmp14070 = getelementptr inbounds float* %tmp14069, i64 1
- %tmp14071 = getelementptr inbounds float* %tmp14070, i64 1
- %tmp14072 = getelementptr inbounds float* %tmp14071, i64 1
- %tmp14073 = getelementptr inbounds float* %tmp14072, i64 1
- %tmp14074 = getelementptr inbounds float* %tmp14073, i64 1
- %tmp14075 = getelementptr inbounds float* %tmp14074, i64 1
- %tmp14076 = getelementptr inbounds float* %tmp14075, i64 1
- %tmp14077 = getelementptr inbounds float* %tmp14076, i64 1
- %tmp14078 = getelementptr inbounds float* %tmp14077, i64 1
- %tmp14079 = getelementptr inbounds float* %tmp14078, i64 1
- %tmp14080 = getelementptr inbounds float* %tmp14079, i64 1
- %tmp14081 = getelementptr inbounds float* %tmp14080, i64 1
- %tmp14082 = getelementptr inbounds float* %tmp14081, i64 1
- %tmp14083 = getelementptr inbounds float* %tmp14082, i64 1
- %tmp14084 = getelementptr inbounds float* %tmp14083, i64 1
- %tmp14085 = getelementptr inbounds float* %tmp14084, i64 1
- %tmp14086 = getelementptr inbounds float* %tmp14085, i64 1
- %tmp14087 = getelementptr inbounds float* %tmp14086, i64 1
- %tmp14088 = getelementptr inbounds float* %tmp14087, i64 1
- %tmp14089 = getelementptr inbounds float* %tmp14088, i64 1
- %tmp14090 = getelementptr inbounds float* %tmp14089, i64 1
- %tmp14091 = getelementptr inbounds float* %tmp14090, i64 1
- %tmp14092 = getelementptr inbounds float* %tmp14091, i64 1
- %tmp14093 = getelementptr inbounds float* %tmp14092, i64 1
- %tmp14094 = getelementptr inbounds float* %tmp14093, i64 1
- %tmp14095 = getelementptr inbounds float* %tmp14094, i64 1
- %tmp14096 = getelementptr inbounds float* %tmp14095, i64 1
- %tmp14097 = getelementptr inbounds float* %tmp14096, i64 1
- %tmp14098 = getelementptr inbounds float* %tmp14097, i64 1
- %tmp14099 = getelementptr inbounds float* %tmp14098, i64 1
- %tmp14100 = getelementptr inbounds float* %tmp14099, i64 1
- %tmp14101 = getelementptr inbounds float* %tmp14100, i64 1
- %tmp14102 = getelementptr inbounds float* %tmp14101, i64 1
- %tmp14103 = getelementptr inbounds float* %tmp14102, i64 1
- %tmp14104 = getelementptr inbounds float* %tmp14103, i64 1
- %tmp14105 = getelementptr inbounds float* %tmp14104, i64 1
- %tmp14106 = getelementptr inbounds float* %tmp14105, i64 1
- %tmp14107 = getelementptr inbounds float* %tmp14106, i64 1
- %tmp14108 = getelementptr inbounds float* %tmp14107, i64 1
- %tmp14109 = getelementptr inbounds float* %tmp14108, i64 1
- %tmp14110 = getelementptr inbounds float* %tmp14109, i64 1
- %tmp14111 = getelementptr inbounds float* %tmp14110, i64 1
- %tmp14112 = getelementptr inbounds float* %tmp14111, i64 1
- %tmp14113 = getelementptr inbounds float* %tmp14112, i64 1
- %tmp14114 = getelementptr inbounds float* %tmp14113, i64 1
- %tmp14115 = getelementptr inbounds float* %tmp14114, i64 1
- %tmp14116 = getelementptr inbounds float* %tmp14115, i64 1
- %tmp14117 = getelementptr inbounds float* %tmp14116, i64 1
- %tmp14118 = getelementptr inbounds float* %tmp14117, i64 1
- %tmp14119 = getelementptr inbounds float* %tmp14118, i64 1
- %tmp14120 = getelementptr inbounds float* %tmp14119, i64 1
- %tmp14121 = getelementptr inbounds float* %tmp14120, i64 1
- %tmp14122 = getelementptr inbounds float* %tmp14121, i64 1
- %tmp14123 = getelementptr inbounds float* %tmp14122, i64 1
- %tmp14124 = getelementptr inbounds float* %tmp14123, i64 1
- %tmp14125 = getelementptr inbounds float* %tmp14124, i64 1
- %tmp14126 = getelementptr inbounds float* %tmp14125, i64 1
- %tmp14127 = getelementptr inbounds float* %tmp14126, i64 1
- %tmp14128 = getelementptr inbounds float* %tmp14127, i64 1
- %tmp14129 = getelementptr inbounds float* %tmp14128, i64 1
- %tmp14130 = getelementptr inbounds float* %tmp14129, i64 1
- %tmp14131 = getelementptr inbounds float* %tmp14130, i64 1
- %tmp14132 = getelementptr inbounds float* %tmp14131, i64 1
- %tmp14133 = getelementptr inbounds float* %tmp14132, i64 1
- %tmp14134 = getelementptr inbounds float* %tmp14133, i64 1
- %tmp14135 = getelementptr inbounds float* %tmp14134, i64 1
- %tmp14136 = getelementptr inbounds float* %tmp14135, i64 1
- %tmp14137 = getelementptr inbounds float* %tmp14136, i64 1
- %tmp14138 = getelementptr inbounds float* %tmp14137, i64 1
- %tmp14139 = getelementptr inbounds float* %tmp14138, i64 1
- %tmp14140 = getelementptr inbounds float* %tmp14139, i64 1
- %tmp14141 = getelementptr inbounds float* %tmp14140, i64 1
- %tmp14142 = getelementptr inbounds float* %tmp14141, i64 1
- %tmp14143 = getelementptr inbounds float* %tmp14142, i64 1
- %tmp14144 = getelementptr inbounds float* %tmp14143, i64 1
- %tmp14145 = getelementptr inbounds float* %tmp14144, i64 1
- %tmp14146 = getelementptr inbounds float* %tmp14145, i64 1
- %tmp14147 = getelementptr inbounds float* %tmp14146, i64 1
- %tmp14148 = getelementptr inbounds float* %tmp14147, i64 1
- %tmp14149 = getelementptr inbounds float* %tmp14148, i64 1
- %tmp14150 = getelementptr inbounds float* %tmp14149, i64 1
- %tmp14151 = getelementptr inbounds float* %tmp14150, i64 1
- %tmp14152 = getelementptr inbounds float* %tmp14151, i64 1
- %tmp14153 = getelementptr inbounds float* %tmp14152, i64 1
- %tmp14154 = getelementptr inbounds float* %tmp14153, i64 1
- %tmp14155 = getelementptr inbounds float* %tmp14154, i64 1
- %tmp14156 = getelementptr inbounds float* %tmp14155, i64 1
- %tmp14157 = getelementptr inbounds float* %tmp14156, i64 1
- %tmp14158 = getelementptr inbounds float* %tmp14157, i64 1
- %tmp14159 = getelementptr inbounds float* %tmp14158, i64 1
- %tmp14160 = getelementptr inbounds float* %tmp14159, i64 1
- %tmp14161 = getelementptr inbounds float* %tmp14160, i64 1
- %tmp14162 = getelementptr inbounds float* %tmp14161, i64 1
- %tmp14163 = getelementptr inbounds float* %tmp14162, i64 1
- %tmp14164 = getelementptr inbounds float* %tmp14163, i64 1
- %tmp14165 = getelementptr inbounds float* %tmp14164, i64 1
- %tmp14166 = getelementptr inbounds float* %tmp14165, i64 1
- %tmp14167 = getelementptr inbounds float* %tmp14166, i64 1
- %tmp14168 = getelementptr inbounds float* %tmp14167, i64 1
- %tmp14169 = getelementptr inbounds float* %tmp14168, i64 1
- %tmp14170 = getelementptr inbounds float* %tmp14169, i64 1
- %tmp14171 = getelementptr inbounds float* %tmp14170, i64 1
- %tmp14172 = getelementptr inbounds float* %tmp14171, i64 1
- %tmp14173 = getelementptr inbounds float* %tmp14172, i64 1
- %tmp14174 = getelementptr inbounds float* %tmp14173, i64 1
- %tmp14175 = getelementptr inbounds float* %tmp14174, i64 1
- %tmp14176 = getelementptr inbounds float* %tmp14175, i64 1
- %tmp14177 = getelementptr inbounds float* %tmp14176, i64 1
- %tmp14178 = getelementptr inbounds float* %tmp14177, i64 1
- %tmp14179 = getelementptr inbounds float* %tmp14178, i64 1
- %tmp14180 = getelementptr inbounds float* %tmp14179, i64 1
- %tmp14181 = getelementptr inbounds float* %tmp14180, i64 1
- %tmp14182 = getelementptr inbounds float* %tmp14181, i64 1
- %tmp14183 = getelementptr inbounds float* %tmp14182, i64 1
- %tmp14184 = getelementptr inbounds float* %tmp14183, i64 1
- %tmp14185 = getelementptr inbounds float* %tmp14184, i64 1
- %tmp14186 = getelementptr inbounds float* %tmp14185, i64 1
- %tmp14187 = getelementptr inbounds float* %tmp14186, i64 1
- %tmp14188 = getelementptr inbounds float* %tmp14187, i64 1
- %tmp14189 = getelementptr inbounds float* %tmp14188, i64 1
- %tmp14190 = getelementptr inbounds float* %tmp14189, i64 1
- %tmp14191 = getelementptr inbounds float* %tmp14190, i64 1
- %tmp14192 = getelementptr inbounds float* %tmp14191, i64 1
- %tmp14193 = getelementptr inbounds float* %tmp14192, i64 1
- %tmp14194 = getelementptr inbounds float* %tmp14193, i64 1
- %tmp14195 = getelementptr inbounds float* %tmp14194, i64 1
- %tmp14196 = getelementptr inbounds float* %tmp14195, i64 1
- %tmp14197 = getelementptr inbounds float* %tmp14196, i64 1
- %tmp14198 = getelementptr inbounds float* %tmp14197, i64 1
- %tmp14199 = getelementptr inbounds float* %tmp14198, i64 1
- %tmp14200 = getelementptr inbounds float* %tmp14199, i64 1
- %tmp14201 = getelementptr inbounds float* %tmp14200, i64 1
- %tmp14202 = getelementptr inbounds float* %tmp14201, i64 1
- %tmp14203 = getelementptr inbounds float* %tmp14202, i64 1
- %tmp14204 = getelementptr inbounds float* %tmp14203, i64 1
- %tmp14205 = getelementptr inbounds float* %tmp14204, i64 1
- %tmp14206 = getelementptr inbounds float* %tmp14205, i64 1
- %tmp14207 = getelementptr inbounds float* %tmp14206, i64 1
- %tmp14208 = getelementptr inbounds float* %tmp14207, i64 1
- %tmp14209 = getelementptr inbounds float* %tmp14208, i64 1
- %tmp14210 = getelementptr inbounds float* %tmp14209, i64 1
- %tmp14211 = getelementptr inbounds float* %tmp14210, i64 1
- %tmp14212 = getelementptr inbounds float* %tmp14211, i64 1
- %tmp14213 = getelementptr inbounds float* %tmp14212, i64 1
- %tmp14214 = getelementptr inbounds float* %tmp14213, i64 1
- %tmp14215 = getelementptr inbounds float* %tmp14214, i64 1
- %tmp14216 = getelementptr inbounds float* %tmp14215, i64 1
- %tmp14217 = getelementptr inbounds float* %tmp14216, i64 1
- %tmp14218 = getelementptr inbounds float* %tmp14217, i64 1
- %tmp14219 = getelementptr inbounds float* %tmp14218, i64 1
- %tmp14220 = getelementptr inbounds float* %tmp14219, i64 1
- %tmp14221 = getelementptr inbounds float* %tmp14220, i64 1
- %tmp14222 = getelementptr inbounds float* %tmp14221, i64 1
- %tmp14223 = getelementptr inbounds float* %tmp14222, i64 1
- %tmp14224 = getelementptr inbounds float* %tmp14223, i64 1
- %tmp14225 = getelementptr inbounds float* %tmp14224, i64 1
- %tmp14226 = getelementptr inbounds float* %tmp14225, i64 1
- %tmp14227 = getelementptr inbounds float* %tmp14226, i64 1
- %tmp14228 = getelementptr inbounds float* %tmp14227, i64 1
- %tmp14229 = getelementptr inbounds float* %tmp14228, i64 1
- %tmp14230 = getelementptr inbounds float* %tmp14229, i64 1
- %tmp14231 = getelementptr inbounds float* %tmp14230, i64 1
- %tmp14232 = getelementptr inbounds float* %tmp14231, i64 1
- %tmp14233 = getelementptr inbounds float* %tmp14232, i64 1
- %tmp14234 = getelementptr inbounds float* %tmp14233, i64 1
- %tmp14235 = getelementptr inbounds float* %tmp14234, i64 1
- %tmp14236 = getelementptr inbounds float* %tmp14235, i64 1
- %tmp14237 = getelementptr inbounds float* %tmp14236, i64 1
- %tmp14238 = getelementptr inbounds float* %tmp14237, i64 1
- %tmp14239 = getelementptr inbounds float* %tmp14238, i64 1
- %tmp14240 = getelementptr inbounds float* %tmp14239, i64 1
- %tmp14241 = getelementptr inbounds float* %tmp14240, i64 1
- %tmp14242 = getelementptr inbounds float* %tmp14241, i64 1
- %tmp14243 = getelementptr inbounds float* %tmp14242, i64 1
- %tmp14244 = getelementptr inbounds float* %tmp14243, i64 1
- %tmp14245 = getelementptr inbounds float* %tmp14244, i64 1
- %tmp14246 = getelementptr inbounds float* %tmp14245, i64 1
- %tmp14247 = getelementptr inbounds float* %tmp14246, i64 1
- %tmp14248 = getelementptr inbounds float* %tmp14247, i64 1
- %tmp14249 = getelementptr inbounds float* %tmp14248, i64 1
- %tmp14250 = getelementptr inbounds float* %tmp14249, i64 1
- %tmp14251 = getelementptr inbounds float* %tmp14250, i64 1
- %tmp14252 = getelementptr inbounds float* %tmp14251, i64 1
- %tmp14253 = getelementptr inbounds float* %tmp14252, i64 1
- %tmp14254 = getelementptr inbounds float* %tmp14253, i64 1
- %tmp14255 = getelementptr inbounds float* %tmp14254, i64 1
- %tmp14256 = getelementptr inbounds float* %tmp14255, i64 1
- %tmp14257 = getelementptr inbounds float* %tmp14256, i64 1
- %tmp14258 = getelementptr inbounds float* %tmp14257, i64 1
- %tmp14259 = getelementptr inbounds float* %tmp14258, i64 1
- %tmp14260 = getelementptr inbounds float* %tmp14259, i64 1
- %tmp14261 = getelementptr inbounds float* %tmp14260, i64 1
- %tmp14262 = getelementptr inbounds float* %tmp14261, i64 1
- %tmp14263 = getelementptr inbounds float* %tmp14262, i64 1
- %tmp14264 = getelementptr inbounds float* %tmp14263, i64 1
- %tmp14265 = getelementptr inbounds float* %tmp14264, i64 1
- %tmp14266 = getelementptr inbounds float* %tmp14265, i64 1
- %tmp14267 = getelementptr inbounds float* %tmp14266, i64 1
- %tmp14268 = getelementptr inbounds float* %tmp14267, i64 1
- %tmp14269 = getelementptr inbounds float* %tmp14268, i64 1
- %tmp14270 = getelementptr inbounds float* %tmp14269, i64 1
- %tmp14271 = getelementptr inbounds float* %tmp14270, i64 1
- %tmp14272 = getelementptr inbounds float* %tmp14271, i64 1
- %tmp14273 = getelementptr inbounds float* %tmp14272, i64 1
- %tmp14274 = getelementptr inbounds float* %tmp14273, i64 1
- %tmp14275 = getelementptr inbounds float* %tmp14274, i64 1
- %tmp14276 = getelementptr inbounds float* %tmp14275, i64 1
- %tmp14277 = getelementptr inbounds float* %tmp14276, i64 1
- %tmp14278 = getelementptr inbounds float* %tmp14277, i64 1
- %tmp14279 = getelementptr inbounds float* %tmp14278, i64 1
- %tmp14280 = getelementptr inbounds float* %tmp14279, i64 1
- %tmp14281 = getelementptr inbounds float* %tmp14280, i64 1
- %tmp14282 = getelementptr inbounds float* %tmp14281, i64 1
- %tmp14283 = getelementptr inbounds float* %tmp14282, i64 1
- %tmp14284 = getelementptr inbounds float* %tmp14283, i64 1
- %tmp14285 = getelementptr inbounds float* %tmp14284, i64 1
- %tmp14286 = getelementptr inbounds float* %tmp14285, i64 1
- %tmp14287 = getelementptr inbounds float* %tmp14286, i64 1
- %tmp14288 = getelementptr inbounds float* %tmp14287, i64 1
- %tmp14289 = getelementptr inbounds float* %tmp14288, i64 1
- %tmp14290 = getelementptr inbounds float* %tmp14289, i64 1
- %tmp14291 = getelementptr inbounds float* %tmp14290, i64 1
- %tmp14292 = getelementptr inbounds float* %tmp14291, i64 1
- %tmp14293 = getelementptr inbounds float* %tmp14292, i64 1
- %tmp14294 = getelementptr inbounds float* %tmp14293, i64 1
- %tmp14295 = getelementptr inbounds float* %tmp14294, i64 1
- %tmp14296 = getelementptr inbounds float* %tmp14295, i64 1
- %tmp14297 = getelementptr inbounds float* %tmp14296, i64 1
- %tmp14298 = getelementptr inbounds float* %tmp14297, i64 1
- %tmp14299 = getelementptr inbounds float* %tmp14298, i64 1
- %tmp14300 = getelementptr inbounds float* %tmp14299, i64 1
- %tmp14301 = getelementptr inbounds float* %tmp14300, i64 1
- %tmp14302 = getelementptr inbounds float* %tmp14301, i64 1
- %tmp14303 = getelementptr inbounds float* %tmp14302, i64 1
- %tmp14304 = getelementptr inbounds float* %tmp14303, i64 1
- %tmp14305 = getelementptr inbounds float* %tmp14304, i64 1
- %tmp14306 = getelementptr inbounds float* %tmp14305, i64 1
- %tmp14307 = getelementptr inbounds float* %tmp14306, i64 1
- %tmp14308 = getelementptr inbounds float* %tmp14307, i64 1
- %tmp14309 = getelementptr inbounds float* %tmp14308, i64 1
- %tmp14310 = getelementptr inbounds float* %tmp14309, i64 1
- %tmp14311 = getelementptr inbounds float* %tmp14310, i64 1
- %tmp14312 = getelementptr inbounds float* %tmp14311, i64 1
- %tmp14313 = getelementptr inbounds float* %tmp14312, i64 1
- %tmp14314 = getelementptr inbounds float* %tmp14313, i64 1
- %tmp14315 = getelementptr inbounds float* %tmp14314, i64 1
- %tmp14316 = getelementptr inbounds float* %tmp14315, i64 1
- %tmp14317 = getelementptr inbounds float* %tmp14316, i64 1
- %tmp14318 = getelementptr inbounds float* %tmp14317, i64 1
- %tmp14319 = getelementptr inbounds float* %tmp14318, i64 1
- %tmp14320 = getelementptr inbounds float* %tmp14319, i64 1
- %tmp14321 = getelementptr inbounds float* %tmp14320, i64 1
- %tmp14322 = getelementptr inbounds float* %tmp14321, i64 1
- %tmp14323 = getelementptr inbounds float* %tmp14322, i64 1
- %tmp14324 = getelementptr inbounds float* %tmp14323, i64 1
- %tmp14325 = getelementptr inbounds float* %tmp14324, i64 1
- %tmp14326 = getelementptr inbounds float* %tmp14325, i64 1
- %tmp14327 = getelementptr inbounds float* %tmp14326, i64 1
- %tmp14328 = getelementptr inbounds float* %tmp14327, i64 1
- %tmp14329 = getelementptr inbounds float* %tmp14328, i64 1
- %tmp14330 = getelementptr inbounds float* %tmp14329, i64 1
- %tmp14331 = getelementptr inbounds float* %tmp14330, i64 1
- %tmp14332 = getelementptr inbounds float* %tmp14331, i64 1
- %tmp14333 = getelementptr inbounds float* %tmp14332, i64 1
- %tmp14334 = getelementptr inbounds float* %tmp14333, i64 1
- %tmp14335 = getelementptr inbounds float* %tmp14334, i64 1
- %tmp14336 = getelementptr inbounds float* %tmp14335, i64 1
- %tmp14337 = getelementptr inbounds float* %tmp14336, i64 1
- %tmp14338 = getelementptr inbounds float* %tmp14337, i64 1
- %tmp14339 = getelementptr inbounds float* %tmp14338, i64 1
- %tmp14340 = getelementptr inbounds float* %tmp14339, i64 1
- %tmp14341 = getelementptr inbounds float* %tmp14340, i64 1
- %tmp14342 = getelementptr inbounds float* %tmp14341, i64 1
- %tmp14343 = getelementptr inbounds float* %tmp14342, i64 1
- %tmp14344 = getelementptr inbounds float* %tmp14343, i64 1
- %tmp14345 = getelementptr inbounds float* %tmp14344, i64 1
- %tmp14346 = getelementptr inbounds float* %tmp14345, i64 1
- %tmp14347 = getelementptr inbounds float* %tmp14346, i64 1
- %tmp14348 = getelementptr inbounds float* %tmp14347, i64 1
- %tmp14349 = getelementptr inbounds float* %tmp14348, i64 1
- %tmp14350 = getelementptr inbounds float* %tmp14349, i64 1
- %tmp14351 = getelementptr inbounds float* %tmp14350, i64 1
- %tmp14352 = getelementptr inbounds float* %tmp14351, i64 1
- %tmp14353 = getelementptr inbounds float* %tmp14352, i64 1
- %tmp14354 = getelementptr inbounds float* %tmp14353, i64 1
- %tmp14355 = getelementptr inbounds float* %tmp14354, i64 1
- %tmp14356 = getelementptr inbounds float* %tmp14355, i64 1
- %tmp14357 = getelementptr inbounds float* %tmp14356, i64 1
- %tmp14358 = getelementptr inbounds float* %tmp14357, i64 1
- %tmp14359 = getelementptr inbounds float* %tmp14358, i64 1
- %tmp14360 = getelementptr inbounds float* %tmp14359, i64 1
- %tmp14361 = getelementptr inbounds float* %tmp14360, i64 1
- %tmp14362 = getelementptr inbounds float* %tmp14361, i64 1
- %tmp14363 = getelementptr inbounds float* %tmp14362, i64 1
- %tmp14364 = getelementptr inbounds float* %tmp14363, i64 1
- %tmp14365 = getelementptr inbounds float* %tmp14364, i64 1
- %tmp14366 = getelementptr inbounds float* %tmp14365, i64 1
- %tmp14367 = getelementptr inbounds float* %tmp14366, i64 1
- %tmp14368 = getelementptr inbounds float* %tmp14367, i64 1
- %tmp14369 = getelementptr inbounds float* %tmp14368, i64 1
- %tmp14370 = getelementptr inbounds float* %tmp14369, i64 1
- %tmp14371 = getelementptr inbounds float* %tmp14370, i64 1
- %tmp14372 = getelementptr inbounds float* %tmp14371, i64 1
- %tmp14373 = getelementptr inbounds float* %tmp14372, i64 1
- %tmp14374 = getelementptr inbounds float* %tmp14373, i64 1
- %tmp14375 = getelementptr inbounds float* %tmp14374, i64 1
- %tmp14376 = getelementptr inbounds float* %tmp14375, i64 1
- %tmp14377 = getelementptr inbounds float* %tmp14376, i64 1
- %tmp14378 = getelementptr inbounds float* %tmp14377, i64 1
- %tmp14379 = getelementptr inbounds float* %tmp14378, i64 1
- %tmp14380 = getelementptr inbounds float* %tmp14379, i64 1
- %tmp14381 = getelementptr inbounds float* %tmp14380, i64 1
- %tmp14382 = getelementptr inbounds float* %tmp14381, i64 1
- %tmp14383 = getelementptr inbounds float* %tmp14382, i64 1
- %tmp14384 = getelementptr inbounds float* %tmp14383, i64 1
- %tmp14385 = getelementptr inbounds float* %tmp14384, i64 1
- %tmp14386 = getelementptr inbounds float* %tmp14385, i64 1
- %tmp14387 = getelementptr inbounds float* %tmp14386, i64 1
- %tmp14388 = getelementptr inbounds float* %tmp14387, i64 1
- %tmp14389 = getelementptr inbounds float* %tmp14388, i64 1
- %tmp14390 = getelementptr inbounds float* %tmp14389, i64 1
- %tmp14391 = getelementptr inbounds float* %tmp14390, i64 1
- %tmp14392 = getelementptr inbounds float* %tmp14391, i64 1
- %tmp14393 = getelementptr inbounds float* %tmp14392, i64 1
- %tmp14394 = getelementptr inbounds float* %tmp14393, i64 1
- %tmp14395 = getelementptr inbounds float* %tmp14394, i64 1
- %tmp14396 = getelementptr inbounds float* %tmp14395, i64 1
- %tmp14397 = getelementptr inbounds float* %tmp14396, i64 1
- %tmp14398 = getelementptr inbounds float* %tmp14397, i64 1
- %tmp14399 = getelementptr inbounds float* %tmp14398, i64 1
- %tmp14400 = getelementptr inbounds float* %tmp14399, i64 1
- %tmp14401 = getelementptr inbounds float* %tmp14400, i64 1
- %tmp14402 = getelementptr inbounds float* %tmp14401, i64 1
- %tmp14403 = getelementptr inbounds float* %tmp14402, i64 1
- %tmp14404 = getelementptr inbounds float* %tmp14403, i64 1
- %tmp14405 = getelementptr inbounds float* %tmp14404, i64 1
- %tmp14406 = getelementptr inbounds float* %tmp14405, i64 1
- %tmp14407 = getelementptr inbounds float* %tmp14406, i64 1
- %tmp14408 = getelementptr inbounds float* %tmp14407, i64 1
- %tmp14409 = getelementptr inbounds float* %tmp14408, i64 1
- %tmp14410 = getelementptr inbounds float* %tmp14409, i64 1
- %tmp14411 = getelementptr inbounds float* %tmp14410, i64 1
- %tmp14412 = getelementptr inbounds float* %tmp14411, i64 1
- %tmp14413 = getelementptr inbounds float* %tmp14412, i64 1
- %tmp14414 = getelementptr inbounds float* %tmp14413, i64 1
- %tmp14415 = getelementptr inbounds float* %tmp14414, i64 1
- %tmp14416 = getelementptr inbounds float* %tmp14415, i64 1
- %tmp14417 = getelementptr inbounds float* %tmp14416, i64 1
- %tmp14418 = getelementptr inbounds float* %tmp14417, i64 1
- %tmp14419 = getelementptr inbounds float* %tmp14418, i64 1
- %tmp14420 = getelementptr inbounds float* %tmp14419, i64 1
- %tmp14421 = getelementptr inbounds float* %tmp14420, i64 1
- %tmp14422 = getelementptr inbounds float* %tmp14421, i64 1
- %tmp14423 = getelementptr inbounds float* %tmp14422, i64 1
- %tmp14424 = getelementptr inbounds float* %tmp14423, i64 1
- %tmp14425 = getelementptr inbounds float* %tmp14424, i64 1
- %tmp14426 = getelementptr inbounds float* %tmp14425, i64 1
- %tmp14427 = getelementptr inbounds float* %tmp14426, i64 1
- %tmp14428 = getelementptr inbounds float* %tmp14427, i64 1
- %tmp14429 = getelementptr inbounds float* %tmp14428, i64 1
- %tmp14430 = getelementptr inbounds float* %tmp14429, i64 1
- %tmp14431 = getelementptr inbounds float* %tmp14430, i64 1
- %tmp14432 = getelementptr inbounds float* %tmp14431, i64 1
- %tmp14433 = getelementptr inbounds float* %tmp14432, i64 1
- %tmp14434 = getelementptr inbounds float* %tmp14433, i64 1
- %tmp14435 = getelementptr inbounds float* %tmp14434, i64 1
- %tmp14436 = getelementptr inbounds float* %tmp14435, i64 1
- %tmp14437 = getelementptr inbounds float* %tmp14436, i64 1
- %tmp14438 = getelementptr inbounds float* %tmp14437, i64 1
- %tmp14439 = getelementptr inbounds float* %tmp14438, i64 1
- %tmp14440 = getelementptr inbounds float* %tmp14439, i64 1
- %tmp14441 = getelementptr inbounds float* %tmp14440, i64 1
- %tmp14442 = getelementptr inbounds float* %tmp14441, i64 1
- %tmp14443 = getelementptr inbounds float* %tmp14442, i64 1
- %tmp14444 = getelementptr inbounds float* %tmp14443, i64 1
- %tmp14445 = getelementptr inbounds float* %tmp14444, i64 1
- %tmp14446 = getelementptr inbounds float* %tmp14445, i64 1
- %tmp14447 = getelementptr inbounds float* %tmp14446, i64 1
- %tmp14448 = getelementptr inbounds float* %tmp14447, i64 1
- %tmp14449 = getelementptr inbounds float* %tmp14448, i64 1
- %tmp14450 = getelementptr inbounds float* %tmp14449, i64 1
- %tmp14451 = getelementptr inbounds float* %tmp14450, i64 1
- %tmp14452 = getelementptr inbounds float* %tmp14451, i64 1
- %tmp14453 = getelementptr inbounds float* %tmp14452, i64 1
- %tmp14454 = getelementptr inbounds float* %tmp14453, i64 1
- %tmp14455 = getelementptr inbounds float* %tmp14454, i64 1
- %tmp14456 = getelementptr inbounds float* %tmp14455, i64 1
- %tmp14457 = getelementptr inbounds float* %tmp14456, i64 1
- %tmp14458 = getelementptr inbounds float* %tmp14457, i64 1
- %tmp14459 = getelementptr inbounds float* %tmp14458, i64 1
- %tmp14460 = getelementptr inbounds float* %tmp14459, i64 1
- %tmp14461 = getelementptr inbounds float* %tmp14460, i64 1
- %tmp14462 = getelementptr inbounds float* %tmp14461, i64 1
- %tmp14463 = getelementptr inbounds float* %tmp14462, i64 1
- %tmp14464 = getelementptr inbounds float* %tmp14463, i64 1
- %tmp14465 = getelementptr inbounds float* %tmp14464, i64 1
- %tmp14466 = getelementptr inbounds float* %tmp14465, i64 1
- %tmp14467 = getelementptr inbounds float* %tmp14466, i64 1
- %tmp14468 = getelementptr inbounds float* %tmp14467, i64 1
- %tmp14469 = getelementptr inbounds float* %tmp14468, i64 1
- %tmp14470 = getelementptr inbounds float* %tmp14469, i64 1
- %tmp14471 = getelementptr inbounds float* %tmp14470, i64 1
- %tmp14472 = getelementptr inbounds float* %tmp14471, i64 1
- %tmp14473 = getelementptr inbounds float* %tmp14472, i64 1
- %tmp14474 = getelementptr inbounds float* %tmp14473, i64 1
- %tmp14475 = getelementptr inbounds float* %tmp14474, i64 1
- %tmp14476 = getelementptr inbounds float* %tmp14475, i64 1
- %tmp14477 = getelementptr inbounds float* %tmp14476, i64 1
- %tmp14478 = getelementptr inbounds float* %tmp14477, i64 1
- %tmp14479 = getelementptr inbounds float* %tmp14478, i64 1
- %tmp14480 = getelementptr inbounds float* %tmp14479, i64 1
- %tmp14481 = getelementptr inbounds float* %tmp14480, i64 1
- %tmp14482 = getelementptr inbounds float* %tmp14481, i64 1
- %tmp14483 = getelementptr inbounds float* %tmp14482, i64 1
- %tmp14484 = getelementptr inbounds float* %tmp14483, i64 1
- %tmp14485 = getelementptr inbounds float* %tmp14484, i64 1
- %tmp14486 = getelementptr inbounds float* %tmp14485, i64 1
- %tmp14487 = getelementptr inbounds float* %tmp14486, i64 1
- %tmp14488 = getelementptr inbounds float* %tmp14487, i64 1
- %tmp14489 = getelementptr inbounds float* %tmp14488, i64 1
- %tmp14490 = getelementptr inbounds float* %tmp14489, i64 1
- %tmp14491 = getelementptr inbounds float* %tmp14490, i64 1
- %tmp14492 = getelementptr inbounds float* %tmp14491, i64 1
- %tmp14493 = getelementptr inbounds float* %tmp14492, i64 1
- %tmp14494 = getelementptr inbounds float* %tmp14493, i64 1
- %tmp14495 = getelementptr inbounds float* %tmp14494, i64 1
- %tmp14496 = getelementptr inbounds float* %tmp14495, i64 1
- %tmp14497 = getelementptr inbounds float* %tmp14496, i64 1
- %tmp14498 = getelementptr inbounds float* %tmp14497, i64 1
- %tmp14499 = getelementptr inbounds float* %tmp14498, i64 1
- %tmp14500 = getelementptr inbounds float* %tmp14499, i64 1
- %tmp14501 = getelementptr inbounds float* %tmp14500, i64 1
- %tmp14502 = getelementptr inbounds float* %tmp14501, i64 1
- %tmp14503 = getelementptr inbounds float* %tmp14502, i64 1
- %tmp14504 = getelementptr inbounds float* %tmp14503, i64 1
- %tmp14505 = getelementptr inbounds float* %tmp14504, i64 1
- %tmp14506 = getelementptr inbounds float* %tmp14505, i64 1
- %tmp14507 = getelementptr inbounds float* %tmp14506, i64 1
- %tmp14508 = getelementptr inbounds float* %tmp14507, i64 1
- %tmp14509 = getelementptr inbounds float* %tmp14508, i64 1
- %tmp14510 = getelementptr inbounds float* %tmp14509, i64 1
- %tmp14511 = getelementptr inbounds float* %tmp14510, i64 1
- %tmp14512 = getelementptr inbounds float* %tmp14511, i64 1
- %tmp14513 = getelementptr inbounds float* %tmp14512, i64 1
- %tmp14514 = getelementptr inbounds float* %tmp14513, i64 1
- %tmp14515 = getelementptr inbounds float* %tmp14514, i64 1
- %tmp14516 = getelementptr inbounds float* %tmp14515, i64 1
- %tmp14517 = getelementptr inbounds float* %tmp14516, i64 1
- %tmp14518 = getelementptr inbounds float* %tmp14517, i64 1
- %tmp14519 = getelementptr inbounds float* %tmp14518, i64 1
- %tmp14520 = getelementptr inbounds float* %tmp14519, i64 1
- %tmp14521 = getelementptr inbounds float* %tmp14520, i64 1
- %tmp14522 = getelementptr inbounds float* %tmp14521, i64 1
- %tmp14523 = getelementptr inbounds float* %tmp14522, i64 1
- %tmp14524 = getelementptr inbounds float* %tmp14523, i64 1
- %tmp14525 = getelementptr inbounds float* %tmp14524, i64 1
- %tmp14526 = getelementptr inbounds float* %tmp14525, i64 1
- %tmp14527 = getelementptr inbounds float* %tmp14526, i64 1
- %tmp14528 = getelementptr inbounds float* %tmp14527, i64 1
- %tmp14529 = getelementptr inbounds float* %tmp14528, i64 1
- %tmp14530 = getelementptr inbounds float* %tmp14529, i64 1
- %tmp14531 = getelementptr inbounds float* %tmp14530, i64 1
- %tmp14532 = getelementptr inbounds float* %tmp14531, i64 1
- %tmp14533 = getelementptr inbounds float* %tmp14532, i64 1
- %tmp14534 = getelementptr inbounds float* %tmp14533, i64 1
- %tmp14535 = getelementptr inbounds float* %tmp14534, i64 1
- %tmp14536 = getelementptr inbounds float* %tmp14535, i64 1
- %tmp14537 = getelementptr inbounds float* %tmp14536, i64 1
- %tmp14538 = getelementptr inbounds float* %tmp14537, i64 1
- %tmp14539 = getelementptr inbounds float* %tmp14538, i64 1
- %tmp14540 = getelementptr inbounds float* %tmp14539, i64 1
- %tmp14541 = getelementptr inbounds float* %tmp14540, i64 1
- %tmp14542 = getelementptr inbounds float* %tmp14541, i64 1
- %tmp14543 = getelementptr inbounds float* %tmp14542, i64 1
- %tmp14544 = getelementptr inbounds float* %tmp14543, i64 1
- %tmp14545 = getelementptr inbounds float* %tmp14544, i64 1
- %tmp14546 = getelementptr inbounds float* %tmp14545, i64 1
- %tmp14547 = getelementptr inbounds float* %tmp14546, i64 1
- %tmp14548 = getelementptr inbounds float* %tmp14547, i64 1
- %tmp14549 = getelementptr inbounds float* %tmp14548, i64 1
- %tmp14550 = getelementptr inbounds float* %tmp14549, i64 1
- %tmp14551 = getelementptr inbounds float* %tmp14550, i64 1
- %tmp14552 = getelementptr inbounds float* %tmp14551, i64 1
- %tmp14553 = getelementptr inbounds float* %tmp14552, i64 1
- %tmp14554 = getelementptr inbounds float* %tmp14553, i64 1
- %tmp14555 = getelementptr inbounds float* %tmp14554, i64 1
- %tmp14556 = getelementptr inbounds float* %tmp14555, i64 1
- %tmp14557 = getelementptr inbounds float* %tmp14556, i64 1
- %tmp14558 = getelementptr inbounds float* %tmp14557, i64 1
- %tmp14559 = getelementptr inbounds float* %tmp14558, i64 1
- %tmp14560 = getelementptr inbounds float* %tmp14559, i64 1
- %tmp14561 = getelementptr inbounds float* %tmp14560, i64 1
- %tmp14562 = getelementptr inbounds float* %tmp14561, i64 1
- %tmp14563 = getelementptr inbounds float* %tmp14562, i64 1
- %tmp14564 = getelementptr inbounds float* %tmp14563, i64 1
- %tmp14565 = getelementptr inbounds float* %tmp14564, i64 1
- %tmp14566 = getelementptr inbounds float* %tmp14565, i64 1
- %tmp14567 = getelementptr inbounds float* %tmp14566, i64 1
- %tmp14568 = getelementptr inbounds float* %tmp14567, i64 1
- %tmp14569 = getelementptr inbounds float* %tmp14568, i64 1
- %tmp14570 = getelementptr inbounds float* %tmp14569, i64 1
- %tmp14571 = getelementptr inbounds float* %tmp14570, i64 1
- %tmp14572 = getelementptr inbounds float* %tmp14571, i64 1
- %tmp14573 = getelementptr inbounds float* %tmp14572, i64 1
- %tmp14574 = getelementptr inbounds float* %tmp14573, i64 1
- %tmp14575 = getelementptr inbounds float* %tmp14574, i64 1
- %tmp14576 = getelementptr inbounds float* %tmp14575, i64 1
- %tmp14577 = getelementptr inbounds float* %tmp14576, i64 1
- %tmp14578 = getelementptr inbounds float* %tmp14577, i64 1
- %tmp14579 = getelementptr inbounds float* %tmp14578, i64 1
- %tmp14580 = getelementptr inbounds float* %tmp14579, i64 1
- %tmp14581 = getelementptr inbounds float* %tmp14580, i64 1
- %tmp14582 = getelementptr inbounds float* %tmp14581, i64 1
- %tmp14583 = getelementptr inbounds float* %tmp14582, i64 1
- %tmp14584 = getelementptr inbounds float* %tmp14583, i64 1
- %tmp14585 = getelementptr inbounds float* %tmp14584, i64 1
- %tmp14586 = getelementptr inbounds float* %tmp14585, i64 1
- %tmp14587 = getelementptr inbounds float* %tmp14586, i64 1
- %tmp14588 = getelementptr inbounds float* %tmp14587, i64 1
- %tmp14589 = getelementptr inbounds float* %tmp14588, i64 1
- %tmp14590 = getelementptr inbounds float* %tmp14589, i64 1
- %tmp14591 = getelementptr inbounds float* %tmp14590, i64 1
- %tmp14592 = getelementptr inbounds float* %tmp14591, i64 1
- %tmp14593 = getelementptr inbounds float* %tmp14592, i64 1
- %tmp14594 = getelementptr inbounds float* %tmp14593, i64 1
- %tmp14595 = getelementptr inbounds float* %tmp14594, i64 1
- %tmp14596 = getelementptr inbounds float* %tmp14595, i64 1
- %tmp14597 = getelementptr inbounds float* %tmp14596, i64 1
- %tmp14598 = getelementptr inbounds float* %tmp14597, i64 1
- %tmp14599 = getelementptr inbounds float* %tmp14598, i64 1
- %tmp14600 = getelementptr inbounds float* %tmp14599, i64 1
- %tmp14601 = getelementptr inbounds float* %tmp14600, i64 1
- %tmp14602 = getelementptr inbounds float* %tmp14601, i64 1
- %tmp14603 = getelementptr inbounds float* %tmp14602, i64 1
- %tmp14604 = getelementptr inbounds float* %tmp14603, i64 1
- %tmp14605 = getelementptr inbounds float* %tmp14604, i64 1
- %tmp14606 = getelementptr inbounds float* %tmp14605, i64 1
- %tmp14607 = getelementptr inbounds float* %tmp14606, i64 1
- %tmp14608 = getelementptr inbounds float* %tmp14607, i64 1
- %tmp14609 = getelementptr inbounds float* %tmp14608, i64 1
- %tmp14610 = getelementptr inbounds float* %tmp14609, i64 1
- %tmp14611 = getelementptr inbounds float* %tmp14610, i64 1
- %tmp14612 = getelementptr inbounds float* %tmp14611, i64 1
- %tmp14613 = getelementptr inbounds float* %tmp14612, i64 1
- %tmp14614 = getelementptr inbounds float* %tmp14613, i64 1
- %tmp14615 = getelementptr inbounds float* %tmp14614, i64 1
- %tmp14616 = getelementptr inbounds float* %tmp14615, i64 1
- %tmp14617 = getelementptr inbounds float* %tmp14616, i64 1
- %tmp14618 = getelementptr inbounds float* %tmp14617, i64 1
- %tmp14619 = getelementptr inbounds float* %tmp14618, i64 1
- %tmp14620 = getelementptr inbounds float* %tmp14619, i64 1
- %tmp14621 = getelementptr inbounds float* %tmp14620, i64 1
- %tmp14622 = getelementptr inbounds float* %tmp14621, i64 1
- %tmp14623 = getelementptr inbounds float* %tmp14622, i64 1
- %tmp14624 = getelementptr inbounds float* %tmp14623, i64 1
- %tmp14625 = getelementptr inbounds float* %tmp14624, i64 1
- %tmp14626 = getelementptr inbounds float* %tmp14625, i64 1
- %tmp14627 = getelementptr inbounds float* %tmp14626, i64 1
- %tmp14628 = getelementptr inbounds float* %tmp14627, i64 1
- %tmp14629 = getelementptr inbounds float* %tmp14628, i64 1
- %tmp14630 = getelementptr inbounds float* %tmp14629, i64 1
- %tmp14631 = getelementptr inbounds float* %tmp14630, i64 1
- %tmp14632 = getelementptr inbounds float* %tmp14631, i64 1
- %tmp14633 = getelementptr inbounds float* %tmp14632, i64 1
- %tmp14634 = getelementptr inbounds float* %tmp14633, i64 1
- %tmp14635 = getelementptr inbounds float* %tmp14634, i64 1
- %tmp14636 = getelementptr inbounds float* %tmp14635, i64 1
- %tmp14637 = getelementptr inbounds float* %tmp14636, i64 1
- %tmp14638 = getelementptr inbounds float* %tmp14637, i64 1
- %tmp14639 = getelementptr inbounds float* %tmp14638, i64 1
- %tmp14640 = getelementptr inbounds float* %tmp14639, i64 1
- %tmp14641 = getelementptr inbounds float* %tmp14640, i64 1
- %tmp14642 = getelementptr inbounds float* %tmp14641, i64 1
- %tmp14643 = getelementptr inbounds float* %tmp14642, i64 1
- %tmp14644 = getelementptr inbounds float* %tmp14643, i64 1
- %tmp14645 = getelementptr inbounds float* %tmp14644, i64 1
- %tmp14646 = getelementptr inbounds float* %tmp14645, i64 1
- %tmp14647 = getelementptr inbounds float* %tmp14646, i64 1
- %tmp14648 = getelementptr inbounds float* %tmp14647, i64 1
- %tmp14649 = getelementptr inbounds float* %tmp14648, i64 1
- %tmp14650 = getelementptr inbounds float* %tmp14649, i64 1
- %tmp14651 = getelementptr inbounds float* %tmp14650, i64 1
- %tmp14652 = getelementptr inbounds float* %tmp14651, i64 1
- %tmp14653 = getelementptr inbounds float* %tmp14652, i64 1
- %tmp14654 = getelementptr inbounds float* %tmp14653, i64 1
- %tmp14655 = getelementptr inbounds float* %tmp14654, i64 1
- %tmp14656 = getelementptr inbounds float* %tmp14655, i64 1
- %tmp14657 = getelementptr inbounds float* %tmp14656, i64 1
- %tmp14658 = getelementptr inbounds float* %tmp14657, i64 1
- %tmp14659 = getelementptr inbounds float* %tmp14658, i64 1
- %tmp14660 = getelementptr inbounds float* %tmp14659, i64 1
- %tmp14661 = getelementptr inbounds float* %tmp14660, i64 1
- %tmp14662 = getelementptr inbounds float* %tmp14661, i64 1
- %tmp14663 = getelementptr inbounds float* %tmp14662, i64 1
- %tmp14664 = getelementptr inbounds float* %tmp14663, i64 1
- %tmp14665 = getelementptr inbounds float* %tmp14664, i64 1
- %tmp14666 = getelementptr inbounds float* %tmp14665, i64 1
- %tmp14667 = getelementptr inbounds float* %tmp14666, i64 1
- %tmp14668 = getelementptr inbounds float* %tmp14667, i64 1
- %tmp14669 = getelementptr inbounds float* %tmp14668, i64 1
- %tmp14670 = getelementptr inbounds float* %tmp14669, i64 1
- %tmp14671 = getelementptr inbounds float* %tmp14670, i64 1
- %tmp14672 = getelementptr inbounds float* %tmp14671, i64 1
- %tmp14673 = getelementptr inbounds float* %tmp14672, i64 1
- %tmp14674 = getelementptr inbounds float* %tmp14673, i64 1
- %tmp14675 = getelementptr inbounds float* %tmp14674, i64 1
- %tmp14676 = getelementptr inbounds float* %tmp14675, i64 1
- %tmp14677 = getelementptr inbounds float* %tmp14676, i64 1
- %tmp14678 = getelementptr inbounds float* %tmp14677, i64 1
- %tmp14679 = getelementptr inbounds float* %tmp14678, i64 1
- %tmp14680 = getelementptr inbounds float* %tmp14679, i64 1
- %tmp14681 = getelementptr inbounds float* %tmp14680, i64 1
- %tmp14682 = getelementptr inbounds float* %tmp14681, i64 1
- %tmp14683 = getelementptr inbounds float* %tmp14682, i64 1
- %tmp14684 = getelementptr inbounds float* %tmp14683, i64 1
- %tmp14685 = getelementptr inbounds float* %tmp14684, i64 1
- %tmp14686 = getelementptr inbounds float* %tmp14685, i64 1
- %tmp14687 = getelementptr inbounds float* %tmp14686, i64 1
- %tmp14688 = getelementptr inbounds float* %tmp14687, i64 1
- %tmp14689 = getelementptr inbounds float* %tmp14688, i64 1
- %tmp14690 = getelementptr inbounds float* %tmp14689, i64 1
- %tmp14691 = getelementptr inbounds float* %tmp14690, i64 1
- %tmp14692 = getelementptr inbounds float* %tmp14691, i64 1
- %tmp14693 = getelementptr inbounds float* %tmp14692, i64 1
- %tmp14694 = getelementptr inbounds float* %tmp14693, i64 1
- %tmp14695 = getelementptr inbounds float* %tmp14694, i64 1
- %tmp14696 = getelementptr inbounds float* %tmp14695, i64 1
- %tmp14697 = getelementptr inbounds float* %tmp14696, i64 1
- %tmp14698 = getelementptr inbounds float* %tmp14697, i64 1
- %tmp14699 = getelementptr inbounds float* %tmp14698, i64 1
- %tmp14700 = getelementptr inbounds float* %tmp14699, i64 1
- %tmp14701 = getelementptr inbounds float* %tmp14700, i64 1
- %tmp14702 = getelementptr inbounds float* %tmp14701, i64 1
- %tmp14703 = getelementptr inbounds float* %tmp14702, i64 1
- %tmp14704 = getelementptr inbounds float* %tmp14703, i64 1
- %tmp14705 = getelementptr inbounds float* %tmp14704, i64 1
- %tmp14706 = getelementptr inbounds float* %tmp14705, i64 1
- %tmp14707 = getelementptr inbounds float* %tmp14706, i64 1
- %tmp14708 = getelementptr inbounds float* %tmp14707, i64 1
- %tmp14709 = getelementptr inbounds float* %tmp14708, i64 1
- %tmp14710 = getelementptr inbounds float* %tmp14709, i64 1
- %tmp14711 = getelementptr inbounds float* %tmp14710, i64 1
- %tmp14712 = getelementptr inbounds float* %tmp14711, i64 1
- %tmp14713 = getelementptr inbounds float* %tmp14712, i64 1
- %tmp14714 = getelementptr inbounds float* %tmp14713, i64 1
- %tmp14715 = getelementptr inbounds float* %tmp14714, i64 1
- %tmp14716 = getelementptr inbounds float* %tmp14715, i64 1
- %tmp14717 = getelementptr inbounds float* %tmp14716, i64 1
- %tmp14718 = getelementptr inbounds float* %tmp14717, i64 1
- %tmp14719 = getelementptr inbounds float* %tmp14718, i64 1
- %tmp14720 = getelementptr inbounds float* %tmp14719, i64 1
- %tmp14721 = getelementptr inbounds float* %tmp14720, i64 1
- %tmp14722 = getelementptr inbounds float* %tmp14721, i64 1
- %tmp14723 = getelementptr inbounds float* %tmp14722, i64 1
- %tmp14724 = getelementptr inbounds float* %tmp14723, i64 1
- %tmp14725 = getelementptr inbounds float* %tmp14724, i64 1
- %tmp14726 = getelementptr inbounds float* %tmp14725, i64 1
- %tmp14727 = getelementptr inbounds float* %tmp14726, i64 1
- %tmp14728 = getelementptr inbounds float* %tmp14727, i64 1
- %tmp14729 = getelementptr inbounds float* %tmp14728, i64 1
- %tmp14730 = getelementptr inbounds float* %tmp14729, i64 1
- %tmp14731 = getelementptr inbounds float* %tmp14730, i64 1
- %tmp14732 = getelementptr inbounds float* %tmp14731, i64 1
- %tmp14733 = getelementptr inbounds float* %tmp14732, i64 1
- %tmp14734 = getelementptr inbounds float* %tmp14733, i64 1
- %tmp14735 = getelementptr inbounds float* %tmp14734, i64 1
- %tmp14736 = getelementptr inbounds float* %tmp14735, i64 1
- %tmp14737 = getelementptr inbounds float* %tmp14736, i64 1
- %tmp14738 = getelementptr inbounds float* %tmp14737, i64 1
- %tmp14739 = getelementptr inbounds float* %tmp14738, i64 1
- %tmp14740 = getelementptr inbounds float* %tmp14739, i64 1
- %tmp14741 = getelementptr inbounds float* %tmp14740, i64 1
- %tmp14742 = getelementptr inbounds float* %tmp14741, i64 1
- %tmp14743 = getelementptr inbounds float* %tmp14742, i64 1
- %tmp14744 = getelementptr inbounds float* %tmp14743, i64 1
- %tmp14745 = getelementptr inbounds float* %tmp14744, i64 1
- %tmp14746 = getelementptr inbounds float* %tmp14745, i64 1
- %tmp14747 = getelementptr inbounds float* %tmp14746, i64 1
- %tmp14748 = getelementptr inbounds float* %tmp14747, i64 1
- %tmp14749 = getelementptr inbounds float* %tmp14748, i64 1
- %tmp14750 = getelementptr inbounds float* %tmp14749, i64 1
- %tmp14751 = getelementptr inbounds float* %tmp14750, i64 1
- %tmp14752 = getelementptr inbounds float* %tmp14751, i64 1
- %tmp14753 = getelementptr inbounds float* %tmp14752, i64 1
- %tmp14754 = getelementptr inbounds float* %tmp14753, i64 1
- %tmp14755 = getelementptr inbounds float* %tmp14754, i64 1
- %tmp14756 = getelementptr inbounds float* %tmp14755, i64 1
- %tmp14757 = getelementptr inbounds float* %tmp14756, i64 1
- %tmp14758 = getelementptr inbounds float* %tmp14757, i64 1
- %tmp14759 = getelementptr inbounds float* %tmp14758, i64 1
- %tmp14760 = getelementptr inbounds float* %tmp14759, i64 1
- %tmp14761 = getelementptr inbounds float* %tmp14760, i64 1
- %tmp14762 = getelementptr inbounds float* %tmp14761, i64 1
- %tmp14763 = getelementptr inbounds float* %tmp14762, i64 1
- %tmp14764 = getelementptr inbounds float* %tmp14763, i64 1
- %tmp14765 = getelementptr inbounds float* %tmp14764, i64 1
- %tmp14766 = getelementptr inbounds float* %tmp14765, i64 1
- %tmp14767 = getelementptr inbounds float* %tmp14766, i64 1
- %tmp14768 = getelementptr inbounds float* %tmp14767, i64 1
- %tmp14769 = getelementptr inbounds float* %tmp14768, i64 1
- %tmp14770 = getelementptr inbounds float* %tmp14769, i64 1
- %tmp14771 = getelementptr inbounds float* %tmp14770, i64 1
- %tmp14772 = getelementptr inbounds float* %tmp14771, i64 1
- %tmp14773 = getelementptr inbounds float* %tmp14772, i64 1
- %tmp14774 = getelementptr inbounds float* %tmp14773, i64 1
- %tmp14775 = getelementptr inbounds float* %tmp14774, i64 1
- %tmp14776 = getelementptr inbounds float* %tmp14775, i64 1
- %tmp14777 = getelementptr inbounds float* %tmp14776, i64 1
- %tmp14778 = getelementptr inbounds float* %tmp14777, i64 1
- %tmp14779 = getelementptr inbounds float* %tmp14778, i64 1
- %tmp14780 = getelementptr inbounds float* %tmp14779, i64 1
- %tmp14781 = getelementptr inbounds float* %tmp14780, i64 1
- %tmp14782 = getelementptr inbounds float* %tmp14781, i64 1
- %tmp14783 = getelementptr inbounds float* %tmp14782, i64 1
- %tmp14784 = getelementptr inbounds float* %tmp14783, i64 1
- %tmp14785 = getelementptr inbounds float* %tmp14784, i64 1
- %tmp14786 = getelementptr inbounds float* %tmp14785, i64 1
- %tmp14787 = getelementptr inbounds float* %tmp14786, i64 1
- %tmp14788 = getelementptr inbounds float* %tmp14787, i64 1
- %tmp14789 = getelementptr inbounds float* %tmp14788, i64 1
- %tmp14790 = getelementptr inbounds float* %tmp14789, i64 1
- %tmp14791 = getelementptr inbounds float* %tmp14790, i64 1
- %tmp14792 = getelementptr inbounds float* %tmp14791, i64 1
- %tmp14793 = getelementptr inbounds float* %tmp14792, i64 1
- %tmp14794 = getelementptr inbounds float* %tmp14793, i64 1
- %tmp14795 = getelementptr inbounds float* %tmp14794, i64 1
- %tmp14796 = getelementptr inbounds float* %tmp14795, i64 1
- %tmp14797 = getelementptr inbounds float* %tmp14796, i64 1
- %tmp14798 = getelementptr inbounds float* %tmp14797, i64 1
- %tmp14799 = getelementptr inbounds float* %tmp14798, i64 1
- %tmp14800 = getelementptr inbounds float* %tmp14799, i64 1
- %tmp14801 = getelementptr inbounds float* %tmp14800, i64 1
- %tmp14802 = getelementptr inbounds float* %tmp14801, i64 1
- %tmp14803 = getelementptr inbounds float* %tmp14802, i64 1
- %tmp14804 = getelementptr inbounds float* %tmp14803, i64 1
- %tmp14805 = getelementptr inbounds float* %tmp14804, i64 1
- %tmp14806 = getelementptr inbounds float* %tmp14805, i64 1
- %tmp14807 = getelementptr inbounds float* %tmp14806, i64 1
- %tmp14808 = getelementptr inbounds float* %tmp14807, i64 1
- %tmp14809 = getelementptr inbounds float* %tmp14808, i64 1
- %tmp14810 = getelementptr inbounds float* %tmp14809, i64 1
- %tmp14811 = getelementptr inbounds float* %tmp14810, i64 1
- %tmp14812 = getelementptr inbounds float* %tmp14811, i64 1
- %tmp14813 = getelementptr inbounds float* %tmp14812, i64 1
- %tmp14814 = getelementptr inbounds float* %tmp14813, i64 1
- %tmp14815 = getelementptr inbounds float* %tmp14814, i64 1
- %tmp14816 = getelementptr inbounds float* %tmp14815, i64 1
- %tmp14817 = getelementptr inbounds float* %tmp14816, i64 1
- %tmp14818 = getelementptr inbounds float* %tmp14817, i64 1
- %tmp14819 = getelementptr inbounds float* %tmp14818, i64 1
- %tmp14820 = getelementptr inbounds float* %tmp14819, i64 1
- %tmp14821 = getelementptr inbounds float* %tmp14820, i64 1
- %tmp14822 = getelementptr inbounds float* %tmp14821, i64 1
- %tmp14823 = getelementptr inbounds float* %tmp14822, i64 1
- %tmp14824 = getelementptr inbounds float* %tmp14823, i64 1
- %tmp14825 = getelementptr inbounds float* %tmp14824, i64 1
- %tmp14826 = getelementptr inbounds float* %tmp14825, i64 1
- %tmp14827 = getelementptr inbounds float* %tmp14826, i64 1
- %tmp14828 = getelementptr inbounds float* %tmp14827, i64 1
- %tmp14829 = getelementptr inbounds float* %tmp14828, i64 1
- %tmp14830 = getelementptr inbounds float* %tmp14829, i64 1
- %tmp14831 = getelementptr inbounds float* %tmp14830, i64 1
- %tmp14832 = getelementptr inbounds float* %tmp14831, i64 1
- %tmp14833 = getelementptr inbounds float* %tmp14832, i64 1
- %tmp14834 = getelementptr inbounds float* %tmp14833, i64 1
- %tmp14835 = getelementptr inbounds float* %tmp14834, i64 1
- %tmp14836 = getelementptr inbounds float* %tmp14835, i64 1
- %tmp14837 = getelementptr inbounds float* %tmp14836, i64 1
- %tmp14838 = getelementptr inbounds float* %tmp14837, i64 1
- %tmp14839 = getelementptr inbounds float* %tmp14838, i64 1
- %tmp14840 = getelementptr inbounds float* %tmp14839, i64 1
- %tmp14841 = getelementptr inbounds float* %tmp14840, i64 1
- %tmp14842 = getelementptr inbounds float* %tmp14841, i64 1
- %tmp14843 = getelementptr inbounds float* %tmp14842, i64 1
- %tmp14844 = getelementptr inbounds float* %tmp14843, i64 1
- %tmp14845 = getelementptr inbounds float* %tmp14844, i64 1
- %tmp14846 = getelementptr inbounds float* %tmp14845, i64 1
- %tmp14847 = getelementptr inbounds float* %tmp14846, i64 1
- %tmp14848 = getelementptr inbounds float* %tmp14847, i64 1
- %tmp14849 = getelementptr inbounds float* %tmp14848, i64 1
- %tmp14850 = getelementptr inbounds float* %tmp14849, i64 1
- %tmp14851 = getelementptr inbounds float* %tmp14850, i64 1
- %tmp14852 = getelementptr inbounds float* %tmp14851, i64 1
- %tmp14853 = getelementptr inbounds float* %tmp14852, i64 1
- %tmp14854 = getelementptr inbounds float* %tmp14853, i64 1
- %tmp14855 = getelementptr inbounds float* %tmp14854, i64 1
- %tmp14856 = getelementptr inbounds float* %tmp14855, i64 1
- %tmp14857 = getelementptr inbounds float* %tmp14856, i64 1
- %tmp14858 = getelementptr inbounds float* %tmp14857, i64 1
- %tmp14859 = getelementptr inbounds float* %tmp14858, i64 1
- %tmp14860 = getelementptr inbounds float* %tmp14859, i64 1
- %tmp14861 = getelementptr inbounds float* %tmp14860, i64 1
- %tmp14862 = getelementptr inbounds float* %tmp14861, i64 1
- %tmp14863 = getelementptr inbounds float* %tmp14862, i64 1
- %tmp14864 = getelementptr inbounds float* %tmp14863, i64 1
- %tmp14865 = getelementptr inbounds float* %tmp14864, i64 1
- %tmp14866 = getelementptr inbounds float* %tmp14865, i64 1
- %tmp14867 = getelementptr inbounds float* %tmp14866, i64 1
- %tmp14868 = getelementptr inbounds float* %tmp14867, i64 1
- %tmp14869 = getelementptr inbounds float* %tmp14868, i64 1
- %tmp14870 = getelementptr inbounds float* %tmp14869, i64 1
- %tmp14871 = getelementptr inbounds float* %tmp14870, i64 1
- %tmp14872 = getelementptr inbounds float* %tmp14871, i64 1
- %tmp14873 = getelementptr inbounds float* %tmp14872, i64 1
- %tmp14874 = getelementptr inbounds float* %tmp14873, i64 1
- %tmp14875 = getelementptr inbounds float* %tmp14874, i64 1
- %tmp14876 = getelementptr inbounds float* %tmp14875, i64 1
- %tmp14877 = getelementptr inbounds float* %tmp14876, i64 1
- %tmp14878 = getelementptr inbounds float* %tmp14877, i64 1
- %tmp14879 = getelementptr inbounds float* %tmp14878, i64 1
- %tmp14880 = getelementptr inbounds float* %tmp14879, i64 1
- %tmp14881 = getelementptr inbounds float* %tmp14880, i64 1
- %tmp14882 = getelementptr inbounds float* %tmp14881, i64 1
- %tmp14883 = getelementptr inbounds float* %tmp14882, i64 1
- %tmp14884 = getelementptr inbounds float* %tmp14883, i64 1
- %tmp14885 = getelementptr inbounds float* %tmp14884, i64 1
- %tmp14886 = getelementptr inbounds float* %tmp14885, i64 1
- %tmp14887 = getelementptr inbounds float* %tmp14886, i64 1
- %tmp14888 = getelementptr inbounds float* %tmp14887, i64 1
- %tmp14889 = getelementptr inbounds float* %tmp14888, i64 1
- %tmp14890 = getelementptr inbounds float* %tmp14889, i64 1
- %tmp14891 = getelementptr inbounds float* %tmp14890, i64 1
- %tmp14892 = getelementptr inbounds float* %tmp14891, i64 1
- %tmp14893 = getelementptr inbounds float* %tmp14892, i64 1
- %tmp14894 = getelementptr inbounds float* %tmp14893, i64 1
- %tmp14895 = getelementptr inbounds float* %tmp14894, i64 1
- %tmp14896 = getelementptr inbounds float* %tmp14895, i64 1
- %tmp14897 = getelementptr inbounds float* %tmp14896, i64 1
- %tmp14898 = getelementptr inbounds float* %tmp14897, i64 1
- %tmp14899 = getelementptr inbounds float* %tmp14898, i64 1
- %tmp14900 = getelementptr inbounds float* %tmp14899, i64 1
- %tmp14901 = getelementptr inbounds float* %tmp14900, i64 1
- %tmp14902 = getelementptr inbounds float* %tmp14901, i64 1
- %tmp14903 = getelementptr inbounds float* %tmp14902, i64 1
- %tmp14904 = getelementptr inbounds float* %tmp14903, i64 1
- %tmp14905 = getelementptr inbounds float* %tmp14904, i64 1
- %tmp14906 = getelementptr inbounds float* %tmp14905, i64 1
- %tmp14907 = getelementptr inbounds float* %tmp14906, i64 1
- %tmp14908 = getelementptr inbounds float* %tmp14907, i64 1
- %tmp14909 = getelementptr inbounds float* %tmp14908, i64 1
- %tmp14910 = getelementptr inbounds float* %tmp14909, i64 1
- %tmp14911 = getelementptr inbounds float* %tmp14910, i64 1
- %tmp14912 = getelementptr inbounds float* %tmp14911, i64 1
- %tmp14913 = getelementptr inbounds float* %tmp14912, i64 1
- %tmp14914 = getelementptr inbounds float* %tmp14913, i64 1
- %tmp14915 = getelementptr inbounds float* %tmp14914, i64 1
- %tmp14916 = getelementptr inbounds float* %tmp14915, i64 1
- %tmp14917 = getelementptr inbounds float* %tmp14916, i64 1
- %tmp14918 = getelementptr inbounds float* %tmp14917, i64 1
- %tmp14919 = getelementptr inbounds float* %tmp14918, i64 1
- %tmp14920 = getelementptr inbounds float* %tmp14919, i64 1
- %tmp14921 = getelementptr inbounds float* %tmp14920, i64 1
- %tmp14922 = getelementptr inbounds float* %tmp14921, i64 1
- %tmp14923 = getelementptr inbounds float* %tmp14922, i64 1
- %tmp14924 = getelementptr inbounds float* %tmp14923, i64 1
- %tmp14925 = getelementptr inbounds float* %tmp14924, i64 1
- %tmp14926 = getelementptr inbounds float* %tmp14925, i64 1
- %tmp14927 = getelementptr inbounds float* %tmp14926, i64 1
- %tmp14928 = getelementptr inbounds float* %tmp14927, i64 1
- %tmp14929 = getelementptr inbounds float* %tmp14928, i64 1
- %tmp14930 = getelementptr inbounds float* %tmp14929, i64 1
- %tmp14931 = getelementptr inbounds float* %tmp14930, i64 1
- %tmp14932 = getelementptr inbounds float* %tmp14931, i64 1
- %tmp14933 = getelementptr inbounds float* %tmp14932, i64 1
- %tmp14934 = getelementptr inbounds float* %tmp14933, i64 1
- %tmp14935 = getelementptr inbounds float* %tmp14934, i64 1
- %tmp14936 = getelementptr inbounds float* %tmp14935, i64 1
- %tmp14937 = getelementptr inbounds float* %tmp14936, i64 1
- %tmp14938 = getelementptr inbounds float* %tmp14937, i64 1
- %tmp14939 = getelementptr inbounds float* %tmp14938, i64 1
- %tmp14940 = getelementptr inbounds float* %tmp14939, i64 1
- %tmp14941 = getelementptr inbounds float* %tmp14940, i64 1
- %tmp14942 = getelementptr inbounds float* %tmp14941, i64 1
- %tmp14943 = getelementptr inbounds float* %tmp14942, i64 1
- %tmp14944 = getelementptr inbounds float* %tmp14943, i64 1
- %tmp14945 = getelementptr inbounds float* %tmp14944, i64 1
- %tmp14946 = getelementptr inbounds float* %tmp14945, i64 1
- %tmp14947 = getelementptr inbounds float* %tmp14946, i64 1
- %tmp14948 = getelementptr inbounds float* %tmp14947, i64 1
- %tmp14949 = getelementptr inbounds float* %tmp14948, i64 1
- %tmp14950 = getelementptr inbounds float* %tmp14949, i64 1
- %tmp14951 = getelementptr inbounds float* %tmp14950, i64 1
- %tmp14952 = getelementptr inbounds float* %tmp14951, i64 1
- %tmp14953 = getelementptr inbounds float* %tmp14952, i64 1
- %tmp14954 = getelementptr inbounds float* %tmp14953, i64 1
- %tmp14955 = getelementptr inbounds float* %tmp14954, i64 1
- %tmp14956 = getelementptr inbounds float* %tmp14955, i64 1
- %tmp14957 = getelementptr inbounds float* %tmp14956, i64 1
- %tmp14958 = getelementptr inbounds float* %tmp14957, i64 1
- %tmp14959 = getelementptr inbounds float* %tmp14958, i64 1
- %tmp14960 = getelementptr inbounds float* %tmp14959, i64 1
- %tmp14961 = getelementptr inbounds float* %tmp14960, i64 1
- %tmp14962 = getelementptr inbounds float* %tmp14961, i64 1
- %tmp14963 = getelementptr inbounds float* %tmp14962, i64 1
- %tmp14964 = getelementptr inbounds float* %tmp14963, i64 1
- %tmp14965 = getelementptr inbounds float* %tmp14964, i64 1
- %tmp14966 = getelementptr inbounds float* %tmp14965, i64 1
- %tmp14967 = getelementptr inbounds float* %tmp14966, i64 1
- %tmp14968 = getelementptr inbounds float* %tmp14967, i64 1
- %tmp14969 = getelementptr inbounds float* %tmp14968, i64 1
- %tmp14970 = getelementptr inbounds float* %tmp14969, i64 1
- %tmp14971 = getelementptr inbounds float* %tmp14970, i64 1
- %tmp14972 = getelementptr inbounds float* %tmp14971, i64 1
- %tmp14973 = getelementptr inbounds float* %tmp14972, i64 1
- %tmp14974 = getelementptr inbounds float* %tmp14973, i64 1
- %tmp14975 = getelementptr inbounds float* %tmp14974, i64 1
- %tmp14976 = getelementptr inbounds float* %tmp14975, i64 1
- %tmp14977 = getelementptr inbounds float* %tmp14976, i64 1
- %tmp14978 = getelementptr inbounds float* %tmp14977, i64 1
- %tmp14979 = getelementptr inbounds float* %tmp14978, i64 1
- %tmp14980 = getelementptr inbounds float* %tmp14979, i64 1
- %tmp14981 = getelementptr inbounds float* %tmp14980, i64 1
- %tmp14982 = getelementptr inbounds float* %tmp14981, i64 1
- %tmp14983 = getelementptr inbounds float* %tmp14982, i64 1
- %tmp14984 = getelementptr inbounds float* %tmp14983, i64 1
- %tmp14985 = getelementptr inbounds float* %tmp14984, i64 1
- %tmp14986 = getelementptr inbounds float* %tmp14985, i64 1
- %tmp14987 = getelementptr inbounds float* %tmp14986, i64 1
- %tmp14988 = getelementptr inbounds float* %tmp14987, i64 1
- %tmp14989 = getelementptr inbounds float* %tmp14988, i64 1
- %tmp14990 = getelementptr inbounds float* %tmp14989, i64 1
- %tmp14991 = getelementptr inbounds float* %tmp14990, i64 1
- %tmp14992 = getelementptr inbounds float* %tmp14991, i64 1
- %tmp14993 = getelementptr inbounds float* %tmp14992, i64 1
- %tmp14994 = getelementptr inbounds float* %tmp14993, i64 1
- %tmp14995 = getelementptr inbounds float* %tmp14994, i64 1
- %tmp14996 = getelementptr inbounds float* %tmp14995, i64 1
- %tmp14997 = getelementptr inbounds float* %tmp14996, i64 1
- %tmp14998 = getelementptr inbounds float* %tmp14997, i64 1
- %tmp14999 = getelementptr inbounds float* %tmp14998, i64 1
- %tmp15000 = getelementptr inbounds float* %tmp14999, i64 1
- %tmp15001 = getelementptr inbounds float* %tmp15000, i64 1
- %tmp15002 = getelementptr inbounds float* %tmp15001, i64 1
- %tmp15003 = getelementptr inbounds float* %tmp15002, i64 1
- %tmp15004 = getelementptr inbounds float* %tmp15003, i64 1
- %tmp15005 = getelementptr inbounds float* %tmp15004, i64 1
- %tmp15006 = getelementptr inbounds float* %tmp15005, i64 1
- %tmp15007 = getelementptr inbounds float* %tmp15006, i64 1
- %tmp15008 = getelementptr inbounds float* %tmp15007, i64 1
- %tmp15009 = getelementptr inbounds float* %tmp15008, i64 1
- %tmp15010 = getelementptr inbounds float* %tmp15009, i64 1
- %tmp15011 = getelementptr inbounds float* %tmp15010, i64 1
- %tmp15012 = getelementptr inbounds float* %tmp15011, i64 1
- %tmp15013 = getelementptr inbounds float* %tmp15012, i64 1
- %tmp15014 = getelementptr inbounds float* %tmp15013, i64 1
- %tmp15015 = getelementptr inbounds float* %tmp15014, i64 1
- %tmp15016 = getelementptr inbounds float* %tmp15015, i64 1
- %tmp15017 = getelementptr inbounds float* %tmp15016, i64 1
- %tmp15018 = getelementptr inbounds float* %tmp15017, i64 1
- %tmp15019 = getelementptr inbounds float* %tmp15018, i64 1
- %tmp15020 = getelementptr inbounds float* %tmp15019, i64 1
- %tmp15021 = getelementptr inbounds float* %tmp15020, i64 1
- %tmp15022 = getelementptr inbounds float* %tmp15021, i64 1
- %tmp15023 = getelementptr inbounds float* %tmp15022, i64 1
- %tmp15024 = getelementptr inbounds float* %tmp15023, i64 1
- %tmp15025 = getelementptr inbounds float* %tmp15024, i64 1
- %tmp15026 = getelementptr inbounds float* %tmp15025, i64 1
- %tmp15027 = getelementptr inbounds float* %tmp15026, i64 1
- %tmp15028 = getelementptr inbounds float* %tmp15027, i64 1
- %tmp15029 = getelementptr inbounds float* %tmp15028, i64 1
- %tmp15030 = getelementptr inbounds float* %tmp15029, i64 1
- %tmp15031 = getelementptr inbounds float* %tmp15030, i64 1
- %tmp15032 = getelementptr inbounds float* %tmp15031, i64 1
- %tmp15033 = getelementptr inbounds float* %tmp15032, i64 1
- %tmp15034 = getelementptr inbounds float* %tmp15033, i64 1
- %tmp15035 = getelementptr inbounds float* %tmp15034, i64 1
- %tmp15036 = getelementptr inbounds float* %tmp15035, i64 1
- %tmp15037 = getelementptr inbounds float* %tmp15036, i64 1
- %tmp15038 = getelementptr inbounds float* %tmp15037, i64 1
- %tmp15039 = getelementptr inbounds float* %tmp15038, i64 1
- %tmp15040 = getelementptr inbounds float* %tmp15039, i64 1
- %tmp15041 = getelementptr inbounds float* %tmp15040, i64 1
- %tmp15042 = getelementptr inbounds float* %tmp15041, i64 1
- %tmp15043 = getelementptr inbounds float* %tmp15042, i64 1
- %tmp15044 = getelementptr inbounds float* %tmp15043, i64 1
- %tmp15045 = getelementptr inbounds float* %tmp15044, i64 1
- %tmp15046 = getelementptr inbounds float* %tmp15045, i64 1
- %tmp15047 = getelementptr inbounds float* %tmp15046, i64 1
- %tmp15048 = getelementptr inbounds float* %tmp15047, i64 1
- %tmp15049 = getelementptr inbounds float* %tmp15048, i64 1
- %tmp15050 = getelementptr inbounds float* %tmp15049, i64 1
- %tmp15051 = getelementptr inbounds float* %tmp15050, i64 1
- %tmp15052 = getelementptr inbounds float* %tmp15051, i64 1
- %tmp15053 = getelementptr inbounds float* %tmp15052, i64 1
- %tmp15054 = getelementptr inbounds float* %tmp15053, i64 1
- %tmp15055 = getelementptr inbounds float* %tmp15054, i64 1
- %tmp15056 = getelementptr inbounds float* %tmp15055, i64 1
- %tmp15057 = getelementptr inbounds float* %tmp15056, i64 1
- %tmp15058 = getelementptr inbounds float* %tmp15057, i64 1
- %tmp15059 = getelementptr inbounds float* %tmp15058, i64 1
- %tmp15060 = getelementptr inbounds float* %tmp15059, i64 1
- %tmp15061 = getelementptr inbounds float* %tmp15060, i64 1
- %tmp15062 = getelementptr inbounds float* %tmp15061, i64 1
- %tmp15063 = getelementptr inbounds float* %tmp15062, i64 1
- %tmp15064 = getelementptr inbounds float* %tmp15063, i64 1
- %tmp15065 = getelementptr inbounds float* %tmp15064, i64 1
- %tmp15066 = getelementptr inbounds float* %tmp15065, i64 1
- %tmp15067 = getelementptr inbounds float* %tmp15066, i64 1
- %tmp15068 = getelementptr inbounds float* %tmp15067, i64 1
- %tmp15069 = getelementptr inbounds float* %tmp15068, i64 1
- %tmp15070 = getelementptr inbounds float* %tmp15069, i64 1
- %tmp15071 = getelementptr inbounds float* %tmp15070, i64 1
- %tmp15072 = getelementptr inbounds float* %tmp15071, i64 1
- %tmp15073 = getelementptr inbounds float* %tmp15072, i64 1
- %tmp15074 = getelementptr inbounds float* %tmp15073, i64 1
- %tmp15075 = getelementptr inbounds float* %tmp15074, i64 1
- %tmp15076 = getelementptr inbounds float* %tmp15075, i64 1
- %tmp15077 = getelementptr inbounds float* %tmp15076, i64 1
- %tmp15078 = getelementptr inbounds float* %tmp15077, i64 1
- %tmp15079 = getelementptr inbounds float* %tmp15078, i64 1
- %tmp15080 = getelementptr inbounds float* %tmp15079, i64 1
- %tmp15081 = getelementptr inbounds float* %tmp15080, i64 1
- %tmp15082 = getelementptr inbounds float* %tmp15081, i64 1
- %tmp15083 = getelementptr inbounds float* %tmp15082, i64 1
- %tmp15084 = getelementptr inbounds float* %tmp15083, i64 1
- %tmp15085 = getelementptr inbounds float* %tmp15084, i64 1
- %tmp15086 = getelementptr inbounds float* %tmp15085, i64 1
- %tmp15087 = getelementptr inbounds float* %tmp15086, i64 1
- %tmp15088 = getelementptr inbounds float* %tmp15087, i64 1
- %tmp15089 = getelementptr inbounds float* %tmp15088, i64 1
- %tmp15090 = getelementptr inbounds float* %tmp15089, i64 1
- %tmp15091 = getelementptr inbounds float* %tmp15090, i64 1
- %tmp15092 = getelementptr inbounds float* %tmp15091, i64 1
- %tmp15093 = getelementptr inbounds float* %tmp15092, i64 1
- %tmp15094 = getelementptr inbounds float* %tmp15093, i64 1
- %tmp15095 = getelementptr inbounds float* %tmp15094, i64 1
- %tmp15096 = getelementptr inbounds float* %tmp15095, i64 1
- %tmp15097 = getelementptr inbounds float* %tmp15096, i64 1
- %tmp15098 = getelementptr inbounds float* %tmp15097, i64 1
- %tmp15099 = getelementptr inbounds float* %tmp15098, i64 1
- %tmp15100 = getelementptr inbounds float* %tmp15099, i64 1
- %tmp15101 = getelementptr inbounds float* %tmp15100, i64 1
- %tmp15102 = getelementptr inbounds float* %tmp15101, i64 1
- %tmp15103 = getelementptr inbounds float* %tmp15102, i64 1
- %tmp15104 = getelementptr inbounds float* %tmp15103, i64 1
- %tmp15105 = getelementptr inbounds float* %tmp15104, i64 1
- %tmp15106 = getelementptr inbounds float* %tmp15105, i64 1
- %tmp15107 = getelementptr inbounds float* %tmp15106, i64 1
- %tmp15108 = getelementptr inbounds float* %tmp15107, i64 1
- %tmp15109 = getelementptr inbounds float* %tmp15108, i64 1
- %tmp15110 = getelementptr inbounds float* %tmp15109, i64 1
- %tmp15111 = getelementptr inbounds float* %tmp15110, i64 1
- %tmp15112 = getelementptr inbounds float* %tmp15111, i64 1
- %tmp15113 = getelementptr inbounds float* %tmp15112, i64 1
- %tmp15114 = getelementptr inbounds float* %tmp15113, i64 1
- %tmp15115 = getelementptr inbounds float* %tmp15114, i64 1
- %tmp15116 = getelementptr inbounds float* %tmp15115, i64 1
- %tmp15117 = getelementptr inbounds float* %tmp15116, i64 1
- %tmp15118 = getelementptr inbounds float* %tmp15117, i64 1
- %tmp15119 = getelementptr inbounds float* %tmp15118, i64 1
- %tmp15120 = getelementptr inbounds float* %tmp15119, i64 1
- %tmp15121 = getelementptr inbounds float* %tmp15120, i64 1
- %tmp15122 = getelementptr inbounds float* %tmp15121, i64 1
- %tmp15123 = getelementptr inbounds float* %tmp15122, i64 1
- %tmp15124 = getelementptr inbounds float* %tmp15123, i64 1
- %tmp15125 = getelementptr inbounds float* %tmp15124, i64 1
- %tmp15126 = getelementptr inbounds float* %tmp15125, i64 1
- %tmp15127 = getelementptr inbounds float* %tmp15126, i64 1
- %tmp15128 = getelementptr inbounds float* %tmp15127, i64 1
- %tmp15129 = getelementptr inbounds float* %tmp15128, i64 1
- %tmp15130 = getelementptr inbounds float* %tmp15129, i64 1
- %tmp15131 = getelementptr inbounds float* %tmp15130, i64 1
- %tmp15132 = getelementptr inbounds float* %tmp15131, i64 1
- %tmp15133 = getelementptr inbounds float* %tmp15132, i64 1
- %tmp15134 = getelementptr inbounds float* %tmp15133, i64 1
- %tmp15135 = getelementptr inbounds float* %tmp15134, i64 1
- %tmp15136 = getelementptr inbounds float* %tmp15135, i64 1
- %tmp15137 = getelementptr inbounds float* %tmp15136, i64 1
- %tmp15138 = getelementptr inbounds float* %tmp15137, i64 1
- %tmp15139 = getelementptr inbounds float* %tmp15138, i64 1
- %tmp15140 = getelementptr inbounds float* %tmp15139, i64 1
- %tmp15141 = getelementptr inbounds float* %tmp15140, i64 1
- %tmp15142 = getelementptr inbounds float* %tmp15141, i64 1
- %tmp15143 = getelementptr inbounds float* %tmp15142, i64 1
- %tmp15144 = getelementptr inbounds float* %tmp15143, i64 1
- %tmp15145 = getelementptr inbounds float* %tmp15144, i64 1
- %tmp15146 = getelementptr inbounds float* %tmp15145, i64 1
- %tmp15147 = getelementptr inbounds float* %tmp15146, i64 1
- %tmp15148 = getelementptr inbounds float* %tmp15147, i64 1
- %tmp15149 = getelementptr inbounds float* %tmp15148, i64 1
- %tmp15150 = getelementptr inbounds float* %tmp15149, i64 1
- %tmp15151 = getelementptr inbounds float* %tmp15150, i64 1
- %tmp15152 = getelementptr inbounds float* %tmp15151, i64 1
- %tmp15153 = getelementptr inbounds float* %tmp15152, i64 1
- %tmp15154 = getelementptr inbounds float* %tmp15153, i64 1
- %tmp15155 = getelementptr inbounds float* %tmp15154, i64 1
- %tmp15156 = getelementptr inbounds float* %tmp15155, i64 1
- %tmp15157 = getelementptr inbounds float* %tmp15156, i64 1
- %tmp15158 = getelementptr inbounds float* %tmp15157, i64 1
- %tmp15159 = getelementptr inbounds float* %tmp15158, i64 1
- %tmp15160 = getelementptr inbounds float* %tmp15159, i64 1
- %tmp15161 = getelementptr inbounds float* %tmp15160, i64 1
- %tmp15162 = getelementptr inbounds float* %tmp15161, i64 1
- %tmp15163 = getelementptr inbounds float* %tmp15162, i64 1
- %tmp15164 = getelementptr inbounds float* %tmp15163, i64 1
- %tmp15165 = getelementptr inbounds float* %tmp15164, i64 1
- %tmp15166 = getelementptr inbounds float* %tmp15165, i64 1
- %tmp15167 = getelementptr inbounds float* %tmp15166, i64 1
- %tmp15168 = getelementptr inbounds float* %tmp15167, i64 1
- %tmp15169 = getelementptr inbounds float* %tmp15168, i64 1
- %tmp15170 = getelementptr inbounds float* %tmp15169, i64 1
- %tmp15171 = getelementptr inbounds float* %tmp15170, i64 1
- %tmp15172 = getelementptr inbounds float* %tmp15171, i64 1
- %tmp15173 = getelementptr inbounds float* %tmp15172, i64 1
- %tmp15174 = getelementptr inbounds float* %tmp15173, i64 1
- %tmp15175 = getelementptr inbounds float* %tmp15174, i64 1
- %tmp15176 = getelementptr inbounds float* %tmp15175, i64 1
- %tmp15177 = getelementptr inbounds float* %tmp15176, i64 1
- %tmp15178 = getelementptr inbounds float* %tmp15177, i64 1
- %tmp15179 = getelementptr inbounds float* %tmp15178, i64 1
- %tmp15180 = getelementptr inbounds float* %tmp15179, i64 1
- %tmp15181 = getelementptr inbounds float* %tmp15180, i64 1
- %tmp15182 = getelementptr inbounds float* %tmp15181, i64 1
- %tmp15183 = getelementptr inbounds float* %tmp15182, i64 1
- %tmp15184 = getelementptr inbounds float* %tmp15183, i64 1
- %tmp15185 = getelementptr inbounds float* %tmp15184, i64 1
- %tmp15186 = getelementptr inbounds float* %tmp15185, i64 1
- %tmp15187 = getelementptr inbounds float* %tmp15186, i64 1
- %tmp15188 = getelementptr inbounds float* %tmp15187, i64 1
- %tmp15189 = getelementptr inbounds float* %tmp15188, i64 1
- %tmp15190 = getelementptr inbounds float* %tmp15189, i64 1
- %tmp15191 = getelementptr inbounds float* %tmp15190, i64 1
- %tmp15192 = getelementptr inbounds float* %tmp15191, i64 1
- %tmp15193 = getelementptr inbounds float* %tmp15192, i64 1
- %tmp15194 = getelementptr inbounds float* %tmp15193, i64 1
- %tmp15195 = getelementptr inbounds float* %tmp15194, i64 1
- %tmp15196 = getelementptr inbounds float* %tmp15195, i64 1
- %tmp15197 = getelementptr inbounds float* %tmp15196, i64 1
- %tmp15198 = getelementptr inbounds float* %tmp15197, i64 1
- %tmp15199 = getelementptr inbounds float* %tmp15198, i64 1
- %tmp15200 = getelementptr inbounds float* %tmp15199, i64 1
- %tmp15201 = getelementptr inbounds float* %tmp15200, i64 1
- %tmp15202 = getelementptr inbounds float* %tmp15201, i64 1
- %tmp15203 = getelementptr inbounds float* %tmp15202, i64 1
- %tmp15204 = getelementptr inbounds float* %tmp15203, i64 1
- %tmp15205 = getelementptr inbounds float* %tmp15204, i64 1
- %tmp15206 = getelementptr inbounds float* %tmp15205, i64 1
- %tmp15207 = getelementptr inbounds float* %tmp15206, i64 1
- %tmp15208 = getelementptr inbounds float* %tmp15207, i64 1
- %tmp15209 = getelementptr inbounds float* %tmp15208, i64 1
- %tmp15210 = getelementptr inbounds float* %tmp15209, i64 1
- %tmp15211 = getelementptr inbounds float* %tmp15210, i64 1
- %tmp15212 = getelementptr inbounds float* %tmp15211, i64 1
- %tmp15213 = getelementptr inbounds float* %tmp15212, i64 1
- %tmp15214 = getelementptr inbounds float* %tmp15213, i64 1
- %tmp15215 = getelementptr inbounds float* %tmp15214, i64 1
- %tmp15216 = getelementptr inbounds float* %tmp15215, i64 1
- %tmp15217 = getelementptr inbounds float* %tmp15216, i64 1
- %tmp15218 = getelementptr inbounds float* %tmp15217, i64 1
- %tmp15219 = getelementptr inbounds float* %tmp15218, i64 1
- %tmp15220 = getelementptr inbounds float* %tmp15219, i64 1
- %tmp15221 = getelementptr inbounds float* %tmp15220, i64 1
- %tmp15222 = getelementptr inbounds float* %tmp15221, i64 1
- %tmp15223 = getelementptr inbounds float* %tmp15222, i64 1
- %tmp15224 = getelementptr inbounds float* %tmp15223, i64 1
- %tmp15225 = getelementptr inbounds float* %tmp15224, i64 1
- %tmp15226 = getelementptr inbounds float* %tmp15225, i64 1
- %tmp15227 = getelementptr inbounds float* %tmp15226, i64 1
- %tmp15228 = getelementptr inbounds float* %tmp15227, i64 1
- %tmp15229 = getelementptr inbounds float* %tmp15228, i64 1
- %tmp15230 = getelementptr inbounds float* %tmp15229, i64 1
- %tmp15231 = getelementptr inbounds float* %tmp15230, i64 1
- %tmp15232 = getelementptr inbounds float* %tmp15231, i64 1
- %tmp15233 = getelementptr inbounds float* %tmp15232, i64 1
- %tmp15234 = getelementptr inbounds float* %tmp15233, i64 1
- %tmp15235 = getelementptr inbounds float* %tmp15234, i64 1
- %tmp15236 = getelementptr inbounds float* %tmp15235, i64 1
- %tmp15237 = getelementptr inbounds float* %tmp15236, i64 1
- %tmp15238 = getelementptr inbounds float* %tmp15237, i64 1
- %tmp15239 = getelementptr inbounds float* %tmp15238, i64 1
- %tmp15240 = getelementptr inbounds float* %tmp15239, i64 1
- %tmp15241 = getelementptr inbounds float* %tmp15240, i64 1
- %tmp15242 = getelementptr inbounds float* %tmp15241, i64 1
- %tmp15243 = getelementptr inbounds float* %tmp15242, i64 1
- %tmp15244 = getelementptr inbounds float* %tmp15243, i64 1
- %tmp15245 = getelementptr inbounds float* %tmp15244, i64 1
- %tmp15246 = getelementptr inbounds float* %tmp15245, i64 1
- %tmp15247 = getelementptr inbounds float* %tmp15246, i64 1
- %tmp15248 = getelementptr inbounds float* %tmp15247, i64 1
- %tmp15249 = getelementptr inbounds float* %tmp15248, i64 1
- %tmp15250 = getelementptr inbounds float* %tmp15249, i64 1
- %tmp15251 = getelementptr inbounds float* %tmp15250, i64 1
- %tmp15252 = getelementptr inbounds float* %tmp15251, i64 1
- %tmp15253 = getelementptr inbounds float* %tmp15252, i64 1
- %tmp15254 = getelementptr inbounds float* %tmp15253, i64 1
- %tmp15255 = getelementptr inbounds float* %tmp15254, i64 1
- %tmp15256 = getelementptr inbounds float* %tmp15255, i64 1
- %tmp15257 = getelementptr inbounds float* %tmp15256, i64 1
- %tmp15258 = getelementptr inbounds float* %tmp15257, i64 1
- %tmp15259 = getelementptr inbounds float* %tmp15258, i64 1
- %tmp15260 = getelementptr inbounds float* %tmp15259, i64 1
- %tmp15261 = getelementptr inbounds float* %tmp15260, i64 1
- %tmp15262 = getelementptr inbounds float* %tmp15261, i64 1
- %tmp15263 = getelementptr inbounds float* %tmp15262, i64 1
- %tmp15264 = getelementptr inbounds float* %tmp15263, i64 1
- %tmp15265 = getelementptr inbounds float* %tmp15264, i64 1
- %tmp15266 = getelementptr inbounds float* %tmp15265, i64 1
- %tmp15267 = getelementptr inbounds float* %tmp15266, i64 1
- %tmp15268 = getelementptr inbounds float* %tmp15267, i64 1
- %tmp15269 = getelementptr inbounds float* %tmp15268, i64 1
- %tmp15270 = getelementptr inbounds float* %tmp15269, i64 1
- %tmp15271 = getelementptr inbounds float* %tmp15270, i64 1
- %tmp15272 = getelementptr inbounds float* %tmp15271, i64 1
- %tmp15273 = getelementptr inbounds float* %tmp15272, i64 1
- %tmp15274 = getelementptr inbounds float* %tmp15273, i64 1
- %tmp15275 = getelementptr inbounds float* %tmp15274, i64 1
- %tmp15276 = getelementptr inbounds float* %tmp15275, i64 1
- %tmp15277 = getelementptr inbounds float* %tmp15276, i64 1
- %tmp15278 = getelementptr inbounds float* %tmp15277, i64 1
- %tmp15279 = getelementptr inbounds float* %tmp15278, i64 1
- %tmp15280 = getelementptr inbounds float* %tmp15279, i64 1
- %tmp15281 = getelementptr inbounds float* %tmp15280, i64 1
- %tmp15282 = getelementptr inbounds float* %tmp15281, i64 1
- %tmp15283 = getelementptr inbounds float* %tmp15282, i64 1
- %tmp15284 = getelementptr inbounds float* %tmp15283, i64 1
- %tmp15285 = getelementptr inbounds float* %tmp15284, i64 1
- %tmp15286 = getelementptr inbounds float* %tmp15285, i64 1
- %tmp15287 = getelementptr inbounds float* %tmp15286, i64 1
- %tmp15288 = getelementptr inbounds float* %tmp15287, i64 1
- %tmp15289 = getelementptr inbounds float* %tmp15288, i64 1
- %tmp15290 = getelementptr inbounds float* %tmp15289, i64 1
- %tmp15291 = getelementptr inbounds float* %tmp15290, i64 1
- %tmp15292 = getelementptr inbounds float* %tmp15291, i64 1
- %tmp15293 = getelementptr inbounds float* %tmp15292, i64 1
- %tmp15294 = getelementptr inbounds float* %tmp15293, i64 1
- %tmp15295 = getelementptr inbounds float* %tmp15294, i64 1
- %tmp15296 = getelementptr inbounds float* %tmp15295, i64 1
- %tmp15297 = getelementptr inbounds float* %tmp15296, i64 1
- %tmp15298 = getelementptr inbounds float* %tmp15297, i64 1
- %tmp15299 = getelementptr inbounds float* %tmp15298, i64 1
- %tmp15300 = getelementptr inbounds float* %tmp15299, i64 1
- %tmp15301 = getelementptr inbounds float* %tmp15300, i64 1
- %tmp15302 = getelementptr inbounds float* %tmp15301, i64 1
- %tmp15303 = getelementptr inbounds float* %tmp15302, i64 1
- %tmp15304 = getelementptr inbounds float* %tmp15303, i64 1
- %tmp15305 = getelementptr inbounds float* %tmp15304, i64 1
- %tmp15306 = getelementptr inbounds float* %tmp15305, i64 1
- %tmp15307 = getelementptr inbounds float* %tmp15306, i64 1
- %tmp15308 = getelementptr inbounds float* %tmp15307, i64 1
- %tmp15309 = getelementptr inbounds float* %tmp15308, i64 1
- %tmp15310 = getelementptr inbounds float* %tmp15309, i64 1
- %tmp15311 = getelementptr inbounds float* %tmp15310, i64 1
- %tmp15312 = getelementptr inbounds float* %tmp15311, i64 1
- %tmp15313 = getelementptr inbounds float* %tmp15312, i64 1
- %tmp15314 = getelementptr inbounds float* %tmp15313, i64 1
- %tmp15315 = getelementptr inbounds float* %tmp15314, i64 1
- %tmp15316 = getelementptr inbounds float* %tmp15315, i64 1
- %tmp15317 = getelementptr inbounds float* %tmp15316, i64 1
- %tmp15318 = getelementptr inbounds float* %tmp15317, i64 1
- %tmp15319 = getelementptr inbounds float* %tmp15318, i64 1
- %tmp15320 = getelementptr inbounds float* %tmp15319, i64 1
- %tmp15321 = getelementptr inbounds float* %tmp15320, i64 1
- %tmp15322 = getelementptr inbounds float* %tmp15321, i64 1
- %tmp15323 = getelementptr inbounds float* %tmp15322, i64 1
- %tmp15324 = getelementptr inbounds float* %tmp15323, i64 1
- %tmp15325 = getelementptr inbounds float* %tmp15324, i64 1
- %tmp15326 = getelementptr inbounds float* %tmp15325, i64 1
- %tmp15327 = getelementptr inbounds float* %tmp15326, i64 1
- %tmp15328 = getelementptr inbounds float* %tmp15327, i64 1
- %tmp15329 = getelementptr inbounds float* %tmp15328, i64 1
- %tmp15330 = getelementptr inbounds float* %tmp15329, i64 1
- %tmp15331 = getelementptr inbounds float* %tmp15330, i64 1
- %tmp15332 = getelementptr inbounds float* %tmp15331, i64 1
- %tmp15333 = getelementptr inbounds float* %tmp15332, i64 1
- %tmp15334 = getelementptr inbounds float* %tmp15333, i64 1
- %tmp15335 = getelementptr inbounds float* %tmp15334, i64 1
- %tmp15336 = getelementptr inbounds float* %tmp15335, i64 1
- %tmp15337 = getelementptr inbounds float* %tmp15336, i64 1
- %tmp15338 = getelementptr inbounds float* %tmp15337, i64 1
- %tmp15339 = getelementptr inbounds float* %tmp15338, i64 1
- %tmp15340 = getelementptr inbounds float* %tmp15339, i64 1
- %tmp15341 = getelementptr inbounds float* %tmp15340, i64 1
- %tmp15342 = getelementptr inbounds float* %tmp15341, i64 1
- %tmp15343 = getelementptr inbounds float* %tmp15342, i64 1
- %tmp15344 = getelementptr inbounds float* %tmp15343, i64 1
- %tmp15345 = getelementptr inbounds float* %tmp15344, i64 1
- %tmp15346 = getelementptr inbounds float* %tmp15345, i64 1
- %tmp15347 = getelementptr inbounds float* %tmp15346, i64 1
- %tmp15348 = getelementptr inbounds float* %tmp15347, i64 1
- %tmp15349 = getelementptr inbounds float* %tmp15348, i64 1
- %tmp15350 = getelementptr inbounds float* %tmp15349, i64 1
- %tmp15351 = getelementptr inbounds float* %tmp15350, i64 1
- %tmp15352 = getelementptr inbounds float* %tmp15351, i64 1
- %tmp15353 = getelementptr inbounds float* %tmp15352, i64 1
- %tmp15354 = getelementptr inbounds float* %tmp15353, i64 1
- %tmp15355 = getelementptr inbounds float* %tmp15354, i64 1
- %tmp15356 = getelementptr inbounds float* %tmp15355, i64 1
- %tmp15357 = getelementptr inbounds float* %tmp15356, i64 1
- %tmp15358 = getelementptr inbounds float* %tmp15357, i64 1
- %tmp15359 = getelementptr inbounds float* %tmp15358, i64 1
- %tmp15360 = getelementptr inbounds float* %tmp15359, i64 1
- %tmp15361 = getelementptr inbounds float* %tmp15360, i64 1
- %tmp15362 = getelementptr inbounds float* %tmp15361, i64 1
- %tmp15363 = getelementptr inbounds float* %tmp15362, i64 1
- %tmp15364 = getelementptr inbounds float* %tmp15363, i64 1
- %tmp15365 = getelementptr inbounds float* %tmp15364, i64 1
- %tmp15366 = getelementptr inbounds float* %tmp15365, i64 1
- %tmp15367 = getelementptr inbounds float* %tmp15366, i64 1
- %tmp15368 = getelementptr inbounds float* %tmp15367, i64 1
- %tmp15369 = getelementptr inbounds float* %tmp15368, i64 1
- %tmp15370 = getelementptr inbounds float* %tmp15369, i64 1
- %tmp15371 = getelementptr inbounds float* %tmp15370, i64 1
- %tmp15372 = getelementptr inbounds float* %tmp15371, i64 1
- %tmp15373 = getelementptr inbounds float* %tmp15372, i64 1
- %tmp15374 = getelementptr inbounds float* %tmp15373, i64 1
- %tmp15375 = getelementptr inbounds float* %tmp15374, i64 1
- %tmp15376 = getelementptr inbounds float* %tmp15375, i64 1
- %tmp15377 = getelementptr inbounds float* %tmp15376, i64 1
- %tmp15378 = getelementptr inbounds float* %tmp15377, i64 1
- %tmp15379 = getelementptr inbounds float* %tmp15378, i64 1
- %tmp15380 = getelementptr inbounds float* %tmp15379, i64 1
- %tmp15381 = getelementptr inbounds float* %tmp15380, i64 1
- %tmp15382 = getelementptr inbounds float* %tmp15381, i64 1
- %tmp15383 = getelementptr inbounds float* %tmp15382, i64 1
- %tmp15384 = getelementptr inbounds float* %tmp15383, i64 1
- %tmp15385 = getelementptr inbounds float* %tmp15384, i64 1
- %tmp15386 = getelementptr inbounds float* %tmp15385, i64 1
- %tmp15387 = getelementptr inbounds float* %tmp15386, i64 1
- %tmp15388 = getelementptr inbounds float* %tmp15387, i64 1
- %tmp15389 = getelementptr inbounds float* %tmp15388, i64 1
- %tmp15390 = getelementptr inbounds float* %tmp15389, i64 1
- %tmp15391 = getelementptr inbounds float* %tmp15390, i64 1
- %tmp15392 = getelementptr inbounds float* %tmp15391, i64 1
- %tmp15393 = getelementptr inbounds float* %tmp15392, i64 1
- %tmp15394 = getelementptr inbounds float* %tmp15393, i64 1
- %tmp15395 = getelementptr inbounds float* %tmp15394, i64 1
- %tmp15396 = getelementptr inbounds float* %tmp15395, i64 1
- %tmp15397 = getelementptr inbounds float* %tmp15396, i64 1
- %tmp15398 = getelementptr inbounds float* %tmp15397, i64 1
- %tmp15399 = getelementptr inbounds float* %tmp15398, i64 1
- %tmp15400 = getelementptr inbounds float* %tmp15399, i64 1
- %tmp15401 = getelementptr inbounds float* %tmp15400, i64 1
- %tmp15402 = getelementptr inbounds float* %tmp15401, i64 1
- %tmp15403 = getelementptr inbounds float* %tmp15402, i64 1
- %tmp15404 = getelementptr inbounds float* %tmp15403, i64 1
- %tmp15405 = getelementptr inbounds float* %tmp15404, i64 1
- %tmp15406 = getelementptr inbounds float* %tmp15405, i64 1
- %tmp15407 = getelementptr inbounds float* %tmp15406, i64 1
- %tmp15408 = getelementptr inbounds float* %tmp15407, i64 1
- %tmp15409 = getelementptr inbounds float* %tmp15408, i64 1
- %tmp15410 = getelementptr inbounds float* %tmp15409, i64 1
- %tmp15411 = getelementptr inbounds float* %tmp15410, i64 1
- %tmp15412 = getelementptr inbounds float* %tmp15411, i64 1
- %tmp15413 = getelementptr inbounds float* %tmp15412, i64 1
- %tmp15414 = getelementptr inbounds float* %tmp15413, i64 1
- %tmp15415 = getelementptr inbounds float* %tmp15414, i64 1
- %tmp15416 = getelementptr inbounds float* %tmp15415, i64 1
- %tmp15417 = getelementptr inbounds float* %tmp15416, i64 1
- %tmp15418 = getelementptr inbounds float* %tmp15417, i64 1
- %tmp15419 = getelementptr inbounds float* %tmp15418, i64 1
- %tmp15420 = getelementptr inbounds float* %tmp15419, i64 1
- %tmp15421 = getelementptr inbounds float* %tmp15420, i64 1
- %tmp15422 = getelementptr inbounds float* %tmp15421, i64 1
- %tmp15423 = getelementptr inbounds float* %tmp15422, i64 1
- %tmp15424 = getelementptr inbounds float* %tmp15423, i64 1
- %tmp15425 = getelementptr inbounds float* %tmp15424, i64 1
- %tmp15426 = getelementptr inbounds float* %tmp15425, i64 1
- %tmp15427 = getelementptr inbounds float* %tmp15426, i64 1
- %tmp15428 = getelementptr inbounds float* %tmp15427, i64 1
- %tmp15429 = getelementptr inbounds float* %tmp15428, i64 1
- %tmp15430 = getelementptr inbounds float* %tmp15429, i64 1
- %tmp15431 = getelementptr inbounds float* %tmp15430, i64 1
- %tmp15432 = getelementptr inbounds float* %tmp15431, i64 1
- %tmp15433 = getelementptr inbounds float* %tmp15432, i64 1
- %tmp15434 = getelementptr inbounds float* %tmp15433, i64 1
- %tmp15435 = getelementptr inbounds float* %tmp15434, i64 1
- %tmp15436 = getelementptr inbounds float* %tmp15435, i64 1
- %tmp15437 = getelementptr inbounds float* %tmp15436, i64 1
- %tmp15438 = getelementptr inbounds float* %tmp15437, i64 1
- %tmp15439 = getelementptr inbounds float* %tmp15438, i64 1
- %tmp15440 = getelementptr inbounds float* %tmp15439, i64 1
- %tmp15441 = getelementptr inbounds float* %tmp15440, i64 1
- %tmp15442 = getelementptr inbounds float* %tmp15441, i64 1
- %tmp15443 = getelementptr inbounds float* %tmp15442, i64 1
- %tmp15444 = getelementptr inbounds float* %tmp15443, i64 1
- %tmp15445 = getelementptr inbounds float* %tmp15444, i64 1
- %tmp15446 = getelementptr inbounds float* %tmp15445, i64 1
- %tmp15447 = getelementptr inbounds float* %tmp15446, i64 1
- %tmp15448 = getelementptr inbounds float* %tmp15447, i64 1
- %tmp15449 = getelementptr inbounds float* %tmp15448, i64 1
- %tmp15450 = getelementptr inbounds float* %tmp15449, i64 1
- %tmp15451 = getelementptr inbounds float* %tmp15450, i64 1
- %tmp15452 = getelementptr inbounds float* %tmp15451, i64 1
- %tmp15453 = getelementptr inbounds float* %tmp15452, i64 1
- %tmp15454 = getelementptr inbounds float* %tmp15453, i64 1
- %tmp15455 = getelementptr inbounds float* %tmp15454, i64 1
- %tmp15456 = getelementptr inbounds float* %tmp15455, i64 1
- %tmp15457 = getelementptr inbounds float* %tmp15456, i64 1
- %tmp15458 = getelementptr inbounds float* %tmp15457, i64 1
- %tmp15459 = getelementptr inbounds float* %tmp15458, i64 1
- %tmp15460 = getelementptr inbounds float* %tmp15459, i64 1
- %tmp15461 = getelementptr inbounds float* %tmp15460, i64 1
- %tmp15462 = getelementptr inbounds float* %tmp15461, i64 1
- %tmp15463 = getelementptr inbounds float* %tmp15462, i64 1
- %tmp15464 = getelementptr inbounds float* %tmp15463, i64 1
- %tmp15465 = getelementptr inbounds float* %tmp15464, i64 1
- %tmp15466 = getelementptr inbounds float* %tmp15465, i64 1
- %tmp15467 = getelementptr inbounds float* %tmp15466, i64 1
- %tmp15468 = getelementptr inbounds float* %tmp15467, i64 1
- %tmp15469 = getelementptr inbounds float* %tmp15468, i64 1
- %tmp15470 = getelementptr inbounds float* %tmp15469, i64 1
- %tmp15471 = getelementptr inbounds float* %tmp15470, i64 1
- %tmp15472 = getelementptr inbounds float* %tmp15471, i64 1
- %tmp15473 = getelementptr inbounds float* %tmp15472, i64 1
- %tmp15474 = getelementptr inbounds float* %tmp15473, i64 1
- %tmp15475 = getelementptr inbounds float* %tmp15474, i64 1
- %tmp15476 = getelementptr inbounds float* %tmp15475, i64 1
- %tmp15477 = getelementptr inbounds float* %tmp15476, i64 1
- %tmp15478 = getelementptr inbounds float* %tmp15477, i64 1
- %tmp15479 = getelementptr inbounds float* %tmp15478, i64 1
- %tmp15480 = getelementptr inbounds float* %tmp15479, i64 1
- %tmp15481 = getelementptr inbounds float* %tmp15480, i64 1
- %tmp15482 = getelementptr inbounds float* %tmp15481, i64 1
- %tmp15483 = getelementptr inbounds float* %tmp15482, i64 1
- %tmp15484 = getelementptr inbounds float* %tmp15483, i64 1
- %tmp15485 = getelementptr inbounds float* %tmp15484, i64 1
- %tmp15486 = getelementptr inbounds float* %tmp15485, i64 1
- %tmp15487 = getelementptr inbounds float* %tmp15486, i64 1
- %tmp15488 = getelementptr inbounds float* %tmp15487, i64 1
- %tmp15489 = getelementptr inbounds float* %tmp15488, i64 1
- %tmp15490 = getelementptr inbounds float* %tmp15489, i64 1
- %tmp15491 = getelementptr inbounds float* %tmp15490, i64 1
- %tmp15492 = getelementptr inbounds float* %tmp15491, i64 1
- %tmp15493 = getelementptr inbounds float* %tmp15492, i64 1
- %tmp15494 = getelementptr inbounds float* %tmp15493, i64 1
- %tmp15495 = getelementptr inbounds float* %tmp15494, i64 1
- %tmp15496 = getelementptr inbounds float* %tmp15495, i64 1
- %tmp15497 = getelementptr inbounds float* %tmp15496, i64 1
- %tmp15498 = getelementptr inbounds float* %tmp15497, i64 1
- %tmp15499 = getelementptr inbounds float* %tmp15498, i64 1
- %tmp15500 = getelementptr inbounds float* %tmp15499, i64 1
- %tmp15501 = getelementptr inbounds float* %tmp15500, i64 1
- %tmp15502 = getelementptr inbounds float* %tmp15501, i64 1
- %tmp15503 = getelementptr inbounds float* %tmp15502, i64 1
- %tmp15504 = getelementptr inbounds float* %tmp15503, i64 1
- %tmp15505 = getelementptr inbounds float* %tmp15504, i64 1
- %tmp15506 = getelementptr inbounds float* %tmp15505, i64 1
- %tmp15507 = getelementptr inbounds float* %tmp15506, i64 1
- %tmp15508 = getelementptr inbounds float* %tmp15507, i64 1
- %tmp15509 = getelementptr inbounds float* %tmp15508, i64 1
- %tmp15510 = getelementptr inbounds float* %tmp15509, i64 1
- %tmp15511 = getelementptr inbounds float* %tmp15510, i64 1
- %tmp15512 = getelementptr inbounds float* %tmp15511, i64 1
- %tmp15513 = getelementptr inbounds float* %tmp15512, i64 1
- %tmp15514 = getelementptr inbounds float* %tmp15513, i64 1
- %tmp15515 = getelementptr inbounds float* %tmp15514, i64 1
- %tmp15516 = getelementptr inbounds float* %tmp15515, i64 1
- %tmp15517 = getelementptr inbounds float* %tmp15516, i64 1
- %tmp15518 = getelementptr inbounds float* %tmp15517, i64 1
- %tmp15519 = getelementptr inbounds float* %tmp15518, i64 1
- %tmp15520 = getelementptr inbounds float* %tmp15519, i64 1
- %tmp15521 = getelementptr inbounds float* %tmp15520, i64 1
- %tmp15522 = getelementptr inbounds float* %tmp15521, i64 1
- %tmp15523 = getelementptr inbounds float* %tmp15522, i64 1
- %tmp15524 = getelementptr inbounds float* %tmp15523, i64 1
- %tmp15525 = getelementptr inbounds float* %tmp15524, i64 1
- %tmp15526 = getelementptr inbounds float* %tmp15525, i64 1
- %tmp15527 = getelementptr inbounds float* %tmp15526, i64 1
- %tmp15528 = getelementptr inbounds float* %tmp15527, i64 1
- %tmp15529 = getelementptr inbounds float* %tmp15528, i64 1
- %tmp15530 = getelementptr inbounds float* %tmp15529, i64 1
- %tmp15531 = getelementptr inbounds float* %tmp15530, i64 1
- %tmp15532 = getelementptr inbounds float* %tmp15531, i64 1
- %tmp15533 = getelementptr inbounds float* %tmp15532, i64 1
- %tmp15534 = getelementptr inbounds float* %tmp15533, i64 1
- %tmp15535 = getelementptr inbounds float* %tmp15534, i64 1
- %tmp15536 = getelementptr inbounds float* %tmp15535, i64 1
- %tmp15537 = getelementptr inbounds float* %tmp15536, i64 1
- %tmp15538 = getelementptr inbounds float* %tmp15537, i64 1
- %tmp15539 = getelementptr inbounds float* %tmp15538, i64 1
- %tmp15540 = getelementptr inbounds float* %tmp15539, i64 1
- %tmp15541 = getelementptr inbounds float* %tmp15540, i64 1
- %tmp15542 = getelementptr inbounds float* %tmp15541, i64 1
- %tmp15543 = getelementptr inbounds float* %tmp15542, i64 1
- %tmp15544 = getelementptr inbounds float* %tmp15543, i64 1
- %tmp15545 = getelementptr inbounds float* %tmp15544, i64 1
- %tmp15546 = getelementptr inbounds float* %tmp15545, i64 1
- %tmp15547 = getelementptr inbounds float* %tmp15546, i64 1
- %tmp15548 = getelementptr inbounds float* %tmp15547, i64 1
- %tmp15549 = getelementptr inbounds float* %tmp15548, i64 1
- %tmp15550 = getelementptr inbounds float* %tmp15549, i64 1
- %tmp15551 = getelementptr inbounds float* %tmp15550, i64 1
- %tmp15552 = getelementptr inbounds float* %tmp15551, i64 1
- %tmp15553 = getelementptr inbounds float* %tmp15552, i64 1
- %tmp15554 = getelementptr inbounds float* %tmp15553, i64 1
- %tmp15555 = getelementptr inbounds float* %tmp15554, i64 1
- %tmp15556 = getelementptr inbounds float* %tmp15555, i64 1
- %tmp15557 = getelementptr inbounds float* %tmp15556, i64 1
- %tmp15558 = getelementptr inbounds float* %tmp15557, i64 1
- %tmp15559 = getelementptr inbounds float* %tmp15558, i64 1
- %tmp15560 = getelementptr inbounds float* %tmp15559, i64 1
- %tmp15561 = getelementptr inbounds float* %tmp15560, i64 1
- %tmp15562 = getelementptr inbounds float* %tmp15561, i64 1
- %tmp15563 = getelementptr inbounds float* %tmp15562, i64 1
- %tmp15564 = getelementptr inbounds float* %tmp15563, i64 1
- %tmp15565 = getelementptr inbounds float* %tmp15564, i64 1
- %tmp15566 = getelementptr inbounds float* %tmp15565, i64 1
- %tmp15567 = getelementptr inbounds float* %tmp15566, i64 1
- %tmp15568 = getelementptr inbounds float* %tmp15567, i64 1
- %tmp15569 = getelementptr inbounds float* %tmp15568, i64 1
- %tmp15570 = getelementptr inbounds float* %tmp15569, i64 1
- %tmp15571 = getelementptr inbounds float* %tmp15570, i64 1
- %tmp15572 = getelementptr inbounds float* %tmp15571, i64 1
- %tmp15573 = getelementptr inbounds float* %tmp15572, i64 1
- %tmp15574 = getelementptr inbounds float* %tmp15573, i64 1
- %tmp15575 = getelementptr inbounds float* %tmp15574, i64 1
- %tmp15576 = getelementptr inbounds float* %tmp15575, i64 1
- %tmp15577 = getelementptr inbounds float* %tmp15576, i64 1
- %tmp15578 = getelementptr inbounds float* %tmp15577, i64 1
- %tmp15579 = getelementptr inbounds float* %tmp15578, i64 1
- %tmp15580 = getelementptr inbounds float* %tmp15579, i64 1
- %tmp15581 = getelementptr inbounds float* %tmp15580, i64 1
- %tmp15582 = getelementptr inbounds float* %tmp15581, i64 1
- %tmp15583 = getelementptr inbounds float* %tmp15582, i64 1
- %tmp15584 = getelementptr inbounds float* %tmp15583, i64 1
- %tmp15585 = getelementptr inbounds float* %tmp15584, i64 1
- %tmp15586 = getelementptr inbounds float* %tmp15585, i64 1
- %tmp15587 = getelementptr inbounds float* %tmp15586, i64 1
- %tmp15588 = getelementptr inbounds float* %tmp15587, i64 1
- %tmp15589 = getelementptr inbounds float* %tmp15588, i64 1
- %tmp15590 = getelementptr inbounds float* %tmp15589, i64 1
- %tmp15591 = getelementptr inbounds float* %tmp15590, i64 1
- %tmp15592 = getelementptr inbounds float* %tmp15591, i64 1
- %tmp15593 = getelementptr inbounds float* %tmp15592, i64 1
- %tmp15594 = getelementptr inbounds float* %tmp15593, i64 1
- %tmp15595 = getelementptr inbounds float* %tmp15594, i64 1
- %tmp15596 = getelementptr inbounds float* %tmp15595, i64 1
- %tmp15597 = getelementptr inbounds float* %tmp15596, i64 1
- %tmp15598 = getelementptr inbounds float* %tmp15597, i64 1
- %tmp15599 = getelementptr inbounds float* %tmp15598, i64 1
- %tmp15600 = getelementptr inbounds float* %tmp15599, i64 1
- %tmp15601 = getelementptr inbounds float* %tmp15600, i64 1
- %tmp15602 = getelementptr inbounds float* %tmp15601, i64 1
- %tmp15603 = getelementptr inbounds float* %tmp15602, i64 1
- %tmp15604 = getelementptr inbounds float* %tmp15603, i64 1
- %tmp15605 = getelementptr inbounds float* %tmp15604, i64 1
- %tmp15606 = getelementptr inbounds float* %tmp15605, i64 1
- %tmp15607 = getelementptr inbounds float* %tmp15606, i64 1
- %tmp15608 = getelementptr inbounds float* %tmp15607, i64 1
- %tmp15609 = getelementptr inbounds float* %tmp15608, i64 1
- %tmp15610 = getelementptr inbounds float* %tmp15609, i64 1
- %tmp15611 = getelementptr inbounds float* %tmp15610, i64 1
- %tmp15612 = getelementptr inbounds float* %tmp15611, i64 1
- %tmp15613 = getelementptr inbounds float* %tmp15612, i64 1
- %tmp15614 = getelementptr inbounds float* %tmp15613, i64 1
- %tmp15615 = getelementptr inbounds float* %tmp15614, i64 1
- %tmp15616 = getelementptr inbounds float* %tmp15615, i64 1
- %tmp15617 = getelementptr inbounds float* %tmp15616, i64 1
- %tmp15618 = getelementptr inbounds float* %tmp15617, i64 1
- %tmp15619 = getelementptr inbounds float* %tmp15618, i64 1
- %tmp15620 = getelementptr inbounds float* %tmp15619, i64 1
- %tmp15621 = getelementptr inbounds float* %tmp15620, i64 1
- %tmp15622 = getelementptr inbounds float* %tmp15621, i64 1
- %tmp15623 = getelementptr inbounds float* %tmp15622, i64 1
- %tmp15624 = getelementptr inbounds float* %tmp15623, i64 1
- %tmp15625 = getelementptr inbounds float* %tmp15624, i64 1
- %tmp15626 = getelementptr inbounds float* %tmp15625, i64 1
- %tmp15627 = getelementptr inbounds float* %tmp15626, i64 1
- %tmp15628 = getelementptr inbounds float* %tmp15627, i64 1
- %tmp15629 = getelementptr inbounds float* %tmp15628, i64 1
- %tmp15630 = getelementptr inbounds float* %tmp15629, i64 1
- %tmp15631 = getelementptr inbounds float* %tmp15630, i64 1
- %tmp15632 = getelementptr inbounds float* %tmp15631, i64 1
- %tmp15633 = getelementptr inbounds float* %tmp15632, i64 1
- %tmp15634 = getelementptr inbounds float* %tmp15633, i64 1
- %tmp15635 = getelementptr inbounds float* %tmp15634, i64 1
- %tmp15636 = getelementptr inbounds float* %tmp15635, i64 1
- %tmp15637 = getelementptr inbounds float* %tmp15636, i64 1
- %tmp15638 = getelementptr inbounds float* %tmp15637, i64 1
- %tmp15639 = getelementptr inbounds float* %tmp15638, i64 1
- %tmp15640 = getelementptr inbounds float* %tmp15639, i64 1
- %tmp15641 = getelementptr inbounds float* %tmp15640, i64 1
- %tmp15642 = getelementptr inbounds float* %tmp15641, i64 1
- %tmp15643 = getelementptr inbounds float* %tmp15642, i64 1
- %tmp15644 = getelementptr inbounds float* %tmp15643, i64 1
- %tmp15645 = getelementptr inbounds float* %tmp15644, i64 1
- %tmp15646 = getelementptr inbounds float* %tmp15645, i64 1
- %tmp15647 = getelementptr inbounds float* %tmp15646, i64 1
- %tmp15648 = getelementptr inbounds float* %tmp15647, i64 1
- %tmp15649 = getelementptr inbounds float* %tmp15648, i64 1
- %tmp15650 = getelementptr inbounds float* %tmp15649, i64 1
- %tmp15651 = getelementptr inbounds float* %tmp15650, i64 1
- %tmp15652 = getelementptr inbounds float* %tmp15651, i64 1
- %tmp15653 = getelementptr inbounds float* %tmp15652, i64 1
- %tmp15654 = getelementptr inbounds float* %tmp15653, i64 1
- %tmp15655 = getelementptr inbounds float* %tmp15654, i64 1
- %tmp15656 = getelementptr inbounds float* %tmp15655, i64 1
- %tmp15657 = getelementptr inbounds float* %tmp15656, i64 1
- %tmp15658 = getelementptr inbounds float* %tmp15657, i64 1
- %tmp15659 = getelementptr inbounds float* %tmp15658, i64 1
- %tmp15660 = getelementptr inbounds float* %tmp15659, i64 1
- %tmp15661 = getelementptr inbounds float* %tmp15660, i64 1
- %tmp15662 = getelementptr inbounds float* %tmp15661, i64 1
- %tmp15663 = getelementptr inbounds float* %tmp15662, i64 1
- %tmp15664 = getelementptr inbounds float* %tmp15663, i64 1
- %tmp15665 = getelementptr inbounds float* %tmp15664, i64 1
- %tmp15666 = getelementptr inbounds float* %tmp15665, i64 1
- %tmp15667 = getelementptr inbounds float* %tmp15666, i64 1
- %tmp15668 = getelementptr inbounds float* %tmp15667, i64 1
- %tmp15669 = getelementptr inbounds float* %tmp15668, i64 1
- %tmp15670 = getelementptr inbounds float* %tmp15669, i64 1
- %tmp15671 = getelementptr inbounds float* %tmp15670, i64 1
- %tmp15672 = getelementptr inbounds float* %tmp15671, i64 1
- %tmp15673 = getelementptr inbounds float* %tmp15672, i64 1
- %tmp15674 = getelementptr inbounds float* %tmp15673, i64 1
- %tmp15675 = getelementptr inbounds float* %tmp15674, i64 1
- %tmp15676 = getelementptr inbounds float* %tmp15675, i64 1
- %tmp15677 = getelementptr inbounds float* %tmp15676, i64 1
- %tmp15678 = getelementptr inbounds float* %tmp15677, i64 1
- %tmp15679 = getelementptr inbounds float* %tmp15678, i64 1
- %tmp15680 = getelementptr inbounds float* %tmp15679, i64 1
- %tmp15681 = getelementptr inbounds float* %tmp15680, i64 1
- %tmp15682 = getelementptr inbounds float* %tmp15681, i64 1
- %tmp15683 = getelementptr inbounds float* %tmp15682, i64 1
- %tmp15684 = getelementptr inbounds float* %tmp15683, i64 1
- %tmp15685 = getelementptr inbounds float* %tmp15684, i64 1
- %tmp15686 = getelementptr inbounds float* %tmp15685, i64 1
- %tmp15687 = getelementptr inbounds float* %tmp15686, i64 1
- %tmp15688 = getelementptr inbounds float* %tmp15687, i64 1
- %tmp15689 = getelementptr inbounds float* %tmp15688, i64 1
- %tmp15690 = getelementptr inbounds float* %tmp15689, i64 1
- %tmp15691 = getelementptr inbounds float* %tmp15690, i64 1
- %tmp15692 = getelementptr inbounds float* %tmp15691, i64 1
- %tmp15693 = getelementptr inbounds float* %tmp15692, i64 1
- %tmp15694 = getelementptr inbounds float* %tmp15693, i64 1
- %tmp15695 = getelementptr inbounds float* %tmp15694, i64 1
- %tmp15696 = getelementptr inbounds float* %tmp15695, i64 1
- %tmp15697 = getelementptr inbounds float* %tmp15696, i64 1
- %tmp15698 = getelementptr inbounds float* %tmp15697, i64 1
- %tmp15699 = getelementptr inbounds float* %tmp15698, i64 1
- %tmp15700 = getelementptr inbounds float* %tmp15699, i64 1
- %tmp15701 = getelementptr inbounds float* %tmp15700, i64 1
- %tmp15702 = getelementptr inbounds float* %tmp15701, i64 1
- %tmp15703 = getelementptr inbounds float* %tmp15702, i64 1
- %tmp15704 = getelementptr inbounds float* %tmp15703, i64 1
- %tmp15705 = getelementptr inbounds float* %tmp15704, i64 1
- %tmp15706 = getelementptr inbounds float* %tmp15705, i64 1
- %tmp15707 = getelementptr inbounds float* %tmp15706, i64 1
- %tmp15708 = getelementptr inbounds float* %tmp15707, i64 1
- %tmp15709 = getelementptr inbounds float* %tmp15708, i64 1
- %tmp15710 = getelementptr inbounds float* %tmp15709, i64 1
- %tmp15711 = getelementptr inbounds float* %tmp15710, i64 1
- %tmp15712 = getelementptr inbounds float* %tmp15711, i64 1
- %tmp15713 = getelementptr inbounds float* %tmp15712, i64 1
- %tmp15714 = getelementptr inbounds float* %tmp15713, i64 1
- %tmp15715 = getelementptr inbounds float* %tmp15714, i64 1
- %tmp15716 = getelementptr inbounds float* %tmp15715, i64 1
- %tmp15717 = getelementptr inbounds float* %tmp15716, i64 1
- %tmp15718 = getelementptr inbounds float* %tmp15717, i64 1
- %tmp15719 = getelementptr inbounds float* %tmp15718, i64 1
- %tmp15720 = getelementptr inbounds float* %tmp15719, i64 1
- %tmp15721 = getelementptr inbounds float* %tmp15720, i64 1
- %tmp15722 = getelementptr inbounds float* %tmp15721, i64 1
- %tmp15723 = getelementptr inbounds float* %tmp15722, i64 1
- %tmp15724 = getelementptr inbounds float* %tmp15723, i64 1
- %tmp15725 = getelementptr inbounds float* %tmp15724, i64 1
- %tmp15726 = getelementptr inbounds float* %tmp15725, i64 1
- %tmp15727 = getelementptr inbounds float* %tmp15726, i64 1
- %tmp15728 = getelementptr inbounds float* %tmp15727, i64 1
- %tmp15729 = getelementptr inbounds float* %tmp15728, i64 1
- %tmp15730 = getelementptr inbounds float* %tmp15729, i64 1
- %tmp15731 = getelementptr inbounds float* %tmp15730, i64 1
- %tmp15732 = getelementptr inbounds float* %tmp15731, i64 1
- %tmp15733 = getelementptr inbounds float* %tmp15732, i64 1
- %tmp15734 = getelementptr inbounds float* %tmp15733, i64 1
- %tmp15735 = getelementptr inbounds float* %tmp15734, i64 1
- %tmp15736 = getelementptr inbounds float* %tmp15735, i64 1
- %tmp15737 = getelementptr inbounds float* %tmp15736, i64 1
- %tmp15738 = getelementptr inbounds float* %tmp15737, i64 1
- %tmp15739 = getelementptr inbounds float* %tmp15738, i64 1
- %tmp15740 = getelementptr inbounds float* %tmp15739, i64 1
- %tmp15741 = getelementptr inbounds float* %tmp15740, i64 1
- %tmp15742 = getelementptr inbounds float* %tmp15741, i64 1
- %tmp15743 = getelementptr inbounds float* %tmp15742, i64 1
- %tmp15744 = getelementptr inbounds float* %tmp15743, i64 1
- %tmp15745 = getelementptr inbounds float* %tmp15744, i64 1
- %tmp15746 = getelementptr inbounds float* %tmp15745, i64 1
- %tmp15747 = getelementptr inbounds float* %tmp15746, i64 1
- %tmp15748 = getelementptr inbounds float* %tmp15747, i64 1
- %tmp15749 = getelementptr inbounds float* %tmp15748, i64 1
- %tmp15750 = getelementptr inbounds float* %tmp15749, i64 1
- %tmp15751 = getelementptr inbounds float* %tmp15750, i64 1
- %tmp15752 = getelementptr inbounds float* %tmp15751, i64 1
- %tmp15753 = getelementptr inbounds float* %tmp15752, i64 1
- %tmp15754 = getelementptr inbounds float* %tmp15753, i64 1
- %tmp15755 = getelementptr inbounds float* %tmp15754, i64 1
- %tmp15756 = getelementptr inbounds float* %tmp15755, i64 1
- %tmp15757 = getelementptr inbounds float* %tmp15756, i64 1
- %tmp15758 = getelementptr inbounds float* %tmp15757, i64 1
- %tmp15759 = getelementptr inbounds float* %tmp15758, i64 1
- %tmp15760 = getelementptr inbounds float* %tmp15759, i64 1
- %tmp15761 = getelementptr inbounds float* %tmp15760, i64 1
- %tmp15762 = getelementptr inbounds float* %tmp15761, i64 1
- %tmp15763 = getelementptr inbounds float* %tmp15762, i64 1
- %tmp15764 = getelementptr inbounds float* %tmp15763, i64 1
- %tmp15765 = getelementptr inbounds float* %tmp15764, i64 1
- %tmp15766 = getelementptr inbounds float* %tmp15765, i64 1
- %tmp15767 = getelementptr inbounds float* %tmp15766, i64 1
- %tmp15768 = getelementptr inbounds float* %tmp15767, i64 1
- %tmp15769 = getelementptr inbounds float* %tmp15768, i64 1
- %tmp15770 = getelementptr inbounds float* %tmp15769, i64 1
- %tmp15771 = getelementptr inbounds float* %tmp15770, i64 1
- %tmp15772 = getelementptr inbounds float* %tmp15771, i64 1
- %tmp15773 = getelementptr inbounds float* %tmp15772, i64 1
- %tmp15774 = getelementptr inbounds float* %tmp15773, i64 1
- %tmp15775 = getelementptr inbounds float* %tmp15774, i64 1
- %tmp15776 = getelementptr inbounds float* %tmp15775, i64 1
- %tmp15777 = getelementptr inbounds float* %tmp15776, i64 1
- %tmp15778 = getelementptr inbounds float* %tmp15777, i64 1
- %tmp15779 = getelementptr inbounds float* %tmp15778, i64 1
- %tmp15780 = getelementptr inbounds float* %tmp15779, i64 1
- %tmp15781 = getelementptr inbounds float* %tmp15780, i64 1
- %tmp15782 = getelementptr inbounds float* %tmp15781, i64 1
- %tmp15783 = getelementptr inbounds float* %tmp15782, i64 1
- %tmp15784 = getelementptr inbounds float* %tmp15783, i64 1
- %tmp15785 = getelementptr inbounds float* %tmp15784, i64 1
- %tmp15786 = getelementptr inbounds float* %tmp15785, i64 1
- %tmp15787 = getelementptr inbounds float* %tmp15786, i64 1
- %tmp15788 = getelementptr inbounds float* %tmp15787, i64 1
- %tmp15789 = getelementptr inbounds float* %tmp15788, i64 1
- %tmp15790 = getelementptr inbounds float* %tmp15789, i64 1
- %tmp15791 = getelementptr inbounds float* %tmp15790, i64 1
- %tmp15792 = getelementptr inbounds float* %tmp15791, i64 1
- %tmp15793 = getelementptr inbounds float* %tmp15792, i64 1
- %tmp15794 = getelementptr inbounds float* %tmp15793, i64 1
- %tmp15795 = getelementptr inbounds float* %tmp15794, i64 1
- %tmp15796 = getelementptr inbounds float* %tmp15795, i64 1
- %tmp15797 = getelementptr inbounds float* %tmp15796, i64 1
- %tmp15798 = getelementptr inbounds float* %tmp15797, i64 1
- %tmp15799 = getelementptr inbounds float* %tmp15798, i64 1
- %tmp15800 = getelementptr inbounds float* %tmp15799, i64 1
- %tmp15801 = getelementptr inbounds float* %tmp15800, i64 1
- %tmp15802 = getelementptr inbounds float* %tmp15801, i64 1
- %tmp15803 = getelementptr inbounds float* %tmp15802, i64 1
- %tmp15804 = getelementptr inbounds float* %tmp15803, i64 1
- %tmp15805 = getelementptr inbounds float* %tmp15804, i64 1
- %tmp15806 = getelementptr inbounds float* %tmp15805, i64 1
- %tmp15807 = getelementptr inbounds float* %tmp15806, i64 1
- %tmp15808 = getelementptr inbounds float* %tmp15807, i64 1
- %tmp15809 = getelementptr inbounds float* %tmp15808, i64 1
- %tmp15810 = getelementptr inbounds float* %tmp15809, i64 1
- %tmp15811 = getelementptr inbounds float* %tmp15810, i64 1
- %tmp15812 = getelementptr inbounds float* %tmp15811, i64 1
- %tmp15813 = getelementptr inbounds float* %tmp15812, i64 1
- %tmp15814 = getelementptr inbounds float* %tmp15813, i64 1
- %tmp15815 = getelementptr inbounds float* %tmp15814, i64 1
- %tmp15816 = getelementptr inbounds float* %tmp15815, i64 1
- %tmp15817 = getelementptr inbounds float* %tmp15816, i64 1
- %tmp15818 = getelementptr inbounds float* %tmp15817, i64 1
- %tmp15819 = getelementptr inbounds float* %tmp15818, i64 1
- %tmp15820 = getelementptr inbounds float* %tmp15819, i64 1
- %tmp15821 = getelementptr inbounds float* %tmp15820, i64 1
- %tmp15822 = getelementptr inbounds float* %tmp15821, i64 1
- %tmp15823 = getelementptr inbounds float* %tmp15822, i64 1
- %tmp15824 = getelementptr inbounds float* %tmp15823, i64 1
- %tmp15825 = getelementptr inbounds float* %tmp15824, i64 1
- %tmp15826 = getelementptr inbounds float* %tmp15825, i64 1
- %tmp15827 = getelementptr inbounds float* %tmp15826, i64 1
- %tmp15828 = getelementptr inbounds float* %tmp15827, i64 1
- %tmp15829 = getelementptr inbounds float* %tmp15828, i64 1
- %tmp15830 = getelementptr inbounds float* %tmp15829, i64 1
- %tmp15831 = getelementptr inbounds float* %tmp15830, i64 1
- %tmp15832 = getelementptr inbounds float* %tmp15831, i64 1
- %tmp15833 = getelementptr inbounds float* %tmp15832, i64 1
- %tmp15834 = getelementptr inbounds float* %tmp15833, i64 1
- %tmp15835 = getelementptr inbounds float* %tmp15834, i64 1
- %tmp15836 = getelementptr inbounds float* %tmp15835, i64 1
- %tmp15837 = getelementptr inbounds float* %tmp15836, i64 1
- %tmp15838 = getelementptr inbounds float* %tmp15837, i64 1
- %tmp15839 = getelementptr inbounds float* %tmp15838, i64 1
- %tmp15840 = getelementptr inbounds float* %tmp15839, i64 1
- %tmp15841 = getelementptr inbounds float* %tmp15840, i64 1
- %tmp15842 = getelementptr inbounds float* %tmp15841, i64 1
- %tmp15843 = getelementptr inbounds float* %tmp15842, i64 1
- %tmp15844 = getelementptr inbounds float* %tmp15843, i64 1
- %tmp15845 = getelementptr inbounds float* %tmp15844, i64 1
- %tmp15846 = getelementptr inbounds float* %tmp15845, i64 1
- %tmp15847 = getelementptr inbounds float* %tmp15846, i64 1
- %tmp15848 = getelementptr inbounds float* %tmp15847, i64 1
- %tmp15849 = getelementptr inbounds float* %tmp15848, i64 1
- %tmp15850 = getelementptr inbounds float* %tmp15849, i64 1
- %tmp15851 = getelementptr inbounds float* %tmp15850, i64 1
- %tmp15852 = getelementptr inbounds float* %tmp15851, i64 1
- %tmp15853 = getelementptr inbounds float* %tmp15852, i64 1
- %tmp15854 = getelementptr inbounds float* %tmp15853, i64 1
- %tmp15855 = getelementptr inbounds float* %tmp15854, i64 1
- %tmp15856 = getelementptr inbounds float* %tmp15855, i64 1
- %tmp15857 = getelementptr inbounds float* %tmp15856, i64 1
- %tmp15858 = getelementptr inbounds float* %tmp15857, i64 1
- %tmp15859 = getelementptr inbounds float* %tmp15858, i64 1
- %tmp15860 = getelementptr inbounds float* %tmp15859, i64 1
- %tmp15861 = getelementptr inbounds float* %tmp15860, i64 1
- %tmp15862 = getelementptr inbounds float* %tmp15861, i64 1
- %tmp15863 = getelementptr inbounds float* %tmp15862, i64 1
- %tmp15864 = getelementptr inbounds float* %tmp15863, i64 1
- %tmp15865 = getelementptr inbounds float* %tmp15864, i64 1
- %tmp15866 = getelementptr inbounds float* %tmp15865, i64 1
- %tmp15867 = getelementptr inbounds float* %tmp15866, i64 1
- %tmp15868 = getelementptr inbounds float* %tmp15867, i64 1
- %tmp15869 = getelementptr inbounds float* %tmp15868, i64 1
- %tmp15870 = getelementptr inbounds float* %tmp15869, i64 1
- %tmp15871 = getelementptr inbounds float* %tmp15870, i64 1
- %tmp15872 = getelementptr inbounds float* %tmp15871, i64 1
- %tmp15873 = getelementptr inbounds float* %tmp15872, i64 1
- %tmp15874 = getelementptr inbounds float* %tmp15873, i64 1
- %tmp15875 = getelementptr inbounds float* %tmp15874, i64 1
- %tmp15876 = getelementptr inbounds float* %tmp15875, i64 1
- %tmp15877 = getelementptr inbounds float* %tmp15876, i64 1
- %tmp15878 = getelementptr inbounds float* %tmp15877, i64 1
- %tmp15879 = getelementptr inbounds float* %tmp15878, i64 1
- %tmp15880 = getelementptr inbounds float* %tmp15879, i64 1
- %tmp15881 = getelementptr inbounds float* %tmp15880, i64 1
- %tmp15882 = getelementptr inbounds float* %tmp15881, i64 1
- %tmp15883 = getelementptr inbounds float* %tmp15882, i64 1
- %tmp15884 = getelementptr inbounds float* %tmp15883, i64 1
- %tmp15885 = getelementptr inbounds float* %tmp15884, i64 1
- %tmp15886 = getelementptr inbounds float* %tmp15885, i64 1
- %tmp15887 = getelementptr inbounds float* %tmp15886, i64 1
- %tmp15888 = getelementptr inbounds float* %tmp15887, i64 1
- %tmp15889 = getelementptr inbounds float* %tmp15888, i64 1
- %tmp15890 = getelementptr inbounds float* %tmp15889, i64 1
- %tmp15891 = getelementptr inbounds float* %tmp15890, i64 1
- %tmp15892 = getelementptr inbounds float* %tmp15891, i64 1
- %tmp15893 = getelementptr inbounds float* %tmp15892, i64 1
- %tmp15894 = getelementptr inbounds float* %tmp15893, i64 1
- %tmp15895 = getelementptr inbounds float* %tmp15894, i64 1
- %tmp15896 = getelementptr inbounds float* %tmp15895, i64 1
- %tmp15897 = getelementptr inbounds float* %tmp15896, i64 1
- %tmp15898 = getelementptr inbounds float* %tmp15897, i64 1
- %tmp15899 = getelementptr inbounds float* %tmp15898, i64 1
- %tmp15900 = getelementptr inbounds float* %tmp15899, i64 1
- %tmp15901 = getelementptr inbounds float* %tmp15900, i64 1
- %tmp15902 = getelementptr inbounds float* %tmp15901, i64 1
- %tmp15903 = getelementptr inbounds float* %tmp15902, i64 1
- %tmp15904 = getelementptr inbounds float* %tmp15903, i64 1
- %tmp15905 = getelementptr inbounds float* %tmp15904, i64 1
- %tmp15906 = getelementptr inbounds float* %tmp15905, i64 1
- %tmp15907 = getelementptr inbounds float* %tmp15906, i64 1
- %tmp15908 = getelementptr inbounds float* %tmp15907, i64 1
- %tmp15909 = getelementptr inbounds float* %tmp15908, i64 1
- %tmp15910 = getelementptr inbounds float* %tmp15909, i64 1
- %tmp15911 = getelementptr inbounds float* %tmp15910, i64 1
- %tmp15912 = getelementptr inbounds float* %tmp15911, i64 1
- %tmp15913 = getelementptr inbounds float* %tmp15912, i64 1
- %tmp15914 = getelementptr inbounds float* %tmp15913, i64 1
- %tmp15915 = getelementptr inbounds float* %tmp15914, i64 1
- %tmp15916 = getelementptr inbounds float* %tmp15915, i64 1
- %tmp15917 = getelementptr inbounds float* %tmp15916, i64 1
- %tmp15918 = getelementptr inbounds float* %tmp15917, i64 1
- %tmp15919 = getelementptr inbounds float* %tmp15918, i64 1
- %tmp15920 = getelementptr inbounds float* %tmp15919, i64 1
- %tmp15921 = getelementptr inbounds float* %tmp15920, i64 1
- %tmp15922 = getelementptr inbounds float* %tmp15921, i64 1
- %tmp15923 = getelementptr inbounds float* %tmp15922, i64 1
- %tmp15924 = getelementptr inbounds float* %tmp15923, i64 1
- %tmp15925 = getelementptr inbounds float* %tmp15924, i64 1
- %tmp15926 = getelementptr inbounds float* %tmp15925, i64 1
- %tmp15927 = getelementptr inbounds float* %tmp15926, i64 1
- %tmp15928 = getelementptr inbounds float* %tmp15927, i64 1
- %tmp15929 = getelementptr inbounds float* %tmp15928, i64 1
- %tmp15930 = getelementptr inbounds float* %tmp15929, i64 1
- %tmp15931 = getelementptr inbounds float* %tmp15930, i64 1
- %tmp15932 = getelementptr inbounds float* %tmp15931, i64 1
- %tmp15933 = getelementptr inbounds float* %tmp15932, i64 1
- %tmp15934 = getelementptr inbounds float* %tmp15933, i64 1
- %tmp15935 = getelementptr inbounds float* %tmp15934, i64 1
- %tmp15936 = getelementptr inbounds float* %tmp15935, i64 1
- %tmp15937 = getelementptr inbounds float* %tmp15936, i64 1
- %tmp15938 = getelementptr inbounds float* %tmp15937, i64 1
- %tmp15939 = getelementptr inbounds float* %tmp15938, i64 1
- %tmp15940 = getelementptr inbounds float* %tmp15939, i64 1
- %tmp15941 = getelementptr inbounds float* %tmp15940, i64 1
- %tmp15942 = getelementptr inbounds float* %tmp15941, i64 1
- %tmp15943 = getelementptr inbounds float* %tmp15942, i64 1
- %tmp15944 = getelementptr inbounds float* %tmp15943, i64 1
- %tmp15945 = getelementptr inbounds float* %tmp15944, i64 1
- %tmp15946 = getelementptr inbounds float* %tmp15945, i64 1
- %tmp15947 = getelementptr inbounds float* %tmp15946, i64 1
- %tmp15948 = getelementptr inbounds float* %tmp15947, i64 1
- %tmp15949 = getelementptr inbounds float* %tmp15948, i64 1
- %tmp15950 = getelementptr inbounds float* %tmp15949, i64 1
- %tmp15951 = getelementptr inbounds float* %tmp15950, i64 1
- %tmp15952 = getelementptr inbounds float* %tmp15951, i64 1
- %tmp15953 = getelementptr inbounds float* %tmp15952, i64 1
- %tmp15954 = getelementptr inbounds float* %tmp15953, i64 1
- %tmp15955 = getelementptr inbounds float* %tmp15954, i64 1
- %tmp15956 = getelementptr inbounds float* %tmp15955, i64 1
- %tmp15957 = getelementptr inbounds float* %tmp15956, i64 1
- %tmp15958 = getelementptr inbounds float* %tmp15957, i64 1
- %tmp15959 = getelementptr inbounds float* %tmp15958, i64 1
- %tmp15960 = getelementptr inbounds float* %tmp15959, i64 1
- %tmp15961 = getelementptr inbounds float* %tmp15960, i64 1
- %tmp15962 = getelementptr inbounds float* %tmp15961, i64 1
- %tmp15963 = getelementptr inbounds float* %tmp15962, i64 1
- %tmp15964 = getelementptr inbounds float* %tmp15963, i64 1
- %tmp15965 = getelementptr inbounds float* %tmp15964, i64 1
- %tmp15966 = getelementptr inbounds float* %tmp15965, i64 1
- %tmp15967 = getelementptr inbounds float* %tmp15966, i64 1
- %tmp15968 = getelementptr inbounds float* %tmp15967, i64 1
- %tmp15969 = getelementptr inbounds float* %tmp15968, i64 1
- %tmp15970 = getelementptr inbounds float* %tmp15969, i64 1
- %tmp15971 = getelementptr inbounds float* %tmp15970, i64 1
- %tmp15972 = getelementptr inbounds float* %tmp15971, i64 1
- %tmp15973 = getelementptr inbounds float* %tmp15972, i64 1
- %tmp15974 = getelementptr inbounds float* %tmp15973, i64 1
- %tmp15975 = getelementptr inbounds float* %tmp15974, i64 1
- %tmp15976 = getelementptr inbounds float* %tmp15975, i64 1
- %tmp15977 = getelementptr inbounds float* %tmp15976, i64 1
- %tmp15978 = getelementptr inbounds float* %tmp15977, i64 1
- %tmp15979 = getelementptr inbounds float* %tmp15978, i64 1
- %tmp15980 = getelementptr inbounds float* %tmp15979, i64 1
- %tmp15981 = getelementptr inbounds float* %tmp15980, i64 1
- %tmp15982 = getelementptr inbounds float* %tmp15981, i64 1
- %tmp15983 = getelementptr inbounds float* %tmp15982, i64 1
- %tmp15984 = getelementptr inbounds float* %tmp15983, i64 1
- %tmp15985 = getelementptr inbounds float* %tmp15984, i64 1
- %tmp15986 = getelementptr inbounds float* %tmp15985, i64 1
- %tmp15987 = getelementptr inbounds float* %tmp15986, i64 1
- %tmp15988 = getelementptr inbounds float* %tmp15987, i64 1
- %tmp15989 = getelementptr inbounds float* %tmp15988, i64 1
- %tmp15990 = getelementptr inbounds float* %tmp15989, i64 1
- %tmp15991 = getelementptr inbounds float* %tmp15990, i64 1
- %tmp15992 = getelementptr inbounds float* %tmp15991, i64 1
- %tmp15993 = getelementptr inbounds float* %tmp15992, i64 1
- %tmp15994 = getelementptr inbounds float* %tmp15993, i64 1
- %tmp15995 = getelementptr inbounds float* %tmp15994, i64 1
- %tmp15996 = getelementptr inbounds float* %tmp15995, i64 1
- %tmp15997 = getelementptr inbounds float* %tmp15996, i64 1
- %tmp15998 = getelementptr inbounds float* %tmp15997, i64 1
- %tmp15999 = getelementptr inbounds float* %tmp15998, i64 1
- %tmp16000 = getelementptr inbounds float* %tmp15999, i64 1
- %tmp16001 = getelementptr inbounds float* %tmp16000, i64 1
- %tmp16002 = getelementptr inbounds float* %tmp16001, i64 1
- %tmp16003 = getelementptr inbounds float* %tmp16002, i64 1
- %tmp16004 = getelementptr inbounds float* %tmp16003, i64 1
- %tmp16005 = getelementptr inbounds float* %tmp16004, i64 1
- %tmp16006 = getelementptr inbounds float* %tmp16005, i64 1
- %tmp16007 = getelementptr inbounds float* %tmp16006, i64 1
- %tmp16008 = getelementptr inbounds float* %tmp16007, i64 1
- %tmp16009 = getelementptr inbounds float* %tmp16008, i64 1
- %tmp16010 = getelementptr inbounds float* %tmp16009, i64 1
- %tmp16011 = getelementptr inbounds float* %tmp16010, i64 1
- %tmp16012 = getelementptr inbounds float* %tmp16011, i64 1
- %tmp16013 = getelementptr inbounds float* %tmp16012, i64 1
- %tmp16014 = getelementptr inbounds float* %tmp16013, i64 1
- %tmp16015 = getelementptr inbounds float* %tmp16014, i64 1
- %tmp16016 = getelementptr inbounds float* %tmp16015, i64 1
- %tmp16017 = getelementptr inbounds float* %tmp16016, i64 1
- %tmp16018 = getelementptr inbounds float* %tmp16017, i64 1
- %tmp16019 = getelementptr inbounds float* %tmp16018, i64 1
- %tmp16020 = getelementptr inbounds float* %tmp16019, i64 1
- %tmp16021 = getelementptr inbounds float* %tmp16020, i64 1
- %tmp16022 = getelementptr inbounds float* %tmp16021, i64 1
- %tmp16023 = getelementptr inbounds float* %tmp16022, i64 1
- %tmp16024 = getelementptr inbounds float* %tmp16023, i64 1
- %tmp16025 = getelementptr inbounds float* %tmp16024, i64 1
- %tmp16026 = getelementptr inbounds float* %tmp16025, i64 1
- %tmp16027 = getelementptr inbounds float* %tmp16026, i64 1
- %tmp16028 = getelementptr inbounds float* %tmp16027, i64 1
- %tmp16029 = getelementptr inbounds float* %tmp16028, i64 1
- %tmp16030 = getelementptr inbounds float* %tmp16029, i64 1
- %tmp16031 = getelementptr inbounds float* %tmp16030, i64 1
- %tmp16032 = getelementptr inbounds float* %tmp16031, i64 1
- %tmp16033 = getelementptr inbounds float* %tmp16032, i64 1
- %tmp16034 = getelementptr inbounds float* %tmp16033, i64 1
- %tmp16035 = getelementptr inbounds float* %tmp16034, i64 1
- %tmp16036 = getelementptr inbounds float* %tmp16035, i64 1
- %tmp16037 = getelementptr inbounds float* %tmp16036, i64 1
- %tmp16038 = getelementptr inbounds float* %tmp16037, i64 1
- %tmp16039 = getelementptr inbounds float* %tmp16038, i64 1
- %tmp16040 = getelementptr inbounds float* %tmp16039, i64 1
- %tmp16041 = getelementptr inbounds float* %tmp16040, i64 1
- %tmp16042 = getelementptr inbounds float* %tmp16041, i64 1
- %tmp16043 = getelementptr inbounds float* %tmp16042, i64 1
- %tmp16044 = getelementptr inbounds float* %tmp16043, i64 1
- %tmp16045 = getelementptr inbounds float* %tmp16044, i64 1
- %tmp16046 = getelementptr inbounds float* %tmp16045, i64 1
- %tmp16047 = getelementptr inbounds float* %tmp16046, i64 1
- %tmp16048 = getelementptr inbounds float* %tmp16047, i64 1
- %tmp16049 = getelementptr inbounds float* %tmp16048, i64 1
- %tmp16050 = getelementptr inbounds float* %tmp16049, i64 1
- %tmp16051 = getelementptr inbounds float* %tmp16050, i64 1
- %tmp16052 = getelementptr inbounds float* %tmp16051, i64 1
- %tmp16053 = getelementptr inbounds float* %tmp16052, i64 1
- %tmp16054 = getelementptr inbounds float* %tmp16053, i64 1
- %tmp16055 = getelementptr inbounds float* %tmp16054, i64 1
- %tmp16056 = getelementptr inbounds float* %tmp16055, i64 1
- %tmp16057 = getelementptr inbounds float* %tmp16056, i64 1
- %tmp16058 = getelementptr inbounds float* %tmp16057, i64 1
- %tmp16059 = getelementptr inbounds float* %tmp16058, i64 1
- %tmp16060 = getelementptr inbounds float* %tmp16059, i64 1
- %tmp16061 = getelementptr inbounds float* %tmp16060, i64 1
- %tmp16062 = getelementptr inbounds float* %tmp16061, i64 1
- %tmp16063 = getelementptr inbounds float* %tmp16062, i64 1
- %tmp16064 = getelementptr inbounds float* %tmp16063, i64 1
- %tmp16065 = getelementptr inbounds float* %tmp16064, i64 1
- %tmp16066 = getelementptr inbounds float* %tmp16065, i64 1
- %tmp16067 = getelementptr inbounds float* %tmp16066, i64 1
- %tmp16068 = getelementptr inbounds float* %tmp16067, i64 1
- %tmp16069 = getelementptr inbounds float* %tmp16068, i64 1
- %tmp16070 = getelementptr inbounds float* %tmp16069, i64 1
- %tmp16071 = getelementptr inbounds float* %tmp16070, i64 1
- %tmp16072 = getelementptr inbounds float* %tmp16071, i64 1
- %tmp16073 = getelementptr inbounds float* %tmp16072, i64 1
- %tmp16074 = getelementptr inbounds float* %tmp16073, i64 1
- %tmp16075 = getelementptr inbounds float* %tmp16074, i64 1
- %tmp16076 = getelementptr inbounds float* %tmp16075, i64 1
- %tmp16077 = getelementptr inbounds float* %tmp16076, i64 1
- %tmp16078 = getelementptr inbounds float* %tmp16077, i64 1
- %tmp16079 = getelementptr inbounds float* %tmp16078, i64 1
- %tmp16080 = getelementptr inbounds float* %tmp16079, i64 1
- %tmp16081 = getelementptr inbounds float* %tmp16080, i64 1
- %tmp16082 = getelementptr inbounds float* %tmp16081, i64 1
- %tmp16083 = getelementptr inbounds float* %tmp16082, i64 1
- %tmp16084 = getelementptr inbounds float* %tmp16083, i64 1
- %tmp16085 = getelementptr inbounds float* %tmp16084, i64 1
- %tmp16086 = getelementptr inbounds float* %tmp16085, i64 1
- %tmp16087 = getelementptr inbounds float* %tmp16086, i64 1
- %tmp16088 = getelementptr inbounds float* %tmp16087, i64 1
- %tmp16089 = getelementptr inbounds float* %tmp16088, i64 1
- %tmp16090 = getelementptr inbounds float* %tmp16089, i64 1
- %tmp16091 = getelementptr inbounds float* %tmp16090, i64 1
- %tmp16092 = getelementptr inbounds float* %tmp16091, i64 1
- %tmp16093 = getelementptr inbounds float* %tmp16092, i64 1
- %tmp16094 = getelementptr inbounds float* %tmp16093, i64 1
- %tmp16095 = getelementptr inbounds float* %tmp16094, i64 1
- %tmp16096 = getelementptr inbounds float* %tmp16095, i64 1
- %tmp16097 = getelementptr inbounds float* %tmp16096, i64 1
- %tmp16098 = getelementptr inbounds float* %tmp16097, i64 1
- %tmp16099 = getelementptr inbounds float* %tmp16098, i64 1
- %tmp16100 = getelementptr inbounds float* %tmp16099, i64 1
- %tmp16101 = getelementptr inbounds float* %tmp16100, i64 1
- %tmp16102 = getelementptr inbounds float* %tmp16101, i64 1
- %tmp16103 = getelementptr inbounds float* %tmp16102, i64 1
- %tmp16104 = getelementptr inbounds float* %tmp16103, i64 1
- %tmp16105 = getelementptr inbounds float* %tmp16104, i64 1
- %tmp16106 = getelementptr inbounds float* %tmp16105, i64 1
- %tmp16107 = getelementptr inbounds float* %tmp16106, i64 1
- %tmp16108 = getelementptr inbounds float* %tmp16107, i64 1
- %tmp16109 = getelementptr inbounds float* %tmp16108, i64 1
- %tmp16110 = getelementptr inbounds float* %tmp16109, i64 1
- %tmp16111 = getelementptr inbounds float* %tmp16110, i64 1
- %tmp16112 = getelementptr inbounds float* %tmp16111, i64 1
- %tmp16113 = getelementptr inbounds float* %tmp16112, i64 1
- %tmp16114 = getelementptr inbounds float* %tmp16113, i64 1
- %tmp16115 = getelementptr inbounds float* %tmp16114, i64 1
- %tmp16116 = getelementptr inbounds float* %tmp16115, i64 1
- %tmp16117 = getelementptr inbounds float* %tmp16116, i64 1
- %tmp16118 = getelementptr inbounds float* %tmp16117, i64 1
- %tmp16119 = getelementptr inbounds float* %tmp16118, i64 1
- %tmp16120 = getelementptr inbounds float* %tmp16119, i64 1
- %tmp16121 = getelementptr inbounds float* %tmp16120, i64 1
- %tmp16122 = getelementptr inbounds float* %tmp16121, i64 1
- %tmp16123 = getelementptr inbounds float* %tmp16122, i64 1
- %tmp16124 = getelementptr inbounds float* %tmp16123, i64 1
- %tmp16125 = getelementptr inbounds float* %tmp16124, i64 1
- %tmp16126 = getelementptr inbounds float* %tmp16125, i64 1
- %tmp16127 = getelementptr inbounds float* %tmp16126, i64 1
- %tmp16128 = getelementptr inbounds float* %tmp16127, i64 1
- %tmp16129 = getelementptr inbounds float* %tmp16128, i64 1
- %tmp16130 = getelementptr inbounds float* %tmp16129, i64 1
- %tmp16131 = getelementptr inbounds float* %tmp16130, i64 1
- %tmp16132 = getelementptr inbounds float* %tmp16131, i64 1
- %tmp16133 = getelementptr inbounds float* %tmp16132, i64 1
- %tmp16134 = getelementptr inbounds float* %tmp16133, i64 1
- %tmp16135 = getelementptr inbounds float* %tmp16134, i64 1
- %tmp16136 = getelementptr inbounds float* %tmp16135, i64 1
- %tmp16137 = getelementptr inbounds float* %tmp16136, i64 1
- %tmp16138 = getelementptr inbounds float* %tmp16137, i64 1
- %tmp16139 = getelementptr inbounds float* %tmp16138, i64 1
- %tmp16140 = getelementptr inbounds float* %tmp16139, i64 1
- %tmp16141 = getelementptr inbounds float* %tmp16140, i64 1
- %tmp16142 = getelementptr inbounds float* %tmp16141, i64 1
- %tmp16143 = getelementptr inbounds float* %tmp16142, i64 1
- %tmp16144 = getelementptr inbounds float* %tmp16143, i64 1
- %tmp16145 = getelementptr inbounds float* %tmp16144, i64 1
- %tmp16146 = getelementptr inbounds float* %tmp16145, i64 1
- %tmp16147 = getelementptr inbounds float* %tmp16146, i64 1
- %tmp16148 = getelementptr inbounds float* %tmp16147, i64 1
- %tmp16149 = getelementptr inbounds float* %tmp16148, i64 1
- %tmp16150 = getelementptr inbounds float* %tmp16149, i64 1
- %tmp16151 = getelementptr inbounds float* %tmp16150, i64 1
- %tmp16152 = getelementptr inbounds float* %tmp16151, i64 1
- %tmp16153 = getelementptr inbounds float* %tmp16152, i64 1
- %tmp16154 = getelementptr inbounds float* %tmp16153, i64 1
- %tmp16155 = getelementptr inbounds float* %tmp16154, i64 1
- %tmp16156 = getelementptr inbounds float* %tmp16155, i64 1
- %tmp16157 = getelementptr inbounds float* %tmp16156, i64 1
- %tmp16158 = getelementptr inbounds float* %tmp16157, i64 1
- %tmp16159 = getelementptr inbounds float* %tmp16158, i64 1
- %tmp16160 = getelementptr inbounds float* %tmp16159, i64 1
- %tmp16161 = getelementptr inbounds float* %tmp16160, i64 1
- %tmp16162 = getelementptr inbounds float* %tmp16161, i64 1
- %tmp16163 = getelementptr inbounds float* %tmp16162, i64 1
- %tmp16164 = getelementptr inbounds float* %tmp16163, i64 1
- %tmp16165 = getelementptr inbounds float* %tmp16164, i64 1
- %tmp16166 = getelementptr inbounds float* %tmp16165, i64 1
- %tmp16167 = getelementptr inbounds float* %tmp16166, i64 1
- %tmp16168 = getelementptr inbounds float* %tmp16167, i64 1
- %tmp16169 = getelementptr inbounds float* %tmp16168, i64 1
- %tmp16170 = getelementptr inbounds float* %tmp16169, i64 1
- %tmp16171 = getelementptr inbounds float* %tmp16170, i64 1
- %tmp16172 = getelementptr inbounds float* %tmp16171, i64 1
- %tmp16173 = getelementptr inbounds float* %tmp16172, i64 1
- %tmp16174 = getelementptr inbounds float* %tmp16173, i64 1
- %tmp16175 = getelementptr inbounds float* %tmp16174, i64 1
- %tmp16176 = getelementptr inbounds float* %tmp16175, i64 1
- %tmp16177 = getelementptr inbounds float* %tmp16176, i64 1
- %tmp16178 = getelementptr inbounds float* %tmp16177, i64 1
- %tmp16179 = getelementptr inbounds float* %tmp16178, i64 1
- %tmp16180 = getelementptr inbounds float* %tmp16179, i64 1
- %tmp16181 = getelementptr inbounds float* %tmp16180, i64 1
- %tmp16182 = getelementptr inbounds float* %tmp16181, i64 1
- %tmp16183 = getelementptr inbounds float* %tmp16182, i64 1
- %tmp16184 = getelementptr inbounds float* %tmp16183, i64 1
- %tmp16185 = getelementptr inbounds float* %tmp16184, i64 1
- %tmp16186 = getelementptr inbounds float* %tmp16185, i64 1
- %tmp16187 = getelementptr inbounds float* %tmp16186, i64 1
- %tmp16188 = getelementptr inbounds float* %tmp16187, i64 1
- %tmp16189 = getelementptr inbounds float* %tmp16188, i64 1
- %tmp16190 = getelementptr inbounds float* %tmp16189, i64 1
- %tmp16191 = getelementptr inbounds float* %tmp16190, i64 1
- %tmp16192 = getelementptr inbounds float* %tmp16191, i64 1
- %tmp16193 = getelementptr inbounds float* %tmp16192, i64 1
- %tmp16194 = getelementptr inbounds float* %tmp16193, i64 1
- %tmp16195 = getelementptr inbounds float* %tmp16194, i64 1
- %tmp16196 = getelementptr inbounds float* %tmp16195, i64 1
- %tmp16197 = getelementptr inbounds float* %tmp16196, i64 1
- %tmp16198 = getelementptr inbounds float* %tmp16197, i64 1
- %tmp16199 = getelementptr inbounds float* %tmp16198, i64 1
- %tmp16200 = getelementptr inbounds float* %tmp16199, i64 1
- %tmp16201 = getelementptr inbounds float* %tmp16200, i64 1
- %tmp16202 = getelementptr inbounds float* %tmp16201, i64 1
- %tmp16203 = getelementptr inbounds float* %tmp16202, i64 1
- %tmp16204 = getelementptr inbounds float* %tmp16203, i64 1
- %tmp16205 = getelementptr inbounds float* %tmp16204, i64 1
- %tmp16206 = getelementptr inbounds float* %tmp16205, i64 1
- %tmp16207 = getelementptr inbounds float* %tmp16206, i64 1
- %tmp16208 = getelementptr inbounds float* %tmp16207, i64 1
- %tmp16209 = getelementptr inbounds float* %tmp16208, i64 1
- %tmp16210 = getelementptr inbounds float* %tmp16209, i64 1
- %tmp16211 = getelementptr inbounds float* %tmp16210, i64 1
- %tmp16212 = getelementptr inbounds float* %tmp16211, i64 1
- %tmp16213 = getelementptr inbounds float* %tmp16212, i64 1
- %tmp16214 = getelementptr inbounds float* %tmp16213, i64 1
- %tmp16215 = getelementptr inbounds float* %tmp16214, i64 1
- %tmp16216 = getelementptr inbounds float* %tmp16215, i64 1
- %tmp16217 = getelementptr inbounds float* %tmp16216, i64 1
- %tmp16218 = getelementptr inbounds float* %tmp16217, i64 1
- %tmp16219 = getelementptr inbounds float* %tmp16218, i64 1
- %tmp16220 = getelementptr inbounds float* %tmp16219, i64 1
- %tmp16221 = getelementptr inbounds float* %tmp16220, i64 1
- %tmp16222 = getelementptr inbounds float* %tmp16221, i64 1
- %tmp16223 = getelementptr inbounds float* %tmp16222, i64 1
- %tmp16224 = getelementptr inbounds float* %tmp16223, i64 1
- %tmp16225 = getelementptr inbounds float* %tmp16224, i64 1
- %tmp16226 = getelementptr inbounds float* %tmp16225, i64 1
- %tmp16227 = getelementptr inbounds float* %tmp16226, i64 1
- %tmp16228 = getelementptr inbounds float* %tmp16227, i64 1
- %tmp16229 = getelementptr inbounds float* %tmp16228, i64 1
- %tmp16230 = getelementptr inbounds float* %tmp16229, i64 1
- %tmp16231 = getelementptr inbounds float* %tmp16230, i64 1
- %tmp16232 = getelementptr inbounds float* %tmp16231, i64 1
- %tmp16233 = getelementptr inbounds float* %tmp16232, i64 1
- %tmp16234 = getelementptr inbounds float* %tmp16233, i64 1
- %tmp16235 = getelementptr inbounds float* %tmp16234, i64 1
- %tmp16236 = getelementptr inbounds float* %tmp16235, i64 1
- %tmp16237 = getelementptr inbounds float* %tmp16236, i64 1
- %tmp16238 = getelementptr inbounds float* %tmp16237, i64 1
- %tmp16239 = getelementptr inbounds float* %tmp16238, i64 1
- %tmp16240 = getelementptr inbounds float* %tmp16239, i64 1
- %tmp16241 = getelementptr inbounds float* %tmp16240, i64 1
- %tmp16242 = getelementptr inbounds float* %tmp16241, i64 1
- %tmp16243 = getelementptr inbounds float* %tmp16242, i64 1
- %tmp16244 = getelementptr inbounds float* %tmp16243, i64 1
- %tmp16245 = getelementptr inbounds float* %tmp16244, i64 1
- %tmp16246 = getelementptr inbounds float* %tmp16245, i64 1
- %tmp16247 = getelementptr inbounds float* %tmp16246, i64 1
- %tmp16248 = getelementptr inbounds float* %tmp16247, i64 1
- %tmp16249 = getelementptr inbounds float* %tmp16248, i64 1
- %tmp16250 = getelementptr inbounds float* %tmp16249, i64 1
- %tmp16251 = getelementptr inbounds float* %tmp16250, i64 1
- %tmp16252 = getelementptr inbounds float* %tmp16251, i64 1
- %tmp16253 = getelementptr inbounds float* %tmp16252, i64 1
- %tmp16254 = getelementptr inbounds float* %tmp16253, i64 1
- %tmp16255 = getelementptr inbounds float* %tmp16254, i64 1
- %tmp16256 = getelementptr inbounds float* %tmp16255, i64 1
- %tmp16257 = getelementptr inbounds float* %tmp16256, i64 1
- %tmp16258 = getelementptr inbounds float* %tmp16257, i64 1
- %tmp16259 = getelementptr inbounds float* %tmp16258, i64 1
- %tmp16260 = getelementptr inbounds float* %tmp16259, i64 1
- %tmp16261 = getelementptr inbounds float* %tmp16260, i64 1
- %tmp16262 = getelementptr inbounds float* %tmp16261, i64 1
- %tmp16263 = getelementptr inbounds float* %tmp16262, i64 1
- %tmp16264 = getelementptr inbounds float* %tmp16263, i64 1
- %tmp16265 = getelementptr inbounds float* %tmp16264, i64 1
- %tmp16266 = getelementptr inbounds float* %tmp16265, i64 1
- %tmp16267 = getelementptr inbounds float* %tmp16266, i64 1
- %tmp16268 = getelementptr inbounds float* %tmp16267, i64 1
- %tmp16269 = getelementptr inbounds float* %tmp16268, i64 1
- %tmp16270 = getelementptr inbounds float* %tmp16269, i64 1
- %tmp16271 = getelementptr inbounds float* %tmp16270, i64 1
- %tmp16272 = getelementptr inbounds float* %tmp16271, i64 1
- %tmp16273 = getelementptr inbounds float* %tmp16272, i64 1
- %tmp16274 = getelementptr inbounds float* %tmp16273, i64 1
- %tmp16275 = getelementptr inbounds float* %tmp16274, i64 1
- %tmp16276 = getelementptr inbounds float* %tmp16275, i64 1
- %tmp16277 = getelementptr inbounds float* %tmp16276, i64 1
- %tmp16278 = getelementptr inbounds float* %tmp16277, i64 1
- %tmp16279 = getelementptr inbounds float* %tmp16278, i64 1
- %tmp16280 = getelementptr inbounds float* %tmp16279, i64 1
- %tmp16281 = getelementptr inbounds float* %tmp16280, i64 1
- %tmp16282 = getelementptr inbounds float* %tmp16281, i64 1
- %tmp16283 = getelementptr inbounds float* %tmp16282, i64 1
- %tmp16284 = getelementptr inbounds float* %tmp16283, i64 1
- %tmp16285 = getelementptr inbounds float* %tmp16284, i64 1
- %tmp16286 = getelementptr inbounds float* %tmp16285, i64 1
- %tmp16287 = getelementptr inbounds float* %tmp16286, i64 1
- %tmp16288 = getelementptr inbounds float* %tmp16287, i64 1
- %tmp16289 = getelementptr inbounds float* %tmp16288, i64 1
- %tmp16290 = getelementptr inbounds float* %tmp16289, i64 1
- %tmp16291 = getelementptr inbounds float* %tmp16290, i64 1
- %tmp16292 = getelementptr inbounds float* %tmp16291, i64 1
- %tmp16293 = getelementptr inbounds float* %tmp16292, i64 1
- %tmp16294 = getelementptr inbounds float* %tmp16293, i64 1
- %tmp16295 = getelementptr inbounds float* %tmp16294, i64 1
- %tmp16296 = getelementptr inbounds float* %tmp16295, i64 1
- %tmp16297 = getelementptr inbounds float* %tmp16296, i64 1
- %tmp16298 = getelementptr inbounds float* %tmp16297, i64 1
- %tmp16299 = getelementptr inbounds float* %tmp16298, i64 1
- %tmp16300 = getelementptr inbounds float* %tmp16299, i64 1
- %tmp16301 = getelementptr inbounds float* %tmp16300, i64 1
- %tmp16302 = getelementptr inbounds float* %tmp16301, i64 1
- %tmp16303 = getelementptr inbounds float* %tmp16302, i64 1
- %tmp16304 = getelementptr inbounds float* %tmp16303, i64 1
- %tmp16305 = getelementptr inbounds float* %tmp16304, i64 1
- %tmp16306 = getelementptr inbounds float* %tmp16305, i64 1
- %tmp16307 = getelementptr inbounds float* %tmp16306, i64 1
- %tmp16308 = getelementptr inbounds float* %tmp16307, i64 1
- %tmp16309 = getelementptr inbounds float* %tmp16308, i64 1
- %tmp16310 = getelementptr inbounds float* %tmp16309, i64 1
- %tmp16311 = getelementptr inbounds float* %tmp16310, i64 1
- %tmp16312 = getelementptr inbounds float* %tmp16311, i64 1
- %tmp16313 = getelementptr inbounds float* %tmp16312, i64 1
- %tmp16314 = getelementptr inbounds float* %tmp16313, i64 1
- %tmp16315 = getelementptr inbounds float* %tmp16314, i64 1
- %tmp16316 = getelementptr inbounds float* %tmp16315, i64 1
- %tmp16317 = getelementptr inbounds float* %tmp16316, i64 1
- %tmp16318 = getelementptr inbounds float* %tmp16317, i64 1
- %tmp16319 = getelementptr inbounds float* %tmp16318, i64 1
- %tmp16320 = getelementptr inbounds float* %tmp16319, i64 1
- %tmp16321 = getelementptr inbounds float* %tmp16320, i64 1
- %tmp16322 = getelementptr inbounds float* %tmp16321, i64 1
- %tmp16323 = getelementptr inbounds float* %tmp16322, i64 1
- %tmp16324 = getelementptr inbounds float* %tmp16323, i64 1
- %tmp16325 = getelementptr inbounds float* %tmp16324, i64 1
- %tmp16326 = getelementptr inbounds float* %tmp16325, i64 1
- %tmp16327 = getelementptr inbounds float* %tmp16326, i64 1
- %tmp16328 = getelementptr inbounds float* %tmp16327, i64 1
- %tmp16329 = getelementptr inbounds float* %tmp16328, i64 1
- %tmp16330 = getelementptr inbounds float* %tmp16329, i64 1
- %tmp16331 = getelementptr inbounds float* %tmp16330, i64 1
- %tmp16332 = getelementptr inbounds float* %tmp16331, i64 1
- %tmp16333 = getelementptr inbounds float* %tmp16332, i64 1
- %tmp16334 = getelementptr inbounds float* %tmp16333, i64 1
- %tmp16335 = getelementptr inbounds float* %tmp16334, i64 1
- %tmp16336 = getelementptr inbounds float* %tmp16335, i64 1
- %tmp16337 = getelementptr inbounds float* %tmp16336, i64 1
- %tmp16338 = getelementptr inbounds float* %tmp16337, i64 1
- %tmp16339 = getelementptr inbounds float* %tmp16338, i64 1
- %tmp16340 = getelementptr inbounds float* %tmp16339, i64 1
- %tmp16341 = getelementptr inbounds float* %tmp16340, i64 1
- %tmp16342 = getelementptr inbounds float* %tmp16341, i64 1
- %tmp16343 = getelementptr inbounds float* %tmp16342, i64 1
- %tmp16344 = getelementptr inbounds float* %tmp16343, i64 1
- %tmp16345 = getelementptr inbounds float* %tmp16344, i64 1
- %tmp16346 = getelementptr inbounds float* %tmp16345, i64 1
- %tmp16347 = getelementptr inbounds float* %tmp16346, i64 1
- %tmp16348 = getelementptr inbounds float* %tmp16347, i64 1
- %tmp16349 = getelementptr inbounds float* %tmp16348, i64 1
- %tmp16350 = getelementptr inbounds float* %tmp16349, i64 1
- %tmp16351 = getelementptr inbounds float* %tmp16350, i64 1
- %tmp16352 = getelementptr inbounds float* %tmp16351, i64 1
- %tmp16353 = getelementptr inbounds float* %tmp16352, i64 1
- %tmp16354 = getelementptr inbounds float* %tmp16353, i64 1
- %tmp16355 = getelementptr inbounds float* %tmp16354, i64 1
- %tmp16356 = getelementptr inbounds float* %tmp16355, i64 1
- %tmp16357 = getelementptr inbounds float* %tmp16356, i64 1
- %tmp16358 = getelementptr inbounds float* %tmp16357, i64 1
- %tmp16359 = getelementptr inbounds float* %tmp16358, i64 1
- %tmp16360 = getelementptr inbounds float* %tmp16359, i64 1
- %tmp16361 = getelementptr inbounds float* %tmp16360, i64 1
- %tmp16362 = getelementptr inbounds float* %tmp16361, i64 1
- %tmp16363 = getelementptr inbounds float* %tmp16362, i64 1
- %tmp16364 = getelementptr inbounds float* %tmp16363, i64 1
- %tmp16365 = getelementptr inbounds float* %tmp16364, i64 1
- %tmp16366 = getelementptr inbounds float* %tmp16365, i64 1
- %tmp16367 = getelementptr inbounds float* %tmp16366, i64 1
- %tmp16368 = getelementptr inbounds float* %tmp16367, i64 1
- %tmp16369 = getelementptr inbounds float* %tmp16368, i64 1
- %tmp16370 = getelementptr inbounds float* %tmp16369, i64 1
- %tmp16371 = getelementptr inbounds float* %tmp16370, i64 1
- %tmp16372 = getelementptr inbounds float* %tmp16371, i64 1
- %tmp16373 = getelementptr inbounds float* %tmp16372, i64 1
- %tmp16374 = getelementptr inbounds float* %tmp16373, i64 1
- %tmp16375 = getelementptr inbounds float* %tmp16374, i64 1
- %tmp16376 = getelementptr inbounds float* %tmp16375, i64 1
- %tmp16377 = getelementptr inbounds float* %tmp16376, i64 1
- %tmp16378 = getelementptr inbounds float* %tmp16377, i64 1
- %tmp16379 = getelementptr inbounds float* %tmp16378, i64 1
- %tmp16380 = getelementptr inbounds float* %tmp16379, i64 1
- %tmp16381 = getelementptr inbounds float* %tmp16380, i64 1
- %tmp16382 = getelementptr inbounds float* %tmp16381, i64 1
- %tmp16383 = getelementptr inbounds float* %tmp16382, i64 1
- %tmp16384 = getelementptr inbounds float* %tmp16383, i64 1
- %tmp16385 = getelementptr inbounds float* %tmp16384, i64 1
- %tmp16386 = getelementptr inbounds float* %tmp16385, i64 1
- %tmp16387 = getelementptr inbounds float* %tmp16386, i64 1
- %tmp16388 = getelementptr inbounds float* %tmp16387, i64 1
- %tmp16389 = getelementptr inbounds float* %tmp16388, i64 1
- %tmp16390 = getelementptr inbounds float* %tmp16389, i64 1
- %tmp16391 = getelementptr inbounds float* %tmp16390, i64 1
- %tmp16392 = getelementptr inbounds float* %tmp16391, i64 1
- %tmp16393 = getelementptr inbounds float* %tmp16392, i64 1
- %tmp16394 = getelementptr inbounds float* %tmp16393, i64 1
- %tmp16395 = getelementptr inbounds float* %tmp16394, i64 1
- %tmp16396 = getelementptr inbounds float* %tmp16395, i64 1
- %tmp16397 = getelementptr inbounds float* %tmp16396, i64 1
- %tmp16398 = getelementptr inbounds float* %tmp16397, i64 1
- %tmp16399 = getelementptr inbounds float* %tmp16398, i64 1
- %tmp16400 = getelementptr inbounds float* %tmp16399, i64 1
- %tmp16401 = getelementptr inbounds float* %tmp16400, i64 1
- %tmp16402 = getelementptr inbounds float* %tmp16401, i64 1
- %tmp16403 = getelementptr inbounds float* %tmp16402, i64 1
- %tmp16404 = getelementptr inbounds float* %tmp16403, i64 1
- %tmp16405 = getelementptr inbounds float* %tmp16404, i64 1
- %tmp16406 = getelementptr inbounds float* %tmp16405, i64 1
- %tmp16407 = getelementptr inbounds float* %tmp16406, i64 1
- %tmp16408 = getelementptr inbounds float* %tmp16407, i64 1
- %tmp16409 = getelementptr inbounds float* %tmp16408, i64 1
- %tmp16410 = getelementptr inbounds float* %tmp16409, i64 1
- %tmp16411 = getelementptr inbounds float* %tmp16410, i64 1
- %tmp16412 = getelementptr inbounds float* %tmp16411, i64 1
- %tmp16413 = getelementptr inbounds float* %tmp16412, i64 1
- %tmp16414 = getelementptr inbounds float* %tmp16413, i64 1
- %tmp16415 = getelementptr inbounds float* %tmp16414, i64 1
- %tmp16416 = getelementptr inbounds float* %tmp16415, i64 1
- %tmp16417 = getelementptr inbounds float* %tmp16416, i64 1
- %tmp16418 = getelementptr inbounds float* %tmp16417, i64 1
- %tmp16419 = getelementptr inbounds float* %tmp16418, i64 1
- %tmp16420 = getelementptr inbounds float* %tmp16419, i64 1
- %tmp16421 = getelementptr inbounds float* %tmp16420, i64 1
- %tmp16422 = getelementptr inbounds float* %tmp16421, i64 1
- %tmp16423 = getelementptr inbounds float* %tmp16422, i64 1
- %tmp16424 = getelementptr inbounds float* %tmp16423, i64 1
- %tmp16425 = getelementptr inbounds float* %tmp16424, i64 1
- %tmp16426 = getelementptr inbounds float* %tmp16425, i64 1
- %tmp16427 = getelementptr inbounds float* %tmp16426, i64 1
- %tmp16428 = getelementptr inbounds float* %tmp16427, i64 1
- %tmp16429 = getelementptr inbounds float* %tmp16428, i64 1
- %tmp16430 = getelementptr inbounds float* %tmp16429, i64 1
- %tmp16431 = getelementptr inbounds float* %tmp16430, i64 1
- %tmp16432 = getelementptr inbounds float* %tmp16431, i64 1
- %tmp16433 = getelementptr inbounds float* %tmp16432, i64 1
- %tmp16434 = getelementptr inbounds float* %tmp16433, i64 1
- %tmp16435 = getelementptr inbounds float* %tmp16434, i64 1
- %tmp16436 = getelementptr inbounds float* %tmp16435, i64 1
- %tmp16437 = getelementptr inbounds float* %tmp16436, i64 1
- %tmp16438 = getelementptr inbounds float* %tmp16437, i64 1
- %tmp16439 = getelementptr inbounds float* %tmp16438, i64 1
- %tmp16440 = getelementptr inbounds float* %tmp16439, i64 1
- %tmp16441 = getelementptr inbounds float* %tmp16440, i64 1
- %tmp16442 = getelementptr inbounds float* %tmp16441, i64 1
- %tmp16443 = getelementptr inbounds float* %tmp16442, i64 1
- %tmp16444 = getelementptr inbounds float* %tmp16443, i64 1
- %tmp16445 = getelementptr inbounds float* %tmp16444, i64 1
- %tmp16446 = getelementptr inbounds float* %tmp16445, i64 1
- %tmp16447 = getelementptr inbounds float* %tmp16446, i64 1
- %tmp16448 = getelementptr inbounds float* %tmp16447, i64 1
- %tmp16449 = getelementptr inbounds float* %tmp16448, i64 1
- %tmp16450 = getelementptr inbounds float* %tmp16449, i64 1
- %tmp16451 = getelementptr inbounds float* %tmp16450, i64 1
- %tmp16452 = getelementptr inbounds float* %tmp16451, i64 1
- %tmp16453 = getelementptr inbounds float* %tmp16452, i64 1
- %tmp16454 = getelementptr inbounds float* %tmp16453, i64 1
- %tmp16455 = getelementptr inbounds float* %tmp16454, i64 1
- %tmp16456 = getelementptr inbounds float* %tmp16455, i64 1
- %tmp16457 = getelementptr inbounds float* %tmp16456, i64 1
- %tmp16458 = getelementptr inbounds float* %tmp16457, i64 1
- %tmp16459 = getelementptr inbounds float* %tmp16458, i64 1
- %tmp16460 = getelementptr inbounds float* %tmp16459, i64 1
- %tmp16461 = getelementptr inbounds float* %tmp16460, i64 1
- %tmp16462 = getelementptr inbounds float* %tmp16461, i64 1
- %tmp16463 = getelementptr inbounds float* %tmp16462, i64 1
- %tmp16464 = getelementptr inbounds float* %tmp16463, i64 1
- %tmp16465 = getelementptr inbounds float* %tmp16464, i64 1
- %tmp16466 = getelementptr inbounds float* %tmp16465, i64 1
- %tmp16467 = getelementptr inbounds float* %tmp16466, i64 1
- %tmp16468 = getelementptr inbounds float* %tmp16467, i64 1
- %tmp16469 = getelementptr inbounds float* %tmp16468, i64 1
- %tmp16470 = getelementptr inbounds float* %tmp16469, i64 1
- %tmp16471 = getelementptr inbounds float* %tmp16470, i64 1
- %tmp16472 = getelementptr inbounds float* %tmp16471, i64 1
- %tmp16473 = getelementptr inbounds float* %tmp16472, i64 1
- %tmp16474 = getelementptr inbounds float* %tmp16473, i64 1
- %tmp16475 = getelementptr inbounds float* %tmp16474, i64 1
- %tmp16476 = getelementptr inbounds float* %tmp16475, i64 1
- %tmp16477 = getelementptr inbounds float* %tmp16476, i64 1
- %tmp16478 = getelementptr inbounds float* %tmp16477, i64 1
- %tmp16479 = getelementptr inbounds float* %tmp16478, i64 1
- %tmp16480 = getelementptr inbounds float* %tmp16479, i64 1
- %tmp16481 = getelementptr inbounds float* %tmp16480, i64 1
- %tmp16482 = getelementptr inbounds float* %tmp16481, i64 1
- %tmp16483 = getelementptr inbounds float* %tmp16482, i64 1
- %tmp16484 = getelementptr inbounds float* %tmp16483, i64 1
- %tmp16485 = getelementptr inbounds float* %tmp16484, i64 1
- %tmp16486 = getelementptr inbounds float* %tmp16485, i64 1
- %tmp16487 = getelementptr inbounds float* %tmp16486, i64 1
- %tmp16488 = getelementptr inbounds float* %tmp16487, i64 1
- %tmp16489 = getelementptr inbounds float* %tmp16488, i64 1
- %tmp16490 = getelementptr inbounds float* %tmp16489, i64 1
- %tmp16491 = getelementptr inbounds float* %tmp16490, i64 1
- %tmp16492 = getelementptr inbounds float* %tmp16491, i64 1
- %tmp16493 = getelementptr inbounds float* %tmp16492, i64 1
- %tmp16494 = getelementptr inbounds float* %tmp16493, i64 1
- %tmp16495 = getelementptr inbounds float* %tmp16494, i64 1
- %tmp16496 = getelementptr inbounds float* %tmp16495, i64 1
- %tmp16497 = getelementptr inbounds float* %tmp16496, i64 1
- %tmp16498 = getelementptr inbounds float* %tmp16497, i64 1
- %tmp16499 = getelementptr inbounds float* %tmp16498, i64 1
- %tmp16500 = getelementptr inbounds float* %tmp16499, i64 1
- %tmp16501 = getelementptr inbounds float* %tmp16500, i64 1
- %tmp16502 = getelementptr inbounds float* %tmp16501, i64 1
- %tmp16503 = getelementptr inbounds float* %tmp16502, i64 1
- %tmp16504 = getelementptr inbounds float* %tmp16503, i64 1
- %tmp16505 = getelementptr inbounds float* %tmp16504, i64 1
- %tmp16506 = getelementptr inbounds float* %tmp16505, i64 1
- %tmp16507 = getelementptr inbounds float* %tmp16506, i64 1
- %tmp16508 = getelementptr inbounds float* %tmp16507, i64 1
- %tmp16509 = getelementptr inbounds float* %tmp16508, i64 1
- %tmp16510 = getelementptr inbounds float* %tmp16509, i64 1
- %tmp16511 = getelementptr inbounds float* %tmp16510, i64 1
- %tmp16512 = getelementptr inbounds float* %tmp16511, i64 1
- %tmp16513 = getelementptr inbounds float* %tmp16512, i64 1
- %tmp16514 = getelementptr inbounds float* %tmp16513, i64 1
- %tmp16515 = getelementptr inbounds float* %tmp16514, i64 1
- %tmp16516 = getelementptr inbounds float* %tmp16515, i64 1
- %tmp16517 = getelementptr inbounds float* %tmp16516, i64 1
- %tmp16518 = getelementptr inbounds float* %tmp16517, i64 1
- %tmp16519 = getelementptr inbounds float* %tmp16518, i64 1
- %tmp16520 = getelementptr inbounds float* %tmp16519, i64 1
- %tmp16521 = getelementptr inbounds float* %tmp16520, i64 1
- %tmp16522 = getelementptr inbounds float* %tmp16521, i64 1
- %tmp16523 = getelementptr inbounds float* %tmp16522, i64 1
- %tmp16524 = getelementptr inbounds float* %tmp16523, i64 1
- %tmp16525 = getelementptr inbounds float* %tmp16524, i64 1
- %tmp16526 = getelementptr inbounds float* %tmp16525, i64 1
- %tmp16527 = getelementptr inbounds float* %tmp16526, i64 1
- %tmp16528 = getelementptr inbounds float* %tmp16527, i64 1
- %tmp16529 = getelementptr inbounds float* %tmp16528, i64 1
- %tmp16530 = getelementptr inbounds float* %tmp16529, i64 1
- %tmp16531 = getelementptr inbounds float* %tmp16530, i64 1
- %tmp16532 = getelementptr inbounds float* %tmp16531, i64 1
- %tmp16533 = getelementptr inbounds float* %tmp16532, i64 1
- %tmp16534 = getelementptr inbounds float* %tmp16533, i64 1
- %tmp16535 = getelementptr inbounds float* %tmp16534, i64 1
- %tmp16536 = getelementptr inbounds float* %tmp16535, i64 1
- %tmp16537 = getelementptr inbounds float* %tmp16536, i64 1
- %tmp16538 = getelementptr inbounds float* %tmp16537, i64 1
- %tmp16539 = getelementptr inbounds float* %tmp16538, i64 1
- %tmp16540 = getelementptr inbounds float* %tmp16539, i64 1
- %tmp16541 = getelementptr inbounds float* %tmp16540, i64 1
- %tmp16542 = getelementptr inbounds float* %tmp16541, i64 1
- %tmp16543 = getelementptr inbounds float* %tmp16542, i64 1
- %tmp16544 = getelementptr inbounds float* %tmp16543, i64 1
- %tmp16545 = getelementptr inbounds float* %tmp16544, i64 1
- %tmp16546 = getelementptr inbounds float* %tmp16545, i64 1
- %tmp16547 = getelementptr inbounds float* %tmp16546, i64 1
- %tmp16548 = getelementptr inbounds float* %tmp16547, i64 1
- %tmp16549 = getelementptr inbounds float* %tmp16548, i64 1
- %tmp16550 = getelementptr inbounds float* %tmp16549, i64 1
- %tmp16551 = getelementptr inbounds float* %tmp16550, i64 1
- %tmp16552 = getelementptr inbounds float* %tmp16551, i64 1
- %tmp16553 = getelementptr inbounds float* %tmp16552, i64 1
- %tmp16554 = getelementptr inbounds float* %tmp16553, i64 1
- %tmp16555 = getelementptr inbounds float* %tmp16554, i64 1
- %tmp16556 = getelementptr inbounds float* %tmp16555, i64 1
- %tmp16557 = getelementptr inbounds float* %tmp16556, i64 1
- %tmp16558 = getelementptr inbounds float* %tmp16557, i64 1
- %tmp16559 = getelementptr inbounds float* %tmp16558, i64 1
- %tmp16560 = getelementptr inbounds float* %tmp16559, i64 1
- %tmp16561 = getelementptr inbounds float* %tmp16560, i64 1
- %tmp16562 = getelementptr inbounds float* %tmp16561, i64 1
- %tmp16563 = getelementptr inbounds float* %tmp16562, i64 1
- %tmp16564 = getelementptr inbounds float* %tmp16563, i64 1
- %tmp16565 = getelementptr inbounds float* %tmp16564, i64 1
- %tmp16566 = getelementptr inbounds float* %tmp16565, i64 1
- %tmp16567 = getelementptr inbounds float* %tmp16566, i64 1
- %tmp16568 = getelementptr inbounds float* %tmp16567, i64 1
- %tmp16569 = getelementptr inbounds float* %tmp16568, i64 1
- %tmp16570 = getelementptr inbounds float* %tmp16569, i64 1
- %tmp16571 = getelementptr inbounds float* %tmp16570, i64 1
- %tmp16572 = getelementptr inbounds float* %tmp16571, i64 1
- %tmp16573 = getelementptr inbounds float* %tmp16572, i64 1
- %tmp16574 = getelementptr inbounds float* %tmp16573, i64 1
- %tmp16575 = getelementptr inbounds float* %tmp16574, i64 1
- %tmp16576 = getelementptr inbounds float* %tmp16575, i64 1
- %tmp16577 = getelementptr inbounds float* %tmp16576, i64 1
- %tmp16578 = getelementptr inbounds float* %tmp16577, i64 1
- %tmp16579 = getelementptr inbounds float* %tmp16578, i64 1
- %tmp16580 = getelementptr inbounds float* %tmp16579, i64 1
- %tmp16581 = getelementptr inbounds float* %tmp16580, i64 1
- %tmp16582 = getelementptr inbounds float* %tmp16581, i64 1
- %tmp16583 = getelementptr inbounds float* %tmp16582, i64 1
- %tmp16584 = getelementptr inbounds float* %tmp16583, i64 1
- %tmp16585 = getelementptr inbounds float* %tmp16584, i64 1
- %tmp16586 = getelementptr inbounds float* %tmp16585, i64 1
- %tmp16587 = getelementptr inbounds float* %tmp16586, i64 1
- %tmp16588 = getelementptr inbounds float* %tmp16587, i64 1
- %tmp16589 = getelementptr inbounds float* %tmp16588, i64 1
- %tmp16590 = getelementptr inbounds float* %tmp16589, i64 1
- %tmp16591 = getelementptr inbounds float* %tmp16590, i64 1
- %tmp16592 = getelementptr inbounds float* %tmp16591, i64 1
- %tmp16593 = getelementptr inbounds float* %tmp16592, i64 1
- %tmp16594 = getelementptr inbounds float* %tmp16593, i64 1
- %tmp16595 = getelementptr inbounds float* %tmp16594, i64 1
- %tmp16596 = getelementptr inbounds float* %tmp16595, i64 1
- %tmp16597 = getelementptr inbounds float* %tmp16596, i64 1
- %tmp16598 = getelementptr inbounds float* %tmp16597, i64 1
- %tmp16599 = getelementptr inbounds float* %tmp16598, i64 1
- %tmp16600 = getelementptr inbounds float* %tmp16599, i64 1
- %tmp16601 = getelementptr inbounds float* %tmp16600, i64 1
- %tmp16602 = getelementptr inbounds float* %tmp16601, i64 1
- %tmp16603 = getelementptr inbounds float* %tmp16602, i64 1
- %tmp16604 = getelementptr inbounds float* %tmp16603, i64 1
- %tmp16605 = getelementptr inbounds float* %tmp16604, i64 1
- %tmp16606 = getelementptr inbounds float* %tmp16605, i64 1
- %tmp16607 = getelementptr inbounds float* %tmp16606, i64 1
- %tmp16608 = getelementptr inbounds float* %tmp16607, i64 1
- %tmp16609 = getelementptr inbounds float* %tmp16608, i64 1
- %tmp16610 = getelementptr inbounds float* %tmp16609, i64 1
- %tmp16611 = getelementptr inbounds float* %tmp16610, i64 1
- %tmp16612 = getelementptr inbounds float* %tmp16611, i64 1
- %tmp16613 = getelementptr inbounds float* %tmp16612, i64 1
- %tmp16614 = getelementptr inbounds float* %tmp16613, i64 1
- %tmp16615 = getelementptr inbounds float* %tmp16614, i64 1
- %tmp16616 = getelementptr inbounds float* %tmp16615, i64 1
- %tmp16617 = getelementptr inbounds float* %tmp16616, i64 1
- %tmp16618 = getelementptr inbounds float* %tmp16617, i64 1
- %tmp16619 = getelementptr inbounds float* %tmp16618, i64 1
- %tmp16620 = getelementptr inbounds float* %tmp16619, i64 1
- %tmp16621 = getelementptr inbounds float* %tmp16620, i64 1
- %tmp16622 = getelementptr inbounds float* %tmp16621, i64 1
- %tmp16623 = getelementptr inbounds float* %tmp16622, i64 1
- %tmp16624 = getelementptr inbounds float* %tmp16623, i64 1
- %tmp16625 = getelementptr inbounds float* %tmp16624, i64 1
- %tmp16626 = getelementptr inbounds float* %tmp16625, i64 1
- %tmp16627 = getelementptr inbounds float* %tmp16626, i64 1
- %tmp16628 = getelementptr inbounds float* %tmp16627, i64 1
- %tmp16629 = getelementptr inbounds float* %tmp16628, i64 1
- %tmp16630 = getelementptr inbounds float* %tmp16629, i64 1
- %tmp16631 = getelementptr inbounds float* %tmp16630, i64 1
- %tmp16632 = getelementptr inbounds float* %tmp16631, i64 1
- %tmp16633 = getelementptr inbounds float* %tmp16632, i64 1
- %tmp16634 = getelementptr inbounds float* %tmp16633, i64 1
- %tmp16635 = getelementptr inbounds float* %tmp16634, i64 1
- %tmp16636 = getelementptr inbounds float* %tmp16635, i64 1
- %tmp16637 = getelementptr inbounds float* %tmp16636, i64 1
- %tmp16638 = getelementptr inbounds float* %tmp16637, i64 1
- %tmp16639 = getelementptr inbounds float* %tmp16638, i64 1
- %tmp16640 = getelementptr inbounds float* %tmp16639, i64 1
- %tmp16641 = getelementptr inbounds float* %tmp16640, i64 1
- %tmp16642 = getelementptr inbounds float* %tmp16641, i64 1
- %tmp16643 = getelementptr inbounds float* %tmp16642, i64 1
- %tmp16644 = getelementptr inbounds float* %tmp16643, i64 1
- %tmp16645 = getelementptr inbounds float* %tmp16644, i64 1
- %tmp16646 = getelementptr inbounds float* %tmp16645, i64 1
- %tmp16647 = getelementptr inbounds float* %tmp16646, i64 1
- %tmp16648 = getelementptr inbounds float* %tmp16647, i64 1
- %tmp16649 = getelementptr inbounds float* %tmp16648, i64 1
- %tmp16650 = getelementptr inbounds float* %tmp16649, i64 1
- %tmp16651 = getelementptr inbounds float* %tmp16650, i64 1
- %tmp16652 = getelementptr inbounds float* %tmp16651, i64 1
- %tmp16653 = getelementptr inbounds float* %tmp16652, i64 1
- %tmp16654 = getelementptr inbounds float* %tmp16653, i64 1
- %tmp16655 = getelementptr inbounds float* %tmp16654, i64 1
- %tmp16656 = getelementptr inbounds float* %tmp16655, i64 1
- %tmp16657 = getelementptr inbounds float* %tmp16656, i64 1
- %tmp16658 = getelementptr inbounds float* %tmp16657, i64 1
- %tmp16659 = getelementptr inbounds float* %tmp16658, i64 1
- %tmp16660 = getelementptr inbounds float* %tmp16659, i64 1
- %tmp16661 = getelementptr inbounds float* %tmp16660, i64 1
- %tmp16662 = getelementptr inbounds float* %tmp16661, i64 1
- %tmp16663 = getelementptr inbounds float* %tmp16662, i64 1
- %tmp16664 = getelementptr inbounds float* %tmp16663, i64 1
- %tmp16665 = getelementptr inbounds float* %tmp16664, i64 1
- %tmp16666 = getelementptr inbounds float* %tmp16665, i64 1
- %tmp16667 = getelementptr inbounds float* %tmp16666, i64 1
- %tmp16668 = getelementptr inbounds float* %tmp16667, i64 1
- %tmp16669 = getelementptr inbounds float* %tmp16668, i64 1
- %tmp16670 = getelementptr inbounds float* %tmp16669, i64 1
- %tmp16671 = getelementptr inbounds float* %tmp16670, i64 1
- %tmp16672 = getelementptr inbounds float* %tmp16671, i64 1
- %tmp16673 = getelementptr inbounds float* %tmp16672, i64 1
- %tmp16674 = getelementptr inbounds float* %tmp16673, i64 1
- %tmp16675 = getelementptr inbounds float* %tmp16674, i64 1
- %tmp16676 = getelementptr inbounds float* %tmp16675, i64 1
- %tmp16677 = getelementptr inbounds float* %tmp16676, i64 1
- %tmp16678 = getelementptr inbounds float* %tmp16677, i64 1
- %tmp16679 = getelementptr inbounds float* %tmp16678, i64 1
- %tmp16680 = getelementptr inbounds float* %tmp16679, i64 1
- %tmp16681 = getelementptr inbounds float* %tmp16680, i64 1
- %tmp16682 = getelementptr inbounds float* %tmp16681, i64 1
- %tmp16683 = getelementptr inbounds float* %tmp16682, i64 1
- %tmp16684 = getelementptr inbounds float* %tmp16683, i64 1
- %tmp16685 = getelementptr inbounds float* %tmp16684, i64 1
- %tmp16686 = getelementptr inbounds float* %tmp16685, i64 1
- %tmp16687 = getelementptr inbounds float* %tmp16686, i64 1
- %tmp16688 = getelementptr inbounds float* %tmp16687, i64 1
- %tmp16689 = getelementptr inbounds float* %tmp16688, i64 1
- %tmp16690 = getelementptr inbounds float* %tmp16689, i64 1
- %tmp16691 = getelementptr inbounds float* %tmp16690, i64 1
- %tmp16692 = getelementptr inbounds float* %tmp16691, i64 1
- %tmp16693 = getelementptr inbounds float* %tmp16692, i64 1
- %tmp16694 = getelementptr inbounds float* %tmp16693, i64 1
- %tmp16695 = getelementptr inbounds float* %tmp16694, i64 1
- %tmp16696 = getelementptr inbounds float* %tmp16695, i64 1
- %tmp16697 = getelementptr inbounds float* %tmp16696, i64 1
- %tmp16698 = getelementptr inbounds float* %tmp16697, i64 1
- %tmp16699 = getelementptr inbounds float* %tmp16698, i64 1
- %tmp16700 = getelementptr inbounds float* %tmp16699, i64 1
- %tmp16701 = getelementptr inbounds float* %tmp16700, i64 1
- %tmp16702 = getelementptr inbounds float* %tmp16701, i64 1
- %tmp16703 = getelementptr inbounds float* %tmp16702, i64 1
- %tmp16704 = getelementptr inbounds float* %tmp16703, i64 1
- %tmp16705 = getelementptr inbounds float* %tmp16704, i64 1
- %tmp16706 = getelementptr inbounds float* %tmp16705, i64 1
- %tmp16707 = getelementptr inbounds float* %tmp16706, i64 1
- %tmp16708 = getelementptr inbounds float* %tmp16707, i64 1
- %tmp16709 = getelementptr inbounds float* %tmp16708, i64 1
- %tmp16710 = getelementptr inbounds float* %tmp16709, i64 1
- %tmp16711 = getelementptr inbounds float* %tmp16710, i64 1
- %tmp16712 = getelementptr inbounds float* %tmp16711, i64 1
- %tmp16713 = getelementptr inbounds float* %tmp16712, i64 1
- %tmp16714 = getelementptr inbounds float* %tmp16713, i64 1
- %tmp16715 = getelementptr inbounds float* %tmp16714, i64 1
- %tmp16716 = getelementptr inbounds float* %tmp16715, i64 1
- %tmp16717 = getelementptr inbounds float* %tmp16716, i64 1
- %tmp16718 = getelementptr inbounds float* %tmp16717, i64 1
- %tmp16719 = getelementptr inbounds float* %tmp16718, i64 1
- %tmp16720 = getelementptr inbounds float* %tmp16719, i64 1
- %tmp16721 = getelementptr inbounds float* %tmp16720, i64 1
- %tmp16722 = getelementptr inbounds float* %tmp16721, i64 1
- %tmp16723 = getelementptr inbounds float* %tmp16722, i64 1
- %tmp16724 = getelementptr inbounds float* %tmp16723, i64 1
- %tmp16725 = getelementptr inbounds float* %tmp16724, i64 1
- %tmp16726 = getelementptr inbounds float* %tmp16725, i64 1
- %tmp16727 = getelementptr inbounds float* %tmp16726, i64 1
- %tmp16728 = getelementptr inbounds float* %tmp16727, i64 1
- %tmp16729 = getelementptr inbounds float* %tmp16728, i64 1
- %tmp16730 = getelementptr inbounds float* %tmp16729, i64 1
- %tmp16731 = getelementptr inbounds float* %tmp16730, i64 1
- %tmp16732 = getelementptr inbounds float* %tmp16731, i64 1
- %tmp16733 = getelementptr inbounds float* %tmp16732, i64 1
- %tmp16734 = getelementptr inbounds float* %tmp16733, i64 1
- %tmp16735 = getelementptr inbounds float* %tmp16734, i64 1
- %tmp16736 = getelementptr inbounds float* %tmp16735, i64 1
- %tmp16737 = getelementptr inbounds float* %tmp16736, i64 1
- %tmp16738 = getelementptr inbounds float* %tmp16737, i64 1
- %tmp16739 = getelementptr inbounds float* %tmp16738, i64 1
- %tmp16740 = getelementptr inbounds float* %tmp16739, i64 1
- %tmp16741 = getelementptr inbounds float* %tmp16740, i64 1
- %tmp16742 = getelementptr inbounds float* %tmp16741, i64 1
- %tmp16743 = getelementptr inbounds float* %tmp16742, i64 1
- %tmp16744 = getelementptr inbounds float* %tmp16743, i64 1
- %tmp16745 = getelementptr inbounds float* %tmp16744, i64 1
- %tmp16746 = getelementptr inbounds float* %tmp16745, i64 1
- %tmp16747 = getelementptr inbounds float* %tmp16746, i64 1
- %tmp16748 = getelementptr inbounds float* %tmp16747, i64 1
- %tmp16749 = getelementptr inbounds float* %tmp16748, i64 1
- %tmp16750 = getelementptr inbounds float* %tmp16749, i64 1
- %tmp16751 = getelementptr inbounds float* %tmp16750, i64 1
- %tmp16752 = getelementptr inbounds float* %tmp16751, i64 1
- %tmp16753 = getelementptr inbounds float* %tmp16752, i64 1
- %tmp16754 = getelementptr inbounds float* %tmp16753, i64 1
- %tmp16755 = getelementptr inbounds float* %tmp16754, i64 1
- %tmp16756 = getelementptr inbounds float* %tmp16755, i64 1
- %tmp16757 = getelementptr inbounds float* %tmp16756, i64 1
- %tmp16758 = getelementptr inbounds float* %tmp16757, i64 1
- %tmp16759 = getelementptr inbounds float* %tmp16758, i64 1
- %tmp16760 = getelementptr inbounds float* %tmp16759, i64 1
- %tmp16761 = getelementptr inbounds float* %tmp16760, i64 1
- %tmp16762 = getelementptr inbounds float* %tmp16761, i64 1
- %tmp16763 = getelementptr inbounds float* %tmp16762, i64 1
- %tmp16764 = getelementptr inbounds float* %tmp16763, i64 1
- %tmp16765 = getelementptr inbounds float* %tmp16764, i64 1
- %tmp16766 = getelementptr inbounds float* %tmp16765, i64 1
- %tmp16767 = getelementptr inbounds float* %tmp16766, i64 1
- %tmp16768 = getelementptr inbounds float* %tmp16767, i64 1
- %tmp16769 = getelementptr inbounds float* %tmp16768, i64 1
- %tmp16770 = getelementptr inbounds float* %tmp16769, i64 1
- %tmp16771 = getelementptr inbounds float* %tmp16770, i64 1
- %tmp16772 = getelementptr inbounds float* %tmp16771, i64 1
- %tmp16773 = getelementptr inbounds float* %tmp16772, i64 1
- %tmp16774 = getelementptr inbounds float* %tmp16773, i64 1
- %tmp16775 = getelementptr inbounds float* %tmp16774, i64 1
- %tmp16776 = getelementptr inbounds float* %tmp16775, i64 1
- %tmp16777 = getelementptr inbounds float* %tmp16776, i64 1
- %tmp16778 = getelementptr inbounds float* %tmp16777, i64 1
- %tmp16779 = getelementptr inbounds float* %tmp16778, i64 1
- %tmp16780 = getelementptr inbounds float* %tmp16779, i64 1
- %tmp16781 = getelementptr inbounds float* %tmp16780, i64 1
- %tmp16782 = getelementptr inbounds float* %tmp16781, i64 1
- %tmp16783 = getelementptr inbounds float* %tmp16782, i64 1
- %tmp16784 = getelementptr inbounds float* %tmp16783, i64 1
- %tmp16785 = getelementptr inbounds float* %tmp16784, i64 1
- %tmp16786 = getelementptr inbounds float* %tmp16785, i64 1
- %tmp16787 = getelementptr inbounds float* %tmp16786, i64 1
- %tmp16788 = getelementptr inbounds float* %tmp16787, i64 1
- %tmp16789 = getelementptr inbounds float* %tmp16788, i64 1
- %tmp16790 = getelementptr inbounds float* %tmp16789, i64 1
- %tmp16791 = getelementptr inbounds float* %tmp16790, i64 1
- %tmp16792 = getelementptr inbounds float* %tmp16791, i64 1
- %tmp16793 = getelementptr inbounds float* %tmp16792, i64 1
- %tmp16794 = getelementptr inbounds float* %tmp16793, i64 1
- %tmp16795 = getelementptr inbounds float* %tmp16794, i64 1
- %tmp16796 = getelementptr inbounds float* %tmp16795, i64 1
- %tmp16797 = getelementptr inbounds float* %tmp16796, i64 1
- %tmp16798 = getelementptr inbounds float* %tmp16797, i64 1
- %tmp16799 = getelementptr inbounds float* %tmp16798, i64 1
- %tmp16800 = getelementptr inbounds float* %tmp16799, i64 1
- %tmp16801 = getelementptr inbounds float* %tmp16800, i64 1
- %tmp16802 = getelementptr inbounds float* %tmp16801, i64 1
- %tmp16803 = getelementptr inbounds float* %tmp16802, i64 1
- %tmp16804 = getelementptr inbounds float* %tmp16803, i64 1
- %tmp16805 = getelementptr inbounds float* %tmp16804, i64 1
- %tmp16806 = getelementptr inbounds float* %tmp16805, i64 1
- %tmp16807 = getelementptr inbounds float* %tmp16806, i64 1
- %tmp16808 = getelementptr inbounds float* %tmp16807, i64 1
- %tmp16809 = getelementptr inbounds float* %tmp16808, i64 1
- %tmp16810 = getelementptr inbounds float* %tmp16809, i64 1
- %tmp16811 = getelementptr inbounds float* %tmp16810, i64 1
- %tmp16812 = getelementptr inbounds float* %tmp16811, i64 1
- %tmp16813 = getelementptr inbounds float* %tmp16812, i64 1
- %tmp16814 = getelementptr inbounds float* %tmp16813, i64 1
- %tmp16815 = getelementptr inbounds float* %tmp16814, i64 1
- %tmp16816 = getelementptr inbounds float* %tmp16815, i64 1
- %tmp16817 = getelementptr inbounds float* %tmp16816, i64 1
- %tmp16818 = getelementptr inbounds float* %tmp16817, i64 1
- %tmp16819 = getelementptr inbounds float* %tmp16818, i64 1
- %tmp16820 = getelementptr inbounds float* %tmp16819, i64 1
- %tmp16821 = getelementptr inbounds float* %tmp16820, i64 1
- %tmp16822 = getelementptr inbounds float* %tmp16821, i64 1
- %tmp16823 = getelementptr inbounds float* %tmp16822, i64 1
- %tmp16824 = getelementptr inbounds float* %tmp16823, i64 1
- %tmp16825 = getelementptr inbounds float* %tmp16824, i64 1
- %tmp16826 = getelementptr inbounds float* %tmp16825, i64 1
- %tmp16827 = getelementptr inbounds float* %tmp16826, i64 1
- %tmp16828 = getelementptr inbounds float* %tmp16827, i64 1
- %tmp16829 = getelementptr inbounds float* %tmp16828, i64 1
- %tmp16830 = getelementptr inbounds float* %tmp16829, i64 1
- %tmp16831 = getelementptr inbounds float* %tmp16830, i64 1
- %tmp16832 = getelementptr inbounds float* %tmp16831, i64 1
- %tmp16833 = getelementptr inbounds float* %tmp16832, i64 1
- %tmp16834 = getelementptr inbounds float* %tmp16833, i64 1
- %tmp16835 = getelementptr inbounds float* %tmp16834, i64 1
- %tmp16836 = getelementptr inbounds float* %tmp16835, i64 1
- %tmp16837 = getelementptr inbounds float* %tmp16836, i64 1
- %tmp16838 = getelementptr inbounds float* %tmp16837, i64 1
- %tmp16839 = getelementptr inbounds float* %tmp16838, i64 1
- %tmp16840 = getelementptr inbounds float* %tmp16839, i64 1
- %tmp16841 = getelementptr inbounds float* %tmp16840, i64 1
- %tmp16842 = getelementptr inbounds float* %tmp16841, i64 1
- %tmp16843 = getelementptr inbounds float* %tmp16842, i64 1
- %tmp16844 = getelementptr inbounds float* %tmp16843, i64 1
- %tmp16845 = getelementptr inbounds float* %tmp16844, i64 1
- %tmp16846 = getelementptr inbounds float* %tmp16845, i64 1
- %tmp16847 = getelementptr inbounds float* %tmp16846, i64 1
- %tmp16848 = getelementptr inbounds float* %tmp16847, i64 1
- %tmp16849 = getelementptr inbounds float* %tmp16848, i64 1
- %tmp16850 = getelementptr inbounds float* %tmp16849, i64 1
- %tmp16851 = getelementptr inbounds float* %tmp16850, i64 1
- %tmp16852 = getelementptr inbounds float* %tmp16851, i64 1
- %tmp16853 = getelementptr inbounds float* %tmp16852, i64 1
- %tmp16854 = getelementptr inbounds float* %tmp16853, i64 1
- %tmp16855 = getelementptr inbounds float* %tmp16854, i64 1
- %tmp16856 = getelementptr inbounds float* %tmp16855, i64 1
- %tmp16857 = getelementptr inbounds float* %tmp16856, i64 1
- %tmp16858 = getelementptr inbounds float* %tmp16857, i64 1
- %tmp16859 = getelementptr inbounds float* %tmp16858, i64 1
- %tmp16860 = getelementptr inbounds float* %tmp16859, i64 1
- %tmp16861 = getelementptr inbounds float* %tmp16860, i64 1
- %tmp16862 = getelementptr inbounds float* %tmp16861, i64 1
- %tmp16863 = getelementptr inbounds float* %tmp16862, i64 1
- %tmp16864 = getelementptr inbounds float* %tmp16863, i64 1
- %tmp16865 = getelementptr inbounds float* %tmp16864, i64 1
- %tmp16866 = getelementptr inbounds float* %tmp16865, i64 1
- %tmp16867 = getelementptr inbounds float* %tmp16866, i64 1
- %tmp16868 = getelementptr inbounds float* %tmp16867, i64 1
- %tmp16869 = getelementptr inbounds float* %tmp16868, i64 1
- %tmp16870 = getelementptr inbounds float* %tmp16869, i64 1
- %tmp16871 = getelementptr inbounds float* %tmp16870, i64 1
- %tmp16872 = getelementptr inbounds float* %tmp16871, i64 1
- %tmp16873 = getelementptr inbounds float* %tmp16872, i64 1
- %tmp16874 = getelementptr inbounds float* %tmp16873, i64 1
- %tmp16875 = getelementptr inbounds float* %tmp16874, i64 1
- %tmp16876 = getelementptr inbounds float* %tmp16875, i64 1
- %tmp16877 = getelementptr inbounds float* %tmp16876, i64 1
- %tmp16878 = getelementptr inbounds float* %tmp16877, i64 1
- %tmp16879 = getelementptr inbounds float* %tmp16878, i64 1
- %tmp16880 = getelementptr inbounds float* %tmp16879, i64 1
- %tmp16881 = getelementptr inbounds float* %tmp16880, i64 1
- %tmp16882 = getelementptr inbounds float* %tmp16881, i64 1
- %tmp16883 = getelementptr inbounds float* %tmp16882, i64 1
- %tmp16884 = getelementptr inbounds float* %tmp16883, i64 1
- %tmp16885 = getelementptr inbounds float* %tmp16884, i64 1
- %tmp16886 = getelementptr inbounds float* %tmp16885, i64 1
- %tmp16887 = getelementptr inbounds float* %tmp16886, i64 1
- %tmp16888 = getelementptr inbounds float* %tmp16887, i64 1
- %tmp16889 = getelementptr inbounds float* %tmp16888, i64 1
- %tmp16890 = getelementptr inbounds float* %tmp16889, i64 1
- %tmp16891 = getelementptr inbounds float* %tmp16890, i64 1
- %tmp16892 = getelementptr inbounds float* %tmp16891, i64 1
- %tmp16893 = getelementptr inbounds float* %tmp16892, i64 1
- %tmp16894 = getelementptr inbounds float* %tmp16893, i64 1
- %tmp16895 = getelementptr inbounds float* %tmp16894, i64 1
- %tmp16896 = getelementptr inbounds float* %tmp16895, i64 1
- %tmp16897 = getelementptr inbounds float* %tmp16896, i64 1
- %tmp16898 = getelementptr inbounds float* %tmp16897, i64 1
- %tmp16899 = getelementptr inbounds float* %tmp16898, i64 1
- %tmp16900 = getelementptr inbounds float* %tmp16899, i64 1
- %tmp16901 = getelementptr inbounds float* %tmp16900, i64 1
- %tmp16902 = getelementptr inbounds float* %tmp16901, i64 1
- %tmp16903 = getelementptr inbounds float* %tmp16902, i64 1
- %tmp16904 = getelementptr inbounds float* %tmp16903, i64 1
- %tmp16905 = getelementptr inbounds float* %tmp16904, i64 1
- %tmp16906 = getelementptr inbounds float* %tmp16905, i64 1
- %tmp16907 = getelementptr inbounds float* %tmp16906, i64 1
- %tmp16908 = getelementptr inbounds float* %tmp16907, i64 1
- %tmp16909 = getelementptr inbounds float* %tmp16908, i64 1
- %tmp16910 = getelementptr inbounds float* %tmp16909, i64 1
- %tmp16911 = getelementptr inbounds float* %tmp16910, i64 1
- %tmp16912 = getelementptr inbounds float* %tmp16911, i64 1
- %tmp16913 = getelementptr inbounds float* %tmp16912, i64 1
- %tmp16914 = getelementptr inbounds float* %tmp16913, i64 1
- %tmp16915 = getelementptr inbounds float* %tmp16914, i64 1
- %tmp16916 = getelementptr inbounds float* %tmp16915, i64 1
- %tmp16917 = getelementptr inbounds float* %tmp16916, i64 1
- %tmp16918 = getelementptr inbounds float* %tmp16917, i64 1
- %tmp16919 = getelementptr inbounds float* %tmp16918, i64 1
- %tmp16920 = getelementptr inbounds float* %tmp16919, i64 1
- %tmp16921 = getelementptr inbounds float* %tmp16920, i64 1
- %tmp16922 = getelementptr inbounds float* %tmp16921, i64 1
- %tmp16923 = getelementptr inbounds float* %tmp16922, i64 1
- %tmp16924 = getelementptr inbounds float* %tmp16923, i64 1
- %tmp16925 = getelementptr inbounds float* %tmp16924, i64 1
- %tmp16926 = getelementptr inbounds float* %tmp16925, i64 1
- %tmp16927 = getelementptr inbounds float* %tmp16926, i64 1
- %tmp16928 = getelementptr inbounds float* %tmp16927, i64 1
- %tmp16929 = getelementptr inbounds float* %tmp16928, i64 1
- %tmp16930 = getelementptr inbounds float* %tmp16929, i64 1
- %tmp16931 = getelementptr inbounds float* %tmp16930, i64 1
- %tmp16932 = getelementptr inbounds float* %tmp16931, i64 1
- %tmp16933 = getelementptr inbounds float* %tmp16932, i64 1
- %tmp16934 = getelementptr inbounds float* %tmp16933, i64 1
- %tmp16935 = getelementptr inbounds float* %tmp16934, i64 1
- %tmp16936 = getelementptr inbounds float* %tmp16935, i64 1
- %tmp16937 = getelementptr inbounds float* %tmp16936, i64 1
- %tmp16938 = getelementptr inbounds float* %tmp16937, i64 1
- %tmp16939 = getelementptr inbounds float* %tmp16938, i64 1
- %tmp16940 = getelementptr inbounds float* %tmp16939, i64 1
- %tmp16941 = getelementptr inbounds float* %tmp16940, i64 1
- %tmp16942 = getelementptr inbounds float* %tmp16941, i64 1
- %tmp16943 = getelementptr inbounds float* %tmp16942, i64 1
- %tmp16944 = getelementptr inbounds float* %tmp16943, i64 1
- %tmp16945 = getelementptr inbounds float* %tmp16944, i64 1
- %tmp16946 = getelementptr inbounds float* %tmp16945, i64 1
- %tmp16947 = getelementptr inbounds float* %tmp16946, i64 1
- %tmp16948 = getelementptr inbounds float* %tmp16947, i64 1
- %tmp16949 = getelementptr inbounds float* %tmp16948, i64 1
- %tmp16950 = getelementptr inbounds float* %tmp16949, i64 1
- %tmp16951 = getelementptr inbounds float* %tmp16950, i64 1
- %tmp16952 = getelementptr inbounds float* %tmp16951, i64 1
- %tmp16953 = getelementptr inbounds float* %tmp16952, i64 1
- %tmp16954 = getelementptr inbounds float* %tmp16953, i64 1
- %tmp16955 = getelementptr inbounds float* %tmp16954, i64 1
- %tmp16956 = getelementptr inbounds float* %tmp16955, i64 1
- %tmp16957 = getelementptr inbounds float* %tmp16956, i64 1
- %tmp16958 = getelementptr inbounds float* %tmp16957, i64 1
- %tmp16959 = getelementptr inbounds float* %tmp16958, i64 1
- %tmp16960 = getelementptr inbounds float* %tmp16959, i64 1
- %tmp16961 = getelementptr inbounds float* %tmp16960, i64 1
- %tmp16962 = getelementptr inbounds float* %tmp16961, i64 1
- %tmp16963 = getelementptr inbounds float* %tmp16962, i64 1
- %tmp16964 = getelementptr inbounds float* %tmp16963, i64 1
- %tmp16965 = getelementptr inbounds float* %tmp16964, i64 1
- %tmp16966 = getelementptr inbounds float* %tmp16965, i64 1
- %tmp16967 = getelementptr inbounds float* %tmp16966, i64 1
- %tmp16968 = getelementptr inbounds float* %tmp16967, i64 1
- %tmp16969 = getelementptr inbounds float* %tmp16968, i64 1
- %tmp16970 = getelementptr inbounds float* %tmp16969, i64 1
- %tmp16971 = getelementptr inbounds float* %tmp16970, i64 1
- %tmp16972 = getelementptr inbounds float* %tmp16971, i64 1
- %tmp16973 = getelementptr inbounds float* %tmp16972, i64 1
- %tmp16974 = getelementptr inbounds float* %tmp16973, i64 1
- %tmp16975 = getelementptr inbounds float* %tmp16974, i64 1
- %tmp16976 = getelementptr inbounds float* %tmp16975, i64 1
- %tmp16977 = getelementptr inbounds float* %tmp16976, i64 1
- %tmp16978 = getelementptr inbounds float* %tmp16977, i64 1
- %tmp16979 = getelementptr inbounds float* %tmp16978, i64 1
- %tmp16980 = getelementptr inbounds float* %tmp16979, i64 1
- %tmp16981 = getelementptr inbounds float* %tmp16980, i64 1
- %tmp16982 = getelementptr inbounds float* %tmp16981, i64 1
- %tmp16983 = getelementptr inbounds float* %tmp16982, i64 1
- %tmp16984 = getelementptr inbounds float* %tmp16983, i64 1
- %tmp16985 = getelementptr inbounds float* %tmp16984, i64 1
- %tmp16986 = getelementptr inbounds float* %tmp16985, i64 1
- %tmp16987 = getelementptr inbounds float* %tmp16986, i64 1
- %tmp16988 = getelementptr inbounds float* %tmp16987, i64 1
- %tmp16989 = getelementptr inbounds float* %tmp16988, i64 1
- %tmp16990 = getelementptr inbounds float* %tmp16989, i64 1
- %tmp16991 = getelementptr inbounds float* %tmp16990, i64 1
- %tmp16992 = getelementptr inbounds float* %tmp16991, i64 1
- %tmp16993 = getelementptr inbounds float* %tmp16992, i64 1
- %tmp16994 = getelementptr inbounds float* %tmp16993, i64 1
- %tmp16995 = getelementptr inbounds float* %tmp16994, i64 1
- %tmp16996 = getelementptr inbounds float* %tmp16995, i64 1
- %tmp16997 = getelementptr inbounds float* %tmp16996, i64 1
- %tmp16998 = getelementptr inbounds float* %tmp16997, i64 1
- %tmp16999 = getelementptr inbounds float* %tmp16998, i64 1
- %tmp17000 = getelementptr inbounds float* %tmp16999, i64 1
- %tmp17001 = getelementptr inbounds float* %tmp17000, i64 1
- %tmp17002 = getelementptr inbounds float* %tmp17001, i64 1
- %tmp17003 = getelementptr inbounds float* %tmp17002, i64 1
- %tmp17004 = getelementptr inbounds float* %tmp17003, i64 1
- %tmp17005 = getelementptr inbounds float* %tmp17004, i64 1
- %tmp17006 = getelementptr inbounds float* %tmp17005, i64 1
- %tmp17007 = getelementptr inbounds float* %tmp17006, i64 1
- %tmp17008 = getelementptr inbounds float* %tmp17007, i64 1
- %tmp17009 = getelementptr inbounds float* %tmp17008, i64 1
- %tmp17010 = getelementptr inbounds float* %tmp17009, i64 1
- %tmp17011 = getelementptr inbounds float* %tmp17010, i64 1
- %tmp17012 = getelementptr inbounds float* %tmp17011, i64 1
- %tmp17013 = getelementptr inbounds float* %tmp17012, i64 1
- %tmp17014 = getelementptr inbounds float* %tmp17013, i64 1
- %tmp17015 = getelementptr inbounds float* %tmp17014, i64 1
- %tmp17016 = getelementptr inbounds float* %tmp17015, i64 1
- %tmp17017 = getelementptr inbounds float* %tmp17016, i64 1
- %tmp17018 = getelementptr inbounds float* %tmp17017, i64 1
- %tmp17019 = getelementptr inbounds float* %tmp17018, i64 1
- %tmp17020 = getelementptr inbounds float* %tmp17019, i64 1
- %tmp17021 = getelementptr inbounds float* %tmp17020, i64 1
- %tmp17022 = getelementptr inbounds float* %tmp17021, i64 1
- %tmp17023 = getelementptr inbounds float* %tmp17022, i64 1
- %tmp17024 = getelementptr inbounds float* %tmp17023, i64 1
- %tmp17025 = getelementptr inbounds float* %tmp17024, i64 1
- %tmp17026 = getelementptr inbounds float* %tmp17025, i64 1
- %tmp17027 = getelementptr inbounds float* %tmp17026, i64 1
- %tmp17028 = getelementptr inbounds float* %tmp17027, i64 1
- %tmp17029 = getelementptr inbounds float* %tmp17028, i64 1
- %tmp17030 = getelementptr inbounds float* %tmp17029, i64 1
- %tmp17031 = getelementptr inbounds float* %tmp17030, i64 1
- %tmp17032 = getelementptr inbounds float* %tmp17031, i64 1
- %tmp17033 = getelementptr inbounds float* %tmp17032, i64 1
- %tmp17034 = getelementptr inbounds float* %tmp17033, i64 1
- %tmp17035 = getelementptr inbounds float* %tmp17034, i64 1
- %tmp17036 = getelementptr inbounds float* %tmp17035, i64 1
- %tmp17037 = getelementptr inbounds float* %tmp17036, i64 1
- %tmp17038 = getelementptr inbounds float* %tmp17037, i64 1
- %tmp17039 = getelementptr inbounds float* %tmp17038, i64 1
- %tmp17040 = getelementptr inbounds float* %tmp17039, i64 1
- %tmp17041 = getelementptr inbounds float* %tmp17040, i64 1
- %tmp17042 = getelementptr inbounds float* %tmp17041, i64 1
- %tmp17043 = getelementptr inbounds float* %tmp17042, i64 1
- %tmp17044 = getelementptr inbounds float* %tmp17043, i64 1
- %tmp17045 = getelementptr inbounds float* %tmp17044, i64 1
- %tmp17046 = getelementptr inbounds float* %tmp17045, i64 1
- %tmp17047 = getelementptr inbounds float* %tmp17046, i64 1
- %tmp17048 = getelementptr inbounds float* %tmp17047, i64 1
- %tmp17049 = getelementptr inbounds float* %tmp17048, i64 1
- %tmp17050 = getelementptr inbounds float* %tmp17049, i64 1
- %tmp17051 = getelementptr inbounds float* %tmp17050, i64 1
- %tmp17052 = getelementptr inbounds float* %tmp17051, i64 1
- %tmp17053 = getelementptr inbounds float* %tmp17052, i64 1
- %tmp17054 = getelementptr inbounds float* %tmp17053, i64 1
- %tmp17055 = getelementptr inbounds float* %tmp17054, i64 1
- %tmp17056 = getelementptr inbounds float* %tmp17055, i64 1
- %tmp17057 = getelementptr inbounds float* %tmp17056, i64 1
- %tmp17058 = getelementptr inbounds float* %tmp17057, i64 1
- %tmp17059 = getelementptr inbounds float* %tmp17058, i64 1
- %tmp17060 = getelementptr inbounds float* %tmp17059, i64 1
- %tmp17061 = getelementptr inbounds float* %tmp17060, i64 1
- %tmp17062 = getelementptr inbounds float* %tmp17061, i64 1
- %tmp17063 = getelementptr inbounds float* %tmp17062, i64 1
- %tmp17064 = getelementptr inbounds float* %tmp17063, i64 1
- %tmp17065 = getelementptr inbounds float* %tmp17064, i64 1
- %tmp17066 = getelementptr inbounds float* %tmp17065, i64 1
- %tmp17067 = getelementptr inbounds float* %tmp17066, i64 1
- %tmp17068 = getelementptr inbounds float* %tmp17067, i64 1
- %tmp17069 = getelementptr inbounds float* %tmp17068, i64 1
- %tmp17070 = getelementptr inbounds float* %tmp17069, i64 1
- %tmp17071 = getelementptr inbounds float* %tmp17070, i64 1
- %tmp17072 = getelementptr inbounds float* %tmp17071, i64 1
- %tmp17073 = getelementptr inbounds float* %tmp17072, i64 1
- %tmp17074 = getelementptr inbounds float* %tmp17073, i64 1
- %tmp17075 = getelementptr inbounds float* %tmp17074, i64 1
- %tmp17076 = getelementptr inbounds float* %tmp17075, i64 1
- %tmp17077 = getelementptr inbounds float* %tmp17076, i64 1
- %tmp17078 = getelementptr inbounds float* %tmp17077, i64 1
- %tmp17079 = getelementptr inbounds float* %tmp17078, i64 1
- %tmp17080 = getelementptr inbounds float* %tmp17079, i64 1
- %tmp17081 = getelementptr inbounds float* %tmp17080, i64 1
- %tmp17082 = getelementptr inbounds float* %tmp17081, i64 1
- %tmp17083 = getelementptr inbounds float* %tmp17082, i64 1
- %tmp17084 = getelementptr inbounds float* %tmp17083, i64 1
- %tmp17085 = getelementptr inbounds float* %tmp17084, i64 1
- %tmp17086 = getelementptr inbounds float* %tmp17085, i64 1
- %tmp17087 = getelementptr inbounds float* %tmp17086, i64 1
- %tmp17088 = getelementptr inbounds float* %tmp17087, i64 1
- %tmp17089 = getelementptr inbounds float* %tmp17088, i64 1
- %tmp17090 = getelementptr inbounds float* %tmp17089, i64 1
- %tmp17091 = getelementptr inbounds float* %tmp17090, i64 1
- %tmp17092 = getelementptr inbounds float* %tmp17091, i64 1
- %tmp17093 = getelementptr inbounds float* %tmp17092, i64 1
- %tmp17094 = getelementptr inbounds float* %tmp17093, i64 1
- %tmp17095 = getelementptr inbounds float* %tmp17094, i64 1
- %tmp17096 = getelementptr inbounds float* %tmp17095, i64 1
- %tmp17097 = getelementptr inbounds float* %tmp17096, i64 1
- %tmp17098 = getelementptr inbounds float* %tmp17097, i64 1
- %tmp17099 = getelementptr inbounds float* %tmp17098, i64 1
- %tmp17100 = getelementptr inbounds float* %tmp17099, i64 1
- %tmp17101 = getelementptr inbounds float* %tmp17100, i64 1
- %tmp17102 = getelementptr inbounds float* %tmp17101, i64 1
- %tmp17103 = getelementptr inbounds float* %tmp17102, i64 1
- %tmp17104 = getelementptr inbounds float* %tmp17103, i64 1
- %tmp17105 = getelementptr inbounds float* %tmp17104, i64 1
- %tmp17106 = getelementptr inbounds float* %tmp17105, i64 1
- %tmp17107 = getelementptr inbounds float* %tmp17106, i64 1
- %tmp17108 = getelementptr inbounds float* %tmp17107, i64 1
- %tmp17109 = getelementptr inbounds float* %tmp17108, i64 1
- %tmp17110 = getelementptr inbounds float* %tmp17109, i64 1
- %tmp17111 = getelementptr inbounds float* %tmp17110, i64 1
- %tmp17112 = getelementptr inbounds float* %tmp17111, i64 1
- %tmp17113 = getelementptr inbounds float* %tmp17112, i64 1
- %tmp17114 = getelementptr inbounds float* %tmp17113, i64 1
- %tmp17115 = getelementptr inbounds float* %tmp17114, i64 1
- %tmp17116 = getelementptr inbounds float* %tmp17115, i64 1
- %tmp17117 = getelementptr inbounds float* %tmp17116, i64 1
- %tmp17118 = getelementptr inbounds float* %tmp17117, i64 1
- %tmp17119 = getelementptr inbounds float* %tmp17118, i64 1
- %tmp17120 = getelementptr inbounds float* %tmp17119, i64 1
- %tmp17121 = getelementptr inbounds float* %tmp17120, i64 1
- %tmp17122 = getelementptr inbounds float* %tmp17121, i64 1
- %tmp17123 = getelementptr inbounds float* %tmp17122, i64 1
- %tmp17124 = getelementptr inbounds float* %tmp17123, i64 1
- %tmp17125 = getelementptr inbounds float* %tmp17124, i64 1
- %tmp17126 = getelementptr inbounds float* %tmp17125, i64 1
- %tmp17127 = getelementptr inbounds float* %tmp17126, i64 1
- %tmp17128 = getelementptr inbounds float* %tmp17127, i64 1
- %tmp17129 = getelementptr inbounds float* %tmp17128, i64 1
- %tmp17130 = getelementptr inbounds float* %tmp17129, i64 1
- %tmp17131 = getelementptr inbounds float* %tmp17130, i64 1
- %tmp17132 = getelementptr inbounds float* %tmp17131, i64 1
- %tmp17133 = getelementptr inbounds float* %tmp17132, i64 1
- %tmp17134 = getelementptr inbounds float* %tmp17133, i64 1
- %tmp17135 = getelementptr inbounds float* %tmp17134, i64 1
- %tmp17136 = getelementptr inbounds float* %tmp17135, i64 1
- %tmp17137 = getelementptr inbounds float* %tmp17136, i64 1
- %tmp17138 = getelementptr inbounds float* %tmp17137, i64 1
- %tmp17139 = getelementptr inbounds float* %tmp17138, i64 1
- %tmp17140 = getelementptr inbounds float* %tmp17139, i64 1
- %tmp17141 = getelementptr inbounds float* %tmp17140, i64 1
- %tmp17142 = getelementptr inbounds float* %tmp17141, i64 1
- %tmp17143 = getelementptr inbounds float* %tmp17142, i64 1
- %tmp17144 = getelementptr inbounds float* %tmp17143, i64 1
- %tmp17145 = getelementptr inbounds float* %tmp17144, i64 1
- %tmp17146 = getelementptr inbounds float* %tmp17145, i64 1
- %tmp17147 = getelementptr inbounds float* %tmp17146, i64 1
- %tmp17148 = getelementptr inbounds float* %tmp17147, i64 1
- %tmp17149 = getelementptr inbounds float* %tmp17148, i64 1
- %tmp17150 = getelementptr inbounds float* %tmp17149, i64 1
- %tmp17151 = getelementptr inbounds float* %tmp17150, i64 1
- %tmp17152 = getelementptr inbounds float* %tmp17151, i64 1
- %tmp17153 = getelementptr inbounds float* %tmp17152, i64 1
- %tmp17154 = getelementptr inbounds float* %tmp17153, i64 1
- %tmp17155 = getelementptr inbounds float* %tmp17154, i64 1
- %tmp17156 = getelementptr inbounds float* %tmp17155, i64 1
- %tmp17157 = getelementptr inbounds float* %tmp17156, i64 1
- %tmp17158 = getelementptr inbounds float* %tmp17157, i64 1
- %tmp17159 = getelementptr inbounds float* %tmp17158, i64 1
- %tmp17160 = getelementptr inbounds float* %tmp17159, i64 1
- %tmp17161 = getelementptr inbounds float* %tmp17160, i64 1
- %tmp17162 = getelementptr inbounds float* %tmp17161, i64 1
- %tmp17163 = getelementptr inbounds float* %tmp17162, i64 1
- %tmp17164 = getelementptr inbounds float* %tmp17163, i64 1
- %tmp17165 = getelementptr inbounds float* %tmp17164, i64 1
- %tmp17166 = getelementptr inbounds float* %tmp17165, i64 1
- %tmp17167 = getelementptr inbounds float* %tmp17166, i64 1
- %tmp17168 = getelementptr inbounds float* %tmp17167, i64 1
- %tmp17169 = getelementptr inbounds float* %tmp17168, i64 1
- %tmp17170 = getelementptr inbounds float* %tmp17169, i64 1
- %tmp17171 = getelementptr inbounds float* %tmp17170, i64 1
- %tmp17172 = getelementptr inbounds float* %tmp17171, i64 1
- %tmp17173 = getelementptr inbounds float* %tmp17172, i64 1
- %tmp17174 = getelementptr inbounds float* %tmp17173, i64 1
- %tmp17175 = getelementptr inbounds float* %tmp17174, i64 1
- %tmp17176 = getelementptr inbounds float* %tmp17175, i64 1
- %tmp17177 = getelementptr inbounds float* %tmp17176, i64 1
- %tmp17178 = getelementptr inbounds float* %tmp17177, i64 1
- %tmp17179 = getelementptr inbounds float* %tmp17178, i64 1
- %tmp17180 = getelementptr inbounds float* %tmp17179, i64 1
- %tmp17181 = getelementptr inbounds float* %tmp17180, i64 1
- %tmp17182 = getelementptr inbounds float* %tmp17181, i64 1
- %tmp17183 = getelementptr inbounds float* %tmp17182, i64 1
- %tmp17184 = getelementptr inbounds float* %tmp17183, i64 1
- %tmp17185 = getelementptr inbounds float* %tmp17184, i64 1
- %tmp17186 = getelementptr inbounds float* %tmp17185, i64 1
- %tmp17187 = getelementptr inbounds float* %tmp17186, i64 1
- %tmp17188 = getelementptr inbounds float* %tmp17187, i64 1
- %tmp17189 = getelementptr inbounds float* %tmp17188, i64 1
- %tmp17190 = getelementptr inbounds float* %tmp17189, i64 1
- %tmp17191 = getelementptr inbounds float* %tmp17190, i64 1
- %tmp17192 = getelementptr inbounds float* %tmp17191, i64 1
- %tmp17193 = getelementptr inbounds float* %tmp17192, i64 1
- %tmp17194 = getelementptr inbounds float* %tmp17193, i64 1
- %tmp17195 = getelementptr inbounds float* %tmp17194, i64 1
- %tmp17196 = getelementptr inbounds float* %tmp17195, i64 1
- %tmp17197 = getelementptr inbounds float* %tmp17196, i64 1
- %tmp17198 = getelementptr inbounds float* %tmp17197, i64 1
- %tmp17199 = getelementptr inbounds float* %tmp17198, i64 1
- %tmp17200 = getelementptr inbounds float* %tmp17199, i64 1
- %tmp17201 = getelementptr inbounds float* %tmp17200, i64 1
- %tmp17202 = getelementptr inbounds float* %tmp17201, i64 1
- %tmp17203 = getelementptr inbounds float* %tmp17202, i64 1
- %tmp17204 = getelementptr inbounds float* %tmp17203, i64 1
- %tmp17205 = getelementptr inbounds float* %tmp17204, i64 1
- %tmp17206 = getelementptr inbounds float* %tmp17205, i64 1
- %tmp17207 = getelementptr inbounds float* %tmp17206, i64 1
- %tmp17208 = getelementptr inbounds float* %tmp17207, i64 1
- %tmp17209 = getelementptr inbounds float* %tmp17208, i64 1
- %tmp17210 = getelementptr inbounds float* %tmp17209, i64 1
- %tmp17211 = getelementptr inbounds float* %tmp17210, i64 1
- %tmp17212 = getelementptr inbounds float* %tmp17211, i64 1
- %tmp17213 = getelementptr inbounds float* %tmp17212, i64 1
- %tmp17214 = getelementptr inbounds float* %tmp17213, i64 1
- %tmp17215 = getelementptr inbounds float* %tmp17214, i64 1
- %tmp17216 = getelementptr inbounds float* %tmp17215, i64 1
- %tmp17217 = getelementptr inbounds float* %tmp17216, i64 1
- %tmp17218 = getelementptr inbounds float* %tmp17217, i64 1
- %tmp17219 = getelementptr inbounds float* %tmp17218, i64 1
- %tmp17220 = getelementptr inbounds float* %tmp17219, i64 1
- %tmp17221 = getelementptr inbounds float* %tmp17220, i64 1
- %tmp17222 = getelementptr inbounds float* %tmp17221, i64 1
- %tmp17223 = getelementptr inbounds float* %tmp17222, i64 1
- %tmp17224 = getelementptr inbounds float* %tmp17223, i64 1
- %tmp17225 = getelementptr inbounds float* %tmp17224, i64 1
- %tmp17226 = getelementptr inbounds float* %tmp17225, i64 1
- %tmp17227 = getelementptr inbounds float* %tmp17226, i64 1
- %tmp17228 = getelementptr inbounds float* %tmp17227, i64 1
- %tmp17229 = getelementptr inbounds float* %tmp17228, i64 1
- %tmp17230 = getelementptr inbounds float* %tmp17229, i64 1
- %tmp17231 = getelementptr inbounds float* %tmp17230, i64 1
- %tmp17232 = getelementptr inbounds float* %tmp17231, i64 1
- %tmp17233 = getelementptr inbounds float* %tmp17232, i64 1
- %tmp17234 = getelementptr inbounds float* %tmp17233, i64 1
- %tmp17235 = getelementptr inbounds float* %tmp17234, i64 1
- %tmp17236 = getelementptr inbounds float* %tmp17235, i64 1
- %tmp17237 = getelementptr inbounds float* %tmp17236, i64 1
- %tmp17238 = getelementptr inbounds float* %tmp17237, i64 1
- %tmp17239 = getelementptr inbounds float* %tmp17238, i64 1
- %tmp17240 = getelementptr inbounds float* %tmp17239, i64 1
- %tmp17241 = getelementptr inbounds float* %tmp17240, i64 1
- %tmp17242 = getelementptr inbounds float* %tmp17241, i64 1
- %tmp17243 = getelementptr inbounds float* %tmp17242, i64 1
- %tmp17244 = getelementptr inbounds float* %tmp17243, i64 1
- %tmp17245 = getelementptr inbounds float* %tmp17244, i64 1
- %tmp17246 = getelementptr inbounds float* %tmp17245, i64 1
- %tmp17247 = getelementptr inbounds float* %tmp17246, i64 1
- %tmp17248 = getelementptr inbounds float* %tmp17247, i64 1
- %tmp17249 = getelementptr inbounds float* %tmp17248, i64 1
- %tmp17250 = getelementptr inbounds float* %tmp17249, i64 1
- %tmp17251 = getelementptr inbounds float* %tmp17250, i64 1
- %tmp17252 = getelementptr inbounds float* %tmp17251, i64 1
- %tmp17253 = getelementptr inbounds float* %tmp17252, i64 1
- %tmp17254 = getelementptr inbounds float* %tmp17253, i64 1
- %tmp17255 = getelementptr inbounds float* %tmp17254, i64 1
- %tmp17256 = getelementptr inbounds float* %tmp17255, i64 1
- %tmp17257 = getelementptr inbounds float* %tmp17256, i64 1
- %tmp17258 = getelementptr inbounds float* %tmp17257, i64 1
- %tmp17259 = getelementptr inbounds float* %tmp17258, i64 1
- %tmp17260 = getelementptr inbounds float* %tmp17259, i64 1
- %tmp17261 = getelementptr inbounds float* %tmp17260, i64 1
- %tmp17262 = getelementptr inbounds float* %tmp17261, i64 1
- %tmp17263 = getelementptr inbounds float* %tmp17262, i64 1
- %tmp17264 = getelementptr inbounds float* %tmp17263, i64 1
- %tmp17265 = getelementptr inbounds float* %tmp17264, i64 1
- %tmp17266 = getelementptr inbounds float* %tmp17265, i64 1
- %tmp17267 = getelementptr inbounds float* %tmp17266, i64 1
- %tmp17268 = getelementptr inbounds float* %tmp17267, i64 1
- %tmp17269 = getelementptr inbounds float* %tmp17268, i64 1
- %tmp17270 = getelementptr inbounds float* %tmp17269, i64 1
- %tmp17271 = getelementptr inbounds float* %tmp17270, i64 1
- %tmp17272 = getelementptr inbounds float* %tmp17271, i64 1
- %tmp17273 = getelementptr inbounds float* %tmp17272, i64 1
- %tmp17274 = getelementptr inbounds float* %tmp17273, i64 1
- %tmp17275 = getelementptr inbounds float* %tmp17274, i64 1
- %tmp17276 = getelementptr inbounds float* %tmp17275, i64 1
- %tmp17277 = getelementptr inbounds float* %tmp17276, i64 1
- %tmp17278 = getelementptr inbounds float* %tmp17277, i64 1
- %tmp17279 = getelementptr inbounds float* %tmp17278, i64 1
- %tmp17280 = getelementptr inbounds float* %tmp17279, i64 1
- %tmp17281 = getelementptr inbounds float* %tmp17280, i64 1
- %tmp17282 = getelementptr inbounds float* %tmp17281, i64 1
- %tmp17283 = getelementptr inbounds float* %tmp17282, i64 1
- %tmp17284 = getelementptr inbounds float* %tmp17283, i64 1
- %tmp17285 = getelementptr inbounds float* %tmp17284, i64 1
- %tmp17286 = getelementptr inbounds float* %tmp17285, i64 1
- %tmp17287 = getelementptr inbounds float* %tmp17286, i64 1
- %tmp17288 = getelementptr inbounds float* %tmp17287, i64 1
- %tmp17289 = getelementptr inbounds float* %tmp17288, i64 1
- %tmp17290 = getelementptr inbounds float* %tmp17289, i64 1
- %tmp17291 = getelementptr inbounds float* %tmp17290, i64 1
- %tmp17292 = getelementptr inbounds float* %tmp17291, i64 1
- %tmp17293 = getelementptr inbounds float* %tmp17292, i64 1
- %tmp17294 = getelementptr inbounds float* %tmp17293, i64 1
- %tmp17295 = getelementptr inbounds float* %tmp17294, i64 1
- %tmp17296 = getelementptr inbounds float* %tmp17295, i64 1
- %tmp17297 = getelementptr inbounds float* %tmp17296, i64 1
- %tmp17298 = getelementptr inbounds float* %tmp17297, i64 1
- %tmp17299 = getelementptr inbounds float* %tmp17298, i64 1
- %tmp17300 = getelementptr inbounds float* %tmp17299, i64 1
- %tmp17301 = getelementptr inbounds float* %tmp17300, i64 1
- %tmp17302 = getelementptr inbounds float* %tmp17301, i64 1
- %tmp17303 = getelementptr inbounds float* %tmp17302, i64 1
- %tmp17304 = getelementptr inbounds float* %tmp17303, i64 1
- %tmp17305 = getelementptr inbounds float* %tmp17304, i64 1
- %tmp17306 = getelementptr inbounds float* %tmp17305, i64 1
- %tmp17307 = getelementptr inbounds float* %tmp17306, i64 1
- %tmp17308 = getelementptr inbounds float* %tmp17307, i64 1
- %tmp17309 = getelementptr inbounds float* %tmp17308, i64 1
- %tmp17310 = getelementptr inbounds float* %tmp17309, i64 1
- %tmp17311 = getelementptr inbounds float* %tmp17310, i64 1
- %tmp17312 = getelementptr inbounds float* %tmp17311, i64 1
- %tmp17313 = getelementptr inbounds float* %tmp17312, i64 1
- %tmp17314 = getelementptr inbounds float* %tmp17313, i64 1
- %tmp17315 = getelementptr inbounds float* %tmp17314, i64 1
- %tmp17316 = getelementptr inbounds float* %tmp17315, i64 1
- %tmp17317 = getelementptr inbounds float* %tmp17316, i64 1
- %tmp17318 = getelementptr inbounds float* %tmp17317, i64 1
- %tmp17319 = getelementptr inbounds float* %tmp17318, i64 1
- %tmp17320 = getelementptr inbounds float* %tmp17319, i64 1
- %tmp17321 = getelementptr inbounds float* %tmp17320, i64 1
- %tmp17322 = getelementptr inbounds float* %tmp17321, i64 1
- %tmp17323 = getelementptr inbounds float* %tmp17322, i64 1
- %tmp17324 = getelementptr inbounds float* %tmp17323, i64 1
- %tmp17325 = getelementptr inbounds float* %tmp17324, i64 1
- %tmp17326 = getelementptr inbounds float* %tmp17325, i64 1
- %tmp17327 = getelementptr inbounds float* %tmp17326, i64 1
- %tmp17328 = getelementptr inbounds float* %tmp17327, i64 1
- %tmp17329 = getelementptr inbounds float* %tmp17328, i64 1
- %tmp17330 = getelementptr inbounds float* %tmp17329, i64 1
- %tmp17331 = getelementptr inbounds float* %tmp17330, i64 1
- %tmp17332 = getelementptr inbounds float* %tmp17331, i64 1
- %tmp17333 = getelementptr inbounds float* %tmp17332, i64 1
- %tmp17334 = getelementptr inbounds float* %tmp17333, i64 1
- %tmp17335 = getelementptr inbounds float* %tmp17334, i64 1
- %tmp17336 = getelementptr inbounds float* %tmp17335, i64 1
- %tmp17337 = getelementptr inbounds float* %tmp17336, i64 1
- %tmp17338 = getelementptr inbounds float* %tmp17337, i64 1
- %tmp17339 = getelementptr inbounds float* %tmp17338, i64 1
- %tmp17340 = getelementptr inbounds float* %tmp17339, i64 1
- %tmp17341 = getelementptr inbounds float* %tmp17340, i64 1
- %tmp17342 = getelementptr inbounds float* %tmp17341, i64 1
- %tmp17343 = getelementptr inbounds float* %tmp17342, i64 1
- %tmp17344 = getelementptr inbounds float* %tmp17343, i64 1
- %tmp17345 = getelementptr inbounds float* %tmp17344, i64 1
- %tmp17346 = getelementptr inbounds float* %tmp17345, i64 1
- %tmp17347 = getelementptr inbounds float* %tmp17346, i64 1
- %tmp17348 = getelementptr inbounds float* %tmp17347, i64 1
- %tmp17349 = getelementptr inbounds float* %tmp17348, i64 1
- %tmp17350 = getelementptr inbounds float* %tmp17349, i64 1
- %tmp17351 = getelementptr inbounds float* %tmp17350, i64 1
- %tmp17352 = getelementptr inbounds float* %tmp17351, i64 1
- %tmp17353 = getelementptr inbounds float* %tmp17352, i64 1
- %tmp17354 = getelementptr inbounds float* %tmp17353, i64 1
- %tmp17355 = getelementptr inbounds float* %tmp17354, i64 1
- %tmp17356 = getelementptr inbounds float* %tmp17355, i64 1
- %tmp17357 = getelementptr inbounds float* %tmp17356, i64 1
- %tmp17358 = getelementptr inbounds float* %tmp17357, i64 1
- %tmp17359 = getelementptr inbounds float* %tmp17358, i64 1
- %tmp17360 = getelementptr inbounds float* %tmp17359, i64 1
- %tmp17361 = getelementptr inbounds float* %tmp17360, i64 1
- %tmp17362 = getelementptr inbounds float* %tmp17361, i64 1
- %tmp17363 = getelementptr inbounds float* %tmp17362, i64 1
- %tmp17364 = getelementptr inbounds float* %tmp17363, i64 1
- %tmp17365 = getelementptr inbounds float* %tmp17364, i64 1
- %tmp17366 = getelementptr inbounds float* %tmp17365, i64 1
- %tmp17367 = getelementptr inbounds float* %tmp17366, i64 1
- %tmp17368 = getelementptr inbounds float* %tmp17367, i64 1
- %tmp17369 = getelementptr inbounds float* %tmp17368, i64 1
- %tmp17370 = getelementptr inbounds float* %tmp17369, i64 1
- %tmp17371 = getelementptr inbounds float* %tmp17370, i64 1
- %tmp17372 = getelementptr inbounds float* %tmp17371, i64 1
- %tmp17373 = getelementptr inbounds float* %tmp17372, i64 1
- %tmp17374 = getelementptr inbounds float* %tmp17373, i64 1
- %tmp17375 = getelementptr inbounds float* %tmp17374, i64 1
- %tmp17376 = getelementptr inbounds float* %tmp17375, i64 1
- %tmp17377 = getelementptr inbounds float* %tmp17376, i64 1
- %tmp17378 = getelementptr inbounds float* %tmp17377, i64 1
- %tmp17379 = getelementptr inbounds float* %tmp17378, i64 1
- %tmp17380 = getelementptr inbounds float* %tmp17379, i64 1
- %tmp17381 = getelementptr inbounds float* %tmp17380, i64 1
- %tmp17382 = getelementptr inbounds float* %tmp17381, i64 1
- %tmp17383 = getelementptr inbounds float* %tmp17382, i64 1
- %tmp17384 = getelementptr inbounds float* %tmp17383, i64 1
- %tmp17385 = getelementptr inbounds float* %tmp17384, i64 1
- %tmp17386 = getelementptr inbounds float* %tmp17385, i64 1
- %tmp17387 = getelementptr inbounds float* %tmp17386, i64 1
- %tmp17388 = getelementptr inbounds float* %tmp17387, i64 1
- %tmp17389 = getelementptr inbounds float* %tmp17388, i64 1
- %tmp17390 = getelementptr inbounds float* %tmp17389, i64 1
- %tmp17391 = getelementptr inbounds float* %tmp17390, i64 1
- %tmp17392 = getelementptr inbounds float* %tmp17391, i64 1
- %tmp17393 = getelementptr inbounds float* %tmp17392, i64 1
- %tmp17394 = getelementptr inbounds float* %tmp17393, i64 1
- %tmp17395 = getelementptr inbounds float* %tmp17394, i64 1
- %tmp17396 = getelementptr inbounds float* %tmp17395, i64 1
- %tmp17397 = getelementptr inbounds float* %tmp17396, i64 1
- %tmp17398 = getelementptr inbounds float* %tmp17397, i64 1
- %tmp17399 = getelementptr inbounds float* %tmp17398, i64 1
- %tmp17400 = getelementptr inbounds float* %tmp17399, i64 1
- %tmp17401 = getelementptr inbounds float* %tmp17400, i64 1
- %tmp17402 = getelementptr inbounds float* %tmp17401, i64 1
- %tmp17403 = getelementptr inbounds float* %tmp17402, i64 1
- %tmp17404 = getelementptr inbounds float* %tmp17403, i64 1
- %tmp17405 = getelementptr inbounds float* %tmp17404, i64 1
- %tmp17406 = getelementptr inbounds float* %tmp17405, i64 1
- %tmp17407 = getelementptr inbounds float* %tmp17406, i64 1
- %tmp17408 = getelementptr inbounds float* %tmp17407, i64 1
- %tmp17409 = getelementptr inbounds float* %tmp17408, i64 1
- %tmp17410 = getelementptr inbounds float* %tmp17409, i64 1
- %tmp17411 = getelementptr inbounds float* %tmp17410, i64 1
- %tmp17412 = getelementptr inbounds float* %tmp17411, i64 1
- %tmp17413 = getelementptr inbounds float* %tmp17412, i64 1
- %tmp17414 = getelementptr inbounds float* %tmp17413, i64 1
- %tmp17415 = getelementptr inbounds float* %tmp17414, i64 1
- %tmp17416 = getelementptr inbounds float* %tmp17415, i64 1
- %tmp17417 = getelementptr inbounds float* %tmp17416, i64 1
- %tmp17418 = getelementptr inbounds float* %tmp17417, i64 1
- %tmp17419 = getelementptr inbounds float* %tmp17418, i64 1
- %tmp17420 = getelementptr inbounds float* %tmp17419, i64 1
- %tmp17421 = getelementptr inbounds float* %tmp17420, i64 1
- %tmp17422 = getelementptr inbounds float* %tmp17421, i64 1
- %tmp17423 = getelementptr inbounds float* %tmp17422, i64 1
- %tmp17424 = getelementptr inbounds float* %tmp17423, i64 1
- %tmp17425 = getelementptr inbounds float* %tmp17424, i64 1
- %tmp17426 = getelementptr inbounds float* %tmp17425, i64 1
- %tmp17427 = getelementptr inbounds float* %tmp17426, i64 1
- %tmp17428 = getelementptr inbounds float* %tmp17427, i64 1
- %tmp17429 = getelementptr inbounds float* %tmp17428, i64 1
- %tmp17430 = getelementptr inbounds float* %tmp17429, i64 1
- %tmp17431 = getelementptr inbounds float* %tmp17430, i64 1
- %tmp17432 = getelementptr inbounds float* %tmp17431, i64 1
- %tmp17433 = getelementptr inbounds float* %tmp17432, i64 1
- %tmp17434 = getelementptr inbounds float* %tmp17433, i64 1
- %tmp17435 = getelementptr inbounds float* %tmp17434, i64 1
- %tmp17436 = getelementptr inbounds float* %tmp17435, i64 1
- %tmp17437 = getelementptr inbounds float* %tmp17436, i64 1
- %tmp17438 = getelementptr inbounds float* %tmp17437, i64 1
- %tmp17439 = getelementptr inbounds float* %tmp17438, i64 1
- %tmp17440 = getelementptr inbounds float* %tmp17439, i64 1
- %tmp17441 = getelementptr inbounds float* %tmp17440, i64 1
- %tmp17442 = getelementptr inbounds float* %tmp17441, i64 1
- %tmp17443 = getelementptr inbounds float* %tmp17442, i64 1
- %tmp17444 = getelementptr inbounds float* %tmp17443, i64 1
- %tmp17445 = getelementptr inbounds float* %tmp17444, i64 1
- %tmp17446 = getelementptr inbounds float* %tmp17445, i64 1
- %tmp17447 = getelementptr inbounds float* %tmp17446, i64 1
- %tmp17448 = getelementptr inbounds float* %tmp17447, i64 1
- %tmp17449 = getelementptr inbounds float* %tmp17448, i64 1
- %tmp17450 = getelementptr inbounds float* %tmp17449, i64 1
- %tmp17451 = getelementptr inbounds float* %tmp17450, i64 1
- %tmp17452 = getelementptr inbounds float* %tmp17451, i64 1
- %tmp17453 = getelementptr inbounds float* %tmp17452, i64 1
- %tmp17454 = getelementptr inbounds float* %tmp17453, i64 1
- %tmp17455 = getelementptr inbounds float* %tmp17454, i64 1
- %tmp17456 = getelementptr inbounds float* %tmp17455, i64 1
- %tmp17457 = getelementptr inbounds float* %tmp17456, i64 1
- %tmp17458 = getelementptr inbounds float* %tmp17457, i64 1
- %tmp17459 = getelementptr inbounds float* %tmp17458, i64 1
- %tmp17460 = getelementptr inbounds float* %tmp17459, i64 1
- %tmp17461 = getelementptr inbounds float* %tmp17460, i64 1
- %tmp17462 = getelementptr inbounds float* %tmp17461, i64 1
- %tmp17463 = getelementptr inbounds float* %tmp17462, i64 1
- %tmp17464 = getelementptr inbounds float* %tmp17463, i64 1
- %tmp17465 = getelementptr inbounds float* %tmp17464, i64 1
- %tmp17466 = getelementptr inbounds float* %tmp17465, i64 1
- %tmp17467 = getelementptr inbounds float* %tmp17466, i64 1
- %tmp17468 = getelementptr inbounds float* %tmp17467, i64 1
- %tmp17469 = getelementptr inbounds float* %tmp17468, i64 1
- %tmp17470 = getelementptr inbounds float* %tmp17469, i64 1
- %tmp17471 = getelementptr inbounds float* %tmp17470, i64 1
- %tmp17472 = getelementptr inbounds float* %tmp17471, i64 1
- %tmp17473 = getelementptr inbounds float* %tmp17472, i64 1
- %tmp17474 = getelementptr inbounds float* %tmp17473, i64 1
- %tmp17475 = getelementptr inbounds float* %tmp17474, i64 1
- %tmp17476 = getelementptr inbounds float* %tmp17475, i64 1
- %tmp17477 = getelementptr inbounds float* %tmp17476, i64 1
- %tmp17478 = getelementptr inbounds float* %tmp17477, i64 1
- %tmp17479 = getelementptr inbounds float* %tmp17478, i64 1
- %tmp17480 = getelementptr inbounds float* %tmp17479, i64 1
- %tmp17481 = getelementptr inbounds float* %tmp17480, i64 1
- %tmp17482 = getelementptr inbounds float* %tmp17481, i64 1
- %tmp17483 = getelementptr inbounds float* %tmp17482, i64 1
- %tmp17484 = getelementptr inbounds float* %tmp17483, i64 1
- %tmp17485 = getelementptr inbounds float* %tmp17484, i64 1
- %tmp17486 = getelementptr inbounds float* %tmp17485, i64 1
- %tmp17487 = getelementptr inbounds float* %tmp17486, i64 1
- %tmp17488 = getelementptr inbounds float* %tmp17487, i64 1
- %tmp17489 = getelementptr inbounds float* %tmp17488, i64 1
- %tmp17490 = getelementptr inbounds float* %tmp17489, i64 1
- %tmp17491 = getelementptr inbounds float* %tmp17490, i64 1
- %tmp17492 = getelementptr inbounds float* %tmp17491, i64 1
- %tmp17493 = getelementptr inbounds float* %tmp17492, i64 1
- %tmp17494 = getelementptr inbounds float* %tmp17493, i64 1
- %tmp17495 = getelementptr inbounds float* %tmp17494, i64 1
- %tmp17496 = getelementptr inbounds float* %tmp17495, i64 1
- %tmp17497 = getelementptr inbounds float* %tmp17496, i64 1
- %tmp17498 = getelementptr inbounds float* %tmp17497, i64 1
- %tmp17499 = getelementptr inbounds float* %tmp17498, i64 1
- %tmp17500 = getelementptr inbounds float* %tmp17499, i64 1
- %tmp17501 = getelementptr inbounds float* %tmp17500, i64 1
- %tmp17502 = getelementptr inbounds float* %tmp17501, i64 1
- %tmp17503 = getelementptr inbounds float* %tmp17502, i64 1
- %tmp17504 = getelementptr inbounds float* %tmp17503, i64 1
- %tmp17505 = getelementptr inbounds float* %tmp17504, i64 1
- %tmp17506 = getelementptr inbounds float* %tmp17505, i64 1
- %tmp17507 = getelementptr inbounds float* %tmp17506, i64 1
- %tmp17508 = getelementptr inbounds float* %tmp17507, i64 1
- %tmp17509 = getelementptr inbounds float* %tmp17508, i64 1
- %tmp17510 = getelementptr inbounds float* %tmp17509, i64 1
- %tmp17511 = getelementptr inbounds float* %tmp17510, i64 1
- %tmp17512 = getelementptr inbounds float* %tmp17511, i64 1
- %tmp17513 = getelementptr inbounds float* %tmp17512, i64 1
- %tmp17514 = getelementptr inbounds float* %tmp17513, i64 1
- %tmp17515 = getelementptr inbounds float* %tmp17514, i64 1
- %tmp17516 = getelementptr inbounds float* %tmp17515, i64 1
- %tmp17517 = getelementptr inbounds float* %tmp17516, i64 1
- %tmp17518 = getelementptr inbounds float* %tmp17517, i64 1
- %tmp17519 = getelementptr inbounds float* %tmp17518, i64 1
- %tmp17520 = getelementptr inbounds float* %tmp17519, i64 1
- %tmp17521 = getelementptr inbounds float* %tmp17520, i64 1
- %tmp17522 = getelementptr inbounds float* %tmp17521, i64 1
- %tmp17523 = getelementptr inbounds float* %tmp17522, i64 1
- %tmp17524 = getelementptr inbounds float* %tmp17523, i64 1
- %tmp17525 = getelementptr inbounds float* %tmp17524, i64 1
- %tmp17526 = getelementptr inbounds float* %tmp17525, i64 1
- %tmp17527 = getelementptr inbounds float* %tmp17526, i64 1
- %tmp17528 = getelementptr inbounds float* %tmp17527, i64 1
- %tmp17529 = getelementptr inbounds float* %tmp17528, i64 1
- %tmp17530 = getelementptr inbounds float* %tmp17529, i64 1
- %tmp17531 = getelementptr inbounds float* %tmp17530, i64 1
- %tmp17532 = getelementptr inbounds float* %tmp17531, i64 1
- %tmp17533 = getelementptr inbounds float* %tmp17532, i64 1
- %tmp17534 = getelementptr inbounds float* %tmp17533, i64 1
- %tmp17535 = getelementptr inbounds float* %tmp17534, i64 1
- %tmp17536 = getelementptr inbounds float* %tmp17535, i64 1
- %tmp17537 = getelementptr inbounds float* %tmp17536, i64 1
- %tmp17538 = getelementptr inbounds float* %tmp17537, i64 1
- %tmp17539 = getelementptr inbounds float* %tmp17538, i64 1
- %tmp17540 = getelementptr inbounds float* %tmp17539, i64 1
- %tmp17541 = getelementptr inbounds float* %tmp17540, i64 1
- %tmp17542 = getelementptr inbounds float* %tmp17541, i64 1
- %tmp17543 = getelementptr inbounds float* %tmp17542, i64 1
- %tmp17544 = getelementptr inbounds float* %tmp17543, i64 1
- %tmp17545 = getelementptr inbounds float* %tmp17544, i64 1
- %tmp17546 = getelementptr inbounds float* %tmp17545, i64 1
- %tmp17547 = getelementptr inbounds float* %tmp17546, i64 1
- %tmp17548 = getelementptr inbounds float* %tmp17547, i64 1
- %tmp17549 = getelementptr inbounds float* %tmp17548, i64 1
- %tmp17550 = getelementptr inbounds float* %tmp17549, i64 1
- %tmp17551 = getelementptr inbounds float* %tmp17550, i64 1
- %tmp17552 = getelementptr inbounds float* %tmp17551, i64 1
- %tmp17553 = getelementptr inbounds float* %tmp17552, i64 1
- %tmp17554 = getelementptr inbounds float* %tmp17553, i64 1
- %tmp17555 = getelementptr inbounds float* %tmp17554, i64 1
- %tmp17556 = getelementptr inbounds float* %tmp17555, i64 1
- %tmp17557 = getelementptr inbounds float* %tmp17556, i64 1
- %tmp17558 = getelementptr inbounds float* %tmp17557, i64 1
- %tmp17559 = getelementptr inbounds float* %tmp17558, i64 1
- %tmp17560 = getelementptr inbounds float* %tmp17559, i64 1
- %tmp17561 = getelementptr inbounds float* %tmp17560, i64 1
- %tmp17562 = getelementptr inbounds float* %tmp17561, i64 1
- %tmp17563 = getelementptr inbounds float* %tmp17562, i64 1
- %tmp17564 = getelementptr inbounds float* %tmp17563, i64 1
- %tmp17565 = getelementptr inbounds float* %tmp17564, i64 1
- %tmp17566 = getelementptr inbounds float* %tmp17565, i64 1
- %tmp17567 = getelementptr inbounds float* %tmp17566, i64 1
- %tmp17568 = getelementptr inbounds float* %tmp17567, i64 1
- %tmp17569 = getelementptr inbounds float* %tmp17568, i64 1
- %tmp17570 = getelementptr inbounds float* %tmp17569, i64 1
- %tmp17571 = getelementptr inbounds float* %tmp17570, i64 1
- %tmp17572 = getelementptr inbounds float* %tmp17571, i64 1
- %tmp17573 = getelementptr inbounds float* %tmp17572, i64 1
- %tmp17574 = getelementptr inbounds float* %tmp17573, i64 1
- %tmp17575 = getelementptr inbounds float* %tmp17574, i64 1
- %tmp17576 = getelementptr inbounds float* %tmp17575, i64 1
- %tmp17577 = getelementptr inbounds float* %tmp17576, i64 1
- %tmp17578 = getelementptr inbounds float* %tmp17577, i64 1
- %tmp17579 = getelementptr inbounds float* %tmp17578, i64 1
- %tmp17580 = getelementptr inbounds float* %tmp17579, i64 1
- %tmp17581 = getelementptr inbounds float* %tmp17580, i64 1
- %tmp17582 = getelementptr inbounds float* %tmp17581, i64 1
- %tmp17583 = getelementptr inbounds float* %tmp17582, i64 1
- %tmp17584 = getelementptr inbounds float* %tmp17583, i64 1
- %tmp17585 = getelementptr inbounds float* %tmp17584, i64 1
- %tmp17586 = getelementptr inbounds float* %tmp17585, i64 1
- %tmp17587 = getelementptr inbounds float* %tmp17586, i64 1
- %tmp17588 = getelementptr inbounds float* %tmp17587, i64 1
- %tmp17589 = getelementptr inbounds float* %tmp17588, i64 1
- %tmp17590 = getelementptr inbounds float* %tmp17589, i64 1
- %tmp17591 = getelementptr inbounds float* %tmp17590, i64 1
- %tmp17592 = getelementptr inbounds float* %tmp17591, i64 1
- %tmp17593 = getelementptr inbounds float* %tmp17592, i64 1
- %tmp17594 = getelementptr inbounds float* %tmp17593, i64 1
- %tmp17595 = getelementptr inbounds float* %tmp17594, i64 1
- %tmp17596 = getelementptr inbounds float* %tmp17595, i64 1
- %tmp17597 = getelementptr inbounds float* %tmp17596, i64 1
- %tmp17598 = getelementptr inbounds float* %tmp17597, i64 1
- %tmp17599 = getelementptr inbounds float* %tmp17598, i64 1
- %tmp17600 = getelementptr inbounds float* %tmp17599, i64 1
- %tmp17601 = getelementptr inbounds float* %tmp17600, i64 1
- %tmp17602 = getelementptr inbounds float* %tmp17601, i64 1
- %tmp17603 = getelementptr inbounds float* %tmp17602, i64 1
- %tmp17604 = getelementptr inbounds float* %tmp17603, i64 1
- %tmp17605 = getelementptr inbounds float* %tmp17604, i64 1
- %tmp17606 = getelementptr inbounds float* %tmp17605, i64 1
- %tmp17607 = getelementptr inbounds float* %tmp17606, i64 1
- %tmp17608 = getelementptr inbounds float* %tmp17607, i64 1
- %tmp17609 = getelementptr inbounds float* %tmp17608, i64 1
- %tmp17610 = getelementptr inbounds float* %tmp17609, i64 1
- %tmp17611 = getelementptr inbounds float* %tmp17610, i64 1
- %tmp17612 = getelementptr inbounds float* %tmp17611, i64 1
- %tmp17613 = getelementptr inbounds float* %tmp17612, i64 1
- %tmp17614 = getelementptr inbounds float* %tmp17613, i64 1
- %tmp17615 = getelementptr inbounds float* %tmp17614, i64 1
- %tmp17616 = getelementptr inbounds float* %tmp17615, i64 1
- %tmp17617 = getelementptr inbounds float* %tmp17616, i64 1
- %tmp17618 = getelementptr inbounds float* %tmp17617, i64 1
- %tmp17619 = getelementptr inbounds float* %tmp17618, i64 1
- %tmp17620 = getelementptr inbounds float* %tmp17619, i64 1
- %tmp17621 = getelementptr inbounds float* %tmp17620, i64 1
- %tmp17622 = getelementptr inbounds float* %tmp17621, i64 1
- %tmp17623 = getelementptr inbounds float* %tmp17622, i64 1
- %tmp17624 = getelementptr inbounds float* %tmp17623, i64 1
- %tmp17625 = getelementptr inbounds float* %tmp17624, i64 1
- %tmp17626 = getelementptr inbounds float* %tmp17625, i64 1
- %tmp17627 = getelementptr inbounds float* %tmp17626, i64 1
- %tmp17628 = getelementptr inbounds float* %tmp17627, i64 1
- %tmp17629 = getelementptr inbounds float* %tmp17628, i64 1
- %tmp17630 = getelementptr inbounds float* %tmp17629, i64 1
- %tmp17631 = getelementptr inbounds float* %tmp17630, i64 1
- %tmp17632 = getelementptr inbounds float* %tmp17631, i64 1
- %tmp17633 = getelementptr inbounds float* %tmp17632, i64 1
- %tmp17634 = getelementptr inbounds float* %tmp17633, i64 1
- %tmp17635 = getelementptr inbounds float* %tmp17634, i64 1
- %tmp17636 = getelementptr inbounds float* %tmp17635, i64 1
- %tmp17637 = getelementptr inbounds float* %tmp17636, i64 1
- %tmp17638 = getelementptr inbounds float* %tmp17637, i64 1
- %tmp17639 = getelementptr inbounds float* %tmp17638, i64 1
- %tmp17640 = getelementptr inbounds float* %tmp17639, i64 1
- %tmp17641 = getelementptr inbounds float* %tmp17640, i64 1
- %tmp17642 = getelementptr inbounds float* %tmp17641, i64 1
- %tmp17643 = getelementptr inbounds float* %tmp17642, i64 1
- %tmp17644 = getelementptr inbounds float* %tmp17643, i64 1
- %tmp17645 = getelementptr inbounds float* %tmp17644, i64 1
- %tmp17646 = getelementptr inbounds float* %tmp17645, i64 1
- %tmp17647 = getelementptr inbounds float* %tmp17646, i64 1
- %tmp17648 = getelementptr inbounds float* %tmp17647, i64 1
- %tmp17649 = getelementptr inbounds float* %tmp17648, i64 1
- %tmp17650 = getelementptr inbounds float* %tmp17649, i64 1
- %tmp17651 = getelementptr inbounds float* %tmp17650, i64 1
- %tmp17652 = getelementptr inbounds float* %tmp17651, i64 1
- %tmp17653 = getelementptr inbounds float* %tmp17652, i64 1
- %tmp17654 = getelementptr inbounds float* %tmp17653, i64 1
- %tmp17655 = getelementptr inbounds float* %tmp17654, i64 1
- %tmp17656 = getelementptr inbounds float* %tmp17655, i64 1
- %tmp17657 = getelementptr inbounds float* %tmp17656, i64 1
- %tmp17658 = getelementptr inbounds float* %tmp17657, i64 1
- %tmp17659 = getelementptr inbounds float* %tmp17658, i64 1
- %tmp17660 = getelementptr inbounds float* %tmp17659, i64 1
- %tmp17661 = getelementptr inbounds float* %tmp17660, i64 1
- %tmp17662 = getelementptr inbounds float* %tmp17661, i64 1
- %tmp17663 = getelementptr inbounds float* %tmp17662, i64 1
- %tmp17664 = getelementptr inbounds float* %tmp17663, i64 1
- %tmp17665 = getelementptr inbounds float* %tmp17664, i64 1
- %tmp17666 = getelementptr inbounds float* %tmp17665, i64 1
- %tmp17667 = getelementptr inbounds float* %tmp17666, i64 1
- %tmp17668 = getelementptr inbounds float* %tmp17667, i64 1
- %tmp17669 = getelementptr inbounds float* %tmp17668, i64 1
- %tmp17670 = getelementptr inbounds float* %tmp17669, i64 1
- %tmp17671 = getelementptr inbounds float* %tmp17670, i64 1
- %tmp17672 = getelementptr inbounds float* %tmp17671, i64 1
- %tmp17673 = getelementptr inbounds float* %tmp17672, i64 1
- %tmp17674 = getelementptr inbounds float* %tmp17673, i64 1
- %tmp17675 = getelementptr inbounds float* %tmp17674, i64 1
- %tmp17676 = getelementptr inbounds float* %tmp17675, i64 1
- %tmp17677 = getelementptr inbounds float* %tmp17676, i64 1
- %tmp17678 = getelementptr inbounds float* %tmp17677, i64 1
- %tmp17679 = getelementptr inbounds float* %tmp17678, i64 1
- %tmp17680 = getelementptr inbounds float* %tmp17679, i64 1
- %tmp17681 = getelementptr inbounds float* %tmp17680, i64 1
- %tmp17682 = getelementptr inbounds float* %tmp17681, i64 1
- %tmp17683 = getelementptr inbounds float* %tmp17682, i64 1
- %tmp17684 = getelementptr inbounds float* %tmp17683, i64 1
- %tmp17685 = getelementptr inbounds float* %tmp17684, i64 1
- %tmp17686 = getelementptr inbounds float* %tmp17685, i64 1
- %tmp17687 = getelementptr inbounds float* %tmp17686, i64 1
- %tmp17688 = getelementptr inbounds float* %tmp17687, i64 1
- %tmp17689 = getelementptr inbounds float* %tmp17688, i64 1
- %tmp17690 = getelementptr inbounds float* %tmp17689, i64 1
- %tmp17691 = getelementptr inbounds float* %tmp17690, i64 1
- %tmp17692 = getelementptr inbounds float* %tmp17691, i64 1
- %tmp17693 = getelementptr inbounds float* %tmp17692, i64 1
- %tmp17694 = getelementptr inbounds float* %tmp17693, i64 1
- %tmp17695 = getelementptr inbounds float* %tmp17694, i64 1
- %tmp17696 = getelementptr inbounds float* %tmp17695, i64 1
- %tmp17697 = getelementptr inbounds float* %tmp17696, i64 1
- %tmp17698 = getelementptr inbounds float* %tmp17697, i64 1
- %tmp17699 = getelementptr inbounds float* %tmp17698, i64 1
- %tmp17700 = getelementptr inbounds float* %tmp17699, i64 1
- %tmp17701 = getelementptr inbounds float* %tmp17700, i64 1
- %tmp17702 = getelementptr inbounds float* %tmp17701, i64 1
- %tmp17703 = getelementptr inbounds float* %tmp17702, i64 1
- %tmp17704 = getelementptr inbounds float* %tmp17703, i64 1
- %tmp17705 = getelementptr inbounds float* %tmp17704, i64 1
- %tmp17706 = getelementptr inbounds float* %tmp17705, i64 1
- %tmp17707 = getelementptr inbounds float* %tmp17706, i64 1
- %tmp17708 = getelementptr inbounds float* %tmp17707, i64 1
- %tmp17709 = getelementptr inbounds float* %tmp17708, i64 1
- %tmp17710 = getelementptr inbounds float* %tmp17709, i64 1
- %tmp17711 = getelementptr inbounds float* %tmp17710, i64 1
- %tmp17712 = getelementptr inbounds float* %tmp17711, i64 1
- %tmp17713 = getelementptr inbounds float* %tmp17712, i64 1
- %tmp17714 = getelementptr inbounds float* %tmp17713, i64 1
- %tmp17715 = getelementptr inbounds float* %tmp17714, i64 1
- %tmp17716 = getelementptr inbounds float* %tmp17715, i64 1
- %tmp17717 = getelementptr inbounds float* %tmp17716, i64 1
- %tmp17718 = getelementptr inbounds float* %tmp17717, i64 1
- %tmp17719 = getelementptr inbounds float* %tmp17718, i64 1
- %tmp17720 = getelementptr inbounds float* %tmp17719, i64 1
- %tmp17721 = getelementptr inbounds float* %tmp17720, i64 1
- %tmp17722 = getelementptr inbounds float* %tmp17721, i64 1
- %tmp17723 = getelementptr inbounds float* %tmp17722, i64 1
- %tmp17724 = getelementptr inbounds float* %tmp17723, i64 1
- %tmp17725 = getelementptr inbounds float* %tmp17724, i64 1
- %tmp17726 = getelementptr inbounds float* %tmp17725, i64 1
- %tmp17727 = getelementptr inbounds float* %tmp17726, i64 1
- %tmp17728 = getelementptr inbounds float* %tmp17727, i64 1
- %tmp17729 = getelementptr inbounds float* %tmp17728, i64 1
- %tmp17730 = getelementptr inbounds float* %tmp17729, i64 1
- %tmp17731 = getelementptr inbounds float* %tmp17730, i64 1
- %tmp17732 = getelementptr inbounds float* %tmp17731, i64 1
- %tmp17733 = getelementptr inbounds float* %tmp17732, i64 1
- %tmp17734 = getelementptr inbounds float* %tmp17733, i64 1
- %tmp17735 = getelementptr inbounds float* %tmp17734, i64 1
- %tmp17736 = getelementptr inbounds float* %tmp17735, i64 1
- %tmp17737 = getelementptr inbounds float* %tmp17736, i64 1
- %tmp17738 = getelementptr inbounds float* %tmp17737, i64 1
- %tmp17739 = getelementptr inbounds float* %tmp17738, i64 1
- %tmp17740 = getelementptr inbounds float* %tmp17739, i64 1
- %tmp17741 = getelementptr inbounds float* %tmp17740, i64 1
- %tmp17742 = getelementptr inbounds float* %tmp17741, i64 1
- %tmp17743 = getelementptr inbounds float* %tmp17742, i64 1
- %tmp17744 = getelementptr inbounds float* %tmp17743, i64 1
- %tmp17745 = getelementptr inbounds float* %tmp17744, i64 1
- %tmp17746 = getelementptr inbounds float* %tmp17745, i64 1
- %tmp17747 = getelementptr inbounds float* %tmp17746, i64 1
- %tmp17748 = getelementptr inbounds float* %tmp17747, i64 1
- %tmp17749 = getelementptr inbounds float* %tmp17748, i64 1
- %tmp17750 = getelementptr inbounds float* %tmp17749, i64 1
- %tmp17751 = getelementptr inbounds float* %tmp17750, i64 1
- %tmp17752 = getelementptr inbounds float* %tmp17751, i64 1
- %tmp17753 = getelementptr inbounds float* %tmp17752, i64 1
- %tmp17754 = getelementptr inbounds float* %tmp17753, i64 1
- %tmp17755 = getelementptr inbounds float* %tmp17754, i64 1
- %tmp17756 = getelementptr inbounds float* %tmp17755, i64 1
- %tmp17757 = getelementptr inbounds float* %tmp17756, i64 1
- %tmp17758 = getelementptr inbounds float* %tmp17757, i64 1
- %tmp17759 = getelementptr inbounds float* %tmp17758, i64 1
- %tmp17760 = getelementptr inbounds float* %tmp17759, i64 1
- %tmp17761 = getelementptr inbounds float* %tmp17760, i64 1
- %tmp17762 = getelementptr inbounds float* %tmp17761, i64 1
- %tmp17763 = getelementptr inbounds float* %tmp17762, i64 1
- %tmp17764 = getelementptr inbounds float* %tmp17763, i64 1
- %tmp17765 = getelementptr inbounds float* %tmp17764, i64 1
- %tmp17766 = getelementptr inbounds float* %tmp17765, i64 1
- %tmp17767 = getelementptr inbounds float* %tmp17766, i64 1
- %tmp17768 = getelementptr inbounds float* %tmp17767, i64 1
- %tmp17769 = getelementptr inbounds float* %tmp17768, i64 1
- %tmp17770 = getelementptr inbounds float* %tmp17769, i64 1
- %tmp17771 = getelementptr inbounds float* %tmp17770, i64 1
- %tmp17772 = getelementptr inbounds float* %tmp17771, i64 1
- %tmp17773 = getelementptr inbounds float* %tmp17772, i64 1
- %tmp17774 = getelementptr inbounds float* %tmp17773, i64 1
- %tmp17775 = getelementptr inbounds float* %tmp17774, i64 1
- %tmp17776 = getelementptr inbounds float* %tmp17775, i64 1
- %tmp17777 = getelementptr inbounds float* %tmp17776, i64 1
- %tmp17778 = getelementptr inbounds float* %tmp17777, i64 1
- %tmp17779 = getelementptr inbounds float* %tmp17778, i64 1
- %tmp17780 = getelementptr inbounds float* %tmp17779, i64 1
- %tmp17781 = getelementptr inbounds float* %tmp17780, i64 1
- %tmp17782 = getelementptr inbounds float* %tmp17781, i64 1
- %tmp17783 = getelementptr inbounds float* %tmp17782, i64 1
- %tmp17784 = getelementptr inbounds float* %tmp17783, i64 1
- %tmp17785 = getelementptr inbounds float* %tmp17784, i64 1
- %tmp17786 = getelementptr inbounds float* %tmp17785, i64 1
- %tmp17787 = getelementptr inbounds float* %tmp17786, i64 1
- %tmp17788 = getelementptr inbounds float* %tmp17787, i64 1
- %tmp17789 = getelementptr inbounds float* %tmp17788, i64 1
- %tmp17790 = getelementptr inbounds float* %tmp17789, i64 1
- %tmp17791 = getelementptr inbounds float* %tmp17790, i64 1
- %tmp17792 = getelementptr inbounds float* %tmp17791, i64 1
- %tmp17793 = getelementptr inbounds float* %tmp17792, i64 1
- %tmp17794 = getelementptr inbounds float* %tmp17793, i64 1
- %tmp17795 = getelementptr inbounds float* %tmp17794, i64 1
- %tmp17796 = getelementptr inbounds float* %tmp17795, i64 1
- %tmp17797 = getelementptr inbounds float* %tmp17796, i64 1
- %tmp17798 = getelementptr inbounds float* %tmp17797, i64 1
- %tmp17799 = getelementptr inbounds float* %tmp17798, i64 1
- %tmp17800 = getelementptr inbounds float* %tmp17799, i64 1
- %tmp17801 = getelementptr inbounds float* %tmp17800, i64 1
- %tmp17802 = getelementptr inbounds float* %tmp17801, i64 1
- %tmp17803 = getelementptr inbounds float* %tmp17802, i64 1
- %tmp17804 = getelementptr inbounds float* %tmp17803, i64 1
- %tmp17805 = getelementptr inbounds float* %tmp17804, i64 1
- %tmp17806 = getelementptr inbounds float* %tmp17805, i64 1
- %tmp17807 = getelementptr inbounds float* %tmp17806, i64 1
- %tmp17808 = getelementptr inbounds float* %tmp17807, i64 1
- %tmp17809 = getelementptr inbounds float* %tmp17808, i64 1
- %tmp17810 = getelementptr inbounds float* %tmp17809, i64 1
- %tmp17811 = getelementptr inbounds float* %tmp17810, i64 1
- %tmp17812 = getelementptr inbounds float* %tmp17811, i64 1
- %tmp17813 = getelementptr inbounds float* %tmp17812, i64 1
- %tmp17814 = getelementptr inbounds float* %tmp17813, i64 1
- %tmp17815 = getelementptr inbounds float* %tmp17814, i64 1
- %tmp17816 = getelementptr inbounds float* %tmp17815, i64 1
- %tmp17817 = getelementptr inbounds float* %tmp17816, i64 1
- %tmp17818 = getelementptr inbounds float* %tmp17817, i64 1
- %tmp17819 = getelementptr inbounds float* %tmp17818, i64 1
- %tmp17820 = getelementptr inbounds float* %tmp17819, i64 1
- %tmp17821 = getelementptr inbounds float* %tmp17820, i64 1
- %tmp17822 = getelementptr inbounds float* %tmp17821, i64 1
- %tmp17823 = getelementptr inbounds float* %tmp17822, i64 1
- %tmp17824 = getelementptr inbounds float* %tmp17823, i64 1
- %tmp17825 = getelementptr inbounds float* %tmp17824, i64 1
- %tmp17826 = getelementptr inbounds float* %tmp17825, i64 1
- %tmp17827 = getelementptr inbounds float* %tmp17826, i64 1
- %tmp17828 = getelementptr inbounds float* %tmp17827, i64 1
- %tmp17829 = getelementptr inbounds float* %tmp17828, i64 1
- %tmp17830 = getelementptr inbounds float* %tmp17829, i64 1
- %tmp17831 = getelementptr inbounds float* %tmp17830, i64 1
- %tmp17832 = getelementptr inbounds float* %tmp17831, i64 1
- %tmp17833 = getelementptr inbounds float* %tmp17832, i64 1
- %tmp17834 = getelementptr inbounds float* %tmp17833, i64 1
- %tmp17835 = getelementptr inbounds float* %tmp17834, i64 1
- %tmp17836 = getelementptr inbounds float* %tmp17835, i64 1
- %tmp17837 = getelementptr inbounds float* %tmp17836, i64 1
- %tmp17838 = getelementptr inbounds float* %tmp17837, i64 1
- %tmp17839 = getelementptr inbounds float* %tmp17838, i64 1
- %tmp17840 = getelementptr inbounds float* %tmp17839, i64 1
- %tmp17841 = getelementptr inbounds float* %tmp17840, i64 1
- %tmp17842 = getelementptr inbounds float* %tmp17841, i64 1
- %tmp17843 = getelementptr inbounds float* %tmp17842, i64 1
- %tmp17844 = getelementptr inbounds float* %tmp17843, i64 1
- %tmp17845 = getelementptr inbounds float* %tmp17844, i64 1
- %tmp17846 = getelementptr inbounds float* %tmp17845, i64 1
- %tmp17847 = getelementptr inbounds float* %tmp17846, i64 1
- %tmp17848 = getelementptr inbounds float* %tmp17847, i64 1
- %tmp17849 = getelementptr inbounds float* %tmp17848, i64 1
- %tmp17850 = getelementptr inbounds float* %tmp17849, i64 1
- %tmp17851 = getelementptr inbounds float* %tmp17850, i64 1
- %tmp17852 = getelementptr inbounds float* %tmp17851, i64 1
- %tmp17853 = getelementptr inbounds float* %tmp17852, i64 1
- %tmp17854 = getelementptr inbounds float* %tmp17853, i64 1
- %tmp17855 = getelementptr inbounds float* %tmp17854, i64 1
- %tmp17856 = getelementptr inbounds float* %tmp17855, i64 1
- %tmp17857 = getelementptr inbounds float* %tmp17856, i64 1
- %tmp17858 = getelementptr inbounds float* %tmp17857, i64 1
- %tmp17859 = getelementptr inbounds float* %tmp17858, i64 1
- %tmp17860 = getelementptr inbounds float* %tmp17859, i64 1
- %tmp17861 = getelementptr inbounds float* %tmp17860, i64 1
- %tmp17862 = getelementptr inbounds float* %tmp17861, i64 1
- %tmp17863 = getelementptr inbounds float* %tmp17862, i64 1
- %tmp17864 = getelementptr inbounds float* %tmp17863, i64 1
- %tmp17865 = getelementptr inbounds float* %tmp17864, i64 1
- %tmp17866 = getelementptr inbounds float* %tmp17865, i64 1
- %tmp17867 = getelementptr inbounds float* %tmp17866, i64 1
- %tmp17868 = getelementptr inbounds float* %tmp17867, i64 1
- %tmp17869 = getelementptr inbounds float* %tmp17868, i64 1
- %tmp17870 = getelementptr inbounds float* %tmp17869, i64 1
- %tmp17871 = getelementptr inbounds float* %tmp17870, i64 1
- %tmp17872 = getelementptr inbounds float* %tmp17871, i64 1
- %tmp17873 = getelementptr inbounds float* %tmp17872, i64 1
- %tmp17874 = getelementptr inbounds float* %tmp17873, i64 1
- %tmp17875 = getelementptr inbounds float* %tmp17874, i64 1
- %tmp17876 = getelementptr inbounds float* %tmp17875, i64 1
- %tmp17877 = getelementptr inbounds float* %tmp17876, i64 1
- %tmp17878 = getelementptr inbounds float* %tmp17877, i64 1
- %tmp17879 = getelementptr inbounds float* %tmp17878, i64 1
- %tmp17880 = getelementptr inbounds float* %tmp17879, i64 1
- %tmp17881 = getelementptr inbounds float* %tmp17880, i64 1
- %tmp17882 = getelementptr inbounds float* %tmp17881, i64 1
- %tmp17883 = getelementptr inbounds float* %tmp17882, i64 1
- %tmp17884 = getelementptr inbounds float* %tmp17883, i64 1
- %tmp17885 = getelementptr inbounds float* %tmp17884, i64 1
- %tmp17886 = getelementptr inbounds float* %tmp17885, i64 1
- %tmp17887 = getelementptr inbounds float* %tmp17886, i64 1
- %tmp17888 = getelementptr inbounds float* %tmp17887, i64 1
- %tmp17889 = getelementptr inbounds float* %tmp17888, i64 1
- %tmp17890 = getelementptr inbounds float* %tmp17889, i64 1
- %tmp17891 = getelementptr inbounds float* %tmp17890, i64 1
- %tmp17892 = getelementptr inbounds float* %tmp17891, i64 1
- %tmp17893 = getelementptr inbounds float* %tmp17892, i64 1
- %tmp17894 = getelementptr inbounds float* %tmp17893, i64 1
- %tmp17895 = getelementptr inbounds float* %tmp17894, i64 1
- %tmp17896 = getelementptr inbounds float* %tmp17895, i64 1
- %tmp17897 = getelementptr inbounds float* %tmp17896, i64 1
- %tmp17898 = getelementptr inbounds float* %tmp17897, i64 1
- %tmp17899 = getelementptr inbounds float* %tmp17898, i64 1
- %tmp17900 = getelementptr inbounds float* %tmp17899, i64 1
- %tmp17901 = getelementptr inbounds float* %tmp17900, i64 1
- %tmp17902 = getelementptr inbounds float* %tmp17901, i64 1
- %tmp17903 = getelementptr inbounds float* %tmp17902, i64 1
- %tmp17904 = getelementptr inbounds float* %tmp17903, i64 1
- %tmp17905 = getelementptr inbounds float* %tmp17904, i64 1
- %tmp17906 = getelementptr inbounds float* %tmp17905, i64 1
- %tmp17907 = getelementptr inbounds float* %tmp17906, i64 1
- %tmp17908 = getelementptr inbounds float* %tmp17907, i64 1
- %tmp17909 = getelementptr inbounds float* %tmp17908, i64 1
- %tmp17910 = getelementptr inbounds float* %tmp17909, i64 1
- %tmp17911 = getelementptr inbounds float* %tmp17910, i64 1
- %tmp17912 = getelementptr inbounds float* %tmp17911, i64 1
- %tmp17913 = getelementptr inbounds float* %tmp17912, i64 1
- %tmp17914 = getelementptr inbounds float* %tmp17913, i64 1
- %tmp17915 = getelementptr inbounds float* %tmp17914, i64 1
- %tmp17916 = getelementptr inbounds float* %tmp17915, i64 1
- %tmp17917 = getelementptr inbounds float* %tmp17916, i64 1
- %tmp17918 = getelementptr inbounds float* %tmp17917, i64 1
- %tmp17919 = getelementptr inbounds float* %tmp17918, i64 1
- %tmp17920 = getelementptr inbounds float* %tmp17919, i64 1
- %tmp17921 = getelementptr inbounds float* %tmp17920, i64 1
- %tmp17922 = getelementptr inbounds float* %tmp17921, i64 1
- %tmp17923 = getelementptr inbounds float* %tmp17922, i64 1
- %tmp17924 = getelementptr inbounds float* %tmp17923, i64 1
- %tmp17925 = getelementptr inbounds float* %tmp17924, i64 1
- %tmp17926 = getelementptr inbounds float* %tmp17925, i64 1
- %tmp17927 = getelementptr inbounds float* %tmp17926, i64 1
- %tmp17928 = getelementptr inbounds float* %tmp17927, i64 1
- %tmp17929 = getelementptr inbounds float* %tmp17928, i64 1
- %tmp17930 = getelementptr inbounds float* %tmp17929, i64 1
- %tmp17931 = getelementptr inbounds float* %tmp17930, i64 1
- %tmp17932 = getelementptr inbounds float* %tmp17931, i64 1
- %tmp17933 = getelementptr inbounds float* %tmp17932, i64 1
- %tmp17934 = getelementptr inbounds float* %tmp17933, i64 1
- %tmp17935 = getelementptr inbounds float* %tmp17934, i64 1
- %tmp17936 = getelementptr inbounds float* %tmp17935, i64 1
- %tmp17937 = getelementptr inbounds float* %tmp17936, i64 1
- %tmp17938 = getelementptr inbounds float* %tmp17937, i64 1
- %tmp17939 = getelementptr inbounds float* %tmp17938, i64 1
- %tmp17940 = getelementptr inbounds float* %tmp17939, i64 1
- %tmp17941 = getelementptr inbounds float* %tmp17940, i64 1
- %tmp17942 = getelementptr inbounds float* %tmp17941, i64 1
- %tmp17943 = getelementptr inbounds float* %tmp17942, i64 1
- %tmp17944 = getelementptr inbounds float* %tmp17943, i64 1
- %tmp17945 = getelementptr inbounds float* %tmp17944, i64 1
- %tmp17946 = getelementptr inbounds float* %tmp17945, i64 1
- %tmp17947 = getelementptr inbounds float* %tmp17946, i64 1
- %tmp17948 = getelementptr inbounds float* %tmp17947, i64 1
- %tmp17949 = getelementptr inbounds float* %tmp17948, i64 1
- %tmp17950 = getelementptr inbounds float* %tmp17949, i64 1
- %tmp17951 = getelementptr inbounds float* %tmp17950, i64 1
- %tmp17952 = getelementptr inbounds float* %tmp17951, i64 1
- %tmp17953 = getelementptr inbounds float* %tmp17952, i64 1
- %tmp17954 = getelementptr inbounds float* %tmp17953, i64 1
- %tmp17955 = getelementptr inbounds float* %tmp17954, i64 1
- %tmp17956 = getelementptr inbounds float* %tmp17955, i64 1
- %tmp17957 = getelementptr inbounds float* %tmp17956, i64 1
- %tmp17958 = getelementptr inbounds float* %tmp17957, i64 1
- %tmp17959 = getelementptr inbounds float* %tmp17958, i64 1
- %tmp17960 = getelementptr inbounds float* %tmp17959, i64 1
- %tmp17961 = getelementptr inbounds float* %tmp17960, i64 1
- %tmp17962 = getelementptr inbounds float* %tmp17961, i64 1
- %tmp17963 = getelementptr inbounds float* %tmp17962, i64 1
- %tmp17964 = getelementptr inbounds float* %tmp17963, i64 1
- %tmp17965 = getelementptr inbounds float* %tmp17964, i64 1
- %tmp17966 = getelementptr inbounds float* %tmp17965, i64 1
- %tmp17967 = getelementptr inbounds float* %tmp17966, i64 1
- %tmp17968 = getelementptr inbounds float* %tmp17967, i64 1
- %tmp17969 = getelementptr inbounds float* %tmp17968, i64 1
- %tmp17970 = getelementptr inbounds float* %tmp17969, i64 1
- %tmp17971 = getelementptr inbounds float* %tmp17970, i64 1
- %tmp17972 = getelementptr inbounds float* %tmp17971, i64 1
- %tmp17973 = getelementptr inbounds float* %tmp17972, i64 1
- %tmp17974 = getelementptr inbounds float* %tmp17973, i64 1
- %tmp17975 = getelementptr inbounds float* %tmp17974, i64 1
- %tmp17976 = getelementptr inbounds float* %tmp17975, i64 1
- %tmp17977 = getelementptr inbounds float* %tmp17976, i64 1
- %tmp17978 = getelementptr inbounds float* %tmp17977, i64 1
- %tmp17979 = getelementptr inbounds float* %tmp17978, i64 1
- %tmp17980 = getelementptr inbounds float* %tmp17979, i64 1
- %tmp17981 = getelementptr inbounds float* %tmp17980, i64 1
- %tmp17982 = getelementptr inbounds float* %tmp17981, i64 1
- %tmp17983 = getelementptr inbounds float* %tmp17982, i64 1
- %tmp17984 = getelementptr inbounds float* %tmp17983, i64 1
- %tmp17985 = getelementptr inbounds float* %tmp17984, i64 1
- %tmp17986 = getelementptr inbounds float* %tmp17985, i64 1
- %tmp17987 = getelementptr inbounds float* %tmp17986, i64 1
- %tmp17988 = getelementptr inbounds float* %tmp17987, i64 1
- %tmp17989 = getelementptr inbounds float* %tmp17988, i64 1
- %tmp17990 = getelementptr inbounds float* %tmp17989, i64 1
- %tmp17991 = getelementptr inbounds float* %tmp17990, i64 1
- %tmp17992 = getelementptr inbounds float* %tmp17991, i64 1
- %tmp17993 = getelementptr inbounds float* %tmp17992, i64 1
- %tmp17994 = getelementptr inbounds float* %tmp17993, i64 1
- %tmp17995 = getelementptr inbounds float* %tmp17994, i64 1
- %tmp17996 = getelementptr inbounds float* %tmp17995, i64 1
- %tmp17997 = getelementptr inbounds float* %tmp17996, i64 1
- %tmp17998 = getelementptr inbounds float* %tmp17997, i64 1
- %tmp17999 = getelementptr inbounds float* %tmp17998, i64 1
- %tmp18000 = getelementptr inbounds float* %tmp17999, i64 1
- %tmp18001 = getelementptr inbounds float* %tmp18000, i64 1
- %tmp18002 = getelementptr inbounds float* %tmp18001, i64 1
- %tmp18003 = getelementptr inbounds float* %tmp18002, i64 1
- %tmp18004 = getelementptr inbounds float* %tmp18003, i64 1
- %tmp18005 = getelementptr inbounds float* %tmp18004, i64 1
- %tmp18006 = getelementptr inbounds float* %tmp18005, i64 1
- %tmp18007 = getelementptr inbounds float* %tmp18006, i64 1
- %tmp18008 = getelementptr inbounds float* %tmp18007, i64 1
- %tmp18009 = getelementptr inbounds float* %tmp18008, i64 1
- %tmp18010 = getelementptr inbounds float* %tmp18009, i64 1
- %tmp18011 = getelementptr inbounds float* %tmp18010, i64 1
- %tmp18012 = getelementptr inbounds float* %tmp18011, i64 1
- %tmp18013 = getelementptr inbounds float* %tmp18012, i64 1
- %tmp18014 = getelementptr inbounds float* %tmp18013, i64 1
- %tmp18015 = getelementptr inbounds float* %tmp18014, i64 1
- %tmp18016 = getelementptr inbounds float* %tmp18015, i64 1
- %tmp18017 = getelementptr inbounds float* %tmp18016, i64 1
- %tmp18018 = getelementptr inbounds float* %tmp18017, i64 1
- %tmp18019 = getelementptr inbounds float* %tmp18018, i64 1
- %tmp18020 = getelementptr inbounds float* %tmp18019, i64 1
- %tmp18021 = getelementptr inbounds float* %tmp18020, i64 1
- %tmp18022 = getelementptr inbounds float* %tmp18021, i64 1
- %tmp18023 = getelementptr inbounds float* %tmp18022, i64 1
- %tmp18024 = getelementptr inbounds float* %tmp18023, i64 1
- %tmp18025 = getelementptr inbounds float* %tmp18024, i64 1
- %tmp18026 = getelementptr inbounds float* %tmp18025, i64 1
- %tmp18027 = getelementptr inbounds float* %tmp18026, i64 1
- %tmp18028 = getelementptr inbounds float* %tmp18027, i64 1
- %tmp18029 = getelementptr inbounds float* %tmp18028, i64 1
- %tmp18030 = getelementptr inbounds float* %tmp18029, i64 1
- %tmp18031 = getelementptr inbounds float* %tmp18030, i64 1
- %tmp18032 = getelementptr inbounds float* %tmp18031, i64 1
- %tmp18033 = getelementptr inbounds float* %tmp18032, i64 1
- %tmp18034 = getelementptr inbounds float* %tmp18033, i64 1
- %tmp18035 = getelementptr inbounds float* %tmp18034, i64 1
- %tmp18036 = getelementptr inbounds float* %tmp18035, i64 1
- %tmp18037 = getelementptr inbounds float* %tmp18036, i64 1
- %tmp18038 = getelementptr inbounds float* %tmp18037, i64 1
- %tmp18039 = getelementptr inbounds float* %tmp18038, i64 1
- %tmp18040 = getelementptr inbounds float* %tmp18039, i64 1
- %tmp18041 = getelementptr inbounds float* %tmp18040, i64 1
- %tmp18042 = getelementptr inbounds float* %tmp18041, i64 1
- %tmp18043 = getelementptr inbounds float* %tmp18042, i64 1
- %tmp18044 = getelementptr inbounds float* %tmp18043, i64 1
- %tmp18045 = getelementptr inbounds float* %tmp18044, i64 1
- %tmp18046 = getelementptr inbounds float* %tmp18045, i64 1
- %tmp18047 = getelementptr inbounds float* %tmp18046, i64 1
- %tmp18048 = getelementptr inbounds float* %tmp18047, i64 1
- %tmp18049 = getelementptr inbounds float* %tmp18048, i64 1
- %tmp18050 = getelementptr inbounds float* %tmp18049, i64 1
- %tmp18051 = getelementptr inbounds float* %tmp18050, i64 1
- %tmp18052 = getelementptr inbounds float* %tmp18051, i64 1
- %tmp18053 = getelementptr inbounds float* %tmp18052, i64 1
- %tmp18054 = getelementptr inbounds float* %tmp18053, i64 1
- %tmp18055 = getelementptr inbounds float* %tmp18054, i64 1
- %tmp18056 = getelementptr inbounds float* %tmp18055, i64 1
- %tmp18057 = getelementptr inbounds float* %tmp18056, i64 1
- %tmp18058 = getelementptr inbounds float* %tmp18057, i64 1
- %tmp18059 = getelementptr inbounds float* %tmp18058, i64 1
- %tmp18060 = getelementptr inbounds float* %tmp18059, i64 1
- %tmp18061 = getelementptr inbounds float* %tmp18060, i64 1
- %tmp18062 = getelementptr inbounds float* %tmp18061, i64 1
- %tmp18063 = getelementptr inbounds float* %tmp18062, i64 1
- %tmp18064 = getelementptr inbounds float* %tmp18063, i64 1
- %tmp18065 = getelementptr inbounds float* %tmp18064, i64 1
- %tmp18066 = getelementptr inbounds float* %tmp18065, i64 1
- %tmp18067 = getelementptr inbounds float* %tmp18066, i64 1
- %tmp18068 = getelementptr inbounds float* %tmp18067, i64 1
- %tmp18069 = getelementptr inbounds float* %tmp18068, i64 1
- %tmp18070 = getelementptr inbounds float* %tmp18069, i64 1
- %tmp18071 = getelementptr inbounds float* %tmp18070, i64 1
- %tmp18072 = getelementptr inbounds float* %tmp18071, i64 1
- %tmp18073 = getelementptr inbounds float* %tmp18072, i64 1
- %tmp18074 = getelementptr inbounds float* %tmp18073, i64 1
- %tmp18075 = getelementptr inbounds float* %tmp18074, i64 1
- %tmp18076 = getelementptr inbounds float* %tmp18075, i64 1
- %tmp18077 = getelementptr inbounds float* %tmp18076, i64 1
- %tmp18078 = getelementptr inbounds float* %tmp18077, i64 1
- %tmp18079 = getelementptr inbounds float* %tmp18078, i64 1
- %tmp18080 = getelementptr inbounds float* %tmp18079, i64 1
- %tmp18081 = getelementptr inbounds float* %tmp18080, i64 1
- %tmp18082 = getelementptr inbounds float* %tmp18081, i64 1
- %tmp18083 = getelementptr inbounds float* %tmp18082, i64 1
- %tmp18084 = getelementptr inbounds float* %tmp18083, i64 1
- %tmp18085 = getelementptr inbounds float* %tmp18084, i64 1
- %tmp18086 = getelementptr inbounds float* %tmp18085, i64 1
- %tmp18087 = getelementptr inbounds float* %tmp18086, i64 1
- %tmp18088 = getelementptr inbounds float* %tmp18087, i64 1
- %tmp18089 = getelementptr inbounds float* %tmp18088, i64 1
- %tmp18090 = getelementptr inbounds float* %tmp18089, i64 1
- %tmp18091 = getelementptr inbounds float* %tmp18090, i64 1
- %tmp18092 = getelementptr inbounds float* %tmp18091, i64 1
- %tmp18093 = getelementptr inbounds float* %tmp18092, i64 1
- %tmp18094 = getelementptr inbounds float* %tmp18093, i64 1
- %tmp18095 = getelementptr inbounds float* %tmp18094, i64 1
- %tmp18096 = getelementptr inbounds float* %tmp18095, i64 1
- %tmp18097 = getelementptr inbounds float* %tmp18096, i64 1
- %tmp18098 = getelementptr inbounds float* %tmp18097, i64 1
- %tmp18099 = getelementptr inbounds float* %tmp18098, i64 1
- %tmp18100 = getelementptr inbounds float* %tmp18099, i64 1
- %tmp18101 = getelementptr inbounds float* %tmp18100, i64 1
- %tmp18102 = getelementptr inbounds float* %tmp18101, i64 1
- %tmp18103 = getelementptr inbounds float* %tmp18102, i64 1
- %tmp18104 = getelementptr inbounds float* %tmp18103, i64 1
- %tmp18105 = getelementptr inbounds float* %tmp18104, i64 1
- %tmp18106 = getelementptr inbounds float* %tmp18105, i64 1
- %tmp18107 = getelementptr inbounds float* %tmp18106, i64 1
- %tmp18108 = getelementptr inbounds float* %tmp18107, i64 1
- %tmp18109 = getelementptr inbounds float* %tmp18108, i64 1
- %tmp18110 = getelementptr inbounds float* %tmp18109, i64 1
- %tmp18111 = getelementptr inbounds float* %tmp18110, i64 1
- %tmp18112 = getelementptr inbounds float* %tmp18111, i64 1
- %tmp18113 = getelementptr inbounds float* %tmp18112, i64 1
- %tmp18114 = getelementptr inbounds float* %tmp18113, i64 1
- %tmp18115 = getelementptr inbounds float* %tmp18114, i64 1
- %tmp18116 = getelementptr inbounds float* %tmp18115, i64 1
- %tmp18117 = getelementptr inbounds float* %tmp18116, i64 1
- %tmp18118 = getelementptr inbounds float* %tmp18117, i64 1
- %tmp18119 = getelementptr inbounds float* %tmp18118, i64 1
- %tmp18120 = getelementptr inbounds float* %tmp18119, i64 1
- %tmp18121 = getelementptr inbounds float* %tmp18120, i64 1
- %tmp18122 = getelementptr inbounds float* %tmp18121, i64 1
- %tmp18123 = getelementptr inbounds float* %tmp18122, i64 1
- %tmp18124 = getelementptr inbounds float* %tmp18123, i64 1
- %tmp18125 = getelementptr inbounds float* %tmp18124, i64 1
- %tmp18126 = getelementptr inbounds float* %tmp18125, i64 1
- %tmp18127 = getelementptr inbounds float* %tmp18126, i64 1
- %tmp18128 = getelementptr inbounds float* %tmp18127, i64 1
- %tmp18129 = getelementptr inbounds float* %tmp18128, i64 1
- %tmp18130 = getelementptr inbounds float* %tmp18129, i64 1
- %tmp18131 = getelementptr inbounds float* %tmp18130, i64 1
- %tmp18132 = getelementptr inbounds float* %tmp18131, i64 1
- %tmp18133 = getelementptr inbounds float* %tmp18132, i64 1
- %tmp18134 = getelementptr inbounds float* %tmp18133, i64 1
- %tmp18135 = getelementptr inbounds float* %tmp18134, i64 1
- %tmp18136 = getelementptr inbounds float* %tmp18135, i64 1
- %tmp18137 = getelementptr inbounds float* %tmp18136, i64 1
- %tmp18138 = getelementptr inbounds float* %tmp18137, i64 1
- %tmp18139 = getelementptr inbounds float* %tmp18138, i64 1
- %tmp18140 = getelementptr inbounds float* %tmp18139, i64 1
- %tmp18141 = getelementptr inbounds float* %tmp18140, i64 1
- %tmp18142 = getelementptr inbounds float* %tmp18141, i64 1
- %tmp18143 = getelementptr inbounds float* %tmp18142, i64 1
- %tmp18144 = getelementptr inbounds float* %tmp18143, i64 1
- %tmp18145 = getelementptr inbounds float* %tmp18144, i64 1
- %tmp18146 = getelementptr inbounds float* %tmp18145, i64 1
- %tmp18147 = getelementptr inbounds float* %tmp18146, i64 1
- %tmp18148 = getelementptr inbounds float* %tmp18147, i64 1
- %tmp18149 = getelementptr inbounds float* %tmp18148, i64 1
- %tmp18150 = getelementptr inbounds float* %tmp18149, i64 1
- %tmp18151 = getelementptr inbounds float* %tmp18150, i64 1
- %tmp18152 = getelementptr inbounds float* %tmp18151, i64 1
- %tmp18153 = getelementptr inbounds float* %tmp18152, i64 1
- %tmp18154 = getelementptr inbounds float* %tmp18153, i64 1
- %tmp18155 = getelementptr inbounds float* %tmp18154, i64 1
- %tmp18156 = getelementptr inbounds float* %tmp18155, i64 1
- %tmp18157 = getelementptr inbounds float* %tmp18156, i64 1
- %tmp18158 = getelementptr inbounds float* %tmp18157, i64 1
- %tmp18159 = getelementptr inbounds float* %tmp18158, i64 1
- %tmp18160 = getelementptr inbounds float* %tmp18159, i64 1
- %tmp18161 = getelementptr inbounds float* %tmp18160, i64 1
- %tmp18162 = getelementptr inbounds float* %tmp18161, i64 1
- %tmp18163 = getelementptr inbounds float* %tmp18162, i64 1
- %tmp18164 = getelementptr inbounds float* %tmp18163, i64 1
- %tmp18165 = getelementptr inbounds float* %tmp18164, i64 1
- %tmp18166 = getelementptr inbounds float* %tmp18165, i64 1
- %tmp18167 = getelementptr inbounds float* %tmp18166, i64 1
- %tmp18168 = getelementptr inbounds float* %tmp18167, i64 1
- %tmp18169 = getelementptr inbounds float* %tmp18168, i64 1
- %tmp18170 = getelementptr inbounds float* %tmp18169, i64 1
- %tmp18171 = getelementptr inbounds float* %tmp18170, i64 1
- %tmp18172 = getelementptr inbounds float* %tmp18171, i64 1
- %tmp18173 = getelementptr inbounds float* %tmp18172, i64 1
- %tmp18174 = getelementptr inbounds float* %tmp18173, i64 1
- %tmp18175 = getelementptr inbounds float* %tmp18174, i64 1
- %tmp18176 = getelementptr inbounds float* %tmp18175, i64 1
- %tmp18177 = getelementptr inbounds float* %tmp18176, i64 1
- %tmp18178 = getelementptr inbounds float* %tmp18177, i64 1
- %tmp18179 = getelementptr inbounds float* %tmp18178, i64 1
- %tmp18180 = getelementptr inbounds float* %tmp18179, i64 1
- %tmp18181 = getelementptr inbounds float* %tmp18180, i64 1
- %tmp18182 = getelementptr inbounds float* %tmp18181, i64 1
- %tmp18183 = getelementptr inbounds float* %tmp18182, i64 1
- %tmp18184 = getelementptr inbounds float* %tmp18183, i64 1
- %tmp18185 = getelementptr inbounds float* %tmp18184, i64 1
- %tmp18186 = getelementptr inbounds float* %tmp18185, i64 1
- %tmp18187 = getelementptr inbounds float* %tmp18186, i64 1
- %tmp18188 = getelementptr inbounds float* %tmp18187, i64 1
- %tmp18189 = getelementptr inbounds float* %tmp18188, i64 1
- %tmp18190 = getelementptr inbounds float* %tmp18189, i64 1
- %tmp18191 = getelementptr inbounds float* %tmp18190, i64 1
- %tmp18192 = getelementptr inbounds float* %tmp18191, i64 1
- %tmp18193 = getelementptr inbounds float* %tmp18192, i64 1
- %tmp18194 = getelementptr inbounds float* %tmp18193, i64 1
- %tmp18195 = getelementptr inbounds float* %tmp18194, i64 1
- %tmp18196 = getelementptr inbounds float* %tmp18195, i64 1
- %tmp18197 = getelementptr inbounds float* %tmp18196, i64 1
- %tmp18198 = getelementptr inbounds float* %tmp18197, i64 1
- %tmp18199 = getelementptr inbounds float* %tmp18198, i64 1
- %tmp18200 = getelementptr inbounds float* %tmp18199, i64 1
- %tmp18201 = getelementptr inbounds float* %tmp18200, i64 1
- %tmp18202 = getelementptr inbounds float* %tmp18201, i64 1
- %tmp18203 = getelementptr inbounds float* %tmp18202, i64 1
- %tmp18204 = getelementptr inbounds float* %tmp18203, i64 1
- %tmp18205 = getelementptr inbounds float* %tmp18204, i64 1
- %tmp18206 = getelementptr inbounds float* %tmp18205, i64 1
- %tmp18207 = getelementptr inbounds float* %tmp18206, i64 1
- %tmp18208 = getelementptr inbounds float* %tmp18207, i64 1
- %tmp18209 = getelementptr inbounds float* %tmp18208, i64 1
- %tmp18210 = getelementptr inbounds float* %tmp18209, i64 1
- %tmp18211 = getelementptr inbounds float* %tmp18210, i64 1
- %tmp18212 = getelementptr inbounds float* %tmp18211, i64 1
- %tmp18213 = getelementptr inbounds float* %tmp18212, i64 1
- %tmp18214 = getelementptr inbounds float* %tmp18213, i64 1
- %tmp18215 = getelementptr inbounds float* %tmp18214, i64 1
- %tmp18216 = getelementptr inbounds float* %tmp18215, i64 1
- %tmp18217 = getelementptr inbounds float* %tmp18216, i64 1
- %tmp18218 = getelementptr inbounds float* %tmp18217, i64 1
- %tmp18219 = getelementptr inbounds float* %tmp18218, i64 1
- %tmp18220 = getelementptr inbounds float* %tmp18219, i64 1
- %tmp18221 = getelementptr inbounds float* %tmp18220, i64 1
- %tmp18222 = getelementptr inbounds float* %tmp18221, i64 1
- %tmp18223 = getelementptr inbounds float* %tmp18222, i64 1
- %tmp18224 = getelementptr inbounds float* %tmp18223, i64 1
- %tmp18225 = getelementptr inbounds float* %tmp18224, i64 1
- %tmp18226 = getelementptr inbounds float* %tmp18225, i64 1
- %tmp18227 = getelementptr inbounds float* %tmp18226, i64 1
- %tmp18228 = getelementptr inbounds float* %tmp18227, i64 1
- %tmp18229 = getelementptr inbounds float* %tmp18228, i64 1
- %tmp18230 = getelementptr inbounds float* %tmp18229, i64 1
- %tmp18231 = getelementptr inbounds float* %tmp18230, i64 1
- %tmp18232 = getelementptr inbounds float* %tmp18231, i64 1
- %tmp18233 = getelementptr inbounds float* %tmp18232, i64 1
- %tmp18234 = getelementptr inbounds float* %tmp18233, i64 1
- %tmp18235 = getelementptr inbounds float* %tmp18234, i64 1
- %tmp18236 = getelementptr inbounds float* %tmp18235, i64 1
- %tmp18237 = getelementptr inbounds float* %tmp18236, i64 1
- %tmp18238 = getelementptr inbounds float* %tmp18237, i64 1
- %tmp18239 = getelementptr inbounds float* %tmp18238, i64 1
- %tmp18240 = getelementptr inbounds float* %tmp18239, i64 1
- %tmp18241 = getelementptr inbounds float* %tmp18240, i64 1
- %tmp18242 = getelementptr inbounds float* %tmp18241, i64 1
- %tmp18243 = getelementptr inbounds float* %tmp18242, i64 1
- %tmp18244 = getelementptr inbounds float* %tmp18243, i64 1
- %tmp18245 = getelementptr inbounds float* %tmp18244, i64 1
- %tmp18246 = getelementptr inbounds float* %tmp18245, i64 1
- %tmp18247 = getelementptr inbounds float* %tmp18246, i64 1
- %tmp18248 = getelementptr inbounds float* %tmp18247, i64 1
- %tmp18249 = getelementptr inbounds float* %tmp18248, i64 1
- %tmp18250 = getelementptr inbounds float* %tmp18249, i64 1
- %tmp18251 = getelementptr inbounds float* %tmp18250, i64 1
- %tmp18252 = getelementptr inbounds float* %tmp18251, i64 1
- %tmp18253 = getelementptr inbounds float* %tmp18252, i64 1
- %tmp18254 = getelementptr inbounds float* %tmp18253, i64 1
- %tmp18255 = getelementptr inbounds float* %tmp18254, i64 1
- %tmp18256 = getelementptr inbounds float* %tmp18255, i64 1
- %tmp18257 = getelementptr inbounds float* %tmp18256, i64 1
- %tmp18258 = getelementptr inbounds float* %tmp18257, i64 1
- %tmp18259 = getelementptr inbounds float* %tmp18258, i64 1
- %tmp18260 = getelementptr inbounds float* %tmp18259, i64 1
- %tmp18261 = getelementptr inbounds float* %tmp18260, i64 1
- %tmp18262 = getelementptr inbounds float* %tmp18261, i64 1
- %tmp18263 = getelementptr inbounds float* %tmp18262, i64 1
- %tmp18264 = getelementptr inbounds float* %tmp18263, i64 1
- %tmp18265 = getelementptr inbounds float* %tmp18264, i64 1
- %tmp18266 = getelementptr inbounds float* %tmp18265, i64 1
- %tmp18267 = getelementptr inbounds float* %tmp18266, i64 1
- %tmp18268 = getelementptr inbounds float* %tmp18267, i64 1
- %tmp18269 = getelementptr inbounds float* %tmp18268, i64 1
- %tmp18270 = getelementptr inbounds float* %tmp18269, i64 1
- %tmp18271 = getelementptr inbounds float* %tmp18270, i64 1
- %tmp18272 = getelementptr inbounds float* %tmp18271, i64 1
- %tmp18273 = getelementptr inbounds float* %tmp18272, i64 1
- %tmp18274 = getelementptr inbounds float* %tmp18273, i64 1
- %tmp18275 = getelementptr inbounds float* %tmp18274, i64 1
- %tmp18276 = getelementptr inbounds float* %tmp18275, i64 1
- %tmp18277 = getelementptr inbounds float* %tmp18276, i64 1
- %tmp18278 = getelementptr inbounds float* %tmp18277, i64 1
- %tmp18279 = getelementptr inbounds float* %tmp18278, i64 1
- %tmp18280 = getelementptr inbounds float* %tmp18279, i64 1
- %tmp18281 = getelementptr inbounds float* %tmp18280, i64 1
- %tmp18282 = getelementptr inbounds float* %tmp18281, i64 1
- %tmp18283 = getelementptr inbounds float* %tmp18282, i64 1
- %tmp18284 = getelementptr inbounds float* %tmp18283, i64 1
- %tmp18285 = getelementptr inbounds float* %tmp18284, i64 1
- %tmp18286 = getelementptr inbounds float* %tmp18285, i64 1
- %tmp18287 = getelementptr inbounds float* %tmp18286, i64 1
- %tmp18288 = getelementptr inbounds float* %tmp18287, i64 1
- %tmp18289 = getelementptr inbounds float* %tmp18288, i64 1
- %tmp18290 = getelementptr inbounds float* %tmp18289, i64 1
- %tmp18291 = getelementptr inbounds float* %tmp18290, i64 1
- %tmp18292 = getelementptr inbounds float* %tmp18291, i64 1
- %tmp18293 = getelementptr inbounds float* %tmp18292, i64 1
- %tmp18294 = getelementptr inbounds float* %tmp18293, i64 1
- %tmp18295 = getelementptr inbounds float* %tmp18294, i64 1
- %tmp18296 = getelementptr inbounds float* %tmp18295, i64 1
- %tmp18297 = getelementptr inbounds float* %tmp18296, i64 1
- %tmp18298 = getelementptr inbounds float* %tmp18297, i64 1
- %tmp18299 = getelementptr inbounds float* %tmp18298, i64 1
- %tmp18300 = getelementptr inbounds float* %tmp18299, i64 1
- %tmp18301 = getelementptr inbounds float* %tmp18300, i64 1
- %tmp18302 = getelementptr inbounds float* %tmp18301, i64 1
- %tmp18303 = getelementptr inbounds float* %tmp18302, i64 1
- %tmp18304 = getelementptr inbounds float* %tmp18303, i64 1
- %tmp18305 = getelementptr inbounds float* %tmp18304, i64 1
- %tmp18306 = getelementptr inbounds float* %tmp18305, i64 1
- %tmp18307 = getelementptr inbounds float* %tmp18306, i64 1
- %tmp18308 = getelementptr inbounds float* %tmp18307, i64 1
- %tmp18309 = getelementptr inbounds float* %tmp18308, i64 1
- %tmp18310 = getelementptr inbounds float* %tmp18309, i64 1
- %tmp18311 = getelementptr inbounds float* %tmp18310, i64 1
- %tmp18312 = getelementptr inbounds float* %tmp18311, i64 1
- %tmp18313 = getelementptr inbounds float* %tmp18312, i64 1
- %tmp18314 = getelementptr inbounds float* %tmp18313, i64 1
- %tmp18315 = getelementptr inbounds float* %tmp18314, i64 1
- %tmp18316 = getelementptr inbounds float* %tmp18315, i64 1
- %tmp18317 = getelementptr inbounds float* %tmp18316, i64 1
- %tmp18318 = getelementptr inbounds float* %tmp18317, i64 1
- %tmp18319 = getelementptr inbounds float* %tmp18318, i64 1
- %tmp18320 = getelementptr inbounds float* %tmp18319, i64 1
- %tmp18321 = getelementptr inbounds float* %tmp18320, i64 1
- %tmp18322 = getelementptr inbounds float* %tmp18321, i64 1
- %tmp18323 = getelementptr inbounds float* %tmp18322, i64 1
- %tmp18324 = getelementptr inbounds float* %tmp18323, i64 1
- %tmp18325 = getelementptr inbounds float* %tmp18324, i64 1
- %tmp18326 = getelementptr inbounds float* %tmp18325, i64 1
- %tmp18327 = getelementptr inbounds float* %tmp18326, i64 1
- %tmp18328 = getelementptr inbounds float* %tmp18327, i64 1
- %tmp18329 = getelementptr inbounds float* %tmp18328, i64 1
- %tmp18330 = getelementptr inbounds float* %tmp18329, i64 1
- %tmp18331 = getelementptr inbounds float* %tmp18330, i64 1
- %tmp18332 = getelementptr inbounds float* %tmp18331, i64 1
- %tmp18333 = getelementptr inbounds float* %tmp18332, i64 1
- %tmp18334 = getelementptr inbounds float* %tmp18333, i64 1
- %tmp18335 = getelementptr inbounds float* %tmp18334, i64 1
- %tmp18336 = getelementptr inbounds float* %tmp18335, i64 1
- %tmp18337 = getelementptr inbounds float* %tmp18336, i64 1
- %tmp18338 = getelementptr inbounds float* %tmp18337, i64 1
- %tmp18339 = getelementptr inbounds float* %tmp18338, i64 1
- %tmp18340 = getelementptr inbounds float* %tmp18339, i64 1
- %tmp18341 = getelementptr inbounds float* %tmp18340, i64 1
- %tmp18342 = getelementptr inbounds float* %tmp18341, i64 1
- %tmp18343 = getelementptr inbounds float* %tmp18342, i64 1
- %tmp18344 = getelementptr inbounds float* %tmp18343, i64 1
- %tmp18345 = getelementptr inbounds float* %tmp18344, i64 1
- %tmp18346 = getelementptr inbounds float* %tmp18345, i64 1
- %tmp18347 = getelementptr inbounds float* %tmp18346, i64 1
- %tmp18348 = getelementptr inbounds float* %tmp18347, i64 1
- %tmp18349 = getelementptr inbounds float* %tmp18348, i64 1
- %tmp18350 = getelementptr inbounds float* %tmp18349, i64 1
- %tmp18351 = getelementptr inbounds float* %tmp18350, i64 1
- %tmp18352 = getelementptr inbounds float* %tmp18351, i64 1
- %tmp18353 = getelementptr inbounds float* %tmp18352, i64 1
- %tmp18354 = getelementptr inbounds float* %tmp18353, i64 1
- %tmp18355 = getelementptr inbounds float* %tmp18354, i64 1
- %tmp18356 = getelementptr inbounds float* %tmp18355, i64 1
- %tmp18357 = getelementptr inbounds float* %tmp18356, i64 1
- %tmp18358 = getelementptr inbounds float* %tmp18357, i64 1
- %tmp18359 = getelementptr inbounds float* %tmp18358, i64 1
- %tmp18360 = getelementptr inbounds float* %tmp18359, i64 1
- %tmp18361 = getelementptr inbounds float* %tmp18360, i64 1
- %tmp18362 = getelementptr inbounds float* %tmp18361, i64 1
- %tmp18363 = getelementptr inbounds float* %tmp18362, i64 1
- %tmp18364 = getelementptr inbounds float* %tmp18363, i64 1
- %tmp18365 = getelementptr inbounds float* %tmp18364, i64 1
- %tmp18366 = getelementptr inbounds float* %tmp18365, i64 1
- %tmp18367 = getelementptr inbounds float* %tmp18366, i64 1
- %tmp18368 = getelementptr inbounds float* %tmp18367, i64 1
- %tmp18369 = getelementptr inbounds float* %tmp18368, i64 1
- %tmp18370 = getelementptr inbounds float* %tmp18369, i64 1
- %tmp18371 = getelementptr inbounds float* %tmp18370, i64 1
- %tmp18372 = getelementptr inbounds float* %tmp18371, i64 1
- %tmp18373 = getelementptr inbounds float* %tmp18372, i64 1
- %tmp18374 = getelementptr inbounds float* %tmp18373, i64 1
- %tmp18375 = getelementptr inbounds float* %tmp18374, i64 1
- %tmp18376 = getelementptr inbounds float* %tmp18375, i64 1
- %tmp18377 = getelementptr inbounds float* %tmp18376, i64 1
- %tmp18378 = getelementptr inbounds float* %tmp18377, i64 1
- %tmp18379 = getelementptr inbounds float* %tmp18378, i64 1
- %tmp18380 = getelementptr inbounds float* %tmp18379, i64 1
- %tmp18381 = getelementptr inbounds float* %tmp18380, i64 1
- %tmp18382 = getelementptr inbounds float* %tmp18381, i64 1
- %tmp18383 = getelementptr inbounds float* %tmp18382, i64 1
- %tmp18384 = getelementptr inbounds float* %tmp18383, i64 1
- %tmp18385 = getelementptr inbounds float* %tmp18384, i64 1
- %tmp18386 = getelementptr inbounds float* %tmp18385, i64 1
- %tmp18387 = getelementptr inbounds float* %tmp18386, i64 1
- %tmp18388 = getelementptr inbounds float* %tmp18387, i64 1
- %tmp18389 = getelementptr inbounds float* %tmp18388, i64 1
- %tmp18390 = getelementptr inbounds float* %tmp18389, i64 1
- %tmp18391 = getelementptr inbounds float* %tmp18390, i64 1
- %tmp18392 = getelementptr inbounds float* %tmp18391, i64 1
- %tmp18393 = getelementptr inbounds float* %tmp18392, i64 1
- %tmp18394 = getelementptr inbounds float* %tmp18393, i64 1
- %tmp18395 = getelementptr inbounds float* %tmp18394, i64 1
- %tmp18396 = getelementptr inbounds float* %tmp18395, i64 1
- %tmp18397 = getelementptr inbounds float* %tmp18396, i64 1
- %tmp18398 = getelementptr inbounds float* %tmp18397, i64 1
- %tmp18399 = getelementptr inbounds float* %tmp18398, i64 1
- %tmp18400 = getelementptr inbounds float* %tmp18399, i64 1
- %tmp18401 = getelementptr inbounds float* %tmp18400, i64 1
- %tmp18402 = getelementptr inbounds float* %tmp18401, i64 1
- %tmp18403 = getelementptr inbounds float* %tmp18402, i64 1
- %tmp18404 = getelementptr inbounds float* %tmp18403, i64 1
- %tmp18405 = getelementptr inbounds float* %tmp18404, i64 1
- %tmp18406 = getelementptr inbounds float* %tmp18405, i64 1
- %tmp18407 = getelementptr inbounds float* %tmp18406, i64 1
- %tmp18408 = getelementptr inbounds float* %tmp18407, i64 1
- %tmp18409 = getelementptr inbounds float* %tmp18408, i64 1
- %tmp18410 = getelementptr inbounds float* %tmp18409, i64 1
- %tmp18411 = getelementptr inbounds float* %tmp18410, i64 1
- %tmp18412 = getelementptr inbounds float* %tmp18411, i64 1
- %tmp18413 = getelementptr inbounds float* %tmp18412, i64 1
- %tmp18414 = getelementptr inbounds float* %tmp18413, i64 1
- %tmp18415 = getelementptr inbounds float* %tmp18414, i64 1
- %tmp18416 = getelementptr inbounds float* %tmp18415, i64 1
- %tmp18417 = getelementptr inbounds float* %tmp18416, i64 1
- %tmp18418 = getelementptr inbounds float* %tmp18417, i64 1
- %tmp18419 = getelementptr inbounds float* %tmp18418, i64 1
- %tmp18420 = getelementptr inbounds float* %tmp18419, i64 1
- %tmp18421 = getelementptr inbounds float* %tmp18420, i64 1
- %tmp18422 = getelementptr inbounds float* %tmp18421, i64 1
- %tmp18423 = getelementptr inbounds float* %tmp18422, i64 1
- %tmp18424 = getelementptr inbounds float* %tmp18423, i64 1
- %tmp18425 = getelementptr inbounds float* %tmp18424, i64 1
- %tmp18426 = getelementptr inbounds float* %tmp18425, i64 1
- %tmp18427 = getelementptr inbounds float* %tmp18426, i64 1
- %tmp18428 = getelementptr inbounds float* %tmp18427, i64 1
- %tmp18429 = getelementptr inbounds float* %tmp18428, i64 1
- %tmp18430 = getelementptr inbounds float* %tmp18429, i64 1
- %tmp18431 = getelementptr inbounds float* %tmp18430, i64 1
- %tmp18432 = getelementptr inbounds float* %tmp18431, i64 1
- %tmp18433 = getelementptr inbounds float* %tmp18432, i64 1
- %tmp18434 = getelementptr inbounds float* %tmp18433, i64 1
- %tmp18435 = getelementptr inbounds float* %tmp18434, i64 1
- %tmp18436 = getelementptr inbounds float* %tmp18435, i64 1
- %tmp18437 = getelementptr inbounds float* %tmp18436, i64 1
- %tmp18438 = getelementptr inbounds float* %tmp18437, i64 1
- %tmp18439 = getelementptr inbounds float* %tmp18438, i64 1
- %tmp18440 = getelementptr inbounds float* %tmp18439, i64 1
- %tmp18441 = getelementptr inbounds float* %tmp18440, i64 1
- %tmp18442 = getelementptr inbounds float* %tmp18441, i64 1
- %tmp18443 = getelementptr inbounds float* %tmp18442, i64 1
- %tmp18444 = getelementptr inbounds float* %tmp18443, i64 1
- %tmp18445 = getelementptr inbounds float* %tmp18444, i64 1
- %tmp18446 = getelementptr inbounds float* %tmp18445, i64 1
- %tmp18447 = getelementptr inbounds float* %tmp18446, i64 1
- %tmp18448 = getelementptr inbounds float* %tmp18447, i64 1
- %tmp18449 = getelementptr inbounds float* %tmp18448, i64 1
- %tmp18450 = getelementptr inbounds float* %tmp18449, i64 1
- %tmp18451 = getelementptr inbounds float* %tmp18450, i64 1
- %tmp18452 = getelementptr inbounds float* %tmp18451, i64 1
- %tmp18453 = getelementptr inbounds float* %tmp18452, i64 1
- %tmp18454 = getelementptr inbounds float* %tmp18453, i64 1
- %tmp18455 = getelementptr inbounds float* %tmp18454, i64 1
- %tmp18456 = getelementptr inbounds float* %tmp18455, i64 1
- %tmp18457 = getelementptr inbounds float* %tmp18456, i64 1
- %tmp18458 = getelementptr inbounds float* %tmp18457, i64 1
- %tmp18459 = getelementptr inbounds float* %tmp18458, i64 1
- %tmp18460 = getelementptr inbounds float* %tmp18459, i64 1
- %tmp18461 = getelementptr inbounds float* %tmp18460, i64 1
- %tmp18462 = getelementptr inbounds float* %tmp18461, i64 1
- %tmp18463 = getelementptr inbounds float* %tmp18462, i64 1
- %tmp18464 = getelementptr inbounds float* %tmp18463, i64 1
- %tmp18465 = getelementptr inbounds float* %tmp18464, i64 1
- %tmp18466 = getelementptr inbounds float* %tmp18465, i64 1
- %tmp18467 = getelementptr inbounds float* %tmp18466, i64 1
- %tmp18468 = getelementptr inbounds float* %tmp18467, i64 1
- %tmp18469 = getelementptr inbounds float* %tmp18468, i64 1
- %tmp18470 = getelementptr inbounds float* %tmp18469, i64 1
- %tmp18471 = getelementptr inbounds float* %tmp18470, i64 1
- %tmp18472 = getelementptr inbounds float* %tmp18471, i64 1
- %tmp18473 = getelementptr inbounds float* %tmp18472, i64 1
- %tmp18474 = getelementptr inbounds float* %tmp18473, i64 1
- %tmp18475 = getelementptr inbounds float* %tmp18474, i64 1
- %tmp18476 = getelementptr inbounds float* %tmp18475, i64 1
- %tmp18477 = getelementptr inbounds float* %tmp18476, i64 1
- %tmp18478 = getelementptr inbounds float* %tmp18477, i64 1
- %tmp18479 = getelementptr inbounds float* %tmp18478, i64 1
- %tmp18480 = getelementptr inbounds float* %tmp18479, i64 1
- %tmp18481 = getelementptr inbounds float* %tmp18480, i64 1
- %tmp18482 = getelementptr inbounds float* %tmp18481, i64 1
- %tmp18483 = getelementptr inbounds float* %tmp18482, i64 1
- %tmp18484 = getelementptr inbounds float* %tmp18483, i64 1
- %tmp18485 = getelementptr inbounds float* %tmp18484, i64 1
- %tmp18486 = getelementptr inbounds float* %tmp18485, i64 1
- %tmp18487 = getelementptr inbounds float* %tmp18486, i64 1
- %tmp18488 = getelementptr inbounds float* %tmp18487, i64 1
- %tmp18489 = getelementptr inbounds float* %tmp18488, i64 1
- %tmp18490 = getelementptr inbounds float* %tmp18489, i64 1
- %tmp18491 = getelementptr inbounds float* %tmp18490, i64 1
- %tmp18492 = getelementptr inbounds float* %tmp18491, i64 1
- %tmp18493 = getelementptr inbounds float* %tmp18492, i64 1
- %tmp18494 = getelementptr inbounds float* %tmp18493, i64 1
- %tmp18495 = getelementptr inbounds float* %tmp18494, i64 1
- %tmp18496 = getelementptr inbounds float* %tmp18495, i64 1
- %tmp18497 = getelementptr inbounds float* %tmp18496, i64 1
- %tmp18498 = getelementptr inbounds float* %tmp18497, i64 1
- %tmp18499 = getelementptr inbounds float* %tmp18498, i64 1
- %tmp18500 = getelementptr inbounds float* %tmp18499, i64 1
- %tmp18501 = getelementptr inbounds float* %tmp18500, i64 1
- %tmp18502 = getelementptr inbounds float* %tmp18501, i64 1
- %tmp18503 = getelementptr inbounds float* %tmp18502, i64 1
- %tmp18504 = getelementptr inbounds float* %tmp18503, i64 1
- %tmp18505 = getelementptr inbounds float* %tmp18504, i64 1
- %tmp18506 = getelementptr inbounds float* %tmp18505, i64 1
- %tmp18507 = getelementptr inbounds float* %tmp18506, i64 1
- %tmp18508 = getelementptr inbounds float* %tmp18507, i64 1
- %tmp18509 = getelementptr inbounds float* %tmp18508, i64 1
- %tmp18510 = getelementptr inbounds float* %tmp18509, i64 1
- %tmp18511 = getelementptr inbounds float* %tmp18510, i64 1
- %tmp18512 = getelementptr inbounds float* %tmp18511, i64 1
- %tmp18513 = getelementptr inbounds float* %tmp18512, i64 1
- %tmp18514 = getelementptr inbounds float* %tmp18513, i64 1
- %tmp18515 = getelementptr inbounds float* %tmp18514, i64 1
- %tmp18516 = getelementptr inbounds float* %tmp18515, i64 1
- %tmp18517 = getelementptr inbounds float* %tmp18516, i64 1
- %tmp18518 = getelementptr inbounds float* %tmp18517, i64 1
- %tmp18519 = getelementptr inbounds float* %tmp18518, i64 1
- %tmp18520 = getelementptr inbounds float* %tmp18519, i64 1
- %tmp18521 = getelementptr inbounds float* %tmp18520, i64 1
- %tmp18522 = getelementptr inbounds float* %tmp18521, i64 1
- %tmp18523 = getelementptr inbounds float* %tmp18522, i64 1
- %tmp18524 = getelementptr inbounds float* %tmp18523, i64 1
- %tmp18525 = getelementptr inbounds float* %tmp18524, i64 1
- %tmp18526 = getelementptr inbounds float* %tmp18525, i64 1
- %tmp18527 = getelementptr inbounds float* %tmp18526, i64 1
- %tmp18528 = getelementptr inbounds float* %tmp18527, i64 1
- %tmp18529 = getelementptr inbounds float* %tmp18528, i64 1
- %tmp18530 = getelementptr inbounds float* %tmp18529, i64 1
- %tmp18531 = getelementptr inbounds float* %tmp18530, i64 1
- %tmp18532 = getelementptr inbounds float* %tmp18531, i64 1
- %tmp18533 = getelementptr inbounds float* %tmp18532, i64 1
- %tmp18534 = getelementptr inbounds float* %tmp18533, i64 1
- %tmp18535 = getelementptr inbounds float* %tmp18534, i64 1
- %tmp18536 = getelementptr inbounds float* %tmp18535, i64 1
- %tmp18537 = getelementptr inbounds float* %tmp18536, i64 1
- %tmp18538 = getelementptr inbounds float* %tmp18537, i64 1
- %tmp18539 = getelementptr inbounds float* %tmp18538, i64 1
- %tmp18540 = getelementptr inbounds float* %tmp18539, i64 1
- %tmp18541 = getelementptr inbounds float* %tmp18540, i64 1
- %tmp18542 = getelementptr inbounds float* %tmp18541, i64 1
- %tmp18543 = getelementptr inbounds float* %tmp18542, i64 1
- %tmp18544 = getelementptr inbounds float* %tmp18543, i64 1
- %tmp18545 = getelementptr inbounds float* %tmp18544, i64 1
- %tmp18546 = getelementptr inbounds float* %tmp18545, i64 1
- %tmp18547 = getelementptr inbounds float* %tmp18546, i64 1
- %tmp18548 = getelementptr inbounds float* %tmp18547, i64 1
- %tmp18549 = getelementptr inbounds float* %tmp18548, i64 1
- %tmp18550 = getelementptr inbounds float* %tmp18549, i64 1
- %tmp18551 = getelementptr inbounds float* %tmp18550, i64 1
- %tmp18552 = getelementptr inbounds float* %tmp18551, i64 1
- %tmp18553 = getelementptr inbounds float* %tmp18552, i64 1
- %tmp18554 = getelementptr inbounds float* %tmp18553, i64 1
- %tmp18555 = getelementptr inbounds float* %tmp18554, i64 1
- %tmp18556 = getelementptr inbounds float* %tmp18555, i64 1
- %tmp18557 = getelementptr inbounds float* %tmp18556, i64 1
- %tmp18558 = getelementptr inbounds float* %tmp18557, i64 1
- %tmp18559 = getelementptr inbounds float* %tmp18558, i64 1
- %tmp18560 = getelementptr inbounds float* %tmp18559, i64 1
- %tmp18561 = getelementptr inbounds float* %tmp18560, i64 1
- %tmp18562 = getelementptr inbounds float* %tmp18561, i64 1
- %tmp18563 = getelementptr inbounds float* %tmp18562, i64 1
- %tmp18564 = getelementptr inbounds float* %tmp18563, i64 1
- %tmp18565 = getelementptr inbounds float* %tmp18564, i64 1
- %tmp18566 = getelementptr inbounds float* %tmp18565, i64 1
- %tmp18567 = getelementptr inbounds float* %tmp18566, i64 1
- %tmp18568 = getelementptr inbounds float* %tmp18567, i64 1
- %tmp18569 = getelementptr inbounds float* %tmp18568, i64 1
- %tmp18570 = getelementptr inbounds float* %tmp18569, i64 1
- %tmp18571 = getelementptr inbounds float* %tmp18570, i64 1
- %tmp18572 = getelementptr inbounds float* %tmp18571, i64 1
- %tmp18573 = getelementptr inbounds float* %tmp18572, i64 1
- %tmp18574 = getelementptr inbounds float* %tmp18573, i64 1
- %tmp18575 = getelementptr inbounds float* %tmp18574, i64 1
- %tmp18576 = getelementptr inbounds float* %tmp18575, i64 1
- %tmp18577 = getelementptr inbounds float* %tmp18576, i64 1
- %tmp18578 = getelementptr inbounds float* %tmp18577, i64 1
- %tmp18579 = getelementptr inbounds float* %tmp18578, i64 1
- %tmp18580 = getelementptr inbounds float* %tmp18579, i64 1
- %tmp18581 = getelementptr inbounds float* %tmp18580, i64 1
- %tmp18582 = getelementptr inbounds float* %tmp18581, i64 1
- %tmp18583 = getelementptr inbounds float* %tmp18582, i64 1
- %tmp18584 = getelementptr inbounds float* %tmp18583, i64 1
- %tmp18585 = getelementptr inbounds float* %tmp18584, i64 1
- %tmp18586 = getelementptr inbounds float* %tmp18585, i64 1
- %tmp18587 = getelementptr inbounds float* %tmp18586, i64 1
- %tmp18588 = getelementptr inbounds float* %tmp18587, i64 1
- %tmp18589 = getelementptr inbounds float* %tmp18588, i64 1
- %tmp18590 = getelementptr inbounds float* %tmp18589, i64 1
- %tmp18591 = getelementptr inbounds float* %tmp18590, i64 1
- %tmp18592 = getelementptr inbounds float* %tmp18591, i64 1
- %tmp18593 = getelementptr inbounds float* %tmp18592, i64 1
- %tmp18594 = getelementptr inbounds float* %tmp18593, i64 1
- %tmp18595 = getelementptr inbounds float* %tmp18594, i64 1
- %tmp18596 = getelementptr inbounds float* %tmp18595, i64 1
- %tmp18597 = getelementptr inbounds float* %tmp18596, i64 1
- %tmp18598 = getelementptr inbounds float* %tmp18597, i64 1
- %tmp18599 = getelementptr inbounds float* %tmp18598, i64 1
- %tmp18600 = getelementptr inbounds float* %tmp18599, i64 1
- %tmp18601 = getelementptr inbounds float* %tmp18600, i64 1
- %tmp18602 = getelementptr inbounds float* %tmp18601, i64 1
- %tmp18603 = getelementptr inbounds float* %tmp18602, i64 1
- %tmp18604 = getelementptr inbounds float* %tmp18603, i64 1
- %tmp18605 = getelementptr inbounds float* %tmp18604, i64 1
- %tmp18606 = getelementptr inbounds float* %tmp18605, i64 1
- %tmp18607 = getelementptr inbounds float* %tmp18606, i64 1
- %tmp18608 = getelementptr inbounds float* %tmp18607, i64 1
- %tmp18609 = getelementptr inbounds float* %tmp18608, i64 1
- %tmp18610 = getelementptr inbounds float* %tmp18609, i64 1
- %tmp18611 = getelementptr inbounds float* %tmp18610, i64 1
- %tmp18612 = getelementptr inbounds float* %tmp18611, i64 1
- %tmp18613 = getelementptr inbounds float* %tmp18612, i64 1
- %tmp18614 = getelementptr inbounds float* %tmp18613, i64 1
- %tmp18615 = getelementptr inbounds float* %tmp18614, i64 1
- %tmp18616 = getelementptr inbounds float* %tmp18615, i64 1
- %tmp18617 = getelementptr inbounds float* %tmp18616, i64 1
- %tmp18618 = getelementptr inbounds float* %tmp18617, i64 1
- %tmp18619 = getelementptr inbounds float* %tmp18618, i64 1
- %tmp18620 = getelementptr inbounds float* %tmp18619, i64 1
- %tmp18621 = getelementptr inbounds float* %tmp18620, i64 1
- %tmp18622 = getelementptr inbounds float* %tmp18621, i64 1
- %tmp18623 = getelementptr inbounds float* %tmp18622, i64 1
- %tmp18624 = getelementptr inbounds float* %tmp18623, i64 1
- %tmp18625 = getelementptr inbounds float* %tmp18624, i64 1
- %tmp18626 = getelementptr inbounds float* %tmp18625, i64 1
- %tmp18627 = getelementptr inbounds float* %tmp18626, i64 1
- %tmp18628 = getelementptr inbounds float* %tmp18627, i64 1
- %tmp18629 = getelementptr inbounds float* %tmp18628, i64 1
- %tmp18630 = getelementptr inbounds float* %tmp18629, i64 1
- %tmp18631 = getelementptr inbounds float* %tmp18630, i64 1
- %tmp18632 = getelementptr inbounds float* %tmp18631, i64 1
- %tmp18633 = getelementptr inbounds float* %tmp18632, i64 1
- %tmp18634 = getelementptr inbounds float* %tmp18633, i64 1
- %tmp18635 = getelementptr inbounds float* %tmp18634, i64 1
- %tmp18636 = getelementptr inbounds float* %tmp18635, i64 1
- %tmp18637 = getelementptr inbounds float* %tmp18636, i64 1
- %tmp18638 = getelementptr inbounds float* %tmp18637, i64 1
- %tmp18639 = getelementptr inbounds float* %tmp18638, i64 1
- %tmp18640 = getelementptr inbounds float* %tmp18639, i64 1
- %tmp18641 = getelementptr inbounds float* %tmp18640, i64 1
- %tmp18642 = getelementptr inbounds float* %tmp18641, i64 1
- %tmp18643 = getelementptr inbounds float* %tmp18642, i64 1
- %tmp18644 = getelementptr inbounds float* %tmp18643, i64 1
- %tmp18645 = getelementptr inbounds float* %tmp18644, i64 1
- %tmp18646 = getelementptr inbounds float* %tmp18645, i64 1
- %tmp18647 = getelementptr inbounds float* %tmp18646, i64 1
- %tmp18648 = getelementptr inbounds float* %tmp18647, i64 1
- %tmp18649 = getelementptr inbounds float* %tmp18648, i64 1
- %tmp18650 = getelementptr inbounds float* %tmp18649, i64 1
- %tmp18651 = getelementptr inbounds float* %tmp18650, i64 1
- %tmp18652 = getelementptr inbounds float* %tmp18651, i64 1
- %tmp18653 = getelementptr inbounds float* %tmp18652, i64 1
- %tmp18654 = getelementptr inbounds float* %tmp18653, i64 1
- %tmp18655 = getelementptr inbounds float* %tmp18654, i64 1
- %tmp18656 = getelementptr inbounds float* %tmp18655, i64 1
- %tmp18657 = getelementptr inbounds float* %tmp18656, i64 1
- %tmp18658 = getelementptr inbounds float* %tmp18657, i64 1
- %tmp18659 = getelementptr inbounds float* %tmp18658, i64 1
- %tmp18660 = getelementptr inbounds float* %tmp18659, i64 1
- %tmp18661 = getelementptr inbounds float* %tmp18660, i64 1
- %tmp18662 = getelementptr inbounds float* %tmp18661, i64 1
- %tmp18663 = getelementptr inbounds float* %tmp18662, i64 1
- %tmp18664 = getelementptr inbounds float* %tmp18663, i64 1
- %tmp18665 = getelementptr inbounds float* %tmp18664, i64 1
- %tmp18666 = getelementptr inbounds float* %tmp18665, i64 1
- %tmp18667 = getelementptr inbounds float* %tmp18666, i64 1
- %tmp18668 = getelementptr inbounds float* %tmp18667, i64 1
- %tmp18669 = getelementptr inbounds float* %tmp18668, i64 1
- %tmp18670 = getelementptr inbounds float* %tmp18669, i64 1
- %tmp18671 = getelementptr inbounds float* %tmp18670, i64 1
- %tmp18672 = getelementptr inbounds float* %tmp18671, i64 1
- %tmp18673 = getelementptr inbounds float* %tmp18672, i64 1
- %tmp18674 = getelementptr inbounds float* %tmp18673, i64 1
- %tmp18675 = getelementptr inbounds float* %tmp18674, i64 1
- %tmp18676 = getelementptr inbounds float* %tmp18675, i64 1
- %tmp18677 = getelementptr inbounds float* %tmp18676, i64 1
- %tmp18678 = getelementptr inbounds float* %tmp18677, i64 1
- %tmp18679 = getelementptr inbounds float* %tmp18678, i64 1
- %tmp18680 = getelementptr inbounds float* %tmp18679, i64 1
- %tmp18681 = getelementptr inbounds float* %tmp18680, i64 1
- %tmp18682 = getelementptr inbounds float* %tmp18681, i64 1
- %tmp18683 = getelementptr inbounds float* %tmp18682, i64 1
- %tmp18684 = getelementptr inbounds float* %tmp18683, i64 1
- %tmp18685 = getelementptr inbounds float* %tmp18684, i64 1
- %tmp18686 = getelementptr inbounds float* %tmp18685, i64 1
- %tmp18687 = getelementptr inbounds float* %tmp18686, i64 1
- %tmp18688 = getelementptr inbounds float* %tmp18687, i64 1
- %tmp18689 = getelementptr inbounds float* %tmp18688, i64 1
- %tmp18690 = getelementptr inbounds float* %tmp18689, i64 1
- %tmp18691 = getelementptr inbounds float* %tmp18690, i64 1
- %tmp18692 = getelementptr inbounds float* %tmp18691, i64 1
- %tmp18693 = getelementptr inbounds float* %tmp18692, i64 1
- %tmp18694 = getelementptr inbounds float* %tmp18693, i64 1
- %tmp18695 = getelementptr inbounds float* %tmp18694, i64 1
- %tmp18696 = getelementptr inbounds float* %tmp18695, i64 1
- %tmp18697 = getelementptr inbounds float* %tmp18696, i64 1
- %tmp18698 = getelementptr inbounds float* %tmp18697, i64 1
- %tmp18699 = getelementptr inbounds float* %tmp18698, i64 1
- %tmp18700 = getelementptr inbounds float* %tmp18699, i64 1
- %tmp18701 = getelementptr inbounds float* %tmp18700, i64 1
- %tmp18702 = getelementptr inbounds float* %tmp18701, i64 1
- %tmp18703 = getelementptr inbounds float* %tmp18702, i64 1
- %tmp18704 = getelementptr inbounds float* %tmp18703, i64 1
- %tmp18705 = getelementptr inbounds float* %tmp18704, i64 1
- %tmp18706 = getelementptr inbounds float* %tmp18705, i64 1
- %tmp18707 = getelementptr inbounds float* %tmp18706, i64 1
- %tmp18708 = getelementptr inbounds float* %tmp18707, i64 1
- %tmp18709 = getelementptr inbounds float* %tmp18708, i64 1
- %tmp18710 = getelementptr inbounds float* %tmp18709, i64 1
- %tmp18711 = getelementptr inbounds float* %tmp18710, i64 1
- %tmp18712 = getelementptr inbounds float* %tmp18711, i64 1
- %tmp18713 = getelementptr inbounds float* %tmp18712, i64 1
- %tmp18714 = getelementptr inbounds float* %tmp18713, i64 1
- %tmp18715 = getelementptr inbounds float* %tmp18714, i64 1
- %tmp18716 = getelementptr inbounds float* %tmp18715, i64 1
- %tmp18717 = getelementptr inbounds float* %tmp18716, i64 1
- %tmp18718 = getelementptr inbounds float* %tmp18717, i64 1
- %tmp18719 = getelementptr inbounds float* %tmp18718, i64 1
- %tmp18720 = getelementptr inbounds float* %tmp18719, i64 1
- %tmp18721 = getelementptr inbounds float* %tmp18720, i64 1
- %tmp18722 = getelementptr inbounds float* %tmp18721, i64 1
- %tmp18723 = getelementptr inbounds float* %tmp18722, i64 1
- %tmp18724 = getelementptr inbounds float* %tmp18723, i64 1
- %tmp18725 = getelementptr inbounds float* %tmp18724, i64 1
- %tmp18726 = getelementptr inbounds float* %tmp18725, i64 1
- %tmp18727 = getelementptr inbounds float* %tmp18726, i64 1
- %tmp18728 = getelementptr inbounds float* %tmp18727, i64 1
- %tmp18729 = getelementptr inbounds float* %tmp18728, i64 1
- %tmp18730 = getelementptr inbounds float* %tmp18729, i64 1
- %tmp18731 = getelementptr inbounds float* %tmp18730, i64 1
- %tmp18732 = getelementptr inbounds float* %tmp18731, i64 1
- %tmp18733 = getelementptr inbounds float* %tmp18732, i64 1
- %tmp18734 = getelementptr inbounds float* %tmp18733, i64 1
- %tmp18735 = getelementptr inbounds float* %tmp18734, i64 1
- %tmp18736 = getelementptr inbounds float* %tmp18735, i64 1
- %tmp18737 = getelementptr inbounds float* %tmp18736, i64 1
- %tmp18738 = getelementptr inbounds float* %tmp18737, i64 1
- %tmp18739 = getelementptr inbounds float* %tmp18738, i64 1
- %tmp18740 = getelementptr inbounds float* %tmp18739, i64 1
- %tmp18741 = getelementptr inbounds float* %tmp18740, i64 1
- %tmp18742 = getelementptr inbounds float* %tmp18741, i64 1
- %tmp18743 = getelementptr inbounds float* %tmp18742, i64 1
- %tmp18744 = getelementptr inbounds float* %tmp18743, i64 1
- %tmp18745 = getelementptr inbounds float* %tmp18744, i64 1
- %tmp18746 = getelementptr inbounds float* %tmp18745, i64 1
- %tmp18747 = getelementptr inbounds float* %tmp18746, i64 1
- %tmp18748 = getelementptr inbounds float* %tmp18747, i64 1
- %tmp18749 = getelementptr inbounds float* %tmp18748, i64 1
- %tmp18750 = getelementptr inbounds float* %tmp18749, i64 1
- %tmp18751 = getelementptr inbounds float* %tmp18750, i64 1
- %tmp18752 = getelementptr inbounds float* %tmp18751, i64 1
- %tmp18753 = getelementptr inbounds float* %tmp18752, i64 1
- %tmp18754 = getelementptr inbounds float* %tmp18753, i64 1
- %tmp18755 = getelementptr inbounds float* %tmp18754, i64 1
- %tmp18756 = getelementptr inbounds float* %tmp18755, i64 1
- %tmp18757 = getelementptr inbounds float* %tmp18756, i64 1
- %tmp18758 = getelementptr inbounds float* %tmp18757, i64 1
- %tmp18759 = getelementptr inbounds float* %tmp18758, i64 1
- %tmp18760 = getelementptr inbounds float* %tmp18759, i64 1
- %tmp18761 = getelementptr inbounds float* %tmp18760, i64 1
- %tmp18762 = getelementptr inbounds float* %tmp18761, i64 1
- %tmp18763 = getelementptr inbounds float* %tmp18762, i64 1
- %tmp18764 = getelementptr inbounds float* %tmp18763, i64 1
- %tmp18765 = getelementptr inbounds float* %tmp18764, i64 1
- %tmp18766 = getelementptr inbounds float* %tmp18765, i64 1
- %tmp18767 = getelementptr inbounds float* %tmp18766, i64 1
- %tmp18768 = getelementptr inbounds float* %tmp18767, i64 1
- %tmp18769 = getelementptr inbounds float* %tmp18768, i64 1
- %tmp18770 = getelementptr inbounds float* %tmp18769, i64 1
- %tmp18771 = getelementptr inbounds float* %tmp18770, i64 1
- %tmp18772 = getelementptr inbounds float* %tmp18771, i64 1
- %tmp18773 = getelementptr inbounds float* %tmp18772, i64 1
- %tmp18774 = getelementptr inbounds float* %tmp18773, i64 1
- %tmp18775 = getelementptr inbounds float* %tmp18774, i64 1
- %tmp18776 = getelementptr inbounds float* %tmp18775, i64 1
- %tmp18777 = getelementptr inbounds float* %tmp18776, i64 1
- %tmp18778 = getelementptr inbounds float* %tmp18777, i64 1
- %tmp18779 = getelementptr inbounds float* %tmp18778, i64 1
- %tmp18780 = getelementptr inbounds float* %tmp18779, i64 1
- %tmp18781 = getelementptr inbounds float* %tmp18780, i64 1
- %tmp18782 = getelementptr inbounds float* %tmp18781, i64 1
- %tmp18783 = getelementptr inbounds float* %tmp18782, i64 1
- %tmp18784 = getelementptr inbounds float* %tmp18783, i64 1
- %tmp18785 = getelementptr inbounds float* %tmp18784, i64 1
- %tmp18786 = getelementptr inbounds float* %tmp18785, i64 1
- %tmp18787 = getelementptr inbounds float* %tmp18786, i64 1
- %tmp18788 = getelementptr inbounds float* %tmp18787, i64 1
- %tmp18789 = getelementptr inbounds float* %tmp18788, i64 1
- %tmp18790 = getelementptr inbounds float* %tmp18789, i64 1
- %tmp18791 = getelementptr inbounds float* %tmp18790, i64 1
- %tmp18792 = getelementptr inbounds float* %tmp18791, i64 1
- %tmp18793 = getelementptr inbounds float* %tmp18792, i64 1
- %tmp18794 = getelementptr inbounds float* %tmp18793, i64 1
- %tmp18795 = getelementptr inbounds float* %tmp18794, i64 1
- %tmp18796 = getelementptr inbounds float* %tmp18795, i64 1
- %tmp18797 = getelementptr inbounds float* %tmp18796, i64 1
- %tmp18798 = getelementptr inbounds float* %tmp18797, i64 1
- %tmp18799 = getelementptr inbounds float* %tmp18798, i64 1
- %tmp18800 = getelementptr inbounds float* %tmp18799, i64 1
- %tmp18801 = getelementptr inbounds float* %tmp18800, i64 1
- %tmp18802 = getelementptr inbounds float* %tmp18801, i64 1
- %tmp18803 = getelementptr inbounds float* %tmp18802, i64 1
- %tmp18804 = getelementptr inbounds float* %tmp18803, i64 1
- %tmp18805 = getelementptr inbounds float* %tmp18804, i64 1
- %tmp18806 = getelementptr inbounds float* %tmp18805, i64 1
- %tmp18807 = getelementptr inbounds float* %tmp18806, i64 1
- %tmp18808 = getelementptr inbounds float* %tmp18807, i64 1
- %tmp18809 = getelementptr inbounds float* %tmp18808, i64 1
- %tmp18810 = getelementptr inbounds float* %tmp18809, i64 1
- %tmp18811 = getelementptr inbounds float* %tmp18810, i64 1
- %tmp18812 = getelementptr inbounds float* %tmp18811, i64 1
- %tmp18813 = getelementptr inbounds float* %tmp18812, i64 1
- %tmp18814 = getelementptr inbounds float* %tmp18813, i64 1
- %tmp18815 = getelementptr inbounds float* %tmp18814, i64 1
- %tmp18816 = getelementptr inbounds float* %tmp18815, i64 1
- %tmp18817 = getelementptr inbounds float* %tmp18816, i64 1
- %tmp18818 = getelementptr inbounds float* %tmp18817, i64 1
- %tmp18819 = getelementptr inbounds float* %tmp18818, i64 1
- %tmp18820 = getelementptr inbounds float* %tmp18819, i64 1
- %tmp18821 = getelementptr inbounds float* %tmp18820, i64 1
- %tmp18822 = getelementptr inbounds float* %tmp18821, i64 1
- %tmp18823 = getelementptr inbounds float* %tmp18822, i64 1
- %tmp18824 = getelementptr inbounds float* %tmp18823, i64 1
- %tmp18825 = getelementptr inbounds float* %tmp18824, i64 1
- %tmp18826 = getelementptr inbounds float* %tmp18825, i64 1
- %tmp18827 = getelementptr inbounds float* %tmp18826, i64 1
- %tmp18828 = getelementptr inbounds float* %tmp18827, i64 1
- %tmp18829 = getelementptr inbounds float* %tmp18828, i64 1
- %tmp18830 = getelementptr inbounds float* %tmp18829, i64 1
- %tmp18831 = getelementptr inbounds float* %tmp18830, i64 1
- %tmp18832 = getelementptr inbounds float* %tmp18831, i64 1
- %tmp18833 = getelementptr inbounds float* %tmp18832, i64 1
- %tmp18834 = getelementptr inbounds float* %tmp18833, i64 1
- %tmp18835 = getelementptr inbounds float* %tmp18834, i64 1
- %tmp18836 = getelementptr inbounds float* %tmp18835, i64 1
- %tmp18837 = getelementptr inbounds float* %tmp18836, i64 1
- %tmp18838 = getelementptr inbounds float* %tmp18837, i64 1
- %tmp18839 = getelementptr inbounds float* %tmp18838, i64 1
- %tmp18840 = getelementptr inbounds float* %tmp18839, i64 1
- %tmp18841 = getelementptr inbounds float* %tmp18840, i64 1
- %tmp18842 = getelementptr inbounds float* %tmp18841, i64 1
- %tmp18843 = getelementptr inbounds float* %tmp18842, i64 1
- %tmp18844 = getelementptr inbounds float* %tmp18843, i64 1
- %tmp18845 = getelementptr inbounds float* %tmp18844, i64 1
- %tmp18846 = getelementptr inbounds float* %tmp18845, i64 1
- %tmp18847 = getelementptr inbounds float* %tmp18846, i64 1
- %tmp18848 = getelementptr inbounds float* %tmp18847, i64 1
- %tmp18849 = getelementptr inbounds float* %tmp18848, i64 1
- %tmp18850 = getelementptr inbounds float* %tmp18849, i64 1
- %tmp18851 = getelementptr inbounds float* %tmp18850, i64 1
- %tmp18852 = getelementptr inbounds float* %tmp18851, i64 1
- %tmp18853 = getelementptr inbounds float* %tmp18852, i64 1
- %tmp18854 = getelementptr inbounds float* %tmp18853, i64 1
- %tmp18855 = getelementptr inbounds float* %tmp18854, i64 1
- %tmp18856 = getelementptr inbounds float* %tmp18855, i64 1
- %tmp18857 = getelementptr inbounds float* %tmp18856, i64 1
- %tmp18858 = getelementptr inbounds float* %tmp18857, i64 1
- %tmp18859 = getelementptr inbounds float* %tmp18858, i64 1
- %tmp18860 = getelementptr inbounds float* %tmp18859, i64 1
- %tmp18861 = getelementptr inbounds float* %tmp18860, i64 1
- %tmp18862 = getelementptr inbounds float* %tmp18861, i64 1
- %tmp18863 = getelementptr inbounds float* %tmp18862, i64 1
- %tmp18864 = getelementptr inbounds float* %tmp18863, i64 1
- %tmp18865 = getelementptr inbounds float* %tmp18864, i64 1
- %tmp18866 = getelementptr inbounds float* %tmp18865, i64 1
- %tmp18867 = getelementptr inbounds float* %tmp18866, i64 1
- %tmp18868 = getelementptr inbounds float* %tmp18867, i64 1
- %tmp18869 = getelementptr inbounds float* %tmp18868, i64 1
- %tmp18870 = getelementptr inbounds float* %tmp18869, i64 1
- %tmp18871 = getelementptr inbounds float* %tmp18870, i64 1
- %tmp18872 = getelementptr inbounds float* %tmp18871, i64 1
- %tmp18873 = getelementptr inbounds float* %tmp18872, i64 1
- %tmp18874 = getelementptr inbounds float* %tmp18873, i64 1
- %tmp18875 = getelementptr inbounds float* %tmp18874, i64 1
- %tmp18876 = getelementptr inbounds float* %tmp18875, i64 1
- %tmp18877 = getelementptr inbounds float* %tmp18876, i64 1
- %tmp18878 = getelementptr inbounds float* %tmp18877, i64 1
- %tmp18879 = getelementptr inbounds float* %tmp18878, i64 1
- %tmp18880 = getelementptr inbounds float* %tmp18879, i64 1
- %tmp18881 = getelementptr inbounds float* %tmp18880, i64 1
- %tmp18882 = getelementptr inbounds float* %tmp18881, i64 1
- %tmp18883 = getelementptr inbounds float* %tmp18882, i64 1
- %tmp18884 = getelementptr inbounds float* %tmp18883, i64 1
- %tmp18885 = getelementptr inbounds float* %tmp18884, i64 1
- %tmp18886 = getelementptr inbounds float* %tmp18885, i64 1
- %tmp18887 = getelementptr inbounds float* %tmp18886, i64 1
- %tmp18888 = getelementptr inbounds float* %tmp18887, i64 1
- %tmp18889 = getelementptr inbounds float* %tmp18888, i64 1
- %tmp18890 = getelementptr inbounds float* %tmp18889, i64 1
- %tmp18891 = getelementptr inbounds float* %tmp18890, i64 1
- %tmp18892 = getelementptr inbounds float* %tmp18891, i64 1
- %tmp18893 = getelementptr inbounds float* %tmp18892, i64 1
- %tmp18894 = getelementptr inbounds float* %tmp18893, i64 1
- %tmp18895 = getelementptr inbounds float* %tmp18894, i64 1
- %tmp18896 = getelementptr inbounds float* %tmp18895, i64 1
- %tmp18897 = getelementptr inbounds float* %tmp18896, i64 1
- %tmp18898 = getelementptr inbounds float* %tmp18897, i64 1
- %tmp18899 = getelementptr inbounds float* %tmp18898, i64 1
- %tmp18900 = getelementptr inbounds float* %tmp18899, i64 1
- %tmp18901 = getelementptr inbounds float* %tmp18900, i64 1
- %tmp18902 = getelementptr inbounds float* %tmp18901, i64 1
- %tmp18903 = getelementptr inbounds float* %tmp18902, i64 1
- %tmp18904 = getelementptr inbounds float* %tmp18903, i64 1
- %tmp18905 = getelementptr inbounds float* %tmp18904, i64 1
- %tmp18906 = getelementptr inbounds float* %tmp18905, i64 1
- %tmp18907 = getelementptr inbounds float* %tmp18906, i64 1
- %tmp18908 = getelementptr inbounds float* %tmp18907, i64 1
- %tmp18909 = getelementptr inbounds float* %tmp18908, i64 1
- %tmp18910 = getelementptr inbounds float* %tmp18909, i64 1
- %tmp18911 = getelementptr inbounds float* %tmp18910, i64 1
- %tmp18912 = getelementptr inbounds float* %tmp18911, i64 1
- %tmp18913 = getelementptr inbounds float* %tmp18912, i64 1
- %tmp18914 = getelementptr inbounds float* %tmp18913, i64 1
- %tmp18915 = getelementptr inbounds float* %tmp18914, i64 1
- %tmp18916 = getelementptr inbounds float* %tmp18915, i64 1
- %tmp18917 = getelementptr inbounds float* %tmp18916, i64 1
- %tmp18918 = getelementptr inbounds float* %tmp18917, i64 1
- %tmp18919 = getelementptr inbounds float* %tmp18918, i64 1
- %tmp18920 = getelementptr inbounds float* %tmp18919, i64 1
- %tmp18921 = getelementptr inbounds float* %tmp18920, i64 1
- %tmp18922 = getelementptr inbounds float* %tmp18921, i64 1
- %tmp18923 = getelementptr inbounds float* %tmp18922, i64 1
- %tmp18924 = getelementptr inbounds float* %tmp18923, i64 1
- %tmp18925 = getelementptr inbounds float* %tmp18924, i64 1
- %tmp18926 = getelementptr inbounds float* %tmp18925, i64 1
- %tmp18927 = getelementptr inbounds float* %tmp18926, i64 1
- %tmp18928 = getelementptr inbounds float* %tmp18927, i64 1
- %tmp18929 = getelementptr inbounds float* %tmp18928, i64 1
- %tmp18930 = getelementptr inbounds float* %tmp18929, i64 1
- %tmp18931 = getelementptr inbounds float* %tmp18930, i64 1
- %tmp18932 = getelementptr inbounds float* %tmp18931, i64 1
- %tmp18933 = getelementptr inbounds float* %tmp18932, i64 1
- %tmp18934 = getelementptr inbounds float* %tmp18933, i64 1
- %tmp18935 = getelementptr inbounds float* %tmp18934, i64 1
- %tmp18936 = getelementptr inbounds float* %tmp18935, i64 1
- %tmp18937 = getelementptr inbounds float* %tmp18936, i64 1
- %tmp18938 = getelementptr inbounds float* %tmp18937, i64 1
- %tmp18939 = getelementptr inbounds float* %tmp18938, i64 1
- %tmp18940 = getelementptr inbounds float* %tmp18939, i64 1
- %tmp18941 = getelementptr inbounds float* %tmp18940, i64 1
- %tmp18942 = getelementptr inbounds float* %tmp18941, i64 1
- %tmp18943 = getelementptr inbounds float* %tmp18942, i64 1
- %tmp18944 = getelementptr inbounds float* %tmp18943, i64 1
- %tmp18945 = getelementptr inbounds float* %tmp18944, i64 1
- %tmp18946 = getelementptr inbounds float* %tmp18945, i64 1
- %tmp18947 = getelementptr inbounds float* %tmp18946, i64 1
- %tmp18948 = getelementptr inbounds float* %tmp18947, i64 1
- %tmp18949 = getelementptr inbounds float* %tmp18948, i64 1
- %tmp18950 = getelementptr inbounds float* %tmp18949, i64 1
- %tmp18951 = getelementptr inbounds float* %tmp18950, i64 1
- %tmp18952 = getelementptr inbounds float* %tmp18951, i64 1
- %tmp18953 = getelementptr inbounds float* %tmp18952, i64 1
- %tmp18954 = getelementptr inbounds float* %tmp18953, i64 1
- %tmp18955 = getelementptr inbounds float* %tmp18954, i64 1
- %tmp18956 = getelementptr inbounds float* %tmp18955, i64 1
- %tmp18957 = getelementptr inbounds float* %tmp18956, i64 1
- %tmp18958 = getelementptr inbounds float* %tmp18957, i64 1
- %tmp18959 = getelementptr inbounds float* %tmp18958, i64 1
- %tmp18960 = getelementptr inbounds float* %tmp18959, i64 1
- %tmp18961 = getelementptr inbounds float* %tmp18960, i64 1
- %tmp18962 = getelementptr inbounds float* %tmp18961, i64 1
- %tmp18963 = getelementptr inbounds float* %tmp18962, i64 1
- %tmp18964 = getelementptr inbounds float* %tmp18963, i64 1
- %tmp18965 = getelementptr inbounds float* %tmp18964, i64 1
- %tmp18966 = getelementptr inbounds float* %tmp18965, i64 1
- %tmp18967 = getelementptr inbounds float* %tmp18966, i64 1
- %tmp18968 = getelementptr inbounds float* %tmp18967, i64 1
- %tmp18969 = getelementptr inbounds float* %tmp18968, i64 1
- %tmp18970 = getelementptr inbounds float* %tmp18969, i64 1
- %tmp18971 = getelementptr inbounds float* %tmp18970, i64 1
- %tmp18972 = getelementptr inbounds float* %tmp18971, i64 1
- %tmp18973 = getelementptr inbounds float* %tmp18972, i64 1
- %tmp18974 = getelementptr inbounds float* %tmp18973, i64 1
- %tmp18975 = getelementptr inbounds float* %tmp18974, i64 1
- %tmp18976 = getelementptr inbounds float* %tmp18975, i64 1
- %tmp18977 = getelementptr inbounds float* %tmp18976, i64 1
- %tmp18978 = getelementptr inbounds float* %tmp18977, i64 1
- %tmp18979 = getelementptr inbounds float* %tmp18978, i64 1
- %tmp18980 = getelementptr inbounds float* %tmp18979, i64 1
- %tmp18981 = getelementptr inbounds float* %tmp18980, i64 1
- %tmp18982 = getelementptr inbounds float* %tmp18981, i64 1
- %tmp18983 = getelementptr inbounds float* %tmp18982, i64 1
- %tmp18984 = getelementptr inbounds float* %tmp18983, i64 1
- %tmp18985 = getelementptr inbounds float* %tmp18984, i64 1
- %tmp18986 = getelementptr inbounds float* %tmp18985, i64 1
- %tmp18987 = getelementptr inbounds float* %tmp18986, i64 1
- %tmp18988 = getelementptr inbounds float* %tmp18987, i64 1
- %tmp18989 = getelementptr inbounds float* %tmp18988, i64 1
- %tmp18990 = getelementptr inbounds float* %tmp18989, i64 1
- %tmp18991 = getelementptr inbounds float* %tmp18990, i64 1
- %tmp18992 = getelementptr inbounds float* %tmp18991, i64 1
- %tmp18993 = getelementptr inbounds float* %tmp18992, i64 1
- %tmp18994 = getelementptr inbounds float* %tmp18993, i64 1
- %tmp18995 = getelementptr inbounds float* %tmp18994, i64 1
- %tmp18996 = getelementptr inbounds float* %tmp18995, i64 1
- %tmp18997 = getelementptr inbounds float* %tmp18996, i64 1
- %tmp18998 = getelementptr inbounds float* %tmp18997, i64 1
- %tmp18999 = getelementptr inbounds float* %tmp18998, i64 1
- %tmp19000 = getelementptr inbounds float* %tmp18999, i64 1
- %tmp19001 = getelementptr inbounds float* %tmp19000, i64 1
- %tmp19002 = getelementptr inbounds float* %tmp19001, i64 1
- %tmp19003 = getelementptr inbounds float* %tmp19002, i64 1
- %tmp19004 = getelementptr inbounds float* %tmp19003, i64 1
- %tmp19005 = getelementptr inbounds float* %tmp19004, i64 1
- %tmp19006 = getelementptr inbounds float* %tmp19005, i64 1
- %tmp19007 = getelementptr inbounds float* %tmp19006, i64 1
- %tmp19008 = getelementptr inbounds float* %tmp19007, i64 1
- %tmp19009 = getelementptr inbounds float* %tmp19008, i64 1
- %tmp19010 = getelementptr inbounds float* %tmp19009, i64 1
- %tmp19011 = getelementptr inbounds float* %tmp19010, i64 1
- %tmp19012 = getelementptr inbounds float* %tmp19011, i64 1
- %tmp19013 = getelementptr inbounds float* %tmp19012, i64 1
- %tmp19014 = getelementptr inbounds float* %tmp19013, i64 1
- %tmp19015 = getelementptr inbounds float* %tmp19014, i64 1
- %tmp19016 = getelementptr inbounds float* %tmp19015, i64 1
- %tmp19017 = getelementptr inbounds float* %tmp19016, i64 1
- %tmp19018 = getelementptr inbounds float* %tmp19017, i64 1
- %tmp19019 = getelementptr inbounds float* %tmp19018, i64 1
- %tmp19020 = getelementptr inbounds float* %tmp19019, i64 1
- %tmp19021 = getelementptr inbounds float* %tmp19020, i64 1
- %tmp19022 = getelementptr inbounds float* %tmp19021, i64 1
- %tmp19023 = getelementptr inbounds float* %tmp19022, i64 1
- %tmp19024 = getelementptr inbounds float* %tmp19023, i64 1
- %tmp19025 = getelementptr inbounds float* %tmp19024, i64 1
- %tmp19026 = getelementptr inbounds float* %tmp19025, i64 1
- %tmp19027 = getelementptr inbounds float* %tmp19026, i64 1
- %tmp19028 = getelementptr inbounds float* %tmp19027, i64 1
- %tmp19029 = getelementptr inbounds float* %tmp19028, i64 1
- %tmp19030 = getelementptr inbounds float* %tmp19029, i64 1
- %tmp19031 = getelementptr inbounds float* %tmp19030, i64 1
- %tmp19032 = getelementptr inbounds float* %tmp19031, i64 1
- %tmp19033 = getelementptr inbounds float* %tmp19032, i64 1
- %tmp19034 = getelementptr inbounds float* %tmp19033, i64 1
- %tmp19035 = getelementptr inbounds float* %tmp19034, i64 1
- %tmp19036 = getelementptr inbounds float* %tmp19035, i64 1
- %tmp19037 = getelementptr inbounds float* %tmp19036, i64 1
- %tmp19038 = getelementptr inbounds float* %tmp19037, i64 1
- %tmp19039 = getelementptr inbounds float* %tmp19038, i64 1
- %tmp19040 = getelementptr inbounds float* %tmp19039, i64 1
- %tmp19041 = getelementptr inbounds float* %tmp19040, i64 1
- %tmp19042 = getelementptr inbounds float* %tmp19041, i64 1
- %tmp19043 = getelementptr inbounds float* %tmp19042, i64 1
- %tmp19044 = getelementptr inbounds float* %tmp19043, i64 1
- %tmp19045 = getelementptr inbounds float* %tmp19044, i64 1
- %tmp19046 = getelementptr inbounds float* %tmp19045, i64 1
- %tmp19047 = getelementptr inbounds float* %tmp19046, i64 1
- %tmp19048 = getelementptr inbounds float* %tmp19047, i64 1
- %tmp19049 = getelementptr inbounds float* %tmp19048, i64 1
- %tmp19050 = getelementptr inbounds float* %tmp19049, i64 1
- %tmp19051 = getelementptr inbounds float* %tmp19050, i64 1
- %tmp19052 = getelementptr inbounds float* %tmp19051, i64 1
- %tmp19053 = getelementptr inbounds float* %tmp19052, i64 1
- %tmp19054 = getelementptr inbounds float* %tmp19053, i64 1
- %tmp19055 = getelementptr inbounds float* %tmp19054, i64 1
- %tmp19056 = getelementptr inbounds float* %tmp19055, i64 1
- %tmp19057 = getelementptr inbounds float* %tmp19056, i64 1
- %tmp19058 = getelementptr inbounds float* %tmp19057, i64 1
- %tmp19059 = getelementptr inbounds float* %tmp19058, i64 1
- %tmp19060 = getelementptr inbounds float* %tmp19059, i64 1
- %tmp19061 = getelementptr inbounds float* %tmp19060, i64 1
- %tmp19062 = getelementptr inbounds float* %tmp19061, i64 1
- %tmp19063 = getelementptr inbounds float* %tmp19062, i64 1
- %tmp19064 = getelementptr inbounds float* %tmp19063, i64 1
- %tmp19065 = getelementptr inbounds float* %tmp19064, i64 1
- %tmp19066 = getelementptr inbounds float* %tmp19065, i64 1
- %tmp19067 = getelementptr inbounds float* %tmp19066, i64 1
- %tmp19068 = getelementptr inbounds float* %tmp19067, i64 1
- %tmp19069 = getelementptr inbounds float* %tmp19068, i64 1
- %tmp19070 = getelementptr inbounds float* %tmp19069, i64 1
- %tmp19071 = getelementptr inbounds float* %tmp19070, i64 1
- %tmp19072 = getelementptr inbounds float* %tmp19071, i64 1
- %tmp19073 = getelementptr inbounds float* %tmp19072, i64 1
- %tmp19074 = getelementptr inbounds float* %tmp19073, i64 1
- %tmp19075 = getelementptr inbounds float* %tmp19074, i64 1
- %tmp19076 = getelementptr inbounds float* %tmp19075, i64 1
- %tmp19077 = getelementptr inbounds float* %tmp19076, i64 1
- %tmp19078 = getelementptr inbounds float* %tmp19077, i64 1
- %tmp19079 = getelementptr inbounds float* %tmp19078, i64 1
- %tmp19080 = getelementptr inbounds float* %tmp19079, i64 1
- %tmp19081 = getelementptr inbounds float* %tmp19080, i64 1
- %tmp19082 = getelementptr inbounds float* %tmp19081, i64 1
- %tmp19083 = getelementptr inbounds float* %tmp19082, i64 1
- %tmp19084 = getelementptr inbounds float* %tmp19083, i64 1
- %tmp19085 = getelementptr inbounds float* %tmp19084, i64 1
- %tmp19086 = getelementptr inbounds float* %tmp19085, i64 1
- %tmp19087 = getelementptr inbounds float* %tmp19086, i64 1
- %tmp19088 = getelementptr inbounds float* %tmp19087, i64 1
- %tmp19089 = getelementptr inbounds float* %tmp19088, i64 1
- %tmp19090 = getelementptr inbounds float* %tmp19089, i64 1
- %tmp19091 = getelementptr inbounds float* %tmp19090, i64 1
- %tmp19092 = getelementptr inbounds float* %tmp19091, i64 1
- %tmp19093 = getelementptr inbounds float* %tmp19092, i64 1
- %tmp19094 = getelementptr inbounds float* %tmp19093, i64 1
- %tmp19095 = getelementptr inbounds float* %tmp19094, i64 1
- %tmp19096 = getelementptr inbounds float* %tmp19095, i64 1
- %tmp19097 = getelementptr inbounds float* %tmp19096, i64 1
- %tmp19098 = getelementptr inbounds float* %tmp19097, i64 1
- %tmp19099 = getelementptr inbounds float* %tmp19098, i64 1
- %tmp19100 = getelementptr inbounds float* %tmp19099, i64 1
- %tmp19101 = getelementptr inbounds float* %tmp19100, i64 1
- %tmp19102 = getelementptr inbounds float* %tmp19101, i64 1
- %tmp19103 = getelementptr inbounds float* %tmp19102, i64 1
- %tmp19104 = getelementptr inbounds float* %tmp19103, i64 1
- %tmp19105 = getelementptr inbounds float* %tmp19104, i64 1
- %tmp19106 = getelementptr inbounds float* %tmp19105, i64 1
- %tmp19107 = getelementptr inbounds float* %tmp19106, i64 1
- %tmp19108 = getelementptr inbounds float* %tmp19107, i64 1
- %tmp19109 = getelementptr inbounds float* %tmp19108, i64 1
- %tmp19110 = getelementptr inbounds float* %tmp19109, i64 1
- %tmp19111 = getelementptr inbounds float* %tmp19110, i64 1
- %tmp19112 = getelementptr inbounds float* %tmp19111, i64 1
- %tmp19113 = getelementptr inbounds float* %tmp19112, i64 1
- %tmp19114 = getelementptr inbounds float* %tmp19113, i64 1
- %tmp19115 = getelementptr inbounds float* %tmp19114, i64 1
- %tmp19116 = getelementptr inbounds float* %tmp19115, i64 1
- %tmp19117 = getelementptr inbounds float* %tmp19116, i64 1
- %tmp19118 = getelementptr inbounds float* %tmp19117, i64 1
- %tmp19119 = getelementptr inbounds float* %tmp19118, i64 1
- %tmp19120 = getelementptr inbounds float* %tmp19119, i64 1
- %tmp19121 = getelementptr inbounds float* %tmp19120, i64 1
- %tmp19122 = getelementptr inbounds float* %tmp19121, i64 1
- %tmp19123 = getelementptr inbounds float* %tmp19122, i64 1
- %tmp19124 = getelementptr inbounds float* %tmp19123, i64 1
- %tmp19125 = getelementptr inbounds float* %tmp19124, i64 1
- %tmp19126 = getelementptr inbounds float* %tmp19125, i64 1
- %tmp19127 = getelementptr inbounds float* %tmp19126, i64 1
- %tmp19128 = getelementptr inbounds float* %tmp19127, i64 1
- %tmp19129 = getelementptr inbounds float* %tmp19128, i64 1
- %tmp19130 = getelementptr inbounds float* %tmp19129, i64 1
- %tmp19131 = getelementptr inbounds float* %tmp19130, i64 1
- %tmp19132 = getelementptr inbounds float* %tmp19131, i64 1
- %tmp19133 = getelementptr inbounds float* %tmp19132, i64 1
- %tmp19134 = getelementptr inbounds float* %tmp19133, i64 1
- %tmp19135 = getelementptr inbounds float* %tmp19134, i64 1
- %tmp19136 = getelementptr inbounds float* %tmp19135, i64 1
- %tmp19137 = getelementptr inbounds float* %tmp19136, i64 1
- %tmp19138 = getelementptr inbounds float* %tmp19137, i64 1
- %tmp19139 = getelementptr inbounds float* %tmp19138, i64 1
- %tmp19140 = getelementptr inbounds float* %tmp19139, i64 1
- %tmp19141 = getelementptr inbounds float* %tmp19140, i64 1
- %tmp19142 = getelementptr inbounds float* %tmp19141, i64 1
- %tmp19143 = getelementptr inbounds float* %tmp19142, i64 1
- %tmp19144 = getelementptr inbounds float* %tmp19143, i64 1
- %tmp19145 = getelementptr inbounds float* %tmp19144, i64 1
- %tmp19146 = getelementptr inbounds float* %tmp19145, i64 1
- %tmp19147 = getelementptr inbounds float* %tmp19146, i64 1
- %tmp19148 = getelementptr inbounds float* %tmp19147, i64 1
- %tmp19149 = getelementptr inbounds float* %tmp19148, i64 1
- %tmp19150 = getelementptr inbounds float* %tmp19149, i64 1
- %tmp19151 = getelementptr inbounds float* %tmp19150, i64 1
- %tmp19152 = getelementptr inbounds float* %tmp19151, i64 1
- %tmp19153 = getelementptr inbounds float* %tmp19152, i64 1
- %tmp19154 = getelementptr inbounds float* %tmp19153, i64 1
- %tmp19155 = getelementptr inbounds float* %tmp19154, i64 1
- %tmp19156 = getelementptr inbounds float* %tmp19155, i64 1
- %tmp19157 = getelementptr inbounds float* %tmp19156, i64 1
- %tmp19158 = getelementptr inbounds float* %tmp19157, i64 1
- %tmp19159 = getelementptr inbounds float* %tmp19158, i64 1
- %tmp19160 = getelementptr inbounds float* %tmp19159, i64 1
- %tmp19161 = getelementptr inbounds float* %tmp19160, i64 1
- %tmp19162 = getelementptr inbounds float* %tmp19161, i64 1
- %tmp19163 = getelementptr inbounds float* %tmp19162, i64 1
- %tmp19164 = getelementptr inbounds float* %tmp19163, i64 1
- %tmp19165 = getelementptr inbounds float* %tmp19164, i64 1
- %tmp19166 = getelementptr inbounds float* %tmp19165, i64 1
- %tmp19167 = getelementptr inbounds float* %tmp19166, i64 1
- %tmp19168 = getelementptr inbounds float* %tmp19167, i64 1
- %tmp19169 = getelementptr inbounds float* %tmp19168, i64 1
- %tmp19170 = getelementptr inbounds float* %tmp19169, i64 1
- %tmp19171 = getelementptr inbounds float* %tmp19170, i64 1
- %tmp19172 = getelementptr inbounds float* %tmp19171, i64 1
- %tmp19173 = getelementptr inbounds float* %tmp19172, i64 1
- %tmp19174 = getelementptr inbounds float* %tmp19173, i64 1
- %tmp19175 = getelementptr inbounds float* %tmp19174, i64 1
- %tmp19176 = getelementptr inbounds float* %tmp19175, i64 1
- %tmp19177 = getelementptr inbounds float* %tmp19176, i64 1
- %tmp19178 = getelementptr inbounds float* %tmp19177, i64 1
- %tmp19179 = getelementptr inbounds float* %tmp19178, i64 1
- %tmp19180 = getelementptr inbounds float* %tmp19179, i64 1
- %tmp19181 = getelementptr inbounds float* %tmp19180, i64 1
- %tmp19182 = getelementptr inbounds float* %tmp19181, i64 1
- %tmp19183 = getelementptr inbounds float* %tmp19182, i64 1
- %tmp19184 = getelementptr inbounds float* %tmp19183, i64 1
- %tmp19185 = getelementptr inbounds float* %tmp19184, i64 1
- %tmp19186 = getelementptr inbounds float* %tmp19185, i64 1
- %tmp19187 = getelementptr inbounds float* %tmp19186, i64 1
- %tmp19188 = getelementptr inbounds float* %tmp19187, i64 1
- %tmp19189 = getelementptr inbounds float* %tmp19188, i64 1
- %tmp19190 = getelementptr inbounds float* %tmp19189, i64 1
- %tmp19191 = getelementptr inbounds float* %tmp19190, i64 1
- %tmp19192 = getelementptr inbounds float* %tmp19191, i64 1
- %tmp19193 = getelementptr inbounds float* %tmp19192, i64 1
- %tmp19194 = getelementptr inbounds float* %tmp19193, i64 1
- %tmp19195 = getelementptr inbounds float* %tmp19194, i64 1
- %tmp19196 = getelementptr inbounds float* %tmp19195, i64 1
- %tmp19197 = getelementptr inbounds float* %tmp19196, i64 1
- %tmp19198 = getelementptr inbounds float* %tmp19197, i64 1
- %tmp19199 = getelementptr inbounds float* %tmp19198, i64 1
- %tmp19200 = getelementptr inbounds float* %tmp19199, i64 1
- %tmp19201 = getelementptr inbounds float* %tmp19200, i64 1
- %tmp19202 = getelementptr inbounds float* %tmp19201, i64 1
- %tmp19203 = getelementptr inbounds float* %tmp19202, i64 1
- %tmp19204 = getelementptr inbounds float* %tmp19203, i64 1
- %tmp19205 = getelementptr inbounds float* %tmp19204, i64 1
- %tmp19206 = getelementptr inbounds float* %tmp19205, i64 1
- %tmp19207 = getelementptr inbounds float* %tmp19206, i64 1
- %tmp19208 = getelementptr inbounds float* %tmp19207, i64 1
- %tmp19209 = getelementptr inbounds float* %tmp19208, i64 1
- %tmp19210 = getelementptr inbounds float* %tmp19209, i64 1
- %tmp19211 = getelementptr inbounds float* %tmp19210, i64 1
- %tmp19212 = getelementptr inbounds float* %tmp19211, i64 1
- %tmp19213 = getelementptr inbounds float* %tmp19212, i64 1
- %tmp19214 = getelementptr inbounds float* %tmp19213, i64 1
- %tmp19215 = getelementptr inbounds float* %tmp19214, i64 1
- %tmp19216 = getelementptr inbounds float* %tmp19215, i64 1
- %tmp19217 = getelementptr inbounds float* %tmp19216, i64 1
- %tmp19218 = getelementptr inbounds float* %tmp19217, i64 1
- %tmp19219 = getelementptr inbounds float* %tmp19218, i64 1
- %tmp19220 = getelementptr inbounds float* %tmp19219, i64 1
- %tmp19221 = getelementptr inbounds float* %tmp19220, i64 1
- %tmp19222 = getelementptr inbounds float* %tmp19221, i64 1
- %tmp19223 = getelementptr inbounds float* %tmp19222, i64 1
- %tmp19224 = getelementptr inbounds float* %tmp19223, i64 1
- %tmp19225 = getelementptr inbounds float* %tmp19224, i64 1
- %tmp19226 = getelementptr inbounds float* %tmp19225, i64 1
- %tmp19227 = getelementptr inbounds float* %tmp19226, i64 1
- %tmp19228 = getelementptr inbounds float* %tmp19227, i64 1
- %tmp19229 = getelementptr inbounds float* %tmp19228, i64 1
- %tmp19230 = getelementptr inbounds float* %tmp19229, i64 1
- %tmp19231 = getelementptr inbounds float* %tmp19230, i64 1
- %tmp19232 = getelementptr inbounds float* %tmp19231, i64 1
- %tmp19233 = getelementptr inbounds float* %tmp19232, i64 1
- %tmp19234 = getelementptr inbounds float* %tmp19233, i64 1
- %tmp19235 = getelementptr inbounds float* %tmp19234, i64 1
- %tmp19236 = getelementptr inbounds float* %tmp19235, i64 1
- %tmp19237 = getelementptr inbounds float* %tmp19236, i64 1
- %tmp19238 = getelementptr inbounds float* %tmp19237, i64 1
- %tmp19239 = getelementptr inbounds float* %tmp19238, i64 1
- %tmp19240 = getelementptr inbounds float* %tmp19239, i64 1
- %tmp19241 = getelementptr inbounds float* %tmp19240, i64 1
- %tmp19242 = getelementptr inbounds float* %tmp19241, i64 1
- %tmp19243 = getelementptr inbounds float* %tmp19242, i64 1
- %tmp19244 = getelementptr inbounds float* %tmp19243, i64 1
- %tmp19245 = getelementptr inbounds float* %tmp19244, i64 1
- %tmp19246 = getelementptr inbounds float* %tmp19245, i64 1
- %tmp19247 = getelementptr inbounds float* %tmp19246, i64 1
- %tmp19248 = getelementptr inbounds float* %tmp19247, i64 1
- %tmp19249 = getelementptr inbounds float* %tmp19248, i64 1
- %tmp19250 = getelementptr inbounds float* %tmp19249, i64 1
- %tmp19251 = getelementptr inbounds float* %tmp19250, i64 1
- %tmp19252 = getelementptr inbounds float* %tmp19251, i64 1
- %tmp19253 = getelementptr inbounds float* %tmp19252, i64 1
- %tmp19254 = getelementptr inbounds float* %tmp19253, i64 1
- %tmp19255 = getelementptr inbounds float* %tmp19254, i64 1
- %tmp19256 = getelementptr inbounds float* %tmp19255, i64 1
- %tmp19257 = getelementptr inbounds float* %tmp19256, i64 1
- %tmp19258 = getelementptr inbounds float* %tmp19257, i64 1
- %tmp19259 = getelementptr inbounds float* %tmp19258, i64 1
- %tmp19260 = getelementptr inbounds float* %tmp19259, i64 1
- %tmp19261 = getelementptr inbounds float* %tmp19260, i64 1
- %tmp19262 = getelementptr inbounds float* %tmp19261, i64 1
- %tmp19263 = getelementptr inbounds float* %tmp19262, i64 1
- %tmp19264 = getelementptr inbounds float* %tmp19263, i64 1
- %tmp19265 = getelementptr inbounds float* %tmp19264, i64 1
- %tmp19266 = getelementptr inbounds float* %tmp19265, i64 1
- %tmp19267 = getelementptr inbounds float* %tmp19266, i64 1
- %tmp19268 = getelementptr inbounds float* %tmp19267, i64 1
- %tmp19269 = getelementptr inbounds float* %tmp19268, i64 1
- %tmp19270 = getelementptr inbounds float* %tmp19269, i64 1
- %tmp19271 = getelementptr inbounds float* %tmp19270, i64 1
- %tmp19272 = getelementptr inbounds float* %tmp19271, i64 1
- %tmp19273 = getelementptr inbounds float* %tmp19272, i64 1
- %tmp19274 = getelementptr inbounds float* %tmp19273, i64 1
- %tmp19275 = getelementptr inbounds float* %tmp19274, i64 1
- %tmp19276 = getelementptr inbounds float* %tmp19275, i64 1
- %tmp19277 = getelementptr inbounds float* %tmp19276, i64 1
- %tmp19278 = getelementptr inbounds float* %tmp19277, i64 1
- %tmp19279 = getelementptr inbounds float* %tmp19278, i64 1
- %tmp19280 = getelementptr inbounds float* %tmp19279, i64 1
- %tmp19281 = getelementptr inbounds float* %tmp19280, i64 1
- %tmp19282 = getelementptr inbounds float* %tmp19281, i64 1
- %tmp19283 = getelementptr inbounds float* %tmp19282, i64 1
- %tmp19284 = getelementptr inbounds float* %tmp19283, i64 1
- %tmp19285 = getelementptr inbounds float* %tmp19284, i64 1
- %tmp19286 = getelementptr inbounds float* %tmp19285, i64 1
- %tmp19287 = getelementptr inbounds float* %tmp19286, i64 1
- %tmp19288 = getelementptr inbounds float* %tmp19287, i64 1
- %tmp19289 = getelementptr inbounds float* %tmp19288, i64 1
- %tmp19290 = getelementptr inbounds float* %tmp19289, i64 1
- %tmp19291 = getelementptr inbounds float* %tmp19290, i64 1
- %tmp19292 = getelementptr inbounds float* %tmp19291, i64 1
- %tmp19293 = getelementptr inbounds float* %tmp19292, i64 1
- %tmp19294 = getelementptr inbounds float* %tmp19293, i64 1
- %tmp19295 = getelementptr inbounds float* %tmp19294, i64 1
- %tmp19296 = getelementptr inbounds float* %tmp19295, i64 1
- %tmp19297 = getelementptr inbounds float* %tmp19296, i64 1
- %tmp19298 = getelementptr inbounds float* %tmp19297, i64 1
- %tmp19299 = getelementptr inbounds float* %tmp19298, i64 1
- %tmp19300 = getelementptr inbounds float* %tmp19299, i64 1
- %tmp19301 = getelementptr inbounds float* %tmp19300, i64 1
- %tmp19302 = getelementptr inbounds float* %tmp19301, i64 1
- %tmp19303 = getelementptr inbounds float* %tmp19302, i64 1
- %tmp19304 = getelementptr inbounds float* %tmp19303, i64 1
- %tmp19305 = getelementptr inbounds float* %tmp19304, i64 1
- %tmp19306 = getelementptr inbounds float* %tmp19305, i64 1
- %tmp19307 = getelementptr inbounds float* %tmp19306, i64 1
- %tmp19308 = getelementptr inbounds float* %tmp19307, i64 1
- %tmp19309 = getelementptr inbounds float* %tmp19308, i64 1
- %tmp19310 = getelementptr inbounds float* %tmp19309, i64 1
- %tmp19311 = getelementptr inbounds float* %tmp19310, i64 1
- %tmp19312 = getelementptr inbounds float* %tmp19311, i64 1
- %tmp19313 = getelementptr inbounds float* %tmp19312, i64 1
- %tmp19314 = getelementptr inbounds float* %tmp19313, i64 1
- %tmp19315 = getelementptr inbounds float* %tmp19314, i64 1
- %tmp19316 = getelementptr inbounds float* %tmp19315, i64 1
- %tmp19317 = getelementptr inbounds float* %tmp19316, i64 1
- %tmp19318 = getelementptr inbounds float* %tmp19317, i64 1
- %tmp19319 = getelementptr inbounds float* %tmp19318, i64 1
- %tmp19320 = getelementptr inbounds float* %tmp19319, i64 1
- %tmp19321 = getelementptr inbounds float* %tmp19320, i64 1
- %tmp19322 = getelementptr inbounds float* %tmp19321, i64 1
- %tmp19323 = getelementptr inbounds float* %tmp19322, i64 1
- %tmp19324 = getelementptr inbounds float* %tmp19323, i64 1
- %tmp19325 = getelementptr inbounds float* %tmp19324, i64 1
- %tmp19326 = getelementptr inbounds float* %tmp19325, i64 1
- %tmp19327 = getelementptr inbounds float* %tmp19326, i64 1
- %tmp19328 = getelementptr inbounds float* %tmp19327, i64 1
- %tmp19329 = getelementptr inbounds float* %tmp19328, i64 1
- %tmp19330 = getelementptr inbounds float* %tmp19329, i64 1
- %tmp19331 = getelementptr inbounds float* %tmp19330, i64 1
- %tmp19332 = getelementptr inbounds float* %tmp19331, i64 1
- %tmp19333 = getelementptr inbounds float* %tmp19332, i64 1
- %tmp19334 = getelementptr inbounds float* %tmp19333, i64 1
- %tmp19335 = getelementptr inbounds float* %tmp19334, i64 1
- %tmp19336 = getelementptr inbounds float* %tmp19335, i64 1
- %tmp19337 = getelementptr inbounds float* %tmp19336, i64 1
- %tmp19338 = getelementptr inbounds float* %tmp19337, i64 1
- %tmp19339 = getelementptr inbounds float* %tmp19338, i64 1
- %tmp19340 = getelementptr inbounds float* %tmp19339, i64 1
- %tmp19341 = getelementptr inbounds float* %tmp19340, i64 1
- %tmp19342 = getelementptr inbounds float* %tmp19341, i64 1
- %tmp19343 = getelementptr inbounds float* %tmp19342, i64 1
- %tmp19344 = getelementptr inbounds float* %tmp19343, i64 1
- %tmp19345 = getelementptr inbounds float* %tmp19344, i64 1
- %tmp19346 = getelementptr inbounds float* %tmp19345, i64 1
- %tmp19347 = getelementptr inbounds float* %tmp19346, i64 1
- %tmp19348 = getelementptr inbounds float* %tmp19347, i64 1
- %tmp19349 = getelementptr inbounds float* %tmp19348, i64 1
- %tmp19350 = getelementptr inbounds float* %tmp19349, i64 1
- %tmp19351 = getelementptr inbounds float* %tmp19350, i64 1
- %tmp19352 = getelementptr inbounds float* %tmp19351, i64 1
- %tmp19353 = getelementptr inbounds float* %tmp19352, i64 1
- %tmp19354 = getelementptr inbounds float* %tmp19353, i64 1
- %tmp19355 = getelementptr inbounds float* %tmp19354, i64 1
- %tmp19356 = getelementptr inbounds float* %tmp19355, i64 1
- %tmp19357 = getelementptr inbounds float* %tmp19356, i64 1
- %tmp19358 = getelementptr inbounds float* %tmp19357, i64 1
- %tmp19359 = getelementptr inbounds float* %tmp19358, i64 1
- %tmp19360 = getelementptr inbounds float* %tmp19359, i64 1
- %tmp19361 = getelementptr inbounds float* %tmp19360, i64 1
- %tmp19362 = getelementptr inbounds float* %tmp19361, i64 1
- %tmp19363 = getelementptr inbounds float* %tmp19362, i64 1
- %tmp19364 = getelementptr inbounds float* %tmp19363, i64 1
- %tmp19365 = getelementptr inbounds float* %tmp19364, i64 1
- %tmp19366 = getelementptr inbounds float* %tmp19365, i64 1
- %tmp19367 = getelementptr inbounds float* %tmp19366, i64 1
- %tmp19368 = getelementptr inbounds float* %tmp19367, i64 1
- %tmp19369 = getelementptr inbounds float* %tmp19368, i64 1
- %tmp19370 = getelementptr inbounds float* %tmp19369, i64 1
- %tmp19371 = getelementptr inbounds float* %tmp19370, i64 1
- %tmp19372 = getelementptr inbounds float* %tmp19371, i64 1
- %tmp19373 = getelementptr inbounds float* %tmp19372, i64 1
- %tmp19374 = getelementptr inbounds float* %tmp19373, i64 1
- %tmp19375 = getelementptr inbounds float* %tmp19374, i64 1
- %tmp19376 = getelementptr inbounds float* %tmp19375, i64 1
- %tmp19377 = getelementptr inbounds float* %tmp19376, i64 1
- %tmp19378 = getelementptr inbounds float* %tmp19377, i64 1
- %tmp19379 = getelementptr inbounds float* %tmp19378, i64 1
- %tmp19380 = getelementptr inbounds float* %tmp19379, i64 1
- %tmp19381 = getelementptr inbounds float* %tmp19380, i64 1
- %tmp19382 = getelementptr inbounds float* %tmp19381, i64 1
- %tmp19383 = getelementptr inbounds float* %tmp19382, i64 1
- %tmp19384 = getelementptr inbounds float* %tmp19383, i64 1
- %tmp19385 = getelementptr inbounds float* %tmp19384, i64 1
- %tmp19386 = getelementptr inbounds float* %tmp19385, i64 1
- %tmp19387 = getelementptr inbounds float* %tmp19386, i64 1
- %tmp19388 = getelementptr inbounds float* %tmp19387, i64 1
- %tmp19389 = getelementptr inbounds float* %tmp19388, i64 1
- %tmp19390 = getelementptr inbounds float* %tmp19389, i64 1
- %tmp19391 = getelementptr inbounds float* %tmp19390, i64 1
- %tmp19392 = getelementptr inbounds float* %tmp19391, i64 1
- %tmp19393 = getelementptr inbounds float* %tmp19392, i64 1
- %tmp19394 = getelementptr inbounds float* %tmp19393, i64 1
- %tmp19395 = getelementptr inbounds float* %tmp19394, i64 1
- %tmp19396 = getelementptr inbounds float* %tmp19395, i64 1
- %tmp19397 = getelementptr inbounds float* %tmp19396, i64 1
- %tmp19398 = getelementptr inbounds float* %tmp19397, i64 1
- %tmp19399 = getelementptr inbounds float* %tmp19398, i64 1
- %tmp19400 = getelementptr inbounds float* %tmp19399, i64 1
- %tmp19401 = getelementptr inbounds float* %tmp19400, i64 1
- %tmp19402 = getelementptr inbounds float* %tmp19401, i64 1
- %tmp19403 = getelementptr inbounds float* %tmp19402, i64 1
- %tmp19404 = getelementptr inbounds float* %tmp19403, i64 1
- %tmp19405 = getelementptr inbounds float* %tmp19404, i64 1
- %tmp19406 = getelementptr inbounds float* %tmp19405, i64 1
- %tmp19407 = getelementptr inbounds float* %tmp19406, i64 1
- %tmp19408 = getelementptr inbounds float* %tmp19407, i64 1
- %tmp19409 = getelementptr inbounds float* %tmp19408, i64 1
- %tmp19410 = getelementptr inbounds float* %tmp19409, i64 1
- %tmp19411 = getelementptr inbounds float* %tmp19410, i64 1
- %tmp19412 = getelementptr inbounds float* %tmp19411, i64 1
- %tmp19413 = getelementptr inbounds float* %tmp19412, i64 1
- %tmp19414 = getelementptr inbounds float* %tmp19413, i64 1
- %tmp19415 = getelementptr inbounds float* %tmp19414, i64 1
- %tmp19416 = getelementptr inbounds float* %tmp19415, i64 1
- %tmp19417 = getelementptr inbounds float* %tmp19416, i64 1
- %tmp19418 = getelementptr inbounds float* %tmp19417, i64 1
- %tmp19419 = getelementptr inbounds float* %tmp19418, i64 1
- %tmp19420 = getelementptr inbounds float* %tmp19419, i64 1
- %tmp19421 = getelementptr inbounds float* %tmp19420, i64 1
- %tmp19422 = getelementptr inbounds float* %tmp19421, i64 1
- %tmp19423 = getelementptr inbounds float* %tmp19422, i64 1
- %tmp19424 = getelementptr inbounds float* %tmp19423, i64 1
- %tmp19425 = getelementptr inbounds float* %tmp19424, i64 1
- %tmp19426 = getelementptr inbounds float* %tmp19425, i64 1
- %tmp19427 = getelementptr inbounds float* %tmp19426, i64 1
- %tmp19428 = getelementptr inbounds float* %tmp19427, i64 1
- %tmp19429 = getelementptr inbounds float* %tmp19428, i64 1
- %tmp19430 = getelementptr inbounds float* %tmp19429, i64 1
- %tmp19431 = getelementptr inbounds float* %tmp19430, i64 1
- %tmp19432 = getelementptr inbounds float* %tmp19431, i64 1
- %tmp19433 = getelementptr inbounds float* %tmp19432, i64 1
- %tmp19434 = getelementptr inbounds float* %tmp19433, i64 1
- %tmp19435 = getelementptr inbounds float* %tmp19434, i64 1
- %tmp19436 = getelementptr inbounds float* %tmp19435, i64 1
- %tmp19437 = getelementptr inbounds float* %tmp19436, i64 1
- %tmp19438 = getelementptr inbounds float* %tmp19437, i64 1
- %tmp19439 = getelementptr inbounds float* %tmp19438, i64 1
- %tmp19440 = getelementptr inbounds float* %tmp19439, i64 1
- %tmp19441 = getelementptr inbounds float* %tmp19440, i64 1
- %tmp19442 = getelementptr inbounds float* %tmp19441, i64 1
- %tmp19443 = getelementptr inbounds float* %tmp19442, i64 1
- %tmp19444 = getelementptr inbounds float* %tmp19443, i64 1
- %tmp19445 = getelementptr inbounds float* %tmp19444, i64 1
- %tmp19446 = getelementptr inbounds float* %tmp19445, i64 1
- %tmp19447 = getelementptr inbounds float* %tmp19446, i64 1
- %tmp19448 = getelementptr inbounds float* %tmp19447, i64 1
- %tmp19449 = getelementptr inbounds float* %tmp19448, i64 1
- %tmp19450 = getelementptr inbounds float* %tmp19449, i64 1
- %tmp19451 = getelementptr inbounds float* %tmp19450, i64 1
- %tmp19452 = getelementptr inbounds float* %tmp19451, i64 1
- %tmp19453 = getelementptr inbounds float* %tmp19452, i64 1
- %tmp19454 = getelementptr inbounds float* %tmp19453, i64 1
- %tmp19455 = getelementptr inbounds float* %tmp19454, i64 1
- %tmp19456 = getelementptr inbounds float* %tmp19455, i64 1
- %tmp19457 = getelementptr inbounds float* %tmp19456, i64 1
- %tmp19458 = getelementptr inbounds float* %tmp19457, i64 1
- %tmp19459 = getelementptr inbounds float* %tmp19458, i64 1
- %tmp19460 = getelementptr inbounds float* %tmp19459, i64 1
- %tmp19461 = getelementptr inbounds float* %tmp19460, i64 1
- %tmp19462 = getelementptr inbounds float* %tmp19461, i64 1
- %tmp19463 = getelementptr inbounds float* %tmp19462, i64 1
- %tmp19464 = getelementptr inbounds float* %tmp19463, i64 1
- %tmp19465 = getelementptr inbounds float* %tmp19464, i64 1
- %tmp19466 = getelementptr inbounds float* %tmp19465, i64 1
- %tmp19467 = getelementptr inbounds float* %tmp19466, i64 1
- %tmp19468 = getelementptr inbounds float* %tmp19467, i64 1
- %tmp19469 = getelementptr inbounds float* %tmp19468, i64 1
- %tmp19470 = getelementptr inbounds float* %tmp19469, i64 1
- %tmp19471 = getelementptr inbounds float* %tmp19470, i64 1
- %tmp19472 = getelementptr inbounds float* %tmp19471, i64 1
- %tmp19473 = getelementptr inbounds float* %tmp19472, i64 1
- %tmp19474 = getelementptr inbounds float* %tmp19473, i64 1
- %tmp19475 = getelementptr inbounds float* %tmp19474, i64 1
- %tmp19476 = getelementptr inbounds float* %tmp19475, i64 1
- %tmp19477 = getelementptr inbounds float* %tmp19476, i64 1
- %tmp19478 = getelementptr inbounds float* %tmp19477, i64 1
- %tmp19479 = getelementptr inbounds float* %tmp19478, i64 1
- %tmp19480 = getelementptr inbounds float* %tmp19479, i64 1
- %tmp19481 = getelementptr inbounds float* %tmp19480, i64 1
- %tmp19482 = getelementptr inbounds float* %tmp19481, i64 1
- %tmp19483 = getelementptr inbounds float* %tmp19482, i64 1
- %tmp19484 = getelementptr inbounds float* %tmp19483, i64 1
- %tmp19485 = getelementptr inbounds float* %tmp19484, i64 1
- %tmp19486 = getelementptr inbounds float* %tmp19485, i64 1
- %tmp19487 = getelementptr inbounds float* %tmp19486, i64 1
- %tmp19488 = getelementptr inbounds float* %tmp19487, i64 1
- %tmp19489 = getelementptr inbounds float* %tmp19488, i64 1
- %tmp19490 = getelementptr inbounds float* %tmp19489, i64 1
- %tmp19491 = getelementptr inbounds float* %tmp19490, i64 1
- %tmp19492 = getelementptr inbounds float* %tmp19491, i64 1
- %tmp19493 = getelementptr inbounds float* %tmp19492, i64 1
- %tmp19494 = getelementptr inbounds float* %tmp19493, i64 1
- %tmp19495 = getelementptr inbounds float* %tmp19494, i64 1
- %tmp19496 = getelementptr inbounds float* %tmp19495, i64 1
- %tmp19497 = getelementptr inbounds float* %tmp19496, i64 1
- %tmp19498 = getelementptr inbounds float* %tmp19497, i64 1
- %tmp19499 = getelementptr inbounds float* %tmp19498, i64 1
- %tmp19500 = getelementptr inbounds float* %tmp19499, i64 1
- %tmp19501 = getelementptr inbounds float* %tmp19500, i64 1
- %tmp19502 = getelementptr inbounds float* %tmp19501, i64 1
- %tmp19503 = getelementptr inbounds float* %tmp19502, i64 1
- %tmp19504 = getelementptr inbounds float* %tmp19503, i64 1
- %tmp19505 = getelementptr inbounds float* %tmp19504, i64 1
- %tmp19506 = getelementptr inbounds float* %tmp19505, i64 1
- %tmp19507 = getelementptr inbounds float* %tmp19506, i64 1
- %tmp19508 = getelementptr inbounds float* %tmp19507, i64 1
- %tmp19509 = getelementptr inbounds float* %tmp19508, i64 1
- %tmp19510 = getelementptr inbounds float* %tmp19509, i64 1
- %tmp19511 = getelementptr inbounds float* %tmp19510, i64 1
- %tmp19512 = getelementptr inbounds float* %tmp19511, i64 1
- %tmp19513 = getelementptr inbounds float* %tmp19512, i64 1
- %tmp19514 = getelementptr inbounds float* %tmp19513, i64 1
- %tmp19515 = getelementptr inbounds float* %tmp19514, i64 1
- %tmp19516 = getelementptr inbounds float* %tmp19515, i64 1
- %tmp19517 = getelementptr inbounds float* %tmp19516, i64 1
- %tmp19518 = getelementptr inbounds float* %tmp19517, i64 1
- %tmp19519 = getelementptr inbounds float* %tmp19518, i64 1
- %tmp19520 = getelementptr inbounds float* %tmp19519, i64 1
- %tmp19521 = getelementptr inbounds float* %tmp19520, i64 1
- %tmp19522 = getelementptr inbounds float* %tmp19521, i64 1
- %tmp19523 = getelementptr inbounds float* %tmp19522, i64 1
- %tmp19524 = getelementptr inbounds float* %tmp19523, i64 1
- %tmp19525 = getelementptr inbounds float* %tmp19524, i64 1
- %tmp19526 = getelementptr inbounds float* %tmp19525, i64 1
- %tmp19527 = getelementptr inbounds float* %tmp19526, i64 1
- %tmp19528 = getelementptr inbounds float* %tmp19527, i64 1
- %tmp19529 = getelementptr inbounds float* %tmp19528, i64 1
- %tmp19530 = getelementptr inbounds float* %tmp19529, i64 1
- %tmp19531 = getelementptr inbounds float* %tmp19530, i64 1
- %tmp19532 = getelementptr inbounds float* %tmp19531, i64 1
- %tmp19533 = getelementptr inbounds float* %tmp19532, i64 1
- %tmp19534 = getelementptr inbounds float* %tmp19533, i64 1
- %tmp19535 = getelementptr inbounds float* %tmp19534, i64 1
- %tmp19536 = getelementptr inbounds float* %tmp19535, i64 1
- %tmp19537 = getelementptr inbounds float* %tmp19536, i64 1
- %tmp19538 = getelementptr inbounds float* %tmp19537, i64 1
- %tmp19539 = getelementptr inbounds float* %tmp19538, i64 1
- %tmp19540 = getelementptr inbounds float* %tmp19539, i64 1
- %tmp19541 = getelementptr inbounds float* %tmp19540, i64 1
- %tmp19542 = getelementptr inbounds float* %tmp19541, i64 1
- %tmp19543 = getelementptr inbounds float* %tmp19542, i64 1
- %tmp19544 = getelementptr inbounds float* %tmp19543, i64 1
- %tmp19545 = getelementptr inbounds float* %tmp19544, i64 1
- %tmp19546 = getelementptr inbounds float* %tmp19545, i64 1
- %tmp19547 = getelementptr inbounds float* %tmp19546, i64 1
- %tmp19548 = getelementptr inbounds float* %tmp19547, i64 1
- %tmp19549 = getelementptr inbounds float* %tmp19548, i64 1
- %tmp19550 = getelementptr inbounds float* %tmp19549, i64 1
- %tmp19551 = getelementptr inbounds float* %tmp19550, i64 1
- %tmp19552 = getelementptr inbounds float* %tmp19551, i64 1
- %tmp19553 = getelementptr inbounds float* %tmp19552, i64 1
- %tmp19554 = getelementptr inbounds float* %tmp19553, i64 1
- %tmp19555 = getelementptr inbounds float* %tmp19554, i64 1
- %tmp19556 = getelementptr inbounds float* %tmp19555, i64 1
- %tmp19557 = getelementptr inbounds float* %tmp19556, i64 1
- %tmp19558 = getelementptr inbounds float* %tmp19557, i64 1
- %tmp19559 = getelementptr inbounds float* %tmp19558, i64 1
- %tmp19560 = getelementptr inbounds float* %tmp19559, i64 1
- %tmp19561 = getelementptr inbounds float* %tmp19560, i64 1
- %tmp19562 = getelementptr inbounds float* %tmp19561, i64 1
- %tmp19563 = getelementptr inbounds float* %tmp19562, i64 1
- %tmp19564 = getelementptr inbounds float* %tmp19563, i64 1
- %tmp19565 = getelementptr inbounds float* %tmp19564, i64 1
- %tmp19566 = getelementptr inbounds float* %tmp19565, i64 1
- %tmp19567 = getelementptr inbounds float* %tmp19566, i64 1
- %tmp19568 = getelementptr inbounds float* %tmp19567, i64 1
- %tmp19569 = getelementptr inbounds float* %tmp19568, i64 1
- %tmp19570 = getelementptr inbounds float* %tmp19569, i64 1
- %tmp19571 = getelementptr inbounds float* %tmp19570, i64 1
- %tmp19572 = getelementptr inbounds float* %tmp19571, i64 1
- %tmp19573 = getelementptr inbounds float* %tmp19572, i64 1
- %tmp19574 = getelementptr inbounds float* %tmp19573, i64 1
- %tmp19575 = getelementptr inbounds float* %tmp19574, i64 1
- %tmp19576 = getelementptr inbounds float* %tmp19575, i64 1
- %tmp19577 = getelementptr inbounds float* %tmp19576, i64 1
- %tmp19578 = getelementptr inbounds float* %tmp19577, i64 1
- %tmp19579 = getelementptr inbounds float* %tmp19578, i64 1
- %tmp19580 = getelementptr inbounds float* %tmp19579, i64 1
- %tmp19581 = getelementptr inbounds float* %tmp19580, i64 1
- %tmp19582 = getelementptr inbounds float* %tmp19581, i64 1
- %tmp19583 = getelementptr inbounds float* %tmp19582, i64 1
- %tmp19584 = getelementptr inbounds float* %tmp19583, i64 1
- %tmp19585 = getelementptr inbounds float* %tmp19584, i64 1
- %tmp19586 = getelementptr inbounds float* %tmp19585, i64 1
- %tmp19587 = getelementptr inbounds float* %tmp19586, i64 1
- %tmp19588 = getelementptr inbounds float* %tmp19587, i64 1
- %tmp19589 = getelementptr inbounds float* %tmp19588, i64 1
- %tmp19590 = getelementptr inbounds float* %tmp19589, i64 1
- %tmp19591 = getelementptr inbounds float* %tmp19590, i64 1
- %tmp19592 = getelementptr inbounds float* %tmp19591, i64 1
- %tmp19593 = getelementptr inbounds float* %tmp19592, i64 1
- %tmp19594 = getelementptr inbounds float* %tmp19593, i64 1
- %tmp19595 = getelementptr inbounds float* %tmp19594, i64 1
- %tmp19596 = getelementptr inbounds float* %tmp19595, i64 1
- %tmp19597 = getelementptr inbounds float* %tmp19596, i64 1
- %tmp19598 = getelementptr inbounds float* %tmp19597, i64 1
- %tmp19599 = getelementptr inbounds float* %tmp19598, i64 1
- %tmp19600 = getelementptr inbounds float* %tmp19599, i64 1
- %tmp19601 = getelementptr inbounds float* %tmp19600, i64 1
- %tmp19602 = getelementptr inbounds float* %tmp19601, i64 1
- %tmp19603 = getelementptr inbounds float* %tmp19602, i64 1
- %tmp19604 = getelementptr inbounds float* %tmp19603, i64 1
- %tmp19605 = getelementptr inbounds float* %tmp19604, i64 1
- %tmp19606 = getelementptr inbounds float* %tmp19605, i64 1
- %tmp19607 = getelementptr inbounds float* %tmp19606, i64 1
- %tmp19608 = getelementptr inbounds float* %tmp19607, i64 1
- %tmp19609 = getelementptr inbounds float* %tmp19608, i64 1
- %tmp19610 = getelementptr inbounds float* %tmp19609, i64 1
- %tmp19611 = getelementptr inbounds float* %tmp19610, i64 1
- %tmp19612 = getelementptr inbounds float* %tmp19611, i64 1
- %tmp19613 = getelementptr inbounds float* %tmp19612, i64 1
- %tmp19614 = getelementptr inbounds float* %tmp19613, i64 1
- %tmp19615 = getelementptr inbounds float* %tmp19614, i64 1
- %tmp19616 = getelementptr inbounds float* %tmp19615, i64 1
- %tmp19617 = getelementptr inbounds float* %tmp19616, i64 1
- %tmp19618 = getelementptr inbounds float* %tmp19617, i64 1
- %tmp19619 = getelementptr inbounds float* %tmp19618, i64 1
- %tmp19620 = getelementptr inbounds float* %tmp19619, i64 1
- %tmp19621 = getelementptr inbounds float* %tmp19620, i64 1
- %tmp19622 = getelementptr inbounds float* %tmp19621, i64 1
- %tmp19623 = getelementptr inbounds float* %tmp19622, i64 1
- %tmp19624 = getelementptr inbounds float* %tmp19623, i64 1
- %tmp19625 = getelementptr inbounds float* %tmp19624, i64 1
- %tmp19626 = getelementptr inbounds float* %tmp19625, i64 1
- %tmp19627 = getelementptr inbounds float* %tmp19626, i64 1
- %tmp19628 = getelementptr inbounds float* %tmp19627, i64 1
- %tmp19629 = getelementptr inbounds float* %tmp19628, i64 1
- %tmp19630 = getelementptr inbounds float* %tmp19629, i64 1
- %tmp19631 = getelementptr inbounds float* %tmp19630, i64 1
- %tmp19632 = getelementptr inbounds float* %tmp19631, i64 1
- %tmp19633 = getelementptr inbounds float* %tmp19632, i64 1
- %tmp19634 = getelementptr inbounds float* %tmp19633, i64 1
- %tmp19635 = getelementptr inbounds float* %tmp19634, i64 1
- %tmp19636 = getelementptr inbounds float* %tmp19635, i64 1
- %tmp19637 = getelementptr inbounds float* %tmp19636, i64 1
- %tmp19638 = getelementptr inbounds float* %tmp19637, i64 1
- %tmp19639 = getelementptr inbounds float* %tmp19638, i64 1
- %tmp19640 = getelementptr inbounds float* %tmp19639, i64 1
- %tmp19641 = getelementptr inbounds float* %tmp19640, i64 1
- %tmp19642 = getelementptr inbounds float* %tmp19641, i64 1
- %tmp19643 = getelementptr inbounds float* %tmp19642, i64 1
- %tmp19644 = getelementptr inbounds float* %tmp19643, i64 1
- %tmp19645 = getelementptr inbounds float* %tmp19644, i64 1
- %tmp19646 = getelementptr inbounds float* %tmp19645, i64 1
- %tmp19647 = getelementptr inbounds float* %tmp19646, i64 1
- %tmp19648 = getelementptr inbounds float* %tmp19647, i64 1
- %tmp19649 = getelementptr inbounds float* %tmp19648, i64 1
- %tmp19650 = getelementptr inbounds float* %tmp19649, i64 1
- %tmp19651 = getelementptr inbounds float* %tmp19650, i64 1
- %tmp19652 = getelementptr inbounds float* %tmp19651, i64 1
- %tmp19653 = getelementptr inbounds float* %tmp19652, i64 1
- %tmp19654 = getelementptr inbounds float* %tmp19653, i64 1
- %tmp19655 = getelementptr inbounds float* %tmp19654, i64 1
- %tmp19656 = getelementptr inbounds float* %tmp19655, i64 1
- %tmp19657 = getelementptr inbounds float* %tmp19656, i64 1
- %tmp19658 = getelementptr inbounds float* %tmp19657, i64 1
- %tmp19659 = getelementptr inbounds float* %tmp19658, i64 1
- %tmp19660 = getelementptr inbounds float* %tmp19659, i64 1
- %tmp19661 = getelementptr inbounds float* %tmp19660, i64 1
- %tmp19662 = getelementptr inbounds float* %tmp19661, i64 1
- %tmp19663 = getelementptr inbounds float* %tmp19662, i64 1
- %tmp19664 = getelementptr inbounds float* %tmp19663, i64 1
- %tmp19665 = getelementptr inbounds float* %tmp19664, i64 1
- %tmp19666 = getelementptr inbounds float* %tmp19665, i64 1
- %tmp19667 = getelementptr inbounds float* %tmp19666, i64 1
- %tmp19668 = getelementptr inbounds float* %tmp19667, i64 1
- %tmp19669 = getelementptr inbounds float* %tmp19668, i64 1
- %tmp19670 = getelementptr inbounds float* %tmp19669, i64 1
- %tmp19671 = getelementptr inbounds float* %tmp19670, i64 1
- %tmp19672 = getelementptr inbounds float* %tmp19671, i64 1
- %tmp19673 = getelementptr inbounds float* %tmp19672, i64 1
- %tmp19674 = getelementptr inbounds float* %tmp19673, i64 1
- %tmp19675 = getelementptr inbounds float* %tmp19674, i64 1
- %tmp19676 = getelementptr inbounds float* %tmp19675, i64 1
- %tmp19677 = getelementptr inbounds float* %tmp19676, i64 1
- %tmp19678 = getelementptr inbounds float* %tmp19677, i64 1
- %tmp19679 = getelementptr inbounds float* %tmp19678, i64 1
- %tmp19680 = getelementptr inbounds float* %tmp19679, i64 1
- %tmp19681 = getelementptr inbounds float* %tmp19680, i64 1
- %tmp19682 = getelementptr inbounds float* %tmp19681, i64 1
- %tmp19683 = getelementptr inbounds float* %tmp19682, i64 1
- %tmp19684 = getelementptr inbounds float* %tmp19683, i64 1
- %tmp19685 = getelementptr inbounds float* %tmp19684, i64 1
- %tmp19686 = getelementptr inbounds float* %tmp19685, i64 1
- %tmp19687 = getelementptr inbounds float* %tmp19686, i64 1
- %tmp19688 = getelementptr inbounds float* %tmp19687, i64 1
- %tmp19689 = getelementptr inbounds float* %tmp19688, i64 1
- %tmp19690 = getelementptr inbounds float* %tmp19689, i64 1
- %tmp19691 = getelementptr inbounds float* %tmp19690, i64 1
- %tmp19692 = getelementptr inbounds float* %tmp19691, i64 1
- %tmp19693 = getelementptr inbounds float* %tmp19692, i64 1
- %tmp19694 = getelementptr inbounds float* %tmp19693, i64 1
- %tmp19695 = getelementptr inbounds float* %tmp19694, i64 1
- %tmp19696 = getelementptr inbounds float* %tmp19695, i64 1
- %tmp19697 = getelementptr inbounds float* %tmp19696, i64 1
- %tmp19698 = getelementptr inbounds float* %tmp19697, i64 1
- %tmp19699 = getelementptr inbounds float* %tmp19698, i64 1
- %tmp19700 = getelementptr inbounds float* %tmp19699, i64 1
- %tmp19701 = getelementptr inbounds float* %tmp19700, i64 1
- %tmp19702 = getelementptr inbounds float* %tmp19701, i64 1
- %tmp19703 = getelementptr inbounds float* %tmp19702, i64 1
- %tmp19704 = getelementptr inbounds float* %tmp19703, i64 1
- %tmp19705 = getelementptr inbounds float* %tmp19704, i64 1
- %tmp19706 = getelementptr inbounds float* %tmp19705, i64 1
- %tmp19707 = getelementptr inbounds float* %tmp19706, i64 1
- %tmp19708 = getelementptr inbounds float* %tmp19707, i64 1
- %tmp19709 = getelementptr inbounds float* %tmp19708, i64 1
- %tmp19710 = getelementptr inbounds float* %tmp19709, i64 1
- %tmp19711 = getelementptr inbounds float* %tmp19710, i64 1
- %tmp19712 = getelementptr inbounds float* %tmp19711, i64 1
- %tmp19713 = getelementptr inbounds float* %tmp19712, i64 1
- %tmp19714 = getelementptr inbounds float* %tmp19713, i64 1
- %tmp19715 = getelementptr inbounds float* %tmp19714, i64 1
- %tmp19716 = getelementptr inbounds float* %tmp19715, i64 1
- %tmp19717 = getelementptr inbounds float* %tmp19716, i64 1
- %tmp19718 = getelementptr inbounds float* %tmp19717, i64 1
- %tmp19719 = getelementptr inbounds float* %tmp19718, i64 1
- %tmp19720 = getelementptr inbounds float* %tmp19719, i64 1
- %tmp19721 = getelementptr inbounds float* %tmp19720, i64 1
- %tmp19722 = getelementptr inbounds float* %tmp19721, i64 1
- %tmp19723 = getelementptr inbounds float* %tmp19722, i64 1
- %tmp19724 = getelementptr inbounds float* %tmp19723, i64 1
- %tmp19725 = getelementptr inbounds float* %tmp19724, i64 1
- %tmp19726 = getelementptr inbounds float* %tmp19725, i64 1
- %tmp19727 = getelementptr inbounds float* %tmp19726, i64 1
- %tmp19728 = getelementptr inbounds float* %tmp19727, i64 1
- %tmp19729 = getelementptr inbounds float* %tmp19728, i64 1
- %tmp19730 = getelementptr inbounds float* %tmp19729, i64 1
- %tmp19731 = getelementptr inbounds float* %tmp19730, i64 1
- %tmp19732 = getelementptr inbounds float* %tmp19731, i64 1
- %tmp19733 = getelementptr inbounds float* %tmp19732, i64 1
- %tmp19734 = getelementptr inbounds float* %tmp19733, i64 1
- %tmp19735 = getelementptr inbounds float* %tmp19734, i64 1
- %tmp19736 = getelementptr inbounds float* %tmp19735, i64 1
- %tmp19737 = getelementptr inbounds float* %tmp19736, i64 1
- %tmp19738 = getelementptr inbounds float* %tmp19737, i64 1
- %tmp19739 = getelementptr inbounds float* %tmp19738, i64 1
- %tmp19740 = getelementptr inbounds float* %tmp19739, i64 1
- %tmp19741 = getelementptr inbounds float* %tmp19740, i64 1
- %tmp19742 = getelementptr inbounds float* %tmp19741, i64 1
- %tmp19743 = getelementptr inbounds float* %tmp19742, i64 1
- %tmp19744 = getelementptr inbounds float* %tmp19743, i64 1
- %tmp19745 = getelementptr inbounds float* %tmp19744, i64 1
- %tmp19746 = getelementptr inbounds float* %tmp19745, i64 1
- %tmp19747 = getelementptr inbounds float* %tmp19746, i64 1
- %tmp19748 = getelementptr inbounds float* %tmp19747, i64 1
- %tmp19749 = getelementptr inbounds float* %tmp19748, i64 1
- %tmp19750 = getelementptr inbounds float* %tmp19749, i64 1
- %tmp19751 = getelementptr inbounds float* %tmp19750, i64 1
- %tmp19752 = getelementptr inbounds float* %tmp19751, i64 1
- %tmp19753 = getelementptr inbounds float* %tmp19752, i64 1
- %tmp19754 = getelementptr inbounds float* %tmp19753, i64 1
- %tmp19755 = getelementptr inbounds float* %tmp19754, i64 1
- %tmp19756 = getelementptr inbounds float* %tmp19755, i64 1
- %tmp19757 = getelementptr inbounds float* %tmp19756, i64 1
- %tmp19758 = getelementptr inbounds float* %tmp19757, i64 1
- %tmp19759 = getelementptr inbounds float* %tmp19758, i64 1
- %tmp19760 = getelementptr inbounds float* %tmp19759, i64 1
- %tmp19761 = getelementptr inbounds float* %tmp19760, i64 1
- %tmp19762 = getelementptr inbounds float* %tmp19761, i64 1
- %tmp19763 = getelementptr inbounds float* %tmp19762, i64 1
- %tmp19764 = getelementptr inbounds float* %tmp19763, i64 1
- %tmp19765 = getelementptr inbounds float* %tmp19764, i64 1
- %tmp19766 = getelementptr inbounds float* %tmp19765, i64 1
- %tmp19767 = getelementptr inbounds float* %tmp19766, i64 1
- %tmp19768 = getelementptr inbounds float* %tmp19767, i64 1
- %tmp19769 = getelementptr inbounds float* %tmp19768, i64 1
- %tmp19770 = getelementptr inbounds float* %tmp19769, i64 1
- %tmp19771 = getelementptr inbounds float* %tmp19770, i64 1
- %tmp19772 = getelementptr inbounds float* %tmp19771, i64 1
- %tmp19773 = getelementptr inbounds float* %tmp19772, i64 1
- %tmp19774 = getelementptr inbounds float* %tmp19773, i64 1
- %tmp19775 = getelementptr inbounds float* %tmp19774, i64 1
- %tmp19776 = getelementptr inbounds float* %tmp19775, i64 1
- %tmp19777 = getelementptr inbounds float* %tmp19776, i64 1
- %tmp19778 = getelementptr inbounds float* %tmp19777, i64 1
- %tmp19779 = getelementptr inbounds float* %tmp19778, i64 1
- %tmp19780 = getelementptr inbounds float* %tmp19779, i64 1
- %tmp19781 = getelementptr inbounds float* %tmp19780, i64 1
- %tmp19782 = getelementptr inbounds float* %tmp19781, i64 1
- %tmp19783 = getelementptr inbounds float* %tmp19782, i64 1
- %tmp19784 = getelementptr inbounds float* %tmp19783, i64 1
- %tmp19785 = getelementptr inbounds float* %tmp19784, i64 1
- %tmp19786 = getelementptr inbounds float* %tmp19785, i64 1
- %tmp19787 = getelementptr inbounds float* %tmp19786, i64 1
- %tmp19788 = getelementptr inbounds float* %tmp19787, i64 1
- %tmp19789 = getelementptr inbounds float* %tmp19788, i64 1
- %tmp19790 = getelementptr inbounds float* %tmp19789, i64 1
- %tmp19791 = getelementptr inbounds float* %tmp19790, i64 1
- %tmp19792 = getelementptr inbounds float* %tmp19791, i64 1
- %tmp19793 = getelementptr inbounds float* %tmp19792, i64 1
- %tmp19794 = getelementptr inbounds float* %tmp19793, i64 1
- %tmp19795 = getelementptr inbounds float* %tmp19794, i64 1
- %tmp19796 = getelementptr inbounds float* %tmp19795, i64 1
- %tmp19797 = getelementptr inbounds float* %tmp19796, i64 1
- %tmp19798 = getelementptr inbounds float* %tmp19797, i64 1
- %tmp19799 = getelementptr inbounds float* %tmp19798, i64 1
- %tmp19800 = getelementptr inbounds float* %tmp19799, i64 1
- %tmp19801 = getelementptr inbounds float* %tmp19800, i64 1
- %tmp19802 = getelementptr inbounds float* %tmp19801, i64 1
- %tmp19803 = getelementptr inbounds float* %tmp19802, i64 1
- %tmp19804 = getelementptr inbounds float* %tmp19803, i64 1
- %tmp19805 = getelementptr inbounds float* %tmp19804, i64 1
- %tmp19806 = getelementptr inbounds float* %tmp19805, i64 1
- %tmp19807 = getelementptr inbounds float* %tmp19806, i64 1
- %tmp19808 = getelementptr inbounds float* %tmp19807, i64 1
- %tmp19809 = getelementptr inbounds float* %tmp19808, i64 1
- %tmp19810 = getelementptr inbounds float* %tmp19809, i64 1
- %tmp19811 = getelementptr inbounds float* %tmp19810, i64 1
- %tmp19812 = getelementptr inbounds float* %tmp19811, i64 1
- %tmp19813 = getelementptr inbounds float* %tmp19812, i64 1
- %tmp19814 = getelementptr inbounds float* %tmp19813, i64 1
- %tmp19815 = getelementptr inbounds float* %tmp19814, i64 1
- %tmp19816 = getelementptr inbounds float* %tmp19815, i64 1
- %tmp19817 = getelementptr inbounds float* %tmp19816, i64 1
- %tmp19818 = getelementptr inbounds float* %tmp19817, i64 1
- %tmp19819 = getelementptr inbounds float* %tmp19818, i64 1
- %tmp19820 = getelementptr inbounds float* %tmp19819, i64 1
- %tmp19821 = getelementptr inbounds float* %tmp19820, i64 1
- %tmp19822 = getelementptr inbounds float* %tmp19821, i64 1
- %tmp19823 = getelementptr inbounds float* %tmp19822, i64 1
- %tmp19824 = getelementptr inbounds float* %tmp19823, i64 1
- %tmp19825 = getelementptr inbounds float* %tmp19824, i64 1
- %tmp19826 = getelementptr inbounds float* %tmp19825, i64 1
- %tmp19827 = getelementptr inbounds float* %tmp19826, i64 1
- %tmp19828 = getelementptr inbounds float* %tmp19827, i64 1
- %tmp19829 = getelementptr inbounds float* %tmp19828, i64 1
- %tmp19830 = getelementptr inbounds float* %tmp19829, i64 1
- %tmp19831 = getelementptr inbounds float* %tmp19830, i64 1
- %tmp19832 = getelementptr inbounds float* %tmp19831, i64 1
- %tmp19833 = getelementptr inbounds float* %tmp19832, i64 1
- %tmp19834 = getelementptr inbounds float* %tmp19833, i64 1
- %tmp19835 = getelementptr inbounds float* %tmp19834, i64 1
- %tmp19836 = getelementptr inbounds float* %tmp19835, i64 1
- %tmp19837 = getelementptr inbounds float* %tmp19836, i64 1
- %tmp19838 = getelementptr inbounds float* %tmp19837, i64 1
- %tmp19839 = getelementptr inbounds float* %tmp19838, i64 1
- %tmp19840 = getelementptr inbounds float* %tmp19839, i64 1
- %tmp19841 = getelementptr inbounds float* %tmp19840, i64 1
- %tmp19842 = getelementptr inbounds float* %tmp19841, i64 1
- %tmp19843 = getelementptr inbounds float* %tmp19842, i64 1
- %tmp19844 = getelementptr inbounds float* %tmp19843, i64 1
- %tmp19845 = getelementptr inbounds float* %tmp19844, i64 1
- %tmp19846 = getelementptr inbounds float* %tmp19845, i64 1
- %tmp19847 = getelementptr inbounds float* %tmp19846, i64 1
- %tmp19848 = getelementptr inbounds float* %tmp19847, i64 1
- %tmp19849 = getelementptr inbounds float* %tmp19848, i64 1
- %tmp19850 = getelementptr inbounds float* %tmp19849, i64 1
- %tmp19851 = getelementptr inbounds float* %tmp19850, i64 1
- %tmp19852 = getelementptr inbounds float* %tmp19851, i64 1
- %tmp19853 = getelementptr inbounds float* %tmp19852, i64 1
- %tmp19854 = getelementptr inbounds float* %tmp19853, i64 1
- %tmp19855 = getelementptr inbounds float* %tmp19854, i64 1
- %tmp19856 = getelementptr inbounds float* %tmp19855, i64 1
- %tmp19857 = getelementptr inbounds float* %tmp19856, i64 1
- %tmp19858 = getelementptr inbounds float* %tmp19857, i64 1
- %tmp19859 = getelementptr inbounds float* %tmp19858, i64 1
- %tmp19860 = getelementptr inbounds float* %tmp19859, i64 1
- %tmp19861 = getelementptr inbounds float* %tmp19860, i64 1
- %tmp19862 = getelementptr inbounds float* %tmp19861, i64 1
- %tmp19863 = getelementptr inbounds float* %tmp19862, i64 1
- %tmp19864 = getelementptr inbounds float* %tmp19863, i64 1
- %tmp19865 = getelementptr inbounds float* %tmp19864, i64 1
- %tmp19866 = getelementptr inbounds float* %tmp19865, i64 1
- %tmp19867 = getelementptr inbounds float* %tmp19866, i64 1
- %tmp19868 = getelementptr inbounds float* %tmp19867, i64 1
- %tmp19869 = getelementptr inbounds float* %tmp19868, i64 1
- %tmp19870 = getelementptr inbounds float* %tmp19869, i64 1
- %tmp19871 = getelementptr inbounds float* %tmp19870, i64 1
- %tmp19872 = getelementptr inbounds float* %tmp19871, i64 1
- %tmp19873 = getelementptr inbounds float* %tmp19872, i64 1
- %tmp19874 = getelementptr inbounds float* %tmp19873, i64 1
- %tmp19875 = getelementptr inbounds float* %tmp19874, i64 1
- %tmp19876 = getelementptr inbounds float* %tmp19875, i64 1
- %tmp19877 = getelementptr inbounds float* %tmp19876, i64 1
- %tmp19878 = getelementptr inbounds float* %tmp19877, i64 1
- %tmp19879 = getelementptr inbounds float* %tmp19878, i64 1
- %tmp19880 = getelementptr inbounds float* %tmp19879, i64 1
- %tmp19881 = getelementptr inbounds float* %tmp19880, i64 1
- %tmp19882 = getelementptr inbounds float* %tmp19881, i64 1
- %tmp19883 = getelementptr inbounds float* %tmp19882, i64 1
- %tmp19884 = getelementptr inbounds float* %tmp19883, i64 1
- %tmp19885 = getelementptr inbounds float* %tmp19884, i64 1
- %tmp19886 = getelementptr inbounds float* %tmp19885, i64 1
- %tmp19887 = getelementptr inbounds float* %tmp19886, i64 1
- %tmp19888 = getelementptr inbounds float* %tmp19887, i64 1
- %tmp19889 = getelementptr inbounds float* %tmp19888, i64 1
- %tmp19890 = getelementptr inbounds float* %tmp19889, i64 1
- %tmp19891 = getelementptr inbounds float* %tmp19890, i64 1
- %tmp19892 = getelementptr inbounds float* %tmp19891, i64 1
- %tmp19893 = getelementptr inbounds float* %tmp19892, i64 1
- %tmp19894 = getelementptr inbounds float* %tmp19893, i64 1
- %tmp19895 = getelementptr inbounds float* %tmp19894, i64 1
- %tmp19896 = getelementptr inbounds float* %tmp19895, i64 1
- %tmp19897 = getelementptr inbounds float* %tmp19896, i64 1
- %tmp19898 = getelementptr inbounds float* %tmp19897, i64 1
- %tmp19899 = getelementptr inbounds float* %tmp19898, i64 1
- %tmp19900 = getelementptr inbounds float* %tmp19899, i64 1
- %tmp19901 = getelementptr inbounds float* %tmp19900, i64 1
- %tmp19902 = getelementptr inbounds float* %tmp19901, i64 1
- %tmp19903 = getelementptr inbounds float* %tmp19902, i64 1
- %tmp19904 = getelementptr inbounds float* %tmp19903, i64 1
- %tmp19905 = getelementptr inbounds float* %tmp19904, i64 1
- %tmp19906 = getelementptr inbounds float* %tmp19905, i64 1
- %tmp19907 = getelementptr inbounds float* %tmp19906, i64 1
- %tmp19908 = getelementptr inbounds float* %tmp19907, i64 1
- %tmp19909 = getelementptr inbounds float* %tmp19908, i64 1
- %tmp19910 = getelementptr inbounds float* %tmp19909, i64 1
- %tmp19911 = getelementptr inbounds float* %tmp19910, i64 1
- %tmp19912 = getelementptr inbounds float* %tmp19911, i64 1
- %tmp19913 = getelementptr inbounds float* %tmp19912, i64 1
- %tmp19914 = getelementptr inbounds float* %tmp19913, i64 1
- %tmp19915 = getelementptr inbounds float* %tmp19914, i64 1
- %tmp19916 = getelementptr inbounds float* %tmp19915, i64 1
- %tmp19917 = getelementptr inbounds float* %tmp19916, i64 1
- %tmp19918 = getelementptr inbounds float* %tmp19917, i64 1
- %tmp19919 = getelementptr inbounds float* %tmp19918, i64 1
- %tmp19920 = getelementptr inbounds float* %tmp19919, i64 1
- %tmp19921 = getelementptr inbounds float* %tmp19920, i64 1
- %tmp19922 = getelementptr inbounds float* %tmp19921, i64 1
- %tmp19923 = getelementptr inbounds float* %tmp19922, i64 1
- %tmp19924 = getelementptr inbounds float* %tmp19923, i64 1
- %tmp19925 = getelementptr inbounds float* %tmp19924, i64 1
- %tmp19926 = getelementptr inbounds float* %tmp19925, i64 1
- %tmp19927 = getelementptr inbounds float* %tmp19926, i64 1
- %tmp19928 = getelementptr inbounds float* %tmp19927, i64 1
- %tmp19929 = getelementptr inbounds float* %tmp19928, i64 1
- %tmp19930 = getelementptr inbounds float* %tmp19929, i64 1
- %tmp19931 = getelementptr inbounds float* %tmp19930, i64 1
- %tmp19932 = getelementptr inbounds float* %tmp19931, i64 1
- %tmp19933 = getelementptr inbounds float* %tmp19932, i64 1
- %tmp19934 = getelementptr inbounds float* %tmp19933, i64 1
- %tmp19935 = getelementptr inbounds float* %tmp19934, i64 1
- %tmp19936 = getelementptr inbounds float* %tmp19935, i64 1
- %tmp19937 = getelementptr inbounds float* %tmp19936, i64 1
- %tmp19938 = getelementptr inbounds float* %tmp19937, i64 1
- %tmp19939 = getelementptr inbounds float* %tmp19938, i64 1
- %tmp19940 = getelementptr inbounds float* %tmp19939, i64 1
- %tmp19941 = getelementptr inbounds float* %tmp19940, i64 1
- %tmp19942 = getelementptr inbounds float* %tmp19941, i64 1
- %tmp19943 = getelementptr inbounds float* %tmp19942, i64 1
- %tmp19944 = getelementptr inbounds float* %tmp19943, i64 1
- %tmp19945 = getelementptr inbounds float* %tmp19944, i64 1
- %tmp19946 = getelementptr inbounds float* %tmp19945, i64 1
- %tmp19947 = getelementptr inbounds float* %tmp19946, i64 1
- %tmp19948 = getelementptr inbounds float* %tmp19947, i64 1
- %tmp19949 = getelementptr inbounds float* %tmp19948, i64 1
- %tmp19950 = getelementptr inbounds float* %tmp19949, i64 1
- %tmp19951 = getelementptr inbounds float* %tmp19950, i64 1
- %tmp19952 = getelementptr inbounds float* %tmp19951, i64 1
- %tmp19953 = getelementptr inbounds float* %tmp19952, i64 1
- %tmp19954 = getelementptr inbounds float* %tmp19953, i64 1
- %tmp19955 = getelementptr inbounds float* %tmp19954, i64 1
- %tmp19956 = getelementptr inbounds float* %tmp19955, i64 1
- %tmp19957 = getelementptr inbounds float* %tmp19956, i64 1
- %tmp19958 = getelementptr inbounds float* %tmp19957, i64 1
- %tmp19959 = getelementptr inbounds float* %tmp19958, i64 1
- %tmp19960 = getelementptr inbounds float* %tmp19959, i64 1
- %tmp19961 = getelementptr inbounds float* %tmp19960, i64 1
- %tmp19962 = getelementptr inbounds float* %tmp19961, i64 1
- %tmp19963 = getelementptr inbounds float* %tmp19962, i64 1
- %tmp19964 = getelementptr inbounds float* %tmp19963, i64 1
- %tmp19965 = getelementptr inbounds float* %tmp19964, i64 1
- %tmp19966 = getelementptr inbounds float* %tmp19965, i64 1
- %tmp19967 = getelementptr inbounds float* %tmp19966, i64 1
- %tmp19968 = getelementptr inbounds float* %tmp19967, i64 1
- %tmp19969 = getelementptr inbounds float* %tmp19968, i64 1
- %tmp19970 = getelementptr inbounds float* %tmp19969, i64 1
- %tmp19971 = getelementptr inbounds float* %tmp19970, i64 1
- %tmp19972 = getelementptr inbounds float* %tmp19971, i64 1
- %tmp19973 = getelementptr inbounds float* %tmp19972, i64 1
- %tmp19974 = getelementptr inbounds float* %tmp19973, i64 1
- %tmp19975 = getelementptr inbounds float* %tmp19974, i64 1
- %tmp19976 = getelementptr inbounds float* %tmp19975, i64 1
- %tmp19977 = getelementptr inbounds float* %tmp19976, i64 1
- %tmp19978 = getelementptr inbounds float* %tmp19977, i64 1
- %tmp19979 = getelementptr inbounds float* %tmp19978, i64 1
- %tmp19980 = getelementptr inbounds float* %tmp19979, i64 1
- %tmp19981 = getelementptr inbounds float* %tmp19980, i64 1
- %tmp19982 = getelementptr inbounds float* %tmp19981, i64 1
- %tmp19983 = getelementptr inbounds float* %tmp19982, i64 1
- %tmp19984 = getelementptr inbounds float* %tmp19983, i64 1
- %tmp19985 = getelementptr inbounds float* %tmp19984, i64 1
- %tmp19986 = getelementptr inbounds float* %tmp19985, i64 1
- %tmp19987 = getelementptr inbounds float* %tmp19986, i64 1
- %tmp19988 = getelementptr inbounds float* %tmp19987, i64 1
- %tmp19989 = getelementptr inbounds float* %tmp19988, i64 1
- %tmp19990 = getelementptr inbounds float* %tmp19989, i64 1
- %tmp19991 = getelementptr inbounds float* %tmp19990, i64 1
- %tmp19992 = getelementptr inbounds float* %tmp19991, i64 1
- %tmp19993 = getelementptr inbounds float* %tmp19992, i64 1
- %tmp19994 = getelementptr inbounds float* %tmp19993, i64 1
- %tmp19995 = getelementptr inbounds float* %tmp19994, i64 1
- %tmp19996 = getelementptr inbounds float* %tmp19995, i64 1
- %tmp19997 = getelementptr inbounds float* %tmp19996, i64 1
- %tmp19998 = getelementptr inbounds float* %tmp19997, i64 1
- %tmp19999 = getelementptr inbounds float* %tmp19998, i64 1
- %tmp20000 = getelementptr inbounds float* %tmp19999, i64 1
- %tmp20001 = getelementptr inbounds float* %tmp20000, i64 1
- %tmp20002 = getelementptr inbounds float* %tmp20001, i64 1
- %tmp20003 = getelementptr inbounds float* %tmp20002, i64 1
- %tmp20004 = getelementptr inbounds float* %tmp20003, i64 1
- %tmp20005 = getelementptr inbounds float* %tmp20004, i64 1
- %tmp20006 = getelementptr inbounds float* %tmp20005, i64 1
- %tmp20007 = getelementptr inbounds float* %tmp20006, i64 1
- %tmp20008 = getelementptr inbounds float* %tmp20007, i64 1
- %tmp20009 = getelementptr inbounds float* %tmp20008, i64 1
- %tmp20010 = getelementptr inbounds float* %tmp20009, i64 1
- %tmp20011 = getelementptr inbounds float* %tmp20010, i64 1
- %tmp20012 = getelementptr inbounds float* %tmp20011, i64 1
- %tmp20013 = getelementptr inbounds float* %tmp20012, i64 1
- %tmp20014 = getelementptr inbounds float* %tmp20013, i64 1
- %tmp20015 = getelementptr inbounds float* %tmp20014, i64 1
- %tmp20016 = getelementptr inbounds float* %tmp20015, i64 1
- %tmp20017 = getelementptr inbounds float* %tmp20016, i64 1
- %tmp20018 = getelementptr inbounds float* %tmp20017, i64 1
- %tmp20019 = getelementptr inbounds float* %tmp20018, i64 1
- %tmp20020 = getelementptr inbounds float* %tmp20019, i64 1
- %tmp20021 = getelementptr inbounds float* %tmp20020, i64 1
- %tmp20022 = getelementptr inbounds float* %tmp20021, i64 1
- %tmp20023 = getelementptr inbounds float* %tmp20022, i64 1
- %tmp20024 = getelementptr inbounds float* %tmp20023, i64 1
- %tmp20025 = getelementptr inbounds float* %tmp20024, i64 1
- %tmp20026 = getelementptr inbounds float* %tmp20025, i64 1
- %tmp20027 = getelementptr inbounds float* %tmp20026, i64 1
- %tmp20028 = getelementptr inbounds float* %tmp20027, i64 1
- %tmp20029 = getelementptr inbounds float* %tmp20028, i64 1
- %tmp20030 = getelementptr inbounds float* %tmp20029, i64 1
- %tmp20031 = getelementptr inbounds float* %tmp20030, i64 1
- %tmp20032 = getelementptr inbounds float* %tmp20031, i64 1
- %tmp20033 = getelementptr inbounds float* %tmp20032, i64 1
- %tmp20034 = getelementptr inbounds float* %tmp20033, i64 1
- %tmp20035 = getelementptr inbounds float* %tmp20034, i64 1
- %tmp20036 = getelementptr inbounds float* %tmp20035, i64 1
- %tmp20037 = getelementptr inbounds float* %tmp20036, i64 1
- %tmp20038 = getelementptr inbounds float* %tmp20037, i64 1
- %tmp20039 = getelementptr inbounds float* %tmp20038, i64 1
- %tmp20040 = getelementptr inbounds float* %tmp20039, i64 1
- %tmp20041 = getelementptr inbounds float* %tmp20040, i64 1
- %tmp20042 = getelementptr inbounds float* %tmp20041, i64 1
- %tmp20043 = getelementptr inbounds float* %tmp20042, i64 1
- %tmp20044 = getelementptr inbounds float* %tmp20043, i64 1
- %tmp20045 = getelementptr inbounds float* %tmp20044, i64 1
- %tmp20046 = getelementptr inbounds float* %tmp20045, i64 1
- %tmp20047 = getelementptr inbounds float* %tmp20046, i64 1
- %tmp20048 = getelementptr inbounds float* %tmp20047, i64 1
- %tmp20049 = getelementptr inbounds float* %tmp20048, i64 1
- %tmp20050 = getelementptr inbounds float* %tmp20049, i64 1
- %tmp20051 = getelementptr inbounds float* %tmp20050, i64 1
- %tmp20052 = getelementptr inbounds float* %tmp20051, i64 1
- %tmp20053 = getelementptr inbounds float* %tmp20052, i64 1
- %tmp20054 = getelementptr inbounds float* %tmp20053, i64 1
- %tmp20055 = getelementptr inbounds float* %tmp20054, i64 1
- %tmp20056 = getelementptr inbounds float* %tmp20055, i64 1
- %tmp20057 = getelementptr inbounds float* %tmp20056, i64 1
- %tmp20058 = getelementptr inbounds float* %tmp20057, i64 1
- %tmp20059 = getelementptr inbounds float* %tmp20058, i64 1
- %tmp20060 = getelementptr inbounds float* %tmp20059, i64 1
- %tmp20061 = getelementptr inbounds float* %tmp20060, i64 1
- %tmp20062 = getelementptr inbounds float* %tmp20061, i64 1
- %tmp20063 = getelementptr inbounds float* %tmp20062, i64 1
- %tmp20064 = getelementptr inbounds float* %tmp20063, i64 1
- %tmp20065 = getelementptr inbounds float* %tmp20064, i64 1
- %tmp20066 = getelementptr inbounds float* %tmp20065, i64 1
- %tmp20067 = getelementptr inbounds float* %tmp20066, i64 1
- %tmp20068 = getelementptr inbounds float* %tmp20067, i64 1
- %tmp20069 = getelementptr inbounds float* %tmp20068, i64 1
- %tmp20070 = getelementptr inbounds float* %tmp20069, i64 1
- %tmp20071 = getelementptr inbounds float* %tmp20070, i64 1
- %tmp20072 = getelementptr inbounds float* %tmp20071, i64 1
- %tmp20073 = getelementptr inbounds float* %tmp20072, i64 1
- %tmp20074 = getelementptr inbounds float* %tmp20073, i64 1
- %tmp20075 = getelementptr inbounds float* %tmp20074, i64 1
- %tmp20076 = getelementptr inbounds float* %tmp20075, i64 1
- %tmp20077 = getelementptr inbounds float* %tmp20076, i64 1
- %tmp20078 = getelementptr inbounds float* %tmp20077, i64 1
- %tmp20079 = getelementptr inbounds float* %tmp20078, i64 1
- %tmp20080 = getelementptr inbounds float* %tmp20079, i64 1
- %tmp20081 = getelementptr inbounds float* %tmp20080, i64 1
- %tmp20082 = getelementptr inbounds float* %tmp20081, i64 1
- %tmp20083 = getelementptr inbounds float* %tmp20082, i64 1
- %tmp20084 = getelementptr inbounds float* %tmp20083, i64 1
- %tmp20085 = getelementptr inbounds float* %tmp20084, i64 1
- %tmp20086 = getelementptr inbounds float* %tmp20085, i64 1
- %tmp20087 = getelementptr inbounds float* %tmp20086, i64 1
- %tmp20088 = getelementptr inbounds float* %tmp20087, i64 1
- %tmp20089 = getelementptr inbounds float* %tmp20088, i64 1
- %tmp20090 = getelementptr inbounds float* %tmp20089, i64 1
- %tmp20091 = getelementptr inbounds float* %tmp20090, i64 1
- %tmp20092 = getelementptr inbounds float* %tmp20091, i64 1
- %tmp20093 = getelementptr inbounds float* %tmp20092, i64 1
- %tmp20094 = getelementptr inbounds float* %tmp20093, i64 1
- %tmp20095 = getelementptr inbounds float* %tmp20094, i64 1
- %tmp20096 = getelementptr inbounds float* %tmp20095, i64 1
- %tmp20097 = getelementptr inbounds float* %tmp20096, i64 1
- %tmp20098 = getelementptr inbounds float* %tmp20097, i64 1
- %tmp20099 = getelementptr inbounds float* %tmp20098, i64 1
- %tmp20100 = getelementptr inbounds float* %tmp20099, i64 1
- %tmp20101 = getelementptr inbounds float* %tmp20100, i64 1
- %tmp20102 = getelementptr inbounds float* %tmp20101, i64 1
- %tmp20103 = getelementptr inbounds float* %tmp20102, i64 1
- %tmp20104 = getelementptr inbounds float* %tmp20103, i64 1
- %tmp20105 = getelementptr inbounds float* %tmp20104, i64 1
- %tmp20106 = getelementptr inbounds float* %tmp20105, i64 1
- %tmp20107 = getelementptr inbounds float* %tmp20106, i64 1
- %tmp20108 = getelementptr inbounds float* %tmp20107, i64 1
- %tmp20109 = getelementptr inbounds float* %tmp20108, i64 1
- %tmp20110 = getelementptr inbounds float* %tmp20109, i64 1
- %tmp20111 = getelementptr inbounds float* %tmp20110, i64 1
- %tmp20112 = getelementptr inbounds float* %tmp20111, i64 1
- %tmp20113 = getelementptr inbounds float* %tmp20112, i64 1
- %tmp20114 = getelementptr inbounds float* %tmp20113, i64 1
- %tmp20115 = getelementptr inbounds float* %tmp20114, i64 1
- %tmp20116 = getelementptr inbounds float* %tmp20115, i64 1
- %tmp20117 = getelementptr inbounds float* %tmp20116, i64 1
- %tmp20118 = getelementptr inbounds float* %tmp20117, i64 1
- %tmp20119 = getelementptr inbounds float* %tmp20118, i64 1
- %tmp20120 = getelementptr inbounds float* %tmp20119, i64 1
- %tmp20121 = getelementptr inbounds float* %tmp20120, i64 1
- %tmp20122 = getelementptr inbounds float* %tmp20121, i64 1
- %tmp20123 = getelementptr inbounds float* %tmp20122, i64 1
- %tmp20124 = getelementptr inbounds float* %tmp20123, i64 1
- %tmp20125 = getelementptr inbounds float* %tmp20124, i64 1
- %tmp20126 = getelementptr inbounds float* %tmp20125, i64 1
- %tmp20127 = getelementptr inbounds float* %tmp20126, i64 1
- %tmp20128 = getelementptr inbounds float* %tmp20127, i64 1
- %tmp20129 = getelementptr inbounds float* %tmp20128, i64 1
- %tmp20130 = getelementptr inbounds float* %tmp20129, i64 1
- %tmp20131 = getelementptr inbounds float* %tmp20130, i64 1
- %tmp20132 = getelementptr inbounds float* %tmp20131, i64 1
- %tmp20133 = getelementptr inbounds float* %tmp20132, i64 1
- %tmp20134 = getelementptr inbounds float* %tmp20133, i64 1
- %tmp20135 = getelementptr inbounds float* %tmp20134, i64 1
- %tmp20136 = getelementptr inbounds float* %tmp20135, i64 1
- %tmp20137 = getelementptr inbounds float* %tmp20136, i64 1
- %tmp20138 = getelementptr inbounds float* %tmp20137, i64 1
- %tmp20139 = getelementptr inbounds float* %tmp20138, i64 1
- %tmp20140 = getelementptr inbounds float* %tmp20139, i64 1
- %tmp20141 = getelementptr inbounds float* %tmp20140, i64 1
- %tmp20142 = getelementptr inbounds float* %tmp20141, i64 1
- %tmp20143 = getelementptr inbounds float* %tmp20142, i64 1
- %tmp20144 = getelementptr inbounds float* %tmp20143, i64 1
- %tmp20145 = getelementptr inbounds float* %tmp20144, i64 1
- %tmp20146 = getelementptr inbounds float* %tmp20145, i64 1
- %tmp20147 = getelementptr inbounds float* %tmp20146, i64 1
- %tmp20148 = getelementptr inbounds float* %tmp20147, i64 1
- %tmp20149 = getelementptr inbounds float* %tmp20148, i64 1
- %tmp20150 = getelementptr inbounds float* %tmp20149, i64 1
- %tmp20151 = getelementptr inbounds float* %tmp20150, i64 1
- %tmp20152 = getelementptr inbounds float* %tmp20151, i64 1
- %tmp20153 = getelementptr inbounds float* %tmp20152, i64 1
- %tmp20154 = getelementptr inbounds float* %tmp20153, i64 1
- %tmp20155 = getelementptr inbounds float* %tmp20154, i64 1
- %tmp20156 = getelementptr inbounds float* %tmp20155, i64 1
- %tmp20157 = getelementptr inbounds float* %tmp20156, i64 1
- %tmp20158 = getelementptr inbounds float* %tmp20157, i64 1
- %tmp20159 = getelementptr inbounds float* %tmp20158, i64 1
- %tmp20160 = getelementptr inbounds float* %tmp20159, i64 1
- %tmp20161 = getelementptr inbounds float* %tmp20160, i64 1
- %tmp20162 = getelementptr inbounds float* %tmp20161, i64 1
- %tmp20163 = getelementptr inbounds float* %tmp20162, i64 1
- %tmp20164 = getelementptr inbounds float* %tmp20163, i64 1
- %tmp20165 = getelementptr inbounds float* %tmp20164, i64 1
- %tmp20166 = getelementptr inbounds float* %tmp20165, i64 1
- %tmp20167 = getelementptr inbounds float* %tmp20166, i64 1
- %tmp20168 = getelementptr inbounds float* %tmp20167, i64 1
- %tmp20169 = getelementptr inbounds float* %tmp20168, i64 1
- %tmp20170 = getelementptr inbounds float* %tmp20169, i64 1
- %tmp20171 = getelementptr inbounds float* %tmp20170, i64 1
- %tmp20172 = getelementptr inbounds float* %tmp20171, i64 1
- %tmp20173 = getelementptr inbounds float* %tmp20172, i64 1
- %tmp20174 = getelementptr inbounds float* %tmp20173, i64 1
- %tmp20175 = getelementptr inbounds float* %tmp20174, i64 1
- %tmp20176 = getelementptr inbounds float* %tmp20175, i64 1
- %tmp20177 = getelementptr inbounds float* %tmp20176, i64 1
- %tmp20178 = getelementptr inbounds float* %tmp20177, i64 1
- %tmp20179 = getelementptr inbounds float* %tmp20178, i64 1
- %tmp20180 = getelementptr inbounds float* %tmp20179, i64 1
- %tmp20181 = getelementptr inbounds float* %tmp20180, i64 1
- %tmp20182 = getelementptr inbounds float* %tmp20181, i64 1
- %tmp20183 = getelementptr inbounds float* %tmp20182, i64 1
- %tmp20184 = getelementptr inbounds float* %tmp20183, i64 1
- %tmp20185 = getelementptr inbounds float* %tmp20184, i64 1
- %tmp20186 = getelementptr inbounds float* %tmp20185, i64 1
- %tmp20187 = getelementptr inbounds float* %tmp20186, i64 1
- %tmp20188 = getelementptr inbounds float* %tmp20187, i64 1
- %tmp20189 = getelementptr inbounds float* %tmp20188, i64 1
- %tmp20190 = getelementptr inbounds float* %tmp20189, i64 1
- %tmp20191 = getelementptr inbounds float* %tmp20190, i64 1
- %tmp20192 = getelementptr inbounds float* %tmp20191, i64 1
- %tmp20193 = getelementptr inbounds float* %tmp20192, i64 1
- %tmp20194 = getelementptr inbounds float* %tmp20193, i64 1
- %tmp20195 = getelementptr inbounds float* %tmp20194, i64 1
- %tmp20196 = getelementptr inbounds float* %tmp20195, i64 1
- %tmp20197 = getelementptr inbounds float* %tmp20196, i64 1
- %tmp20198 = getelementptr inbounds float* %tmp20197, i64 1
- %tmp20199 = getelementptr inbounds float* %tmp20198, i64 1
- %tmp20200 = getelementptr inbounds float* %tmp20199, i64 1
- %tmp20201 = getelementptr inbounds float* %tmp20200, i64 1
- %tmp20202 = getelementptr inbounds float* %tmp20201, i64 1
- %tmp20203 = getelementptr inbounds float* %tmp20202, i64 1
- %tmp20204 = getelementptr inbounds float* %tmp20203, i64 1
- %tmp20205 = getelementptr inbounds float* %tmp20204, i64 1
- %tmp20206 = getelementptr inbounds float* %tmp20205, i64 1
- %tmp20207 = getelementptr inbounds float* %tmp20206, i64 1
- %tmp20208 = getelementptr inbounds float* %tmp20207, i64 1
- %tmp20209 = getelementptr inbounds float* %tmp20208, i64 1
- %tmp20210 = getelementptr inbounds float* %tmp20209, i64 1
- %tmp20211 = getelementptr inbounds float* %tmp20210, i64 1
- %tmp20212 = getelementptr inbounds float* %tmp20211, i64 1
- %tmp20213 = getelementptr inbounds float* %tmp20212, i64 1
- %tmp20214 = getelementptr inbounds float* %tmp20213, i64 1
- %tmp20215 = getelementptr inbounds float* %tmp20214, i64 1
- %tmp20216 = getelementptr inbounds float* %tmp20215, i64 1
- %tmp20217 = getelementptr inbounds float* %tmp20216, i64 1
- %tmp20218 = getelementptr inbounds float* %tmp20217, i64 1
- %tmp20219 = getelementptr inbounds float* %tmp20218, i64 1
- %tmp20220 = getelementptr inbounds float* %tmp20219, i64 1
- %tmp20221 = getelementptr inbounds float* %tmp20220, i64 1
- %tmp20222 = getelementptr inbounds float* %tmp20221, i64 1
- %tmp20223 = getelementptr inbounds float* %tmp20222, i64 1
- %tmp20224 = getelementptr inbounds float* %tmp20223, i64 1
- %tmp20225 = getelementptr inbounds float* %tmp20224, i64 1
- %tmp20226 = getelementptr inbounds float* %tmp20225, i64 1
- %tmp20227 = getelementptr inbounds float* %tmp20226, i64 1
- %tmp20228 = getelementptr inbounds float* %tmp20227, i64 1
- %tmp20229 = getelementptr inbounds float* %tmp20228, i64 1
- %tmp20230 = getelementptr inbounds float* %tmp20229, i64 1
- %tmp20231 = getelementptr inbounds float* %tmp20230, i64 1
- %tmp20232 = getelementptr inbounds float* %tmp20231, i64 1
- %tmp20233 = getelementptr inbounds float* %tmp20232, i64 1
- %tmp20234 = getelementptr inbounds float* %tmp20233, i64 1
- %tmp20235 = getelementptr inbounds float* %tmp20234, i64 1
- %tmp20236 = getelementptr inbounds float* %tmp20235, i64 1
- %tmp20237 = getelementptr inbounds float* %tmp20236, i64 1
- %tmp20238 = getelementptr inbounds float* %tmp20237, i64 1
- %tmp20239 = getelementptr inbounds float* %tmp20238, i64 1
- %tmp20240 = getelementptr inbounds float* %tmp20239, i64 1
- %tmp20241 = getelementptr inbounds float* %tmp20240, i64 1
- %tmp20242 = getelementptr inbounds float* %tmp20241, i64 1
- %tmp20243 = getelementptr inbounds float* %tmp20242, i64 1
- %tmp20244 = getelementptr inbounds float* %tmp20243, i64 1
- %tmp20245 = getelementptr inbounds float* %tmp20244, i64 1
- %tmp20246 = getelementptr inbounds float* %tmp20245, i64 1
- %tmp20247 = getelementptr inbounds float* %tmp20246, i64 1
- %tmp20248 = getelementptr inbounds float* %tmp20247, i64 1
- %tmp20249 = getelementptr inbounds float* %tmp20248, i64 1
- %tmp20250 = getelementptr inbounds float* %tmp20249, i64 1
- %tmp20251 = getelementptr inbounds float* %tmp20250, i64 1
- %tmp20252 = getelementptr inbounds float* %tmp20251, i64 1
- %tmp20253 = getelementptr inbounds float* %tmp20252, i64 1
- %tmp20254 = getelementptr inbounds float* %tmp20253, i64 1
- %tmp20255 = getelementptr inbounds float* %tmp20254, i64 1
- %tmp20256 = getelementptr inbounds float* %tmp20255, i64 1
- %tmp20257 = getelementptr inbounds float* %tmp20256, i64 1
- %tmp20258 = getelementptr inbounds float* %tmp20257, i64 1
- %tmp20259 = getelementptr inbounds float* %tmp20258, i64 1
- %tmp20260 = getelementptr inbounds float* %tmp20259, i64 1
- %tmp20261 = getelementptr inbounds float* %tmp20260, i64 1
- %tmp20262 = getelementptr inbounds float* %tmp20261, i64 1
- %tmp20263 = getelementptr inbounds float* %tmp20262, i64 1
- %tmp20264 = getelementptr inbounds float* %tmp20263, i64 1
- %tmp20265 = getelementptr inbounds float* %tmp20264, i64 1
- %tmp20266 = getelementptr inbounds float* %tmp20265, i64 1
- %tmp20267 = getelementptr inbounds float* %tmp20266, i64 1
- %tmp20268 = getelementptr inbounds float* %tmp20267, i64 1
- %tmp20269 = getelementptr inbounds float* %tmp20268, i64 1
- %tmp20270 = getelementptr inbounds float* %tmp20269, i64 1
- %tmp20271 = getelementptr inbounds float* %tmp20270, i64 1
- %tmp20272 = getelementptr inbounds float* %tmp20271, i64 1
- %tmp20273 = getelementptr inbounds float* %tmp20272, i64 1
- %tmp20274 = getelementptr inbounds float* %tmp20273, i64 1
- %tmp20275 = getelementptr inbounds float* %tmp20274, i64 1
- %tmp20276 = getelementptr inbounds float* %tmp20275, i64 1
- %tmp20277 = getelementptr inbounds float* %tmp20276, i64 1
- %tmp20278 = getelementptr inbounds float* %tmp20277, i64 1
- %tmp20279 = getelementptr inbounds float* %tmp20278, i64 1
- %tmp20280 = getelementptr inbounds float* %tmp20279, i64 1
- %tmp20281 = getelementptr inbounds float* %tmp20280, i64 1
- %tmp20282 = getelementptr inbounds float* %tmp20281, i64 1
- %tmp20283 = getelementptr inbounds float* %tmp20282, i64 1
- %tmp20284 = getelementptr inbounds float* %tmp20283, i64 1
- %tmp20285 = getelementptr inbounds float* %tmp20284, i64 1
- %tmp20286 = getelementptr inbounds float* %tmp20285, i64 1
- %tmp20287 = getelementptr inbounds float* %tmp20286, i64 1
- %tmp20288 = getelementptr inbounds float* %tmp20287, i64 1
- %tmp20289 = getelementptr inbounds float* %tmp20288, i64 1
- %tmp20290 = getelementptr inbounds float* %tmp20289, i64 1
- %tmp20291 = getelementptr inbounds float* %tmp20290, i64 1
- %tmp20292 = getelementptr inbounds float* %tmp20291, i64 1
- %tmp20293 = getelementptr inbounds float* %tmp20292, i64 1
- %tmp20294 = getelementptr inbounds float* %tmp20293, i64 1
- %tmp20295 = getelementptr inbounds float* %tmp20294, i64 1
- %tmp20296 = getelementptr inbounds float* %tmp20295, i64 1
- %tmp20297 = getelementptr inbounds float* %tmp20296, i64 1
- %tmp20298 = getelementptr inbounds float* %tmp20297, i64 1
- %tmp20299 = getelementptr inbounds float* %tmp20298, i64 1
- %tmp20300 = getelementptr inbounds float* %tmp20299, i64 1
- %tmp20301 = getelementptr inbounds float* %tmp20300, i64 1
- %tmp20302 = getelementptr inbounds float* %tmp20301, i64 1
- %tmp20303 = getelementptr inbounds float* %tmp20302, i64 1
- %tmp20304 = getelementptr inbounds float* %tmp20303, i64 1
- %tmp20305 = getelementptr inbounds float* %tmp20304, i64 1
- %tmp20306 = getelementptr inbounds float* %tmp20305, i64 1
- %tmp20307 = getelementptr inbounds float* %tmp20306, i64 1
- %tmp20308 = getelementptr inbounds float* %tmp20307, i64 1
- %tmp20309 = getelementptr inbounds float* %tmp20308, i64 1
- %tmp20310 = getelementptr inbounds float* %tmp20309, i64 1
- %tmp20311 = getelementptr inbounds float* %tmp20310, i64 1
- %tmp20312 = getelementptr inbounds float* %tmp20311, i64 1
- %tmp20313 = getelementptr inbounds float* %tmp20312, i64 1
- %tmp20314 = getelementptr inbounds float* %tmp20313, i64 1
- %tmp20315 = getelementptr inbounds float* %tmp20314, i64 1
- %tmp20316 = getelementptr inbounds float* %tmp20315, i64 1
- %tmp20317 = getelementptr inbounds float* %tmp20316, i64 1
- %tmp20318 = getelementptr inbounds float* %tmp20317, i64 1
- %tmp20319 = getelementptr inbounds float* %tmp20318, i64 1
- %tmp20320 = getelementptr inbounds float* %tmp20319, i64 1
- %tmp20321 = getelementptr inbounds float* %tmp20320, i64 1
- %tmp20322 = getelementptr inbounds float* %tmp20321, i64 1
- %tmp20323 = getelementptr inbounds float* %tmp20322, i64 1
- %tmp20324 = getelementptr inbounds float* %tmp20323, i64 1
- %tmp20325 = getelementptr inbounds float* %tmp20324, i64 1
- %tmp20326 = getelementptr inbounds float* %tmp20325, i64 1
- %tmp20327 = getelementptr inbounds float* %tmp20326, i64 1
- %tmp20328 = getelementptr inbounds float* %tmp20327, i64 1
- %tmp20329 = getelementptr inbounds float* %tmp20328, i64 1
- %tmp20330 = getelementptr inbounds float* %tmp20329, i64 1
- %tmp20331 = getelementptr inbounds float* %tmp20330, i64 1
- %tmp20332 = getelementptr inbounds float* %tmp20331, i64 1
- %tmp20333 = getelementptr inbounds float* %tmp20332, i64 1
- %tmp20334 = getelementptr inbounds float* %tmp20333, i64 1
- %tmp20335 = getelementptr inbounds float* %tmp20334, i64 1
- %tmp20336 = getelementptr inbounds float* %tmp20335, i64 1
- %tmp20337 = getelementptr inbounds float* %tmp20336, i64 1
- %tmp20338 = getelementptr inbounds float* %tmp20337, i64 1
- %tmp20339 = getelementptr inbounds float* %tmp20338, i64 1
- %tmp20340 = getelementptr inbounds float* %tmp20339, i64 1
- %tmp20341 = getelementptr inbounds float* %tmp20340, i64 1
- %tmp20342 = getelementptr inbounds float* %tmp20341, i64 1
- %tmp20343 = getelementptr inbounds float* %tmp20342, i64 1
- %tmp20344 = getelementptr inbounds float* %tmp20343, i64 1
- %tmp20345 = getelementptr inbounds float* %tmp20344, i64 1
- %tmp20346 = getelementptr inbounds float* %tmp20345, i64 1
- %tmp20347 = getelementptr inbounds float* %tmp20346, i64 1
- %tmp20348 = getelementptr inbounds float* %tmp20347, i64 1
- %tmp20349 = getelementptr inbounds float* %tmp20348, i64 1
- %tmp20350 = getelementptr inbounds float* %tmp20349, i64 1
- %tmp20351 = getelementptr inbounds float* %tmp20350, i64 1
- %tmp20352 = getelementptr inbounds float* %tmp20351, i64 1
- %tmp20353 = getelementptr inbounds float* %tmp20352, i64 1
- %tmp20354 = getelementptr inbounds float* %tmp20353, i64 1
- %tmp20355 = getelementptr inbounds float* %tmp20354, i64 1
- %tmp20356 = getelementptr inbounds float* %tmp20355, i64 1
- %tmp20357 = getelementptr inbounds float* %tmp20356, i64 1
- %tmp20358 = getelementptr inbounds float* %tmp20357, i64 1
- %tmp20359 = getelementptr inbounds float* %tmp20358, i64 1
- %tmp20360 = getelementptr inbounds float* %tmp20359, i64 1
- %tmp20361 = getelementptr inbounds float* %tmp20360, i64 1
- %tmp20362 = getelementptr inbounds float* %tmp20361, i64 1
- %tmp20363 = getelementptr inbounds float* %tmp20362, i64 1
- %tmp20364 = getelementptr inbounds float* %tmp20363, i64 1
- %tmp20365 = getelementptr inbounds float* %tmp20364, i64 1
- %tmp20366 = getelementptr inbounds float* %tmp20365, i64 1
- %tmp20367 = getelementptr inbounds float* %tmp20366, i64 1
- %tmp20368 = getelementptr inbounds float* %tmp20367, i64 1
- %tmp20369 = getelementptr inbounds float* %tmp20368, i64 1
- %tmp20370 = getelementptr inbounds float* %tmp20369, i64 1
- %tmp20371 = getelementptr inbounds float* %tmp20370, i64 1
- %tmp20372 = getelementptr inbounds float* %tmp20371, i64 1
- %tmp20373 = getelementptr inbounds float* %tmp20372, i64 1
- %tmp20374 = getelementptr inbounds float* %tmp20373, i64 1
- %tmp20375 = getelementptr inbounds float* %tmp20374, i64 1
- %tmp20376 = getelementptr inbounds float* %tmp20375, i64 1
- %tmp20377 = getelementptr inbounds float* %tmp20376, i64 1
- %tmp20378 = getelementptr inbounds float* %tmp20377, i64 1
- %tmp20379 = getelementptr inbounds float* %tmp20378, i64 1
- %tmp20380 = getelementptr inbounds float* %tmp20379, i64 1
- %tmp20381 = getelementptr inbounds float* %tmp20380, i64 1
- %tmp20382 = getelementptr inbounds float* %tmp20381, i64 1
- %tmp20383 = getelementptr inbounds float* %tmp20382, i64 1
- %tmp20384 = getelementptr inbounds float* %tmp20383, i64 1
- %tmp20385 = getelementptr inbounds float* %tmp20384, i64 1
- %tmp20386 = getelementptr inbounds float* %tmp20385, i64 1
- %tmp20387 = getelementptr inbounds float* %tmp20386, i64 1
- %tmp20388 = getelementptr inbounds float* %tmp20387, i64 1
- %tmp20389 = getelementptr inbounds float* %tmp20388, i64 1
- %tmp20390 = getelementptr inbounds float* %tmp20389, i64 1
- %tmp20391 = getelementptr inbounds float* %tmp20390, i64 1
- %tmp20392 = getelementptr inbounds float* %tmp20391, i64 1
- %tmp20393 = getelementptr inbounds float* %tmp20392, i64 1
- %tmp20394 = getelementptr inbounds float* %tmp20393, i64 1
- %tmp20395 = getelementptr inbounds float* %tmp20394, i64 1
- %tmp20396 = getelementptr inbounds float* %tmp20395, i64 1
- %tmp20397 = getelementptr inbounds float* %tmp20396, i64 1
- %tmp20398 = getelementptr inbounds float* %tmp20397, i64 1
- %tmp20399 = getelementptr inbounds float* %tmp20398, i64 1
- %tmp20400 = getelementptr inbounds float* %tmp20399, i64 1
- %tmp20401 = getelementptr inbounds float* %tmp20400, i64 1
- %tmp20402 = getelementptr inbounds float* %tmp20401, i64 1
- %tmp20403 = getelementptr inbounds float* %tmp20402, i64 1
- %tmp20404 = getelementptr inbounds float* %tmp20403, i64 1
- %tmp20405 = getelementptr inbounds float* %tmp20404, i64 1
- %tmp20406 = getelementptr inbounds float* %tmp20405, i64 1
- %tmp20407 = getelementptr inbounds float* %tmp20406, i64 1
- %tmp20408 = getelementptr inbounds float* %tmp20407, i64 1
- %tmp20409 = getelementptr inbounds float* %tmp20408, i64 1
- %tmp20410 = getelementptr inbounds float* %tmp20409, i64 1
- %tmp20411 = getelementptr inbounds float* %tmp20410, i64 1
- %tmp20412 = getelementptr inbounds float* %tmp20411, i64 1
- %tmp20413 = getelementptr inbounds float* %tmp20412, i64 1
- %tmp20414 = getelementptr inbounds float* %tmp20413, i64 1
- %tmp20415 = getelementptr inbounds float* %tmp20414, i64 1
- %tmp20416 = getelementptr inbounds float* %tmp20415, i64 1
- %tmp20417 = getelementptr inbounds float* %tmp20416, i64 1
- %tmp20418 = getelementptr inbounds float* %tmp20417, i64 1
- %tmp20419 = getelementptr inbounds float* %tmp20418, i64 1
- %tmp20420 = getelementptr inbounds float* %tmp20419, i64 1
- %tmp20421 = getelementptr inbounds float* %tmp20420, i64 1
- %tmp20422 = getelementptr inbounds float* %tmp20421, i64 1
- %tmp20423 = getelementptr inbounds float* %tmp20422, i64 1
- %tmp20424 = getelementptr inbounds float* %tmp20423, i64 1
- %tmp20425 = getelementptr inbounds float* %tmp20424, i64 1
- %tmp20426 = getelementptr inbounds float* %tmp20425, i64 1
- %tmp20427 = getelementptr inbounds float* %tmp20426, i64 1
- %tmp20428 = getelementptr inbounds float* %tmp20427, i64 1
- %tmp20429 = getelementptr inbounds float* %tmp20428, i64 1
- %tmp20430 = getelementptr inbounds float* %tmp20429, i64 1
- %tmp20431 = getelementptr inbounds float* %tmp20430, i64 1
- %tmp20432 = getelementptr inbounds float* %tmp20431, i64 1
- %tmp20433 = getelementptr inbounds float* %tmp20432, i64 1
- %tmp20434 = getelementptr inbounds float* %tmp20433, i64 1
- %tmp20435 = getelementptr inbounds float* %tmp20434, i64 1
- %tmp20436 = getelementptr inbounds float* %tmp20435, i64 1
- %tmp20437 = getelementptr inbounds float* %tmp20436, i64 1
- %tmp20438 = getelementptr inbounds float* %tmp20437, i64 1
- %tmp20439 = getelementptr inbounds float* %tmp20438, i64 1
- %tmp20440 = getelementptr inbounds float* %tmp20439, i64 1
- %tmp20441 = getelementptr inbounds float* %tmp20440, i64 1
- %tmp20442 = getelementptr inbounds float* %tmp20441, i64 1
- %tmp20443 = getelementptr inbounds float* %tmp20442, i64 1
- %tmp20444 = getelementptr inbounds float* %tmp20443, i64 1
- %tmp20445 = getelementptr inbounds float* %tmp20444, i64 1
- %tmp20446 = getelementptr inbounds float* %tmp20445, i64 1
- %tmp20447 = getelementptr inbounds float* %tmp20446, i64 1
- %tmp20448 = getelementptr inbounds float* %tmp20447, i64 1
- %tmp20449 = getelementptr inbounds float* %tmp20448, i64 1
- %tmp20450 = getelementptr inbounds float* %tmp20449, i64 1
- %tmp20451 = getelementptr inbounds float* %tmp20450, i64 1
- %tmp20452 = getelementptr inbounds float* %tmp20451, i64 1
- %tmp20453 = getelementptr inbounds float* %tmp20452, i64 1
- %tmp20454 = getelementptr inbounds float* %tmp20453, i64 1
- %tmp20455 = getelementptr inbounds float* %tmp20454, i64 1
- %tmp20456 = getelementptr inbounds float* %tmp20455, i64 1
- %tmp20457 = getelementptr inbounds float* %tmp20456, i64 1
- %tmp20458 = getelementptr inbounds float* %tmp20457, i64 1
- %tmp20459 = getelementptr inbounds float* %tmp20458, i64 1
- %tmp20460 = getelementptr inbounds float* %tmp20459, i64 1
- %tmp20461 = getelementptr inbounds float* %tmp20460, i64 1
- %tmp20462 = getelementptr inbounds float* %tmp20461, i64 1
- %tmp20463 = getelementptr inbounds float* %tmp20462, i64 1
- %tmp20464 = getelementptr inbounds float* %tmp20463, i64 1
- %tmp20465 = getelementptr inbounds float* %tmp20464, i64 1
- %tmp20466 = getelementptr inbounds float* %tmp20465, i64 1
- %tmp20467 = getelementptr inbounds float* %tmp20466, i64 1
- %tmp20468 = getelementptr inbounds float* %tmp20467, i64 1
- %tmp20469 = getelementptr inbounds float* %tmp20468, i64 1
- %tmp20470 = getelementptr inbounds float* %tmp20469, i64 1
- %tmp20471 = getelementptr inbounds float* %tmp20470, i64 1
- %tmp20472 = getelementptr inbounds float* %tmp20471, i64 1
- %tmp20473 = getelementptr inbounds float* %tmp20472, i64 1
- %tmp20474 = getelementptr inbounds float* %tmp20473, i64 1
- %tmp20475 = getelementptr inbounds float* %tmp20474, i64 1
- %tmp20476 = getelementptr inbounds float* %tmp20475, i64 1
- %tmp20477 = getelementptr inbounds float* %tmp20476, i64 1
- %tmp20478 = getelementptr inbounds float* %tmp20477, i64 1
- %tmp20479 = getelementptr inbounds float* %tmp20478, i64 1
- %tmp20480 = getelementptr inbounds float* %tmp20479, i64 1
- %tmp20481 = getelementptr inbounds float* %tmp20480, i64 1
- %tmp20482 = getelementptr inbounds float* %tmp20481, i64 1
- %tmp20483 = getelementptr inbounds float* %tmp20482, i64 1
- %tmp20484 = getelementptr inbounds float* %tmp20483, i64 1
- %tmp20485 = getelementptr inbounds float* %tmp20484, i64 1
- %tmp20486 = getelementptr inbounds float* %tmp20485, i64 1
- %tmp20487 = getelementptr inbounds float* %tmp20486, i64 1
- %tmp20488 = getelementptr inbounds float* %tmp20487, i64 1
- %tmp20489 = getelementptr inbounds float* %tmp20488, i64 1
- %tmp20490 = getelementptr inbounds float* %tmp20489, i64 1
- %tmp20491 = getelementptr inbounds float* %tmp20490, i64 1
- %tmp20492 = getelementptr inbounds float* %tmp20491, i64 1
- %tmp20493 = getelementptr inbounds float* %tmp20492, i64 1
- %tmp20494 = getelementptr inbounds float* %tmp20493, i64 1
- %tmp20495 = getelementptr inbounds float* %tmp20494, i64 1
- %tmp20496 = getelementptr inbounds float* %tmp20495, i64 1
- %tmp20497 = getelementptr inbounds float* %tmp20496, i64 1
- %tmp20498 = getelementptr inbounds float* %tmp20497, i64 1
- %tmp20499 = getelementptr inbounds float* %tmp20498, i64 1
- %tmp20500 = getelementptr inbounds float* %tmp20499, i64 1
- %tmp20501 = getelementptr inbounds float* %tmp20500, i64 1
- %tmp20502 = getelementptr inbounds float* %tmp20501, i64 1
- %tmp20503 = getelementptr inbounds float* %tmp20502, i64 1
- %tmp20504 = getelementptr inbounds float* %tmp20503, i64 1
- %tmp20505 = getelementptr inbounds float* %tmp20504, i64 1
- %tmp20506 = getelementptr inbounds float* %tmp20505, i64 1
- %tmp20507 = getelementptr inbounds float* %tmp20506, i64 1
- %tmp20508 = getelementptr inbounds float* %tmp20507, i64 1
- %tmp20509 = getelementptr inbounds float* %tmp20508, i64 1
- %tmp20510 = getelementptr inbounds float* %tmp20509, i64 1
- %tmp20511 = getelementptr inbounds float* %tmp20510, i64 1
- %tmp20512 = getelementptr inbounds float* %tmp20511, i64 1
- %tmp20513 = getelementptr inbounds float* %tmp20512, i64 1
- %tmp20514 = getelementptr inbounds float* %tmp20513, i64 1
- %tmp20515 = getelementptr inbounds float* %tmp20514, i64 1
- %tmp20516 = getelementptr inbounds float* %tmp20515, i64 1
- %tmp20517 = getelementptr inbounds float* %tmp20516, i64 1
- %tmp20518 = getelementptr inbounds float* %tmp20517, i64 1
- %tmp20519 = getelementptr inbounds float* %tmp20518, i64 1
- %tmp20520 = getelementptr inbounds float* %tmp20519, i64 1
- %tmp20521 = getelementptr inbounds float* %tmp20520, i64 1
- %tmp20522 = getelementptr inbounds float* %tmp20521, i64 1
- %tmp20523 = getelementptr inbounds float* %tmp20522, i64 1
- %tmp20524 = getelementptr inbounds float* %tmp20523, i64 1
- %tmp20525 = getelementptr inbounds float* %tmp20524, i64 1
- %tmp20526 = getelementptr inbounds float* %tmp20525, i64 1
- %tmp20527 = getelementptr inbounds float* %tmp20526, i64 1
- %tmp20528 = getelementptr inbounds float* %tmp20527, i64 1
- %tmp20529 = getelementptr inbounds float* %tmp20528, i64 1
- %tmp20530 = getelementptr inbounds float* %tmp20529, i64 1
- %tmp20531 = getelementptr inbounds float* %tmp20530, i64 1
- %tmp20532 = getelementptr inbounds float* %tmp20531, i64 1
- %tmp20533 = getelementptr inbounds float* %tmp20532, i64 1
- %tmp20534 = getelementptr inbounds float* %tmp20533, i64 1
- %tmp20535 = getelementptr inbounds float* %tmp20534, i64 1
- %tmp20536 = getelementptr inbounds float* %tmp20535, i64 1
- %tmp20537 = getelementptr inbounds float* %tmp20536, i64 1
- %tmp20538 = getelementptr inbounds float* %tmp20537, i64 1
- %tmp20539 = getelementptr inbounds float* %tmp20538, i64 1
- %tmp20540 = getelementptr inbounds float* %tmp20539, i64 1
- %tmp20541 = getelementptr inbounds float* %tmp20540, i64 1
- %tmp20542 = getelementptr inbounds float* %tmp20541, i64 1
- %tmp20543 = getelementptr inbounds float* %tmp20542, i64 1
- %tmp20544 = getelementptr inbounds float* %tmp20543, i64 1
- %tmp20545 = getelementptr inbounds float* %tmp20544, i64 1
- %tmp20546 = getelementptr inbounds float* %tmp20545, i64 1
- %tmp20547 = getelementptr inbounds float* %tmp20546, i64 1
- %tmp20548 = getelementptr inbounds float* %tmp20547, i64 1
- %tmp20549 = getelementptr inbounds float* %tmp20548, i64 1
- %tmp20550 = getelementptr inbounds float* %tmp20549, i64 1
- %tmp20551 = getelementptr inbounds float* %tmp20550, i64 1
- %tmp20552 = getelementptr inbounds float* %tmp20551, i64 1
- %tmp20553 = getelementptr inbounds float* %tmp20552, i64 1
- %tmp20554 = getelementptr inbounds float* %tmp20553, i64 1
- %tmp20555 = getelementptr inbounds float* %tmp20554, i64 1
- %tmp20556 = getelementptr inbounds float* %tmp20555, i64 1
- %tmp20557 = getelementptr inbounds float* %tmp20556, i64 1
- %tmp20558 = getelementptr inbounds float* %tmp20557, i64 1
- %tmp20559 = getelementptr inbounds float* %tmp20558, i64 1
- %tmp20560 = getelementptr inbounds float* %tmp20559, i64 1
- %tmp20561 = getelementptr inbounds float* %tmp20560, i64 1
- %tmp20562 = getelementptr inbounds float* %tmp20561, i64 1
- %tmp20563 = getelementptr inbounds float* %tmp20562, i64 1
- %tmp20564 = getelementptr inbounds float* %tmp20563, i64 1
- %tmp20565 = getelementptr inbounds float* %tmp20564, i64 1
- %tmp20566 = getelementptr inbounds float* %tmp20565, i64 1
- %tmp20567 = getelementptr inbounds float* %tmp20566, i64 1
- %tmp20568 = getelementptr inbounds float* %tmp20567, i64 1
- %tmp20569 = getelementptr inbounds float* %tmp20568, i64 1
- %tmp20570 = getelementptr inbounds float* %tmp20569, i64 1
- %tmp20571 = getelementptr inbounds float* %tmp20570, i64 1
- %tmp20572 = getelementptr inbounds float* %tmp20571, i64 1
- %tmp20573 = getelementptr inbounds float* %tmp20572, i64 1
- %tmp20574 = getelementptr inbounds float* %tmp20573, i64 1
- %tmp20575 = getelementptr inbounds float* %tmp20574, i64 1
- %tmp20576 = getelementptr inbounds float* %tmp20575, i64 1
- %tmp20577 = getelementptr inbounds float* %tmp20576, i64 1
- %tmp20578 = getelementptr inbounds float* %tmp20577, i64 1
- %tmp20579 = getelementptr inbounds float* %tmp20578, i64 1
- %tmp20580 = getelementptr inbounds float* %tmp20579, i64 1
- %tmp20581 = getelementptr inbounds float* %tmp20580, i64 1
- %tmp20582 = getelementptr inbounds float* %tmp20581, i64 1
- %tmp20583 = getelementptr inbounds float* %tmp20582, i64 1
- %tmp20584 = getelementptr inbounds float* %tmp20583, i64 1
- %tmp20585 = getelementptr inbounds float* %tmp20584, i64 1
- %tmp20586 = getelementptr inbounds float* %tmp20585, i64 1
- %tmp20587 = getelementptr inbounds float* %tmp20586, i64 1
- %tmp20588 = getelementptr inbounds float* %tmp20587, i64 1
- %tmp20589 = getelementptr inbounds float* %tmp20588, i64 1
- %tmp20590 = getelementptr inbounds float* %tmp20589, i64 1
- %tmp20591 = getelementptr inbounds float* %tmp20590, i64 1
- %tmp20592 = getelementptr inbounds float* %tmp20591, i64 1
- %tmp20593 = getelementptr inbounds float* %tmp20592, i64 1
- %tmp20594 = getelementptr inbounds float* %tmp20593, i64 1
- %tmp20595 = getelementptr inbounds float* %tmp20594, i64 1
- %tmp20596 = getelementptr inbounds float* %tmp20595, i64 1
- %tmp20597 = getelementptr inbounds float* %tmp20596, i64 1
- %tmp20598 = getelementptr inbounds float* %tmp20597, i64 1
- %tmp20599 = getelementptr inbounds float* %tmp20598, i64 1
- %tmp20600 = getelementptr inbounds float* %tmp20599, i64 1
- %tmp20601 = getelementptr inbounds float* %tmp20600, i64 1
- %tmp20602 = getelementptr inbounds float* %tmp20601, i64 1
- %tmp20603 = getelementptr inbounds float* %tmp20602, i64 1
- %tmp20604 = getelementptr inbounds float* %tmp20603, i64 1
- %tmp20605 = getelementptr inbounds float* %tmp20604, i64 1
- %tmp20606 = getelementptr inbounds float* %tmp20605, i64 1
- %tmp20607 = getelementptr inbounds float* %tmp20606, i64 1
- %tmp20608 = getelementptr inbounds float* %tmp20607, i64 1
- %tmp20609 = getelementptr inbounds float* %tmp20608, i64 1
- %tmp20610 = getelementptr inbounds float* %tmp20609, i64 1
- %tmp20611 = getelementptr inbounds float* %tmp20610, i64 1
- %tmp20612 = getelementptr inbounds float* %tmp20611, i64 1
- %tmp20613 = getelementptr inbounds float* %tmp20612, i64 1
- %tmp20614 = getelementptr inbounds float* %tmp20613, i64 1
- %tmp20615 = getelementptr inbounds float* %tmp20614, i64 1
- %tmp20616 = getelementptr inbounds float* %tmp20615, i64 1
- %tmp20617 = getelementptr inbounds float* %tmp20616, i64 1
- %tmp20618 = getelementptr inbounds float* %tmp20617, i64 1
- %tmp20619 = getelementptr inbounds float* %tmp20618, i64 1
- %tmp20620 = getelementptr inbounds float* %tmp20619, i64 1
- %tmp20621 = getelementptr inbounds float* %tmp20620, i64 1
- %tmp20622 = getelementptr inbounds float* %tmp20621, i64 1
- %tmp20623 = getelementptr inbounds float* %tmp20622, i64 1
- %tmp20624 = getelementptr inbounds float* %tmp20623, i64 1
- %tmp20625 = getelementptr inbounds float* %tmp20624, i64 1
- %tmp20626 = getelementptr inbounds float* %tmp20625, i64 1
- %tmp20627 = getelementptr inbounds float* %tmp20626, i64 1
- %tmp20628 = getelementptr inbounds float* %tmp20627, i64 1
- %tmp20629 = getelementptr inbounds float* %tmp20628, i64 1
- %tmp20630 = getelementptr inbounds float* %tmp20629, i64 1
- %tmp20631 = getelementptr inbounds float* %tmp20630, i64 1
- %tmp20632 = getelementptr inbounds float* %tmp20631, i64 1
- %tmp20633 = getelementptr inbounds float* %tmp20632, i64 1
- %tmp20634 = getelementptr inbounds float* %tmp20633, i64 1
- %tmp20635 = getelementptr inbounds float* %tmp20634, i64 1
- %tmp20636 = getelementptr inbounds float* %tmp20635, i64 1
- %tmp20637 = getelementptr inbounds float* %tmp20636, i64 1
- %tmp20638 = getelementptr inbounds float* %tmp20637, i64 1
- %tmp20639 = getelementptr inbounds float* %tmp20638, i64 1
- %tmp20640 = getelementptr inbounds float* %tmp20639, i64 1
- %tmp20641 = getelementptr inbounds float* %tmp20640, i64 1
- %tmp20642 = getelementptr inbounds float* %tmp20641, i64 1
- %tmp20643 = getelementptr inbounds float* %tmp20642, i64 1
- %tmp20644 = getelementptr inbounds float* %tmp20643, i64 1
- %tmp20645 = getelementptr inbounds float* %tmp20644, i64 1
- %tmp20646 = getelementptr inbounds float* %tmp20645, i64 1
- %tmp20647 = getelementptr inbounds float* %tmp20646, i64 1
- %tmp20648 = getelementptr inbounds float* %tmp20647, i64 1
- %tmp20649 = getelementptr inbounds float* %tmp20648, i64 1
- %tmp20650 = getelementptr inbounds float* %tmp20649, i64 1
- %tmp20651 = getelementptr inbounds float* %tmp20650, i64 1
- %tmp20652 = getelementptr inbounds float* %tmp20651, i64 1
- %tmp20653 = getelementptr inbounds float* %tmp20652, i64 1
- %tmp20654 = getelementptr inbounds float* %tmp20653, i64 1
- %tmp20655 = getelementptr inbounds float* %tmp20654, i64 1
- %tmp20656 = getelementptr inbounds float* %tmp20655, i64 1
- %tmp20657 = getelementptr inbounds float* %tmp20656, i64 1
- %tmp20658 = getelementptr inbounds float* %tmp20657, i64 1
- %tmp20659 = getelementptr inbounds float* %tmp20658, i64 1
- %tmp20660 = getelementptr inbounds float* %tmp20659, i64 1
- %tmp20661 = getelementptr inbounds float* %tmp20660, i64 1
- %tmp20662 = getelementptr inbounds float* %tmp20661, i64 1
- %tmp20663 = getelementptr inbounds float* %tmp20662, i64 1
- %tmp20664 = getelementptr inbounds float* %tmp20663, i64 1
- %tmp20665 = getelementptr inbounds float* %tmp20664, i64 1
- %tmp20666 = getelementptr inbounds float* %tmp20665, i64 1
- %tmp20667 = getelementptr inbounds float* %tmp20666, i64 1
- %tmp20668 = getelementptr inbounds float* %tmp20667, i64 1
- %tmp20669 = getelementptr inbounds float* %tmp20668, i64 1
- %tmp20670 = getelementptr inbounds float* %tmp20669, i64 1
- %tmp20671 = getelementptr inbounds float* %tmp20670, i64 1
- %tmp20672 = getelementptr inbounds float* %tmp20671, i64 1
- %tmp20673 = getelementptr inbounds float* %tmp20672, i64 1
- %tmp20674 = getelementptr inbounds float* %tmp20673, i64 1
- %tmp20675 = getelementptr inbounds float* %tmp20674, i64 1
- %tmp20676 = getelementptr inbounds float* %tmp20675, i64 1
- %tmp20677 = getelementptr inbounds float* %tmp20676, i64 1
- %tmp20678 = getelementptr inbounds float* %tmp20677, i64 1
- %tmp20679 = getelementptr inbounds float* %tmp20678, i64 1
- %tmp20680 = getelementptr inbounds float* %tmp20679, i64 1
- %tmp20681 = getelementptr inbounds float* %tmp20680, i64 1
- %tmp20682 = getelementptr inbounds float* %tmp20681, i64 1
- %tmp20683 = getelementptr inbounds float* %tmp20682, i64 1
- %tmp20684 = getelementptr inbounds float* %tmp20683, i64 1
- %tmp20685 = getelementptr inbounds float* %tmp20684, i64 1
- %tmp20686 = getelementptr inbounds float* %tmp20685, i64 1
- %tmp20687 = getelementptr inbounds float* %tmp20686, i64 1
- %tmp20688 = getelementptr inbounds float* %tmp20687, i64 1
- %tmp20689 = getelementptr inbounds float* %tmp20688, i64 1
- %tmp20690 = getelementptr inbounds float* %tmp20689, i64 1
- %tmp20691 = getelementptr inbounds float* %tmp20690, i64 1
- %tmp20692 = getelementptr inbounds float* %tmp20691, i64 1
- %tmp20693 = getelementptr inbounds float* %tmp20692, i64 1
- %tmp20694 = getelementptr inbounds float* %tmp20693, i64 1
- %tmp20695 = getelementptr inbounds float* %tmp20694, i64 1
- %tmp20696 = getelementptr inbounds float* %tmp20695, i64 1
- %tmp20697 = getelementptr inbounds float* %tmp20696, i64 1
- %tmp20698 = getelementptr inbounds float* %tmp20697, i64 1
- %tmp20699 = getelementptr inbounds float* %tmp20698, i64 1
- %tmp20700 = getelementptr inbounds float* %tmp20699, i64 1
- %tmp20701 = getelementptr inbounds float* %tmp20700, i64 1
- %tmp20702 = getelementptr inbounds float* %tmp20701, i64 1
- %tmp20703 = getelementptr inbounds float* %tmp20702, i64 1
- %tmp20704 = getelementptr inbounds float* %tmp20703, i64 1
- %tmp20705 = getelementptr inbounds float* %tmp20704, i64 1
- %tmp20706 = getelementptr inbounds float* %tmp20705, i64 1
- %tmp20707 = getelementptr inbounds float* %tmp20706, i64 1
- %tmp20708 = getelementptr inbounds float* %tmp20707, i64 1
- %tmp20709 = getelementptr inbounds float* %tmp20708, i64 1
- %tmp20710 = getelementptr inbounds float* %tmp20709, i64 1
- %tmp20711 = getelementptr inbounds float* %tmp20710, i64 1
- %tmp20712 = getelementptr inbounds float* %tmp20711, i64 1
- %tmp20713 = getelementptr inbounds float* %tmp20712, i64 1
- %tmp20714 = getelementptr inbounds float* %tmp20713, i64 1
- %tmp20715 = getelementptr inbounds float* %tmp20714, i64 1
- %tmp20716 = getelementptr inbounds float* %tmp20715, i64 1
- %tmp20717 = getelementptr inbounds float* %tmp20716, i64 1
- %tmp20718 = getelementptr inbounds float* %tmp20717, i64 1
- %tmp20719 = getelementptr inbounds float* %tmp20718, i64 1
- %tmp20720 = getelementptr inbounds float* %tmp20719, i64 1
- %tmp20721 = getelementptr inbounds float* %tmp20720, i64 1
- %tmp20722 = getelementptr inbounds float* %tmp20721, i64 1
- %tmp20723 = getelementptr inbounds float* %tmp20722, i64 1
- %tmp20724 = getelementptr inbounds float* %tmp20723, i64 1
- %tmp20725 = getelementptr inbounds float* %tmp20724, i64 1
- %tmp20726 = getelementptr inbounds float* %tmp20725, i64 1
- %tmp20727 = getelementptr inbounds float* %tmp20726, i64 1
- %tmp20728 = getelementptr inbounds float* %tmp20727, i64 1
- %tmp20729 = getelementptr inbounds float* %tmp20728, i64 1
- %tmp20730 = getelementptr inbounds float* %tmp20729, i64 1
- %tmp20731 = getelementptr inbounds float* %tmp20730, i64 1
- %tmp20732 = getelementptr inbounds float* %tmp20731, i64 1
- %tmp20733 = getelementptr inbounds float* %tmp20732, i64 1
- %tmp20734 = getelementptr inbounds float* %tmp20733, i64 1
- %tmp20735 = getelementptr inbounds float* %tmp20734, i64 1
- %tmp20736 = getelementptr inbounds float* %tmp20735, i64 1
- %tmp20737 = getelementptr inbounds float* %tmp20736, i64 1
- %tmp20738 = getelementptr inbounds float* %tmp20737, i64 1
- %tmp20739 = getelementptr inbounds float* %tmp20738, i64 1
- %tmp20740 = getelementptr inbounds float* %tmp20739, i64 1
- %tmp20741 = getelementptr inbounds float* %tmp20740, i64 1
- %tmp20742 = getelementptr inbounds float* %tmp20741, i64 1
- %tmp20743 = getelementptr inbounds float* %tmp20742, i64 1
- %tmp20744 = getelementptr inbounds float* %tmp20743, i64 1
- %tmp20745 = getelementptr inbounds float* %tmp20744, i64 1
- %tmp20746 = getelementptr inbounds float* %tmp20745, i64 1
- %tmp20747 = getelementptr inbounds float* %tmp20746, i64 1
- %tmp20748 = getelementptr inbounds float* %tmp20747, i64 1
- %tmp20749 = getelementptr inbounds float* %tmp20748, i64 1
- %tmp20750 = getelementptr inbounds float* %tmp20749, i64 1
- %tmp20751 = getelementptr inbounds float* %tmp20750, i64 1
- %tmp20752 = getelementptr inbounds float* %tmp20751, i64 1
- %tmp20753 = getelementptr inbounds float* %tmp20752, i64 1
- %tmp20754 = getelementptr inbounds float* %tmp20753, i64 1
- %tmp20755 = getelementptr inbounds float* %tmp20754, i64 1
- %tmp20756 = getelementptr inbounds float* %tmp20755, i64 1
- %tmp20757 = getelementptr inbounds float* %tmp20756, i64 1
- %tmp20758 = getelementptr inbounds float* %tmp20757, i64 1
- %tmp20759 = getelementptr inbounds float* %tmp20758, i64 1
- %tmp20760 = getelementptr inbounds float* %tmp20759, i64 1
- %tmp20761 = getelementptr inbounds float* %tmp20760, i64 1
- %tmp20762 = getelementptr inbounds float* %tmp20761, i64 1
- %tmp20763 = getelementptr inbounds float* %tmp20762, i64 1
- %tmp20764 = getelementptr inbounds float* %tmp20763, i64 1
- %tmp20765 = getelementptr inbounds float* %tmp20764, i64 1
- %tmp20766 = getelementptr inbounds float* %tmp20765, i64 1
- %tmp20767 = getelementptr inbounds float* %tmp20766, i64 1
- %tmp20768 = getelementptr inbounds float* %tmp20767, i64 1
- %tmp20769 = getelementptr inbounds float* %tmp20768, i64 1
- %tmp20770 = getelementptr inbounds float* %tmp20769, i64 1
- %tmp20771 = getelementptr inbounds float* %tmp20770, i64 1
- %tmp20772 = getelementptr inbounds float* %tmp20771, i64 1
- %tmp20773 = getelementptr inbounds float* %tmp20772, i64 1
- %tmp20774 = getelementptr inbounds float* %tmp20773, i64 1
- %tmp20775 = getelementptr inbounds float* %tmp20774, i64 1
- %tmp20776 = getelementptr inbounds float* %tmp20775, i64 1
- %tmp20777 = getelementptr inbounds float* %tmp20776, i64 1
- %tmp20778 = getelementptr inbounds float* %tmp20777, i64 1
- %tmp20779 = getelementptr inbounds float* %tmp20778, i64 1
- %tmp20780 = getelementptr inbounds float* %tmp20779, i64 1
- %tmp20781 = getelementptr inbounds float* %tmp20780, i64 1
- %tmp20782 = getelementptr inbounds float* %tmp20781, i64 1
- %tmp20783 = getelementptr inbounds float* %tmp20782, i64 1
- %tmp20784 = getelementptr inbounds float* %tmp20783, i64 1
- %tmp20785 = getelementptr inbounds float* %tmp20784, i64 1
- %tmp20786 = getelementptr inbounds float* %tmp20785, i64 1
- %tmp20787 = getelementptr inbounds float* %tmp20786, i64 1
- %tmp20788 = getelementptr inbounds float* %tmp20787, i64 1
- %tmp20789 = getelementptr inbounds float* %tmp20788, i64 1
- %tmp20790 = getelementptr inbounds float* %tmp20789, i64 1
- %tmp20791 = getelementptr inbounds float* %tmp20790, i64 1
- %tmp20792 = getelementptr inbounds float* %tmp20791, i64 1
- %tmp20793 = getelementptr inbounds float* %tmp20792, i64 1
- %tmp20794 = getelementptr inbounds float* %tmp20793, i64 1
- %tmp20795 = getelementptr inbounds float* %tmp20794, i64 1
- %tmp20796 = getelementptr inbounds float* %tmp20795, i64 1
- %tmp20797 = getelementptr inbounds float* %tmp20796, i64 1
- %tmp20798 = getelementptr inbounds float* %tmp20797, i64 1
- %tmp20799 = getelementptr inbounds float* %tmp20798, i64 1
- %tmp20800 = getelementptr inbounds float* %tmp20799, i64 1
- %tmp20801 = getelementptr inbounds float* %tmp20800, i64 1
- %tmp20802 = getelementptr inbounds float* %tmp20801, i64 1
- %tmp20803 = getelementptr inbounds float* %tmp20802, i64 1
- %tmp20804 = getelementptr inbounds float* %tmp20803, i64 1
- %tmp20805 = getelementptr inbounds float* %tmp20804, i64 1
- %tmp20806 = getelementptr inbounds float* %tmp20805, i64 1
- %tmp20807 = getelementptr inbounds float* %tmp20806, i64 1
- %tmp20808 = getelementptr inbounds float* %tmp20807, i64 1
- %tmp20809 = getelementptr inbounds float* %tmp20808, i64 1
- %tmp20810 = getelementptr inbounds float* %tmp20809, i64 1
- %tmp20811 = getelementptr inbounds float* %tmp20810, i64 1
- %tmp20812 = getelementptr inbounds float* %tmp20811, i64 1
- %tmp20813 = getelementptr inbounds float* %tmp20812, i64 1
- %tmp20814 = getelementptr inbounds float* %tmp20813, i64 1
- %tmp20815 = getelementptr inbounds float* %tmp20814, i64 1
- %tmp20816 = getelementptr inbounds float* %tmp20815, i64 1
- %tmp20817 = getelementptr inbounds float* %tmp20816, i64 1
- %tmp20818 = getelementptr inbounds float* %tmp20817, i64 1
- %tmp20819 = getelementptr inbounds float* %tmp20818, i64 1
- %tmp20820 = getelementptr inbounds float* %tmp20819, i64 1
- %tmp20821 = getelementptr inbounds float* %tmp20820, i64 1
- %tmp20822 = getelementptr inbounds float* %tmp20821, i64 1
- %tmp20823 = getelementptr inbounds float* %tmp20822, i64 1
- %tmp20824 = getelementptr inbounds float* %tmp20823, i64 1
- %tmp20825 = getelementptr inbounds float* %tmp20824, i64 1
- %tmp20826 = getelementptr inbounds float* %tmp20825, i64 1
- %tmp20827 = getelementptr inbounds float* %tmp20826, i64 1
- %tmp20828 = getelementptr inbounds float* %tmp20827, i64 1
- %tmp20829 = getelementptr inbounds float* %tmp20828, i64 1
- %tmp20830 = getelementptr inbounds float* %tmp20829, i64 1
- %tmp20831 = getelementptr inbounds float* %tmp20830, i64 1
- %tmp20832 = getelementptr inbounds float* %tmp20831, i64 1
- %tmp20833 = getelementptr inbounds float* %tmp20832, i64 1
- %tmp20834 = getelementptr inbounds float* %tmp20833, i64 1
- %tmp20835 = getelementptr inbounds float* %tmp20834, i64 1
- %tmp20836 = getelementptr inbounds float* %tmp20835, i64 1
- %tmp20837 = getelementptr inbounds float* %tmp20836, i64 1
- %tmp20838 = getelementptr inbounds float* %tmp20837, i64 1
- %tmp20839 = getelementptr inbounds float* %tmp20838, i64 1
- %tmp20840 = getelementptr inbounds float* %tmp20839, i64 1
- %tmp20841 = getelementptr inbounds float* %tmp20840, i64 1
- %tmp20842 = getelementptr inbounds float* %tmp20841, i64 1
- %tmp20843 = getelementptr inbounds float* %tmp20842, i64 1
- %tmp20844 = getelementptr inbounds float* %tmp20843, i64 1
- %tmp20845 = getelementptr inbounds float* %tmp20844, i64 1
- %tmp20846 = getelementptr inbounds float* %tmp20845, i64 1
- %tmp20847 = getelementptr inbounds float* %tmp20846, i64 1
- %tmp20848 = getelementptr inbounds float* %tmp20847, i64 1
- %tmp20849 = getelementptr inbounds float* %tmp20848, i64 1
- %tmp20850 = getelementptr inbounds float* %tmp20849, i64 1
- %tmp20851 = getelementptr inbounds float* %tmp20850, i64 1
- %tmp20852 = getelementptr inbounds float* %tmp20851, i64 1
- %tmp20853 = getelementptr inbounds float* %tmp20852, i64 1
- %tmp20854 = getelementptr inbounds float* %tmp20853, i64 1
- %tmp20855 = getelementptr inbounds float* %tmp20854, i64 1
- %tmp20856 = getelementptr inbounds float* %tmp20855, i64 1
- %tmp20857 = getelementptr inbounds float* %tmp20856, i64 1
- %tmp20858 = getelementptr inbounds float* %tmp20857, i64 1
- %tmp20859 = getelementptr inbounds float* %tmp20858, i64 1
- %tmp20860 = getelementptr inbounds float* %tmp20859, i64 1
- %tmp20861 = getelementptr inbounds float* %tmp20860, i64 1
- %tmp20862 = getelementptr inbounds float* %tmp20861, i64 1
- %tmp20863 = getelementptr inbounds float* %tmp20862, i64 1
- %tmp20864 = getelementptr inbounds float* %tmp20863, i64 1
- %tmp20865 = getelementptr inbounds float* %tmp20864, i64 1
- %tmp20866 = getelementptr inbounds float* %tmp20865, i64 1
- %tmp20867 = getelementptr inbounds float* %tmp20866, i64 1
- %tmp20868 = getelementptr inbounds float* %tmp20867, i64 1
- %tmp20869 = getelementptr inbounds float* %tmp20868, i64 1
- %tmp20870 = getelementptr inbounds float* %tmp20869, i64 1
- %tmp20871 = getelementptr inbounds float* %tmp20870, i64 1
- %tmp20872 = getelementptr inbounds float* %tmp20871, i64 1
- %tmp20873 = getelementptr inbounds float* %tmp20872, i64 1
- %tmp20874 = getelementptr inbounds float* %tmp20873, i64 1
- %tmp20875 = getelementptr inbounds float* %tmp20874, i64 1
- %tmp20876 = getelementptr inbounds float* %tmp20875, i64 1
- %tmp20877 = getelementptr inbounds float* %tmp20876, i64 1
- %tmp20878 = getelementptr inbounds float* %tmp20877, i64 1
- %tmp20879 = getelementptr inbounds float* %tmp20878, i64 1
- %tmp20880 = getelementptr inbounds float* %tmp20879, i64 1
- %tmp20881 = getelementptr inbounds float* %tmp20880, i64 1
- %tmp20882 = getelementptr inbounds float* %tmp20881, i64 1
- %tmp20883 = getelementptr inbounds float* %tmp20882, i64 1
- %tmp20884 = getelementptr inbounds float* %tmp20883, i64 1
- %tmp20885 = getelementptr inbounds float* %tmp20884, i64 1
- %tmp20886 = getelementptr inbounds float* %tmp20885, i64 1
- %tmp20887 = getelementptr inbounds float* %tmp20886, i64 1
- %tmp20888 = getelementptr inbounds float* %tmp20887, i64 1
- %tmp20889 = getelementptr inbounds float* %tmp20888, i64 1
- %tmp20890 = getelementptr inbounds float* %tmp20889, i64 1
- %tmp20891 = getelementptr inbounds float* %tmp20890, i64 1
- %tmp20892 = getelementptr inbounds float* %tmp20891, i64 1
- %tmp20893 = getelementptr inbounds float* %tmp20892, i64 1
- %tmp20894 = getelementptr inbounds float* %tmp20893, i64 1
- %tmp20895 = getelementptr inbounds float* %tmp20894, i64 1
- %tmp20896 = getelementptr inbounds float* %tmp20895, i64 1
- %tmp20897 = getelementptr inbounds float* %tmp20896, i64 1
- %tmp20898 = getelementptr inbounds float* %tmp20897, i64 1
- %tmp20899 = getelementptr inbounds float* %tmp20898, i64 1
- %tmp20900 = getelementptr inbounds float* %tmp20899, i64 1
- %tmp20901 = getelementptr inbounds float* %tmp20900, i64 1
- %tmp20902 = getelementptr inbounds float* %tmp20901, i64 1
- %tmp20903 = getelementptr inbounds float* %tmp20902, i64 1
- %tmp20904 = getelementptr inbounds float* %tmp20903, i64 1
- %tmp20905 = getelementptr inbounds float* %tmp20904, i64 1
- %tmp20906 = getelementptr inbounds float* %tmp20905, i64 1
- %tmp20907 = getelementptr inbounds float* %tmp20906, i64 1
- %tmp20908 = getelementptr inbounds float* %tmp20907, i64 1
- %tmp20909 = getelementptr inbounds float* %tmp20908, i64 1
- %tmp20910 = getelementptr inbounds float* %tmp20909, i64 1
- %tmp20911 = getelementptr inbounds float* %tmp20910, i64 1
- %tmp20912 = getelementptr inbounds float* %tmp20911, i64 1
- %tmp20913 = getelementptr inbounds float* %tmp20912, i64 1
- %tmp20914 = getelementptr inbounds float* %tmp20913, i64 1
- %tmp20915 = getelementptr inbounds float* %tmp20914, i64 1
- %tmp20916 = getelementptr inbounds float* %tmp20915, i64 1
- %tmp20917 = getelementptr inbounds float* %tmp20916, i64 1
- %tmp20918 = getelementptr inbounds float* %tmp20917, i64 1
- %tmp20919 = getelementptr inbounds float* %tmp20918, i64 1
- %tmp20920 = getelementptr inbounds float* %tmp20919, i64 1
- %tmp20921 = getelementptr inbounds float* %tmp20920, i64 1
- %tmp20922 = getelementptr inbounds float* %tmp20921, i64 1
- %tmp20923 = getelementptr inbounds float* %tmp20922, i64 1
- %tmp20924 = getelementptr inbounds float* %tmp20923, i64 1
- %tmp20925 = getelementptr inbounds float* %tmp20924, i64 1
- %tmp20926 = getelementptr inbounds float* %tmp20925, i64 1
- %tmp20927 = getelementptr inbounds float* %tmp20926, i64 1
- %tmp20928 = getelementptr inbounds float* %tmp20927, i64 1
- %tmp20929 = getelementptr inbounds float* %tmp20928, i64 1
- %tmp20930 = getelementptr inbounds float* %tmp20929, i64 1
- %tmp20931 = getelementptr inbounds float* %tmp20930, i64 1
- %tmp20932 = getelementptr inbounds float* %tmp20931, i64 1
- %tmp20933 = getelementptr inbounds float* %tmp20932, i64 1
- %tmp20934 = getelementptr inbounds float* %tmp20933, i64 1
- %tmp20935 = getelementptr inbounds float* %tmp20934, i64 1
- %tmp20936 = getelementptr inbounds float* %tmp20935, i64 1
- %tmp20937 = getelementptr inbounds float* %tmp20936, i64 1
- %tmp20938 = getelementptr inbounds float* %tmp20937, i64 1
- %tmp20939 = getelementptr inbounds float* %tmp20938, i64 1
- %tmp20940 = getelementptr inbounds float* %tmp20939, i64 1
- %tmp20941 = getelementptr inbounds float* %tmp20940, i64 1
- %tmp20942 = getelementptr inbounds float* %tmp20941, i64 1
- %tmp20943 = getelementptr inbounds float* %tmp20942, i64 1
- %tmp20944 = getelementptr inbounds float* %tmp20943, i64 1
- %tmp20945 = getelementptr inbounds float* %tmp20944, i64 1
- %tmp20946 = getelementptr inbounds float* %tmp20945, i64 1
- %tmp20947 = getelementptr inbounds float* %tmp20946, i64 1
- %tmp20948 = getelementptr inbounds float* %tmp20947, i64 1
- %tmp20949 = getelementptr inbounds float* %tmp20948, i64 1
- %tmp20950 = getelementptr inbounds float* %tmp20949, i64 1
- %tmp20951 = getelementptr inbounds float* %tmp20950, i64 1
- %tmp20952 = getelementptr inbounds float* %tmp20951, i64 1
- %tmp20953 = getelementptr inbounds float* %tmp20952, i64 1
- %tmp20954 = getelementptr inbounds float* %tmp20953, i64 1
- %tmp20955 = getelementptr inbounds float* %tmp20954, i64 1
- %tmp20956 = getelementptr inbounds float* %tmp20955, i64 1
- %tmp20957 = getelementptr inbounds float* %tmp20956, i64 1
- %tmp20958 = getelementptr inbounds float* %tmp20957, i64 1
- %tmp20959 = getelementptr inbounds float* %tmp20958, i64 1
- %tmp20960 = getelementptr inbounds float* %tmp20959, i64 1
- %tmp20961 = getelementptr inbounds float* %tmp20960, i64 1
- %tmp20962 = getelementptr inbounds float* %tmp20961, i64 1
- %tmp20963 = getelementptr inbounds float* %tmp20962, i64 1
- %tmp20964 = getelementptr inbounds float* %tmp20963, i64 1
- %tmp20965 = getelementptr inbounds float* %tmp20964, i64 1
- %tmp20966 = getelementptr inbounds float* %tmp20965, i64 1
- %tmp20967 = getelementptr inbounds float* %tmp20966, i64 1
- %tmp20968 = getelementptr inbounds float* %tmp20967, i64 1
- %tmp20969 = getelementptr inbounds float* %tmp20968, i64 1
- %tmp20970 = getelementptr inbounds float* %tmp20969, i64 1
- %tmp20971 = getelementptr inbounds float* %tmp20970, i64 1
- %tmp20972 = getelementptr inbounds float* %tmp20971, i64 1
- %tmp20973 = getelementptr inbounds float* %tmp20972, i64 1
- %tmp20974 = getelementptr inbounds float* %tmp20973, i64 1
- %tmp20975 = getelementptr inbounds float* %tmp20974, i64 1
- %tmp20976 = getelementptr inbounds float* %tmp20975, i64 1
- %tmp20977 = getelementptr inbounds float* %tmp20976, i64 1
- %tmp20978 = getelementptr inbounds float* %tmp20977, i64 1
- %tmp20979 = getelementptr inbounds float* %tmp20978, i64 1
- %tmp20980 = getelementptr inbounds float* %tmp20979, i64 1
- %tmp20981 = getelementptr inbounds float* %tmp20980, i64 1
- %tmp20982 = getelementptr inbounds float* %tmp20981, i64 1
- %tmp20983 = getelementptr inbounds float* %tmp20982, i64 1
- %tmp20984 = getelementptr inbounds float* %tmp20983, i64 1
- %tmp20985 = getelementptr inbounds float* %tmp20984, i64 1
- %tmp20986 = getelementptr inbounds float* %tmp20985, i64 1
- %tmp20987 = getelementptr inbounds float* %tmp20986, i64 1
- %tmp20988 = getelementptr inbounds float* %tmp20987, i64 1
- %tmp20989 = getelementptr inbounds float* %tmp20988, i64 1
- %tmp20990 = getelementptr inbounds float* %tmp20989, i64 1
- %tmp20991 = getelementptr inbounds float* %tmp20990, i64 1
- %tmp20992 = getelementptr inbounds float* %tmp20991, i64 1
- %tmp20993 = getelementptr inbounds float* %tmp20992, i64 1
- %tmp20994 = getelementptr inbounds float* %tmp20993, i64 1
- %tmp20995 = getelementptr inbounds float* %tmp20994, i64 1
- %tmp20996 = getelementptr inbounds float* %tmp20995, i64 1
- %tmp20997 = getelementptr inbounds float* %tmp20996, i64 1
- %tmp20998 = getelementptr inbounds float* %tmp20997, i64 1
- %tmp20999 = getelementptr inbounds float* %tmp20998, i64 1
- %tmp21000 = getelementptr inbounds float* %tmp20999, i64 1
- %tmp21001 = getelementptr inbounds float* %tmp21000, i64 1
- %tmp21002 = getelementptr inbounds float* %tmp21001, i64 1
- %tmp21003 = getelementptr inbounds float* %tmp21002, i64 1
- %tmp21004 = getelementptr inbounds float* %tmp21003, i64 1
- %tmp21005 = getelementptr inbounds float* %tmp21004, i64 1
- %tmp21006 = getelementptr inbounds float* %tmp21005, i64 1
- %tmp21007 = getelementptr inbounds float* %tmp21006, i64 1
- %tmp21008 = getelementptr inbounds float* %tmp21007, i64 1
- %tmp21009 = getelementptr inbounds float* %tmp21008, i64 1
- %tmp21010 = getelementptr inbounds float* %tmp21009, i64 1
- %tmp21011 = getelementptr inbounds float* %tmp21010, i64 1
- %tmp21012 = getelementptr inbounds float* %tmp21011, i64 1
- %tmp21013 = getelementptr inbounds float* %tmp21012, i64 1
- %tmp21014 = getelementptr inbounds float* %tmp21013, i64 1
- %tmp21015 = getelementptr inbounds float* %tmp21014, i64 1
- %tmp21016 = getelementptr inbounds float* %tmp21015, i64 1
- %tmp21017 = getelementptr inbounds float* %tmp21016, i64 1
- %tmp21018 = getelementptr inbounds float* %tmp21017, i64 1
- %tmp21019 = getelementptr inbounds float* %tmp21018, i64 1
- %tmp21020 = getelementptr inbounds float* %tmp21019, i64 1
- %tmp21021 = getelementptr inbounds float* %tmp21020, i64 1
- %tmp21022 = getelementptr inbounds float* %tmp21021, i64 1
- %tmp21023 = getelementptr inbounds float* %tmp21022, i64 1
- %tmp21024 = getelementptr inbounds float* %tmp21023, i64 1
- %tmp21025 = getelementptr inbounds float* %tmp21024, i64 1
- %tmp21026 = getelementptr inbounds float* %tmp21025, i64 1
- %tmp21027 = getelementptr inbounds float* %tmp21026, i64 1
- %tmp21028 = getelementptr inbounds float* %tmp21027, i64 1
- %tmp21029 = getelementptr inbounds float* %tmp21028, i64 1
- %tmp21030 = getelementptr inbounds float* %tmp21029, i64 1
- %tmp21031 = getelementptr inbounds float* %tmp21030, i64 1
- %tmp21032 = getelementptr inbounds float* %tmp21031, i64 1
- %tmp21033 = getelementptr inbounds float* %tmp21032, i64 1
- %tmp21034 = getelementptr inbounds float* %tmp21033, i64 1
- %tmp21035 = getelementptr inbounds float* %tmp21034, i64 1
- %tmp21036 = getelementptr inbounds float* %tmp21035, i64 1
- %tmp21037 = getelementptr inbounds float* %tmp21036, i64 1
- %tmp21038 = getelementptr inbounds float* %tmp21037, i64 1
- %tmp21039 = getelementptr inbounds float* %tmp21038, i64 1
- %tmp21040 = getelementptr inbounds float* %tmp21039, i64 1
- %tmp21041 = getelementptr inbounds float* %tmp21040, i64 1
- %tmp21042 = getelementptr inbounds float* %tmp21041, i64 1
- %tmp21043 = getelementptr inbounds float* %tmp21042, i64 1
- %tmp21044 = getelementptr inbounds float* %tmp21043, i64 1
- %tmp21045 = getelementptr inbounds float* %tmp21044, i64 1
- %tmp21046 = getelementptr inbounds float* %tmp21045, i64 1
- %tmp21047 = getelementptr inbounds float* %tmp21046, i64 1
- %tmp21048 = getelementptr inbounds float* %tmp21047, i64 1
- %tmp21049 = getelementptr inbounds float* %tmp21048, i64 1
- %tmp21050 = getelementptr inbounds float* %tmp21049, i64 1
- %tmp21051 = getelementptr inbounds float* %tmp21050, i64 1
- %tmp21052 = getelementptr inbounds float* %tmp21051, i64 1
- %tmp21053 = getelementptr inbounds float* %tmp21052, i64 1
- %tmp21054 = getelementptr inbounds float* %tmp21053, i64 1
- %tmp21055 = getelementptr inbounds float* %tmp21054, i64 1
- %tmp21056 = getelementptr inbounds float* %tmp21055, i64 1
- %tmp21057 = getelementptr inbounds float* %tmp21056, i64 1
- %tmp21058 = getelementptr inbounds float* %tmp21057, i64 1
- %tmp21059 = getelementptr inbounds float* %tmp21058, i64 1
- %tmp21060 = getelementptr inbounds float* %tmp21059, i64 1
- %tmp21061 = getelementptr inbounds float* %tmp21060, i64 1
- %tmp21062 = getelementptr inbounds float* %tmp21061, i64 1
- %tmp21063 = getelementptr inbounds float* %tmp21062, i64 1
- %tmp21064 = getelementptr inbounds float* %tmp21063, i64 1
- %tmp21065 = getelementptr inbounds float* %tmp21064, i64 1
- %tmp21066 = getelementptr inbounds float* %tmp21065, i64 1
- %tmp21067 = getelementptr inbounds float* %tmp21066, i64 1
- %tmp21068 = getelementptr inbounds float* %tmp21067, i64 1
- %tmp21069 = getelementptr inbounds float* %tmp21068, i64 1
- %tmp21070 = getelementptr inbounds float* %tmp21069, i64 1
- %tmp21071 = getelementptr inbounds float* %tmp21070, i64 1
- %tmp21072 = getelementptr inbounds float* %tmp21071, i64 1
- %tmp21073 = getelementptr inbounds float* %tmp21072, i64 1
- %tmp21074 = getelementptr inbounds float* %tmp21073, i64 1
- %tmp21075 = getelementptr inbounds float* %tmp21074, i64 1
- %tmp21076 = getelementptr inbounds float* %tmp21075, i64 1
- %tmp21077 = getelementptr inbounds float* %tmp21076, i64 1
- %tmp21078 = getelementptr inbounds float* %tmp21077, i64 1
- %tmp21079 = getelementptr inbounds float* %tmp21078, i64 1
- %tmp21080 = getelementptr inbounds float* %tmp21079, i64 1
- %tmp21081 = getelementptr inbounds float* %tmp21080, i64 1
- %tmp21082 = getelementptr inbounds float* %tmp21081, i64 1
- %tmp21083 = getelementptr inbounds float* %tmp21082, i64 1
- %tmp21084 = getelementptr inbounds float* %tmp21083, i64 1
- %tmp21085 = getelementptr inbounds float* %tmp21084, i64 1
- %tmp21086 = getelementptr inbounds float* %tmp21085, i64 1
- %tmp21087 = getelementptr inbounds float* %tmp21086, i64 1
- %tmp21088 = getelementptr inbounds float* %tmp21087, i64 1
- %tmp21089 = getelementptr inbounds float* %tmp21088, i64 1
- %tmp21090 = getelementptr inbounds float* %tmp21089, i64 1
- %tmp21091 = getelementptr inbounds float* %tmp21090, i64 1
- %tmp21092 = getelementptr inbounds float* %tmp21091, i64 1
- %tmp21093 = getelementptr inbounds float* %tmp21092, i64 1
- %tmp21094 = getelementptr inbounds float* %tmp21093, i64 1
- %tmp21095 = getelementptr inbounds float* %tmp21094, i64 1
- %tmp21096 = getelementptr inbounds float* %tmp21095, i64 1
- %tmp21097 = getelementptr inbounds float* %tmp21096, i64 1
- %tmp21098 = getelementptr inbounds float* %tmp21097, i64 1
- %tmp21099 = getelementptr inbounds float* %tmp21098, i64 1
- %tmp21100 = getelementptr inbounds float* %tmp21099, i64 1
- %tmp21101 = getelementptr inbounds float* %tmp21100, i64 1
- %tmp21102 = getelementptr inbounds float* %tmp21101, i64 1
- %tmp21103 = getelementptr inbounds float* %tmp21102, i64 1
- %tmp21104 = getelementptr inbounds float* %tmp21103, i64 1
- %tmp21105 = getelementptr inbounds float* %tmp21104, i64 1
- %tmp21106 = getelementptr inbounds float* %tmp21105, i64 1
- %tmp21107 = getelementptr inbounds float* %tmp21106, i64 1
- %tmp21108 = getelementptr inbounds float* %tmp21107, i64 1
- %tmp21109 = getelementptr inbounds float* %tmp21108, i64 1
- %tmp21110 = getelementptr inbounds float* %tmp21109, i64 1
- %tmp21111 = getelementptr inbounds float* %tmp21110, i64 1
- %tmp21112 = getelementptr inbounds float* %tmp21111, i64 1
- %tmp21113 = getelementptr inbounds float* %tmp21112, i64 1
- %tmp21114 = getelementptr inbounds float* %tmp21113, i64 1
- %tmp21115 = getelementptr inbounds float* %tmp21114, i64 1
- %tmp21116 = getelementptr inbounds float* %tmp21115, i64 1
- %tmp21117 = getelementptr inbounds float* %tmp21116, i64 1
- %tmp21118 = getelementptr inbounds float* %tmp21117, i64 1
- %tmp21119 = getelementptr inbounds float* %tmp21118, i64 1
- %tmp21120 = getelementptr inbounds float* %tmp21119, i64 1
- %tmp21121 = getelementptr inbounds float* %tmp21120, i64 1
- %tmp21122 = getelementptr inbounds float* %tmp21121, i64 1
- %tmp21123 = getelementptr inbounds float* %tmp21122, i64 1
- %tmp21124 = getelementptr inbounds float* %tmp21123, i64 1
- %tmp21125 = getelementptr inbounds float* %tmp21124, i64 1
- %tmp21126 = getelementptr inbounds float* %tmp21125, i64 1
- %tmp21127 = getelementptr inbounds float* %tmp21126, i64 1
- %tmp21128 = getelementptr inbounds float* %tmp21127, i64 1
- %tmp21129 = getelementptr inbounds float* %tmp21128, i64 1
- %tmp21130 = getelementptr inbounds float* %tmp21129, i64 1
- %tmp21131 = getelementptr inbounds float* %tmp21130, i64 1
- %tmp21132 = getelementptr inbounds float* %tmp21131, i64 1
- %tmp21133 = getelementptr inbounds float* %tmp21132, i64 1
- %tmp21134 = getelementptr inbounds float* %tmp21133, i64 1
- %tmp21135 = getelementptr inbounds float* %tmp21134, i64 1
- %tmp21136 = getelementptr inbounds float* %tmp21135, i64 1
- %tmp21137 = getelementptr inbounds float* %tmp21136, i64 1
- %tmp21138 = getelementptr inbounds float* %tmp21137, i64 1
- %tmp21139 = getelementptr inbounds float* %tmp21138, i64 1
- %tmp21140 = getelementptr inbounds float* %tmp21139, i64 1
- %tmp21141 = getelementptr inbounds float* %tmp21140, i64 1
- %tmp21142 = getelementptr inbounds float* %tmp21141, i64 1
- %tmp21143 = getelementptr inbounds float* %tmp21142, i64 1
- %tmp21144 = getelementptr inbounds float* %tmp21143, i64 1
- %tmp21145 = getelementptr inbounds float* %tmp21144, i64 1
- %tmp21146 = getelementptr inbounds float* %tmp21145, i64 1
- %tmp21147 = getelementptr inbounds float* %tmp21146, i64 1
- %tmp21148 = getelementptr inbounds float* %tmp21147, i64 1
- %tmp21149 = getelementptr inbounds float* %tmp21148, i64 1
- %tmp21150 = getelementptr inbounds float* %tmp21149, i64 1
- %tmp21151 = getelementptr inbounds float* %tmp21150, i64 1
- %tmp21152 = getelementptr inbounds float* %tmp21151, i64 1
- %tmp21153 = getelementptr inbounds float* %tmp21152, i64 1
- %tmp21154 = getelementptr inbounds float* %tmp21153, i64 1
- %tmp21155 = getelementptr inbounds float* %tmp21154, i64 1
- %tmp21156 = getelementptr inbounds float* %tmp21155, i64 1
- %tmp21157 = getelementptr inbounds float* %tmp21156, i64 1
- %tmp21158 = getelementptr inbounds float* %tmp21157, i64 1
- %tmp21159 = getelementptr inbounds float* %tmp21158, i64 1
- %tmp21160 = getelementptr inbounds float* %tmp21159, i64 1
- %tmp21161 = getelementptr inbounds float* %tmp21160, i64 1
- %tmp21162 = getelementptr inbounds float* %tmp21161, i64 1
- %tmp21163 = getelementptr inbounds float* %tmp21162, i64 1
- %tmp21164 = getelementptr inbounds float* %tmp21163, i64 1
- %tmp21165 = getelementptr inbounds float* %tmp21164, i64 1
- %tmp21166 = getelementptr inbounds float* %tmp21165, i64 1
- %tmp21167 = getelementptr inbounds float* %tmp21166, i64 1
- %tmp21168 = getelementptr inbounds float* %tmp21167, i64 1
- %tmp21169 = getelementptr inbounds float* %tmp21168, i64 1
- %tmp21170 = getelementptr inbounds float* %tmp21169, i64 1
- %tmp21171 = getelementptr inbounds float* %tmp21170, i64 1
- %tmp21172 = getelementptr inbounds float* %tmp21171, i64 1
- %tmp21173 = getelementptr inbounds float* %tmp21172, i64 1
- %tmp21174 = getelementptr inbounds float* %tmp21173, i64 1
- %tmp21175 = getelementptr inbounds float* %tmp21174, i64 1
- %tmp21176 = getelementptr inbounds float* %tmp21175, i64 1
- %tmp21177 = getelementptr inbounds float* %tmp21176, i64 1
- %tmp21178 = getelementptr inbounds float* %tmp21177, i64 1
- %tmp21179 = getelementptr inbounds float* %tmp21178, i64 1
- %tmp21180 = getelementptr inbounds float* %tmp21179, i64 1
- %tmp21181 = getelementptr inbounds float* %tmp21180, i64 1
- %tmp21182 = getelementptr inbounds float* %tmp21181, i64 1
- %tmp21183 = getelementptr inbounds float* %tmp21182, i64 1
- %tmp21184 = getelementptr inbounds float* %tmp21183, i64 1
- %tmp21185 = getelementptr inbounds float* %tmp21184, i64 1
- %tmp21186 = getelementptr inbounds float* %tmp21185, i64 1
- %tmp21187 = getelementptr inbounds float* %tmp21186, i64 1
- %tmp21188 = getelementptr inbounds float* %tmp21187, i64 1
- %tmp21189 = getelementptr inbounds float* %tmp21188, i64 1
- %tmp21190 = getelementptr inbounds float* %tmp21189, i64 1
- %tmp21191 = getelementptr inbounds float* %tmp21190, i64 1
- %tmp21192 = getelementptr inbounds float* %tmp21191, i64 1
- %tmp21193 = getelementptr inbounds float* %tmp21192, i64 1
- %tmp21194 = getelementptr inbounds float* %tmp21193, i64 1
- %tmp21195 = getelementptr inbounds float* %tmp21194, i64 1
- %tmp21196 = getelementptr inbounds float* %tmp21195, i64 1
- %tmp21197 = getelementptr inbounds float* %tmp21196, i64 1
- %tmp21198 = getelementptr inbounds float* %tmp21197, i64 1
- %tmp21199 = getelementptr inbounds float* %tmp21198, i64 1
- %tmp21200 = getelementptr inbounds float* %tmp21199, i64 1
- %tmp21201 = getelementptr inbounds float* %tmp21200, i64 1
- %tmp21202 = getelementptr inbounds float* %tmp21201, i64 1
- %tmp21203 = getelementptr inbounds float* %tmp21202, i64 1
- %tmp21204 = getelementptr inbounds float* %tmp21203, i64 1
- %tmp21205 = getelementptr inbounds float* %tmp21204, i64 1
- %tmp21206 = getelementptr inbounds float* %tmp21205, i64 1
- %tmp21207 = getelementptr inbounds float* %tmp21206, i64 1
- %tmp21208 = getelementptr inbounds float* %tmp21207, i64 1
- %tmp21209 = getelementptr inbounds float* %tmp21208, i64 1
- %tmp21210 = getelementptr inbounds float* %tmp21209, i64 1
- %tmp21211 = getelementptr inbounds float* %tmp21210, i64 1
- %tmp21212 = getelementptr inbounds float* %tmp21211, i64 1
- %tmp21213 = getelementptr inbounds float* %tmp21212, i64 1
- %tmp21214 = getelementptr inbounds float* %tmp21213, i64 1
- %tmp21215 = getelementptr inbounds float* %tmp21214, i64 1
- %tmp21216 = getelementptr inbounds float* %tmp21215, i64 1
- %tmp21217 = getelementptr inbounds float* %tmp21216, i64 1
- %tmp21218 = getelementptr inbounds float* %tmp21217, i64 1
- %tmp21219 = getelementptr inbounds float* %tmp21218, i64 1
- %tmp21220 = getelementptr inbounds float* %tmp21219, i64 1
- %tmp21221 = getelementptr inbounds float* %tmp21220, i64 1
- %tmp21222 = getelementptr inbounds float* %tmp21221, i64 1
- %tmp21223 = getelementptr inbounds float* %tmp21222, i64 1
- %tmp21224 = getelementptr inbounds float* %tmp21223, i64 1
- %tmp21225 = getelementptr inbounds float* %tmp21224, i64 1
- %tmp21226 = getelementptr inbounds float* %tmp21225, i64 1
- %tmp21227 = getelementptr inbounds float* %tmp21226, i64 1
- %tmp21228 = getelementptr inbounds float* %tmp21227, i64 1
- %tmp21229 = getelementptr inbounds float* %tmp21228, i64 1
- %tmp21230 = getelementptr inbounds float* %tmp21229, i64 1
- %tmp21231 = getelementptr inbounds float* %tmp21230, i64 1
- %tmp21232 = getelementptr inbounds float* %tmp21231, i64 1
- %tmp21233 = getelementptr inbounds float* %tmp21232, i64 1
- %tmp21234 = getelementptr inbounds float* %tmp21233, i64 1
- %tmp21235 = getelementptr inbounds float* %tmp21234, i64 1
- %tmp21236 = getelementptr inbounds float* %tmp21235, i64 1
- %tmp21237 = getelementptr inbounds float* %tmp21236, i64 1
- %tmp21238 = getelementptr inbounds float* %tmp21237, i64 1
- %tmp21239 = getelementptr inbounds float* %tmp21238, i64 1
- %tmp21240 = getelementptr inbounds float* %tmp21239, i64 1
- %tmp21241 = getelementptr inbounds float* %tmp21240, i64 1
- %tmp21242 = getelementptr inbounds float* %tmp21241, i64 1
- %tmp21243 = getelementptr inbounds float* %tmp21242, i64 1
- %tmp21244 = getelementptr inbounds float* %tmp21243, i64 1
- %tmp21245 = getelementptr inbounds float* %tmp21244, i64 1
- %tmp21246 = getelementptr inbounds float* %tmp21245, i64 1
- %tmp21247 = getelementptr inbounds float* %tmp21246, i64 1
- %tmp21248 = getelementptr inbounds float* %tmp21247, i64 1
- %tmp21249 = getelementptr inbounds float* %tmp21248, i64 1
- %tmp21250 = getelementptr inbounds float* %tmp21249, i64 1
- %tmp21251 = getelementptr inbounds float* %tmp21250, i64 1
- %tmp21252 = getelementptr inbounds float* %tmp21251, i64 1
- %tmp21253 = getelementptr inbounds float* %tmp21252, i64 1
- %tmp21254 = getelementptr inbounds float* %tmp21253, i64 1
- %tmp21255 = getelementptr inbounds float* %tmp21254, i64 1
- %tmp21256 = getelementptr inbounds float* %tmp21255, i64 1
- %tmp21257 = getelementptr inbounds float* %tmp21256, i64 1
- %tmp21258 = getelementptr inbounds float* %tmp21257, i64 1
- %tmp21259 = getelementptr inbounds float* %tmp21258, i64 1
- %tmp21260 = getelementptr inbounds float* %tmp21259, i64 1
- %tmp21261 = getelementptr inbounds float* %tmp21260, i64 1
- %tmp21262 = getelementptr inbounds float* %tmp21261, i64 1
- %tmp21263 = getelementptr inbounds float* %tmp21262, i64 1
- %tmp21264 = getelementptr inbounds float* %tmp21263, i64 1
- %tmp21265 = getelementptr inbounds float* %tmp21264, i64 1
- %tmp21266 = getelementptr inbounds float* %tmp21265, i64 1
- %tmp21267 = getelementptr inbounds float* %tmp21266, i64 1
- %tmp21268 = getelementptr inbounds float* %tmp21267, i64 1
- %tmp21269 = getelementptr inbounds float* %tmp21268, i64 1
- %tmp21270 = getelementptr inbounds float* %tmp21269, i64 1
- %tmp21271 = getelementptr inbounds float* %tmp21270, i64 1
- %tmp21272 = getelementptr inbounds float* %tmp21271, i64 1
- %tmp21273 = getelementptr inbounds float* %tmp21272, i64 1
- %tmp21274 = getelementptr inbounds float* %tmp21273, i64 1
- %tmp21275 = getelementptr inbounds float* %tmp21274, i64 1
- %tmp21276 = getelementptr inbounds float* %tmp21275, i64 1
- %tmp21277 = getelementptr inbounds float* %tmp21276, i64 1
- %tmp21278 = getelementptr inbounds float* %tmp21277, i64 1
- %tmp21279 = getelementptr inbounds float* %tmp21278, i64 1
- %tmp21280 = getelementptr inbounds float* %tmp21279, i64 1
- %tmp21281 = getelementptr inbounds float* %tmp21280, i64 1
- %tmp21282 = getelementptr inbounds float* %tmp21281, i64 1
- %tmp21283 = getelementptr inbounds float* %tmp21282, i64 1
- %tmp21284 = getelementptr inbounds float* %tmp21283, i64 1
- %tmp21285 = getelementptr inbounds float* %tmp21284, i64 1
- %tmp21286 = getelementptr inbounds float* %tmp21285, i64 1
- %tmp21287 = getelementptr inbounds float* %tmp21286, i64 1
- %tmp21288 = getelementptr inbounds float* %tmp21287, i64 1
- %tmp21289 = getelementptr inbounds float* %tmp21288, i64 1
- %tmp21290 = getelementptr inbounds float* %tmp21289, i64 1
- %tmp21291 = getelementptr inbounds float* %tmp21290, i64 1
- %tmp21292 = getelementptr inbounds float* %tmp21291, i64 1
- %tmp21293 = getelementptr inbounds float* %tmp21292, i64 1
- %tmp21294 = getelementptr inbounds float* %tmp21293, i64 1
- %tmp21295 = getelementptr inbounds float* %tmp21294, i64 1
- %tmp21296 = getelementptr inbounds float* %tmp21295, i64 1
- %tmp21297 = getelementptr inbounds float* %tmp21296, i64 1
- %tmp21298 = getelementptr inbounds float* %tmp21297, i64 1
- %tmp21299 = getelementptr inbounds float* %tmp21298, i64 1
- %tmp21300 = getelementptr inbounds float* %tmp21299, i64 1
- %tmp21301 = getelementptr inbounds float* %tmp21300, i64 1
- %tmp21302 = getelementptr inbounds float* %tmp21301, i64 1
- %tmp21303 = getelementptr inbounds float* %tmp21302, i64 1
- %tmp21304 = getelementptr inbounds float* %tmp21303, i64 1
- %tmp21305 = getelementptr inbounds float* %tmp21304, i64 1
- %tmp21306 = getelementptr inbounds float* %tmp21305, i64 1
- %tmp21307 = getelementptr inbounds float* %tmp21306, i64 1
- %tmp21308 = getelementptr inbounds float* %tmp21307, i64 1
- %tmp21309 = getelementptr inbounds float* %tmp21308, i64 1
- %tmp21310 = getelementptr inbounds float* %tmp21309, i64 1
- %tmp21311 = getelementptr inbounds float* %tmp21310, i64 1
- %tmp21312 = getelementptr inbounds float* %tmp21311, i64 1
- %tmp21313 = getelementptr inbounds float* %tmp21312, i64 1
- %tmp21314 = getelementptr inbounds float* %tmp21313, i64 1
- %tmp21315 = getelementptr inbounds float* %tmp21314, i64 1
- %tmp21316 = getelementptr inbounds float* %tmp21315, i64 1
- %tmp21317 = getelementptr inbounds float* %tmp21316, i64 1
- %tmp21318 = getelementptr inbounds float* %tmp21317, i64 1
- %tmp21319 = getelementptr inbounds float* %tmp21318, i64 1
- %tmp21320 = getelementptr inbounds float* %tmp21319, i64 1
- %tmp21321 = getelementptr inbounds float* %tmp21320, i64 1
- %tmp21322 = getelementptr inbounds float* %tmp21321, i64 1
- %tmp21323 = getelementptr inbounds float* %tmp21322, i64 1
- %tmp21324 = getelementptr inbounds float* %tmp21323, i64 1
- %tmp21325 = getelementptr inbounds float* %tmp21324, i64 1
- %tmp21326 = getelementptr inbounds float* %tmp21325, i64 1
- %tmp21327 = getelementptr inbounds float* %tmp21326, i64 1
- %tmp21328 = getelementptr inbounds float* %tmp21327, i64 1
- %tmp21329 = getelementptr inbounds float* %tmp21328, i64 1
- %tmp21330 = getelementptr inbounds float* %tmp21329, i64 1
- %tmp21331 = getelementptr inbounds float* %tmp21330, i64 1
- %tmp21332 = getelementptr inbounds float* %tmp21331, i64 1
- %tmp21333 = getelementptr inbounds float* %tmp21332, i64 1
- %tmp21334 = getelementptr inbounds float* %tmp21333, i64 1
- %tmp21335 = getelementptr inbounds float* %tmp21334, i64 1
- %tmp21336 = getelementptr inbounds float* %tmp21335, i64 1
- %tmp21337 = getelementptr inbounds float* %tmp21336, i64 1
- %tmp21338 = getelementptr inbounds float* %tmp21337, i64 1
- %tmp21339 = getelementptr inbounds float* %tmp21338, i64 1
- %tmp21340 = getelementptr inbounds float* %tmp21339, i64 1
- %tmp21341 = getelementptr inbounds float* %tmp21340, i64 1
- %tmp21342 = getelementptr inbounds float* %tmp21341, i64 1
- %tmp21343 = getelementptr inbounds float* %tmp21342, i64 1
- %tmp21344 = getelementptr inbounds float* %tmp21343, i64 1
- %tmp21345 = getelementptr inbounds float* %tmp21344, i64 1
- %tmp21346 = getelementptr inbounds float* %tmp21345, i64 1
- %tmp21347 = getelementptr inbounds float* %tmp21346, i64 1
- %tmp21348 = getelementptr inbounds float* %tmp21347, i64 1
- %tmp21349 = getelementptr inbounds float* %tmp21348, i64 1
- %tmp21350 = getelementptr inbounds float* %tmp21349, i64 1
- %tmp21351 = getelementptr inbounds float* %tmp21350, i64 1
- %tmp21352 = getelementptr inbounds float* %tmp21351, i64 1
- %tmp21353 = getelementptr inbounds float* %tmp21352, i64 1
- %tmp21354 = getelementptr inbounds float* %tmp21353, i64 1
- %tmp21355 = getelementptr inbounds float* %tmp21354, i64 1
- %tmp21356 = getelementptr inbounds float* %tmp21355, i64 1
- %tmp21357 = getelementptr inbounds float* %tmp21356, i64 1
- %tmp21358 = getelementptr inbounds float* %tmp21357, i64 1
- %tmp21359 = getelementptr inbounds float* %tmp21358, i64 1
- %tmp21360 = getelementptr inbounds float* %tmp21359, i64 1
- %tmp21361 = getelementptr inbounds float* %tmp21360, i64 1
- %tmp21362 = getelementptr inbounds float* %tmp21361, i64 1
- %tmp21363 = getelementptr inbounds float* %tmp21362, i64 1
- %tmp21364 = getelementptr inbounds float* %tmp21363, i64 1
- %tmp21365 = getelementptr inbounds float* %tmp21364, i64 1
- %tmp21366 = getelementptr inbounds float* %tmp21365, i64 1
- %tmp21367 = getelementptr inbounds float* %tmp21366, i64 1
- %tmp21368 = getelementptr inbounds float* %tmp21367, i64 1
- %tmp21369 = getelementptr inbounds float* %tmp21368, i64 1
- %tmp21370 = getelementptr inbounds float* %tmp21369, i64 1
- %tmp21371 = getelementptr inbounds float* %tmp21370, i64 1
- %tmp21372 = getelementptr inbounds float* %tmp21371, i64 1
- %tmp21373 = getelementptr inbounds float* %tmp21372, i64 1
- %tmp21374 = getelementptr inbounds float* %tmp21373, i64 1
- %tmp21375 = getelementptr inbounds float* %tmp21374, i64 1
- %tmp21376 = getelementptr inbounds float* %tmp21375, i64 1
- %tmp21377 = getelementptr inbounds float* %tmp21376, i64 1
- %tmp21378 = getelementptr inbounds float* %tmp21377, i64 1
- %tmp21379 = getelementptr inbounds float* %tmp21378, i64 1
- %tmp21380 = getelementptr inbounds float* %tmp21379, i64 1
- %tmp21381 = getelementptr inbounds float* %tmp21380, i64 1
- %tmp21382 = getelementptr inbounds float* %tmp21381, i64 1
- %tmp21383 = getelementptr inbounds float* %tmp21382, i64 1
- %tmp21384 = getelementptr inbounds float* %tmp21383, i64 1
- %tmp21385 = getelementptr inbounds float* %tmp21384, i64 1
- %tmp21386 = getelementptr inbounds float* %tmp21385, i64 1
- %tmp21387 = getelementptr inbounds float* %tmp21386, i64 1
- %tmp21388 = getelementptr inbounds float* %tmp21387, i64 1
- %tmp21389 = getelementptr inbounds float* %tmp21388, i64 1
- %tmp21390 = getelementptr inbounds float* %tmp21389, i64 1
- %tmp21391 = getelementptr inbounds float* %tmp21390, i64 1
- %tmp21392 = getelementptr inbounds float* %tmp21391, i64 1
- %tmp21393 = getelementptr inbounds float* %tmp21392, i64 1
- %tmp21394 = getelementptr inbounds float* %tmp21393, i64 1
- %tmp21395 = getelementptr inbounds float* %tmp21394, i64 1
- %tmp21396 = getelementptr inbounds float* %tmp21395, i64 1
- %tmp21397 = getelementptr inbounds float* %tmp21396, i64 1
- %tmp21398 = getelementptr inbounds float* %tmp21397, i64 1
- %tmp21399 = getelementptr inbounds float* %tmp21398, i64 1
- %tmp21400 = getelementptr inbounds float* %tmp21399, i64 1
- %tmp21401 = getelementptr inbounds float* %tmp21400, i64 1
- %tmp21402 = getelementptr inbounds float* %tmp21401, i64 1
- %tmp21403 = getelementptr inbounds float* %tmp21402, i64 1
- %tmp21404 = getelementptr inbounds float* %tmp21403, i64 1
- %tmp21405 = getelementptr inbounds float* %tmp21404, i64 1
- %tmp21406 = getelementptr inbounds float* %tmp21405, i64 1
- %tmp21407 = getelementptr inbounds float* %tmp21406, i64 1
- %tmp21408 = getelementptr inbounds float* %tmp21407, i64 1
- %tmp21409 = getelementptr inbounds float* %tmp21408, i64 1
- %tmp21410 = getelementptr inbounds float* %tmp21409, i64 1
- %tmp21411 = getelementptr inbounds float* %tmp21410, i64 1
- %tmp21412 = getelementptr inbounds float* %tmp21411, i64 1
- %tmp21413 = getelementptr inbounds float* %tmp21412, i64 1
- %tmp21414 = getelementptr inbounds float* %tmp21413, i64 1
- %tmp21415 = getelementptr inbounds float* %tmp21414, i64 1
- %tmp21416 = getelementptr inbounds float* %tmp21415, i64 1
- %tmp21417 = getelementptr inbounds float* %tmp21416, i64 1
- %tmp21418 = getelementptr inbounds float* %tmp21417, i64 1
- %tmp21419 = getelementptr inbounds float* %tmp21418, i64 1
- %tmp21420 = getelementptr inbounds float* %tmp21419, i64 1
- %tmp21421 = getelementptr inbounds float* %tmp21420, i64 1
- %tmp21422 = getelementptr inbounds float* %tmp21421, i64 1
- %tmp21423 = getelementptr inbounds float* %tmp21422, i64 1
- %tmp21424 = getelementptr inbounds float* %tmp21423, i64 1
- %tmp21425 = getelementptr inbounds float* %tmp21424, i64 1
- %tmp21426 = getelementptr inbounds float* %tmp21425, i64 1
- %tmp21427 = getelementptr inbounds float* %tmp21426, i64 1
- %tmp21428 = getelementptr inbounds float* %tmp21427, i64 1
- %tmp21429 = getelementptr inbounds float* %tmp21428, i64 1
- %tmp21430 = getelementptr inbounds float* %tmp21429, i64 1
- %tmp21431 = getelementptr inbounds float* %tmp21430, i64 1
- %tmp21432 = getelementptr inbounds float* %tmp21431, i64 1
- %tmp21433 = getelementptr inbounds float* %tmp21432, i64 1
- %tmp21434 = getelementptr inbounds float* %tmp21433, i64 1
- %tmp21435 = getelementptr inbounds float* %tmp21434, i64 1
- %tmp21436 = getelementptr inbounds float* %tmp21435, i64 1
- %tmp21437 = getelementptr inbounds float* %tmp21436, i64 1
- %tmp21438 = getelementptr inbounds float* %tmp21437, i64 1
- %tmp21439 = getelementptr inbounds float* %tmp21438, i64 1
- %tmp21440 = getelementptr inbounds float* %tmp21439, i64 1
- %tmp21441 = getelementptr inbounds float* %tmp21440, i64 1
- %tmp21442 = getelementptr inbounds float* %tmp21441, i64 1
- %tmp21443 = getelementptr inbounds float* %tmp21442, i64 1
- %tmp21444 = getelementptr inbounds float* %tmp21443, i64 1
- %tmp21445 = getelementptr inbounds float* %tmp21444, i64 1
- %tmp21446 = getelementptr inbounds float* %tmp21445, i64 1
- %tmp21447 = getelementptr inbounds float* %tmp21446, i64 1
- %tmp21448 = getelementptr inbounds float* %tmp21447, i64 1
- %tmp21449 = getelementptr inbounds float* %tmp21448, i64 1
- %tmp21450 = getelementptr inbounds float* %tmp21449, i64 1
- %tmp21451 = getelementptr inbounds float* %tmp21450, i64 1
- %tmp21452 = getelementptr inbounds float* %tmp21451, i64 1
- %tmp21453 = getelementptr inbounds float* %tmp21452, i64 1
- %tmp21454 = getelementptr inbounds float* %tmp21453, i64 1
- %tmp21455 = getelementptr inbounds float* %tmp21454, i64 1
- %tmp21456 = getelementptr inbounds float* %tmp21455, i64 1
- %tmp21457 = getelementptr inbounds float* %tmp21456, i64 1
- %tmp21458 = getelementptr inbounds float* %tmp21457, i64 1
- %tmp21459 = getelementptr inbounds float* %tmp21458, i64 1
- %tmp21460 = getelementptr inbounds float* %tmp21459, i64 1
- %tmp21461 = getelementptr inbounds float* %tmp21460, i64 1
- %tmp21462 = getelementptr inbounds float* %tmp21461, i64 1
- %tmp21463 = getelementptr inbounds float* %tmp21462, i64 1
- %tmp21464 = getelementptr inbounds float* %tmp21463, i64 1
- %tmp21465 = getelementptr inbounds float* %tmp21464, i64 1
- %tmp21466 = getelementptr inbounds float* %tmp21465, i64 1
- %tmp21467 = getelementptr inbounds float* %tmp21466, i64 1
- %tmp21468 = getelementptr inbounds float* %tmp21467, i64 1
- %tmp21469 = getelementptr inbounds float* %tmp21468, i64 1
- %tmp21470 = getelementptr inbounds float* %tmp21469, i64 1
- %tmp21471 = getelementptr inbounds float* %tmp21470, i64 1
- %tmp21472 = getelementptr inbounds float* %tmp21471, i64 1
- %tmp21473 = getelementptr inbounds float* %tmp21472, i64 1
- %tmp21474 = getelementptr inbounds float* %tmp21473, i64 1
- %tmp21475 = getelementptr inbounds float* %tmp21474, i64 1
- %tmp21476 = getelementptr inbounds float* %tmp21475, i64 1
- %tmp21477 = getelementptr inbounds float* %tmp21476, i64 1
- %tmp21478 = getelementptr inbounds float* %tmp21477, i64 1
- %tmp21479 = getelementptr inbounds float* %tmp21478, i64 1
- %tmp21480 = getelementptr inbounds float* %tmp21479, i64 1
- %tmp21481 = getelementptr inbounds float* %tmp21480, i64 1
- %tmp21482 = getelementptr inbounds float* %tmp21481, i64 1
- %tmp21483 = getelementptr inbounds float* %tmp21482, i64 1
- %tmp21484 = getelementptr inbounds float* %tmp21483, i64 1
- %tmp21485 = getelementptr inbounds float* %tmp21484, i64 1
- %tmp21486 = getelementptr inbounds float* %tmp21485, i64 1
- %tmp21487 = getelementptr inbounds float* %tmp21486, i64 1
- %tmp21488 = getelementptr inbounds float* %tmp21487, i64 1
- %tmp21489 = getelementptr inbounds float* %tmp21488, i64 1
- %tmp21490 = getelementptr inbounds float* %tmp21489, i64 1
- %tmp21491 = getelementptr inbounds float* %tmp21490, i64 1
- %tmp21492 = getelementptr inbounds float* %tmp21491, i64 1
- %tmp21493 = getelementptr inbounds float* %tmp21492, i64 1
- %tmp21494 = getelementptr inbounds float* %tmp21493, i64 1
- %tmp21495 = getelementptr inbounds float* %tmp21494, i64 1
- %tmp21496 = getelementptr inbounds float* %tmp21495, i64 1
- %tmp21497 = getelementptr inbounds float* %tmp21496, i64 1
- %tmp21498 = getelementptr inbounds float* %tmp21497, i64 1
- %tmp21499 = getelementptr inbounds float* %tmp21498, i64 1
- %tmp21500 = getelementptr inbounds float* %tmp21499, i64 1
- %tmp21501 = getelementptr inbounds float* %tmp21500, i64 1
- %tmp21502 = getelementptr inbounds float* %tmp21501, i64 1
- %tmp21503 = getelementptr inbounds float* %tmp21502, i64 1
- %tmp21504 = getelementptr inbounds float* %tmp21503, i64 1
- %tmp21505 = getelementptr inbounds float* %tmp21504, i64 1
- %tmp21506 = getelementptr inbounds float* %tmp21505, i64 1
- %tmp21507 = getelementptr inbounds float* %tmp21506, i64 1
- %tmp21508 = getelementptr inbounds float* %tmp21507, i64 1
- %tmp21509 = getelementptr inbounds float* %tmp21508, i64 1
- %tmp21510 = getelementptr inbounds float* %tmp21509, i64 1
- %tmp21511 = getelementptr inbounds float* %tmp21510, i64 1
- %tmp21512 = getelementptr inbounds float* %tmp21511, i64 1
- %tmp21513 = getelementptr inbounds float* %tmp21512, i64 1
- %tmp21514 = getelementptr inbounds float* %tmp21513, i64 1
- %tmp21515 = getelementptr inbounds float* %tmp21514, i64 1
- %tmp21516 = getelementptr inbounds float* %tmp21515, i64 1
- %tmp21517 = getelementptr inbounds float* %tmp21516, i64 1
- %tmp21518 = getelementptr inbounds float* %tmp21517, i64 1
- %tmp21519 = getelementptr inbounds float* %tmp21518, i64 1
- %tmp21520 = getelementptr inbounds float* %tmp21519, i64 1
- %tmp21521 = getelementptr inbounds float* %tmp21520, i64 1
- %tmp21522 = getelementptr inbounds float* %tmp21521, i64 1
- %tmp21523 = getelementptr inbounds float* %tmp21522, i64 1
- %tmp21524 = getelementptr inbounds float* %tmp21523, i64 1
- %tmp21525 = getelementptr inbounds float* %tmp21524, i64 1
- %tmp21526 = getelementptr inbounds float* %tmp21525, i64 1
- %tmp21527 = getelementptr inbounds float* %tmp21526, i64 1
- %tmp21528 = getelementptr inbounds float* %tmp21527, i64 1
- %tmp21529 = getelementptr inbounds float* %tmp21528, i64 1
- %tmp21530 = getelementptr inbounds float* %tmp21529, i64 1
- %tmp21531 = getelementptr inbounds float* %tmp21530, i64 1
- %tmp21532 = getelementptr inbounds float* %tmp21531, i64 1
- %tmp21533 = getelementptr inbounds float* %tmp21532, i64 1
- %tmp21534 = getelementptr inbounds float* %tmp21533, i64 1
- %tmp21535 = getelementptr inbounds float* %tmp21534, i64 1
- %tmp21536 = getelementptr inbounds float* %tmp21535, i64 1
- %tmp21537 = getelementptr inbounds float* %tmp21536, i64 1
- %tmp21538 = getelementptr inbounds float* %tmp21537, i64 1
- %tmp21539 = getelementptr inbounds float* %tmp21538, i64 1
- %tmp21540 = getelementptr inbounds float* %tmp21539, i64 1
- %tmp21541 = getelementptr inbounds float* %tmp21540, i64 1
- %tmp21542 = getelementptr inbounds float* %tmp21541, i64 1
- %tmp21543 = getelementptr inbounds float* %tmp21542, i64 1
- %tmp21544 = getelementptr inbounds float* %tmp21543, i64 1
- %tmp21545 = getelementptr inbounds float* %tmp21544, i64 1
- %tmp21546 = getelementptr inbounds float* %tmp21545, i64 1
- %tmp21547 = getelementptr inbounds float* %tmp21546, i64 1
- %tmp21548 = getelementptr inbounds float* %tmp21547, i64 1
- %tmp21549 = getelementptr inbounds float* %tmp21548, i64 1
- %tmp21550 = getelementptr inbounds float* %tmp21549, i64 1
- %tmp21551 = getelementptr inbounds float* %tmp21550, i64 1
- %tmp21552 = getelementptr inbounds float* %tmp21551, i64 1
- %tmp21553 = getelementptr inbounds float* %tmp21552, i64 1
- %tmp21554 = getelementptr inbounds float* %tmp21553, i64 1
- %tmp21555 = getelementptr inbounds float* %tmp21554, i64 1
- %tmp21556 = getelementptr inbounds float* %tmp21555, i64 1
- %tmp21557 = getelementptr inbounds float* %tmp21556, i64 1
- %tmp21558 = getelementptr inbounds float* %tmp21557, i64 1
- %tmp21559 = getelementptr inbounds float* %tmp21558, i64 1
- %tmp21560 = getelementptr inbounds float* %tmp21559, i64 1
- %tmp21561 = getelementptr inbounds float* %tmp21560, i64 1
- %tmp21562 = getelementptr inbounds float* %tmp21561, i64 1
- %tmp21563 = getelementptr inbounds float* %tmp21562, i64 1
- %tmp21564 = getelementptr inbounds float* %tmp21563, i64 1
- %tmp21565 = getelementptr inbounds float* %tmp21564, i64 1
- %tmp21566 = getelementptr inbounds float* %tmp21565, i64 1
- %tmp21567 = getelementptr inbounds float* %tmp21566, i64 1
- %tmp21568 = getelementptr inbounds float* %tmp21567, i64 1
- %tmp21569 = getelementptr inbounds float* %tmp21568, i64 1
- %tmp21570 = getelementptr inbounds float* %tmp21569, i64 1
- %tmp21571 = getelementptr inbounds float* %tmp21570, i64 1
- %tmp21572 = getelementptr inbounds float* %tmp21571, i64 1
- %tmp21573 = getelementptr inbounds float* %tmp21572, i64 1
- %tmp21574 = getelementptr inbounds float* %tmp21573, i64 1
- %tmp21575 = getelementptr inbounds float* %tmp21574, i64 1
- %tmp21576 = getelementptr inbounds float* %tmp21575, i64 1
- %tmp21577 = getelementptr inbounds float* %tmp21576, i64 1
- %tmp21578 = getelementptr inbounds float* %tmp21577, i64 1
- %tmp21579 = getelementptr inbounds float* %tmp21578, i64 1
- %tmp21580 = getelementptr inbounds float* %tmp21579, i64 1
- %tmp21581 = getelementptr inbounds float* %tmp21580, i64 1
- %tmp21582 = getelementptr inbounds float* %tmp21581, i64 1
- %tmp21583 = getelementptr inbounds float* %tmp21582, i64 1
- %tmp21584 = getelementptr inbounds float* %tmp21583, i64 1
- %tmp21585 = getelementptr inbounds float* %tmp21584, i64 1
- %tmp21586 = getelementptr inbounds float* %tmp21585, i64 1
- %tmp21587 = getelementptr inbounds float* %tmp21586, i64 1
- %tmp21588 = getelementptr inbounds float* %tmp21587, i64 1
- %tmp21589 = getelementptr inbounds float* %tmp21588, i64 1
- %tmp21590 = getelementptr inbounds float* %tmp21589, i64 1
- %tmp21591 = getelementptr inbounds float* %tmp21590, i64 1
- %tmp21592 = getelementptr inbounds float* %tmp21591, i64 1
- %tmp21593 = getelementptr inbounds float* %tmp21592, i64 1
- %tmp21594 = getelementptr inbounds float* %tmp21593, i64 1
- %tmp21595 = getelementptr inbounds float* %tmp21594, i64 1
- %tmp21596 = getelementptr inbounds float* %tmp21595, i64 1
- %tmp21597 = getelementptr inbounds float* %tmp21596, i64 1
- %tmp21598 = getelementptr inbounds float* %tmp21597, i64 1
- %tmp21599 = getelementptr inbounds float* %tmp21598, i64 1
- %tmp21600 = getelementptr inbounds float* %tmp21599, i64 1
- %tmp21601 = getelementptr inbounds float* %tmp21600, i64 1
- %tmp21602 = getelementptr inbounds float* %tmp21601, i64 1
- %tmp21603 = getelementptr inbounds float* %tmp21602, i64 1
- %tmp21604 = getelementptr inbounds float* %tmp21603, i64 1
- %tmp21605 = getelementptr inbounds float* %tmp21604, i64 1
- %tmp21606 = getelementptr inbounds float* %tmp21605, i64 1
- %tmp21607 = getelementptr inbounds float* %tmp21606, i64 1
- %tmp21608 = getelementptr inbounds float* %tmp21607, i64 1
- %tmp21609 = getelementptr inbounds float* %tmp21608, i64 1
- %tmp21610 = getelementptr inbounds float* %tmp21609, i64 1
- %tmp21611 = getelementptr inbounds float* %tmp21610, i64 1
- %tmp21612 = getelementptr inbounds float* %tmp21611, i64 1
- %tmp21613 = getelementptr inbounds float* %tmp21612, i64 1
- %tmp21614 = getelementptr inbounds float* %tmp21613, i64 1
- %tmp21615 = getelementptr inbounds float* %tmp21614, i64 1
- %tmp21616 = getelementptr inbounds float* %tmp21615, i64 1
- %tmp21617 = getelementptr inbounds float* %tmp21616, i64 1
- %tmp21618 = getelementptr inbounds float* %tmp21617, i64 1
- %tmp21619 = getelementptr inbounds float* %tmp21618, i64 1
- %tmp21620 = getelementptr inbounds float* %tmp21619, i64 1
- %tmp21621 = getelementptr inbounds float* %tmp21620, i64 1
- %tmp21622 = getelementptr inbounds float* %tmp21621, i64 1
- %tmp21623 = getelementptr inbounds float* %tmp21622, i64 1
- %tmp21624 = getelementptr inbounds float* %tmp21623, i64 1
- %tmp21625 = getelementptr inbounds float* %tmp21624, i64 1
- %tmp21626 = getelementptr inbounds float* %tmp21625, i64 1
- %tmp21627 = getelementptr inbounds float* %tmp21626, i64 1
- %tmp21628 = getelementptr inbounds float* %tmp21627, i64 1
- %tmp21629 = getelementptr inbounds float* %tmp21628, i64 1
- %tmp21630 = getelementptr inbounds float* %tmp21629, i64 1
- %tmp21631 = getelementptr inbounds float* %tmp21630, i64 1
- %tmp21632 = getelementptr inbounds float* %tmp21631, i64 1
- %tmp21633 = getelementptr inbounds float* %tmp21632, i64 1
- %tmp21634 = getelementptr inbounds float* %tmp21633, i64 1
- %tmp21635 = getelementptr inbounds float* %tmp21634, i64 1
- %tmp21636 = getelementptr inbounds float* %tmp21635, i64 1
- %tmp21637 = getelementptr inbounds float* %tmp21636, i64 1
- %tmp21638 = getelementptr inbounds float* %tmp21637, i64 1
- %tmp21639 = getelementptr inbounds float* %tmp21638, i64 1
- %tmp21640 = getelementptr inbounds float* %tmp21639, i64 1
- %tmp21641 = getelementptr inbounds float* %tmp21640, i64 1
- %tmp21642 = getelementptr inbounds float* %tmp21641, i64 1
- %tmp21643 = getelementptr inbounds float* %tmp21642, i64 1
- %tmp21644 = getelementptr inbounds float* %tmp21643, i64 1
- %tmp21645 = getelementptr inbounds float* %tmp21644, i64 1
- %tmp21646 = getelementptr inbounds float* %tmp21645, i64 1
- %tmp21647 = getelementptr inbounds float* %tmp21646, i64 1
- %tmp21648 = getelementptr inbounds float* %tmp21647, i64 1
- %tmp21649 = getelementptr inbounds float* %tmp21648, i64 1
- %tmp21650 = getelementptr inbounds float* %tmp21649, i64 1
- %tmp21651 = getelementptr inbounds float* %tmp21650, i64 1
- %tmp21652 = getelementptr inbounds float* %tmp21651, i64 1
- %tmp21653 = getelementptr inbounds float* %tmp21652, i64 1
- %tmp21654 = getelementptr inbounds float* %tmp21653, i64 1
- %tmp21655 = getelementptr inbounds float* %tmp21654, i64 1
- %tmp21656 = getelementptr inbounds float* %tmp21655, i64 1
- %tmp21657 = getelementptr inbounds float* %tmp21656, i64 1
- %tmp21658 = getelementptr inbounds float* %tmp21657, i64 1
- %tmp21659 = getelementptr inbounds float* %tmp21658, i64 1
- %tmp21660 = getelementptr inbounds float* %tmp21659, i64 1
- %tmp21661 = getelementptr inbounds float* %tmp21660, i64 1
- %tmp21662 = getelementptr inbounds float* %tmp21661, i64 1
- %tmp21663 = getelementptr inbounds float* %tmp21662, i64 1
- %tmp21664 = getelementptr inbounds float* %tmp21663, i64 1
- %tmp21665 = getelementptr inbounds float* %tmp21664, i64 1
- %tmp21666 = getelementptr inbounds float* %tmp21665, i64 1
- %tmp21667 = getelementptr inbounds float* %tmp21666, i64 1
- %tmp21668 = getelementptr inbounds float* %tmp21667, i64 1
- %tmp21669 = getelementptr inbounds float* %tmp21668, i64 1
- %tmp21670 = getelementptr inbounds float* %tmp21669, i64 1
- %tmp21671 = getelementptr inbounds float* %tmp21670, i64 1
- %tmp21672 = getelementptr inbounds float* %tmp21671, i64 1
- %tmp21673 = getelementptr inbounds float* %tmp21672, i64 1
- %tmp21674 = getelementptr inbounds float* %tmp21673, i64 1
- %tmp21675 = getelementptr inbounds float* %tmp21674, i64 1
- %tmp21676 = getelementptr inbounds float* %tmp21675, i64 1
- %tmp21677 = getelementptr inbounds float* %tmp21676, i64 1
- %tmp21678 = getelementptr inbounds float* %tmp21677, i64 1
- %tmp21679 = getelementptr inbounds float* %tmp21678, i64 1
- %tmp21680 = getelementptr inbounds float* %tmp21679, i64 1
- %tmp21681 = getelementptr inbounds float* %tmp21680, i64 1
- %tmp21682 = getelementptr inbounds float* %tmp21681, i64 1
- %tmp21683 = getelementptr inbounds float* %tmp21682, i64 1
- %tmp21684 = getelementptr inbounds float* %tmp21683, i64 1
- %tmp21685 = getelementptr inbounds float* %tmp21684, i64 1
- %tmp21686 = getelementptr inbounds float* %tmp21685, i64 1
- %tmp21687 = getelementptr inbounds float* %tmp21686, i64 1
- %tmp21688 = getelementptr inbounds float* %tmp21687, i64 1
- %tmp21689 = getelementptr inbounds float* %tmp21688, i64 1
- %tmp21690 = getelementptr inbounds float* %tmp21689, i64 1
- %tmp21691 = getelementptr inbounds float* %tmp21690, i64 1
- %tmp21692 = getelementptr inbounds float* %tmp21691, i64 1
- %tmp21693 = getelementptr inbounds float* %tmp21692, i64 1
- %tmp21694 = getelementptr inbounds float* %tmp21693, i64 1
- %tmp21695 = getelementptr inbounds float* %tmp21694, i64 1
- %tmp21696 = getelementptr inbounds float* %tmp21695, i64 1
- %tmp21697 = getelementptr inbounds float* %tmp21696, i64 1
- %tmp21698 = getelementptr inbounds float* %tmp21697, i64 1
- %tmp21699 = getelementptr inbounds float* %tmp21698, i64 1
- %tmp21700 = getelementptr inbounds float* %tmp21699, i64 1
- %tmp21701 = getelementptr inbounds float* %tmp21700, i64 1
- %tmp21702 = getelementptr inbounds float* %tmp21701, i64 1
- %tmp21703 = getelementptr inbounds float* %tmp21702, i64 1
- %tmp21704 = getelementptr inbounds float* %tmp21703, i64 1
- %tmp21705 = getelementptr inbounds float* %tmp21704, i64 1
- %tmp21706 = getelementptr inbounds float* %tmp21705, i64 1
- %tmp21707 = getelementptr inbounds float* %tmp21706, i64 1
- %tmp21708 = getelementptr inbounds float* %tmp21707, i64 1
- %tmp21709 = getelementptr inbounds float* %tmp21708, i64 1
- %tmp21710 = getelementptr inbounds float* %tmp21709, i64 1
- %tmp21711 = getelementptr inbounds float* %tmp21710, i64 1
- %tmp21712 = getelementptr inbounds float* %tmp21711, i64 1
- %tmp21713 = getelementptr inbounds float* %tmp21712, i64 1
- %tmp21714 = getelementptr inbounds float* %tmp21713, i64 1
- %tmp21715 = getelementptr inbounds float* %tmp21714, i64 1
- %tmp21716 = getelementptr inbounds float* %tmp21715, i64 1
- %tmp21717 = getelementptr inbounds float* %tmp21716, i64 1
- %tmp21718 = getelementptr inbounds float* %tmp21717, i64 1
- %tmp21719 = getelementptr inbounds float* %tmp21718, i64 1
- %tmp21720 = getelementptr inbounds float* %tmp21719, i64 1
- %tmp21721 = getelementptr inbounds float* %tmp21720, i64 1
- %tmp21722 = getelementptr inbounds float* %tmp21721, i64 1
- %tmp21723 = getelementptr inbounds float* %tmp21722, i64 1
- %tmp21724 = getelementptr inbounds float* %tmp21723, i64 1
- %tmp21725 = getelementptr inbounds float* %tmp21724, i64 1
- %tmp21726 = getelementptr inbounds float* %tmp21725, i64 1
- %tmp21727 = getelementptr inbounds float* %tmp21726, i64 1
- %tmp21728 = getelementptr inbounds float* %tmp21727, i64 1
- %tmp21729 = getelementptr inbounds float* %tmp21728, i64 1
- %tmp21730 = getelementptr inbounds float* %tmp21729, i64 1
- %tmp21731 = getelementptr inbounds float* %tmp21730, i64 1
- %tmp21732 = getelementptr inbounds float* %tmp21731, i64 1
- %tmp21733 = getelementptr inbounds float* %tmp21732, i64 1
- %tmp21734 = getelementptr inbounds float* %tmp21733, i64 1
- %tmp21735 = getelementptr inbounds float* %tmp21734, i64 1
- %tmp21736 = getelementptr inbounds float* %tmp21735, i64 1
- %tmp21737 = getelementptr inbounds float* %tmp21736, i64 1
- %tmp21738 = getelementptr inbounds float* %tmp21737, i64 1
- %tmp21739 = getelementptr inbounds float* %tmp21738, i64 1
- %tmp21740 = getelementptr inbounds float* %tmp21739, i64 1
- %tmp21741 = getelementptr inbounds float* %tmp21740, i64 1
- %tmp21742 = getelementptr inbounds float* %tmp21741, i64 1
- %tmp21743 = getelementptr inbounds float* %tmp21742, i64 1
- %tmp21744 = getelementptr inbounds float* %tmp21743, i64 1
- %tmp21745 = getelementptr inbounds float* %tmp21744, i64 1
- %tmp21746 = getelementptr inbounds float* %tmp21745, i64 1
- %tmp21747 = getelementptr inbounds float* %tmp21746, i64 1
- %tmp21748 = getelementptr inbounds float* %tmp21747, i64 1
- %tmp21749 = getelementptr inbounds float* %tmp21748, i64 1
- %tmp21750 = getelementptr inbounds float* %tmp21749, i64 1
- %tmp21751 = getelementptr inbounds float* %tmp21750, i64 1
- %tmp21752 = getelementptr inbounds float* %tmp21751, i64 1
- %tmp21753 = getelementptr inbounds float* %tmp21752, i64 1
- %tmp21754 = getelementptr inbounds float* %tmp21753, i64 1
- %tmp21755 = getelementptr inbounds float* %tmp21754, i64 1
- %tmp21756 = getelementptr inbounds float* %tmp21755, i64 1
- %tmp21757 = getelementptr inbounds float* %tmp21756, i64 1
- %tmp21758 = getelementptr inbounds float* %tmp21757, i64 1
- %tmp21759 = getelementptr inbounds float* %tmp21758, i64 1
- %tmp21760 = getelementptr inbounds float* %tmp21759, i64 1
- %tmp21761 = getelementptr inbounds float* %tmp21760, i64 1
- %tmp21762 = getelementptr inbounds float* %tmp21761, i64 1
- %tmp21763 = getelementptr inbounds float* %tmp21762, i64 1
- %tmp21764 = getelementptr inbounds float* %tmp21763, i64 1
- %tmp21765 = getelementptr inbounds float* %tmp21764, i64 1
- %tmp21766 = getelementptr inbounds float* %tmp21765, i64 1
- %tmp21767 = getelementptr inbounds float* %tmp21766, i64 1
- %tmp21768 = getelementptr inbounds float* %tmp21767, i64 1
- %tmp21769 = getelementptr inbounds float* %tmp21768, i64 1
- %tmp21770 = getelementptr inbounds float* %tmp21769, i64 1
- %tmp21771 = getelementptr inbounds float* %tmp21770, i64 1
- %tmp21772 = getelementptr inbounds float* %tmp21771, i64 1
- %tmp21773 = getelementptr inbounds float* %tmp21772, i64 1
- %tmp21774 = getelementptr inbounds float* %tmp21773, i64 1
- %tmp21775 = getelementptr inbounds float* %tmp21774, i64 1
- %tmp21776 = getelementptr inbounds float* %tmp21775, i64 1
- %tmp21777 = getelementptr inbounds float* %tmp21776, i64 1
- %tmp21778 = getelementptr inbounds float* %tmp21777, i64 1
- %tmp21779 = getelementptr inbounds float* %tmp21778, i64 1
- %tmp21780 = getelementptr inbounds float* %tmp21779, i64 1
- %tmp21781 = getelementptr inbounds float* %tmp21780, i64 1
- %tmp21782 = getelementptr inbounds float* %tmp21781, i64 1
- %tmp21783 = getelementptr inbounds float* %tmp21782, i64 1
- %tmp21784 = getelementptr inbounds float* %tmp21783, i64 1
- %tmp21785 = getelementptr inbounds float* %tmp21784, i64 1
- %tmp21786 = getelementptr inbounds float* %tmp21785, i64 1
- %tmp21787 = getelementptr inbounds float* %tmp21786, i64 1
- %tmp21788 = getelementptr inbounds float* %tmp21787, i64 1
- %tmp21789 = getelementptr inbounds float* %tmp21788, i64 1
- %tmp21790 = getelementptr inbounds float* %tmp21789, i64 1
- %tmp21791 = getelementptr inbounds float* %tmp21790, i64 1
- %tmp21792 = getelementptr inbounds float* %tmp21791, i64 1
- %tmp21793 = getelementptr inbounds float* %tmp21792, i64 1
- %tmp21794 = getelementptr inbounds float* %tmp21793, i64 1
- %tmp21795 = getelementptr inbounds float* %tmp21794, i64 1
- %tmp21796 = getelementptr inbounds float* %tmp21795, i64 1
- %tmp21797 = getelementptr inbounds float* %tmp21796, i64 1
- %tmp21798 = getelementptr inbounds float* %tmp21797, i64 1
- %tmp21799 = getelementptr inbounds float* %tmp21798, i64 1
- %tmp21800 = getelementptr inbounds float* %tmp21799, i64 1
- %tmp21801 = getelementptr inbounds float* %tmp21800, i64 1
- %tmp21802 = getelementptr inbounds float* %tmp21801, i64 1
- %tmp21803 = getelementptr inbounds float* %tmp21802, i64 1
- %tmp21804 = getelementptr inbounds float* %tmp21803, i64 1
- %tmp21805 = getelementptr inbounds float* %tmp21804, i64 1
- %tmp21806 = getelementptr inbounds float* %tmp21805, i64 1
- %tmp21807 = getelementptr inbounds float* %tmp21806, i64 1
- %tmp21808 = getelementptr inbounds float* %tmp21807, i64 1
- %tmp21809 = getelementptr inbounds float* %tmp21808, i64 1
- %tmp21810 = getelementptr inbounds float* %tmp21809, i64 1
- %tmp21811 = getelementptr inbounds float* %tmp21810, i64 1
- %tmp21812 = getelementptr inbounds float* %tmp21811, i64 1
- %tmp21813 = getelementptr inbounds float* %tmp21812, i64 1
- %tmp21814 = getelementptr inbounds float* %tmp21813, i64 1
- %tmp21815 = getelementptr inbounds float* %tmp21814, i64 1
- %tmp21816 = getelementptr inbounds float* %tmp21815, i64 1
- %tmp21817 = getelementptr inbounds float* %tmp21816, i64 1
- %tmp21818 = getelementptr inbounds float* %tmp21817, i64 1
- %tmp21819 = getelementptr inbounds float* %tmp21818, i64 1
- %tmp21820 = getelementptr inbounds float* %tmp21819, i64 1
- %tmp21821 = getelementptr inbounds float* %tmp21820, i64 1
- %tmp21822 = getelementptr inbounds float* %tmp21821, i64 1
- %tmp21823 = getelementptr inbounds float* %tmp21822, i64 1
- %tmp21824 = getelementptr inbounds float* %tmp21823, i64 1
- %tmp21825 = getelementptr inbounds float* %tmp21824, i64 1
- %tmp21826 = getelementptr inbounds float* %tmp21825, i64 1
- %tmp21827 = getelementptr inbounds float* %tmp21826, i64 1
- %tmp21828 = getelementptr inbounds float* %tmp21827, i64 1
- %tmp21829 = getelementptr inbounds float* %tmp21828, i64 1
- %tmp21830 = getelementptr inbounds float* %tmp21829, i64 1
- %tmp21831 = getelementptr inbounds float* %tmp21830, i64 1
- %tmp21832 = getelementptr inbounds float* %tmp21831, i64 1
- %tmp21833 = getelementptr inbounds float* %tmp21832, i64 1
- %tmp21834 = getelementptr inbounds float* %tmp21833, i64 1
- %tmp21835 = getelementptr inbounds float* %tmp21834, i64 1
- %tmp21836 = getelementptr inbounds float* %tmp21835, i64 1
- %tmp21837 = getelementptr inbounds float* %tmp21836, i64 1
- %tmp21838 = getelementptr inbounds float* %tmp21837, i64 1
- %tmp21839 = getelementptr inbounds float* %tmp21838, i64 1
- %tmp21840 = getelementptr inbounds float* %tmp21839, i64 1
- %tmp21841 = getelementptr inbounds float* %tmp21840, i64 1
- %tmp21842 = getelementptr inbounds float* %tmp21841, i64 1
- %tmp21843 = getelementptr inbounds float* %tmp21842, i64 1
- %tmp21844 = getelementptr inbounds float* %tmp21843, i64 1
- %tmp21845 = getelementptr inbounds float* %tmp21844, i64 1
- %tmp21846 = getelementptr inbounds float* %tmp21845, i64 1
- %tmp21847 = getelementptr inbounds float* %tmp21846, i64 1
- %tmp21848 = getelementptr inbounds float* %tmp21847, i64 1
- %tmp21849 = getelementptr inbounds float* %tmp21848, i64 1
- %tmp21850 = getelementptr inbounds float* %tmp21849, i64 1
- %tmp21851 = getelementptr inbounds float* %tmp21850, i64 1
- %tmp21852 = getelementptr inbounds float* %tmp21851, i64 1
- %tmp21853 = getelementptr inbounds float* %tmp21852, i64 1
- %tmp21854 = getelementptr inbounds float* %tmp21853, i64 1
- %tmp21855 = getelementptr inbounds float* %tmp21854, i64 1
- %tmp21856 = getelementptr inbounds float* %tmp21855, i64 1
- %tmp21857 = getelementptr inbounds float* %tmp21856, i64 1
- %tmp21858 = getelementptr inbounds float* %tmp21857, i64 1
- %tmp21859 = getelementptr inbounds float* %tmp21858, i64 1
- %tmp21860 = getelementptr inbounds float* %tmp21859, i64 1
- %tmp21861 = getelementptr inbounds float* %tmp21860, i64 1
- %tmp21862 = getelementptr inbounds float* %tmp21861, i64 1
- %tmp21863 = getelementptr inbounds float* %tmp21862, i64 1
- %tmp21864 = getelementptr inbounds float* %tmp21863, i64 1
- %tmp21865 = getelementptr inbounds float* %tmp21864, i64 1
- %tmp21866 = getelementptr inbounds float* %tmp21865, i64 1
- %tmp21867 = getelementptr inbounds float* %tmp21866, i64 1
- %tmp21868 = getelementptr inbounds float* %tmp21867, i64 1
- %tmp21869 = getelementptr inbounds float* %tmp21868, i64 1
- %tmp21870 = getelementptr inbounds float* %tmp21869, i64 1
- %tmp21871 = getelementptr inbounds float* %tmp21870, i64 1
- %tmp21872 = getelementptr inbounds float* %tmp21871, i64 1
- %tmp21873 = getelementptr inbounds float* %tmp21872, i64 1
- %tmp21874 = getelementptr inbounds float* %tmp21873, i64 1
- %tmp21875 = getelementptr inbounds float* %tmp21874, i64 1
- %tmp21876 = getelementptr inbounds float* %tmp21875, i64 1
- %tmp21877 = getelementptr inbounds float* %tmp21876, i64 1
- %tmp21878 = getelementptr inbounds float* %tmp21877, i64 1
- %tmp21879 = getelementptr inbounds float* %tmp21878, i64 1
- %tmp21880 = getelementptr inbounds float* %tmp21879, i64 1
- %tmp21881 = getelementptr inbounds float* %tmp21880, i64 1
- %tmp21882 = getelementptr inbounds float* %tmp21881, i64 1
- %tmp21883 = getelementptr inbounds float* %tmp21882, i64 1
- %tmp21884 = getelementptr inbounds float* %tmp21883, i64 1
- %tmp21885 = getelementptr inbounds float* %tmp21884, i64 1
- %tmp21886 = getelementptr inbounds float* %tmp21885, i64 1
- %tmp21887 = getelementptr inbounds float* %tmp21886, i64 1
- %tmp21888 = getelementptr inbounds float* %tmp21887, i64 1
- %tmp21889 = getelementptr inbounds float* %tmp21888, i64 1
- %tmp21890 = getelementptr inbounds float* %tmp21889, i64 1
- %tmp21891 = getelementptr inbounds float* %tmp21890, i64 1
- %tmp21892 = getelementptr inbounds float* %tmp21891, i64 1
- %tmp21893 = getelementptr inbounds float* %tmp21892, i64 1
- %tmp21894 = getelementptr inbounds float* %tmp21893, i64 1
- %tmp21895 = getelementptr inbounds float* %tmp21894, i64 1
- %tmp21896 = getelementptr inbounds float* %tmp21895, i64 1
- %tmp21897 = getelementptr inbounds float* %tmp21896, i64 1
- %tmp21898 = getelementptr inbounds float* %tmp21897, i64 1
- %tmp21899 = getelementptr inbounds float* %tmp21898, i64 1
- %tmp21900 = getelementptr inbounds float* %tmp21899, i64 1
- %tmp21901 = getelementptr inbounds float* %tmp21900, i64 1
- %tmp21902 = getelementptr inbounds float* %tmp21901, i64 1
- %tmp21903 = getelementptr inbounds float* %tmp21902, i64 1
- %tmp21904 = getelementptr inbounds float* %tmp21903, i64 1
- %tmp21905 = getelementptr inbounds float* %tmp21904, i64 1
- %tmp21906 = getelementptr inbounds float* %tmp21905, i64 1
- %tmp21907 = getelementptr inbounds float* %tmp21906, i64 1
- %tmp21908 = getelementptr inbounds float* %tmp21907, i64 1
- %tmp21909 = getelementptr inbounds float* %tmp21908, i64 1
- %tmp21910 = getelementptr inbounds float* %tmp21909, i64 1
- %tmp21911 = getelementptr inbounds float* %tmp21910, i64 1
- %tmp21912 = getelementptr inbounds float* %tmp21911, i64 1
- %tmp21913 = getelementptr inbounds float* %tmp21912, i64 1
- %tmp21914 = getelementptr inbounds float* %tmp21913, i64 1
- %tmp21915 = getelementptr inbounds float* %tmp21914, i64 1
- %tmp21916 = getelementptr inbounds float* %tmp21915, i64 1
- %tmp21917 = getelementptr inbounds float* %tmp21916, i64 1
- %tmp21918 = getelementptr inbounds float* %tmp21917, i64 1
- %tmp21919 = getelementptr inbounds float* %tmp21918, i64 1
- %tmp21920 = getelementptr inbounds float* %tmp21919, i64 1
- %tmp21921 = getelementptr inbounds float* %tmp21920, i64 1
- %tmp21922 = getelementptr inbounds float* %tmp21921, i64 1
- %tmp21923 = getelementptr inbounds float* %tmp21922, i64 1
- %tmp21924 = getelementptr inbounds float* %tmp21923, i64 1
- %tmp21925 = getelementptr inbounds float* %tmp21924, i64 1
- %tmp21926 = getelementptr inbounds float* %tmp21925, i64 1
- %tmp21927 = getelementptr inbounds float* %tmp21926, i64 1
- %tmp21928 = getelementptr inbounds float* %tmp21927, i64 1
- %tmp21929 = getelementptr inbounds float* %tmp21928, i64 1
- %tmp21930 = getelementptr inbounds float* %tmp21929, i64 1
- %tmp21931 = getelementptr inbounds float* %tmp21930, i64 1
- %tmp21932 = getelementptr inbounds float* %tmp21931, i64 1
- %tmp21933 = getelementptr inbounds float* %tmp21932, i64 1
- %tmp21934 = getelementptr inbounds float* %tmp21933, i64 1
- %tmp21935 = getelementptr inbounds float* %tmp21934, i64 1
- %tmp21936 = getelementptr inbounds float* %tmp21935, i64 1
- %tmp21937 = getelementptr inbounds float* %tmp21936, i64 1
- %tmp21938 = getelementptr inbounds float* %tmp21937, i64 1
- %tmp21939 = getelementptr inbounds float* %tmp21938, i64 1
- %tmp21940 = getelementptr inbounds float* %tmp21939, i64 1
- %tmp21941 = getelementptr inbounds float* %tmp21940, i64 1
- %tmp21942 = getelementptr inbounds float* %tmp21941, i64 1
- %tmp21943 = getelementptr inbounds float* %tmp21942, i64 1
- %tmp21944 = getelementptr inbounds float* %tmp21943, i64 1
- %tmp21945 = getelementptr inbounds float* %tmp21944, i64 1
- %tmp21946 = getelementptr inbounds float* %tmp21945, i64 1
- %tmp21947 = getelementptr inbounds float* %tmp21946, i64 1
- %tmp21948 = getelementptr inbounds float* %tmp21947, i64 1
- %tmp21949 = getelementptr inbounds float* %tmp21948, i64 1
- %tmp21950 = getelementptr inbounds float* %tmp21949, i64 1
- %tmp21951 = getelementptr inbounds float* %tmp21950, i64 1
- %tmp21952 = getelementptr inbounds float* %tmp21951, i64 1
- %tmp21953 = getelementptr inbounds float* %tmp21952, i64 1
- %tmp21954 = getelementptr inbounds float* %tmp21953, i64 1
- %tmp21955 = getelementptr inbounds float* %tmp21954, i64 1
- %tmp21956 = getelementptr inbounds float* %tmp21955, i64 1
- %tmp21957 = getelementptr inbounds float* %tmp21956, i64 1
- %tmp21958 = getelementptr inbounds float* %tmp21957, i64 1
- %tmp21959 = getelementptr inbounds float* %tmp21958, i64 1
- %tmp21960 = getelementptr inbounds float* %tmp21959, i64 1
- %tmp21961 = getelementptr inbounds float* %tmp21960, i64 1
- %tmp21962 = getelementptr inbounds float* %tmp21961, i64 1
- %tmp21963 = getelementptr inbounds float* %tmp21962, i64 1
- %tmp21964 = getelementptr inbounds float* %tmp21963, i64 1
- %tmp21965 = getelementptr inbounds float* %tmp21964, i64 1
- %tmp21966 = getelementptr inbounds float* %tmp21965, i64 1
- %tmp21967 = getelementptr inbounds float* %tmp21966, i64 1
- %tmp21968 = getelementptr inbounds float* %tmp21967, i64 1
- %tmp21969 = getelementptr inbounds float* %tmp21968, i64 1
- %tmp21970 = getelementptr inbounds float* %tmp21969, i64 1
- %tmp21971 = getelementptr inbounds float* %tmp21970, i64 1
- %tmp21972 = getelementptr inbounds float* %tmp21971, i64 1
- %tmp21973 = getelementptr inbounds float* %tmp21972, i64 1
- %tmp21974 = getelementptr inbounds float* %tmp21973, i64 1
- %tmp21975 = getelementptr inbounds float* %tmp21974, i64 1
- %tmp21976 = getelementptr inbounds float* %tmp21975, i64 1
- %tmp21977 = getelementptr inbounds float* %tmp21976, i64 1
- %tmp21978 = getelementptr inbounds float* %tmp21977, i64 1
- %tmp21979 = getelementptr inbounds float* %tmp21978, i64 1
- %tmp21980 = getelementptr inbounds float* %tmp21979, i64 1
- %tmp21981 = getelementptr inbounds float* %tmp21980, i64 1
- %tmp21982 = getelementptr inbounds float* %tmp21981, i64 1
- %tmp21983 = getelementptr inbounds float* %tmp21982, i64 1
- %tmp21984 = getelementptr inbounds float* %tmp21983, i64 1
- %tmp21985 = getelementptr inbounds float* %tmp21984, i64 1
- %tmp21986 = getelementptr inbounds float* %tmp21985, i64 1
- %tmp21987 = getelementptr inbounds float* %tmp21986, i64 1
- %tmp21988 = getelementptr inbounds float* %tmp21987, i64 1
- %tmp21989 = getelementptr inbounds float* %tmp21988, i64 1
- %tmp21990 = getelementptr inbounds float* %tmp21989, i64 1
- %tmp21991 = getelementptr inbounds float* %tmp21990, i64 1
- %tmp21992 = getelementptr inbounds float* %tmp21991, i64 1
- %tmp21993 = getelementptr inbounds float* %tmp21992, i64 1
- %tmp21994 = getelementptr inbounds float* %tmp21993, i64 1
- %tmp21995 = getelementptr inbounds float* %tmp21994, i64 1
- %tmp21996 = getelementptr inbounds float* %tmp21995, i64 1
- %tmp21997 = getelementptr inbounds float* %tmp21996, i64 1
- %tmp21998 = getelementptr inbounds float* %tmp21997, i64 1
- %tmp21999 = getelementptr inbounds float* %tmp21998, i64 1
- %tmp22000 = getelementptr inbounds float* %tmp21999, i64 1
- %tmp22001 = getelementptr inbounds float* %tmp22000, i64 1
- %tmp22002 = getelementptr inbounds float* %tmp22001, i64 1
- %tmp22003 = getelementptr inbounds float* %tmp22002, i64 1
- %tmp22004 = getelementptr inbounds float* %tmp22003, i64 1
- %tmp22005 = getelementptr inbounds float* %tmp22004, i64 1
- %tmp22006 = getelementptr inbounds float* %tmp22005, i64 1
- %tmp22007 = getelementptr inbounds float* %tmp22006, i64 1
- %tmp22008 = getelementptr inbounds float* %tmp22007, i64 1
- %tmp22009 = getelementptr inbounds float* %tmp22008, i64 1
- %tmp22010 = getelementptr inbounds float* %tmp22009, i64 1
- %tmp22011 = getelementptr inbounds float* %tmp22010, i64 1
- %tmp22012 = getelementptr inbounds float* %tmp22011, i64 1
- %tmp22013 = getelementptr inbounds float* %tmp22012, i64 1
- %tmp22014 = getelementptr inbounds float* %tmp22013, i64 1
- %tmp22015 = getelementptr inbounds float* %tmp22014, i64 1
- %tmp22016 = getelementptr inbounds float* %tmp22015, i64 1
- %tmp22017 = getelementptr inbounds float* %tmp22016, i64 1
- %tmp22018 = getelementptr inbounds float* %tmp22017, i64 1
- %tmp22019 = getelementptr inbounds float* %tmp22018, i64 1
- %tmp22020 = getelementptr inbounds float* %tmp22019, i64 1
- %tmp22021 = getelementptr inbounds float* %tmp22020, i64 1
- %tmp22022 = getelementptr inbounds float* %tmp22021, i64 1
- %tmp22023 = getelementptr inbounds float* %tmp22022, i64 1
- %tmp22024 = getelementptr inbounds float* %tmp22023, i64 1
- %tmp22025 = getelementptr inbounds float* %tmp22024, i64 1
- %tmp22026 = getelementptr inbounds float* %tmp22025, i64 1
- %tmp22027 = getelementptr inbounds float* %tmp22026, i64 1
- %tmp22028 = getelementptr inbounds float* %tmp22027, i64 1
- %tmp22029 = getelementptr inbounds float* %tmp22028, i64 1
- %tmp22030 = getelementptr inbounds float* %tmp22029, i64 1
- %tmp22031 = getelementptr inbounds float* %tmp22030, i64 1
- %tmp22032 = getelementptr inbounds float* %tmp22031, i64 1
- %tmp22033 = getelementptr inbounds float* %tmp22032, i64 1
- %tmp22034 = getelementptr inbounds float* %tmp22033, i64 1
- %tmp22035 = getelementptr inbounds float* %tmp22034, i64 1
- %tmp22036 = getelementptr inbounds float* %tmp22035, i64 1
- %tmp22037 = getelementptr inbounds float* %tmp22036, i64 1
- %tmp22038 = getelementptr inbounds float* %tmp22037, i64 1
- %tmp22039 = getelementptr inbounds float* %tmp22038, i64 1
- %tmp22040 = getelementptr inbounds float* %tmp22039, i64 1
- %tmp22041 = getelementptr inbounds float* %tmp22040, i64 1
- %tmp22042 = getelementptr inbounds float* %tmp22041, i64 1
- %tmp22043 = getelementptr inbounds float* %tmp22042, i64 1
- %tmp22044 = getelementptr inbounds float* %tmp22043, i64 1
- %tmp22045 = getelementptr inbounds float* %tmp22044, i64 1
- %tmp22046 = getelementptr inbounds float* %tmp22045, i64 1
- %tmp22047 = getelementptr inbounds float* %tmp22046, i64 1
- %tmp22048 = getelementptr inbounds float* %tmp22047, i64 1
- %tmp22049 = getelementptr inbounds float* %tmp22048, i64 1
- %tmp22050 = getelementptr inbounds float* %tmp22049, i64 1
- %tmp22051 = getelementptr inbounds float* %tmp22050, i64 1
- %tmp22052 = getelementptr inbounds float* %tmp22051, i64 1
- %tmp22053 = getelementptr inbounds float* %tmp22052, i64 1
- %tmp22054 = getelementptr inbounds float* %tmp22053, i64 1
- %tmp22055 = getelementptr inbounds float* %tmp22054, i64 1
- %tmp22056 = getelementptr inbounds float* %tmp22055, i64 1
- %tmp22057 = getelementptr inbounds float* %tmp22056, i64 1
- %tmp22058 = getelementptr inbounds float* %tmp22057, i64 1
- %tmp22059 = getelementptr inbounds float* %tmp22058, i64 1
- %tmp22060 = getelementptr inbounds float* %tmp22059, i64 1
- %tmp22061 = getelementptr inbounds float* %tmp22060, i64 1
- %tmp22062 = getelementptr inbounds float* %tmp22061, i64 1
- %tmp22063 = getelementptr inbounds float* %tmp22062, i64 1
- %tmp22064 = getelementptr inbounds float* %tmp22063, i64 1
- %tmp22065 = getelementptr inbounds float* %tmp22064, i64 1
- %tmp22066 = getelementptr inbounds float* %tmp22065, i64 1
- %tmp22067 = getelementptr inbounds float* %tmp22066, i64 1
- %tmp22068 = getelementptr inbounds float* %tmp22067, i64 1
- %tmp22069 = getelementptr inbounds float* %tmp22068, i64 1
- %tmp22070 = getelementptr inbounds float* %tmp22069, i64 1
- %tmp22071 = getelementptr inbounds float* %tmp22070, i64 1
- %tmp22072 = getelementptr inbounds float* %tmp22071, i64 1
- %tmp22073 = getelementptr inbounds float* %tmp22072, i64 1
- %tmp22074 = getelementptr inbounds float* %tmp22073, i64 1
- %tmp22075 = getelementptr inbounds float* %tmp22074, i64 1
- %tmp22076 = getelementptr inbounds float* %tmp22075, i64 1
- %tmp22077 = getelementptr inbounds float* %tmp22076, i64 1
- %tmp22078 = getelementptr inbounds float* %tmp22077, i64 1
- %tmp22079 = getelementptr inbounds float* %tmp22078, i64 1
- %tmp22080 = getelementptr inbounds float* %tmp22079, i64 1
- %tmp22081 = getelementptr inbounds float* %tmp22080, i64 1
- %tmp22082 = getelementptr inbounds float* %tmp22081, i64 1
- %tmp22083 = getelementptr inbounds float* %tmp22082, i64 1
- %tmp22084 = getelementptr inbounds float* %tmp22083, i64 1
- %tmp22085 = getelementptr inbounds float* %tmp22084, i64 1
- %tmp22086 = getelementptr inbounds float* %tmp22085, i64 1
- %tmp22087 = getelementptr inbounds float* %tmp22086, i64 1
- %tmp22088 = getelementptr inbounds float* %tmp22087, i64 1
- %tmp22089 = getelementptr inbounds float* %tmp22088, i64 1
- %tmp22090 = getelementptr inbounds float* %tmp22089, i64 1
- %tmp22091 = getelementptr inbounds float* %tmp22090, i64 1
- %tmp22092 = getelementptr inbounds float* %tmp22091, i64 1
- %tmp22093 = getelementptr inbounds float* %tmp22092, i64 1
- %tmp22094 = getelementptr inbounds float* %tmp22093, i64 1
- %tmp22095 = getelementptr inbounds float* %tmp22094, i64 1
- %tmp22096 = getelementptr inbounds float* %tmp22095, i64 1
- %tmp22097 = getelementptr inbounds float* %tmp22096, i64 1
- %tmp22098 = getelementptr inbounds float* %tmp22097, i64 1
- %tmp22099 = getelementptr inbounds float* %tmp22098, i64 1
- %tmp22100 = getelementptr inbounds float* %tmp22099, i64 1
- %tmp22101 = getelementptr inbounds float* %tmp22100, i64 1
- %tmp22102 = getelementptr inbounds float* %tmp22101, i64 1
- %tmp22103 = getelementptr inbounds float* %tmp22102, i64 1
- %tmp22104 = getelementptr inbounds float* %tmp22103, i64 1
- %tmp22105 = getelementptr inbounds float* %tmp22104, i64 1
- %tmp22106 = getelementptr inbounds float* %tmp22105, i64 1
- %tmp22107 = getelementptr inbounds float* %tmp22106, i64 1
- %tmp22108 = getelementptr inbounds float* %tmp22107, i64 1
- %tmp22109 = getelementptr inbounds float* %tmp22108, i64 1
- %tmp22110 = getelementptr inbounds float* %tmp22109, i64 1
- %tmp22111 = getelementptr inbounds float* %tmp22110, i64 1
- %tmp22112 = getelementptr inbounds float* %tmp22111, i64 1
- %tmp22113 = getelementptr inbounds float* %tmp22112, i64 1
- %tmp22114 = getelementptr inbounds float* %tmp22113, i64 1
- %tmp22115 = getelementptr inbounds float* %tmp22114, i64 1
- %tmp22116 = getelementptr inbounds float* %tmp22115, i64 1
- %tmp22117 = getelementptr inbounds float* %tmp22116, i64 1
- %tmp22118 = getelementptr inbounds float* %tmp22117, i64 1
- %tmp22119 = getelementptr inbounds float* %tmp22118, i64 1
- %tmp22120 = getelementptr inbounds float* %tmp22119, i64 1
- %tmp22121 = getelementptr inbounds float* %tmp22120, i64 1
- %tmp22122 = getelementptr inbounds float* %tmp22121, i64 1
- %tmp22123 = getelementptr inbounds float* %tmp22122, i64 1
- %tmp22124 = getelementptr inbounds float* %tmp22123, i64 1
- %tmp22125 = getelementptr inbounds float* %tmp22124, i64 1
- %tmp22126 = getelementptr inbounds float* %tmp22125, i64 1
- %tmp22127 = getelementptr inbounds float* %tmp22126, i64 1
- %tmp22128 = getelementptr inbounds float* %tmp22127, i64 1
- %tmp22129 = getelementptr inbounds float* %tmp22128, i64 1
- %tmp22130 = getelementptr inbounds float* %tmp22129, i64 1
- %tmp22131 = getelementptr inbounds float* %tmp22130, i64 1
- %tmp22132 = getelementptr inbounds float* %tmp22131, i64 1
- %tmp22133 = getelementptr inbounds float* %tmp22132, i64 1
- %tmp22134 = getelementptr inbounds float* %tmp22133, i64 1
- %tmp22135 = getelementptr inbounds float* %tmp22134, i64 1
- %tmp22136 = getelementptr inbounds float* %tmp22135, i64 1
- %tmp22137 = getelementptr inbounds float* %tmp22136, i64 1
- %tmp22138 = getelementptr inbounds float* %tmp22137, i64 1
- %tmp22139 = getelementptr inbounds float* %tmp22138, i64 1
- %tmp22140 = getelementptr inbounds float* %tmp22139, i64 1
- %tmp22141 = getelementptr inbounds float* %tmp22140, i64 1
- %tmp22142 = getelementptr inbounds float* %tmp22141, i64 1
- %tmp22143 = getelementptr inbounds float* %tmp22142, i64 1
- %tmp22144 = getelementptr inbounds float* %tmp22143, i64 1
- %tmp22145 = getelementptr inbounds float* %tmp22144, i64 1
- %tmp22146 = getelementptr inbounds float* %tmp22145, i64 1
- %tmp22147 = getelementptr inbounds float* %tmp22146, i64 1
- %tmp22148 = getelementptr inbounds float* %tmp22147, i64 1
- %tmp22149 = getelementptr inbounds float* %tmp22148, i64 1
- %tmp22150 = getelementptr inbounds float* %tmp22149, i64 1
- %tmp22151 = getelementptr inbounds float* %tmp22150, i64 1
- %tmp22152 = getelementptr inbounds float* %tmp22151, i64 1
- %tmp22153 = getelementptr inbounds float* %tmp22152, i64 1
- %tmp22154 = getelementptr inbounds float* %tmp22153, i64 1
- %tmp22155 = getelementptr inbounds float* %tmp22154, i64 1
- %tmp22156 = getelementptr inbounds float* %tmp22155, i64 1
- %tmp22157 = getelementptr inbounds float* %tmp22156, i64 1
- %tmp22158 = getelementptr inbounds float* %tmp22157, i64 1
- %tmp22159 = getelementptr inbounds float* %tmp22158, i64 1
- %tmp22160 = getelementptr inbounds float* %tmp22159, i64 1
- %tmp22161 = getelementptr inbounds float* %tmp22160, i64 1
- %tmp22162 = getelementptr inbounds float* %tmp22161, i64 1
- %tmp22163 = getelementptr inbounds float* %tmp22162, i64 1
- %tmp22164 = getelementptr inbounds float* %tmp22163, i64 1
- %tmp22165 = getelementptr inbounds float* %tmp22164, i64 1
- %tmp22166 = getelementptr inbounds float* %tmp22165, i64 1
- %tmp22167 = getelementptr inbounds float* %tmp22166, i64 1
- %tmp22168 = getelementptr inbounds float* %tmp22167, i64 1
- %tmp22169 = getelementptr inbounds float* %tmp22168, i64 1
- %tmp22170 = getelementptr inbounds float* %tmp22169, i64 1
- %tmp22171 = getelementptr inbounds float* %tmp22170, i64 1
- %tmp22172 = getelementptr inbounds float* %tmp22171, i64 1
- %tmp22173 = getelementptr inbounds float* %tmp22172, i64 1
- %tmp22174 = getelementptr inbounds float* %tmp22173, i64 1
- %tmp22175 = getelementptr inbounds float* %tmp22174, i64 1
- %tmp22176 = getelementptr inbounds float* %tmp22175, i64 1
- %tmp22177 = getelementptr inbounds float* %tmp22176, i64 1
- %tmp22178 = getelementptr inbounds float* %tmp22177, i64 1
- %tmp22179 = getelementptr inbounds float* %tmp22178, i64 1
- %tmp22180 = getelementptr inbounds float* %tmp22179, i64 1
- %tmp22181 = getelementptr inbounds float* %tmp22180, i64 1
- %tmp22182 = getelementptr inbounds float* %tmp22181, i64 1
- %tmp22183 = getelementptr inbounds float* %tmp22182, i64 1
- %tmp22184 = getelementptr inbounds float* %tmp22183, i64 1
- %tmp22185 = getelementptr inbounds float* %tmp22184, i64 1
- %tmp22186 = getelementptr inbounds float* %tmp22185, i64 1
- %tmp22187 = getelementptr inbounds float* %tmp22186, i64 1
- %tmp22188 = getelementptr inbounds float* %tmp22187, i64 1
- %tmp22189 = getelementptr inbounds float* %tmp22188, i64 1
- %tmp22190 = getelementptr inbounds float* %tmp22189, i64 1
- %tmp22191 = getelementptr inbounds float* %tmp22190, i64 1
- %tmp22192 = getelementptr inbounds float* %tmp22191, i64 1
- %tmp22193 = getelementptr inbounds float* %tmp22192, i64 1
- %tmp22194 = getelementptr inbounds float* %tmp22193, i64 1
- %tmp22195 = getelementptr inbounds float* %tmp22194, i64 1
- %tmp22196 = getelementptr inbounds float* %tmp22195, i64 1
- %tmp22197 = getelementptr inbounds float* %tmp22196, i64 1
- %tmp22198 = getelementptr inbounds float* %tmp22197, i64 1
- %tmp22199 = getelementptr inbounds float* %tmp22198, i64 1
- %tmp22200 = getelementptr inbounds float* %tmp22199, i64 1
- %tmp22201 = getelementptr inbounds float* %tmp22200, i64 1
- %tmp22202 = getelementptr inbounds float* %tmp22201, i64 1
- %tmp22203 = getelementptr inbounds float* %tmp22202, i64 1
- %tmp22204 = getelementptr inbounds float* %tmp22203, i64 1
- %tmp22205 = getelementptr inbounds float* %tmp22204, i64 1
- %tmp22206 = getelementptr inbounds float* %tmp22205, i64 1
- %tmp22207 = getelementptr inbounds float* %tmp22206, i64 1
- %tmp22208 = getelementptr inbounds float* %tmp22207, i64 1
- %tmp22209 = getelementptr inbounds float* %tmp22208, i64 1
- %tmp22210 = getelementptr inbounds float* %tmp22209, i64 1
- %tmp22211 = getelementptr inbounds float* %tmp22210, i64 1
- %tmp22212 = getelementptr inbounds float* %tmp22211, i64 1
- %tmp22213 = getelementptr inbounds float* %tmp22212, i64 1
- %tmp22214 = getelementptr inbounds float* %tmp22213, i64 1
- %tmp22215 = getelementptr inbounds float* %tmp22214, i64 1
- %tmp22216 = getelementptr inbounds float* %tmp22215, i64 1
- %tmp22217 = getelementptr inbounds float* %tmp22216, i64 1
- %tmp22218 = getelementptr inbounds float* %tmp22217, i64 1
- %tmp22219 = getelementptr inbounds float* %tmp22218, i64 1
- %tmp22220 = getelementptr inbounds float* %tmp22219, i64 1
- %tmp22221 = getelementptr inbounds float* %tmp22220, i64 1
- %tmp22222 = getelementptr inbounds float* %tmp22221, i64 1
- %tmp22223 = getelementptr inbounds float* %tmp22222, i64 1
- %tmp22224 = getelementptr inbounds float* %tmp22223, i64 1
- %tmp22225 = getelementptr inbounds float* %tmp22224, i64 1
- %tmp22226 = getelementptr inbounds float* %tmp22225, i64 1
- %tmp22227 = getelementptr inbounds float* %tmp22226, i64 1
- %tmp22228 = getelementptr inbounds float* %tmp22227, i64 1
- %tmp22229 = getelementptr inbounds float* %tmp22228, i64 1
- %tmp22230 = getelementptr inbounds float* %tmp22229, i64 1
- %tmp22231 = getelementptr inbounds float* %tmp22230, i64 1
- %tmp22232 = getelementptr inbounds float* %tmp22231, i64 1
- %tmp22233 = getelementptr inbounds float* %tmp22232, i64 1
- %tmp22234 = getelementptr inbounds float* %tmp22233, i64 1
- %tmp22235 = getelementptr inbounds float* %tmp22234, i64 1
- %tmp22236 = getelementptr inbounds float* %tmp22235, i64 1
- %tmp22237 = getelementptr inbounds float* %tmp22236, i64 1
- %tmp22238 = getelementptr inbounds float* %tmp22237, i64 1
- %tmp22239 = getelementptr inbounds float* %tmp22238, i64 1
- %tmp22240 = getelementptr inbounds float* %tmp22239, i64 1
- %tmp22241 = getelementptr inbounds float* %tmp22240, i64 1
- %tmp22242 = getelementptr inbounds float* %tmp22241, i64 1
- %tmp22243 = getelementptr inbounds float* %tmp22242, i64 1
- %tmp22244 = getelementptr inbounds float* %tmp22243, i64 1
- %tmp22245 = getelementptr inbounds float* %tmp22244, i64 1
- %tmp22246 = getelementptr inbounds float* %tmp22245, i64 1
- %tmp22247 = getelementptr inbounds float* %tmp22246, i64 1
- %tmp22248 = getelementptr inbounds float* %tmp22247, i64 1
- %tmp22249 = getelementptr inbounds float* %tmp22248, i64 1
- %tmp22250 = getelementptr inbounds float* %tmp22249, i64 1
- %tmp22251 = getelementptr inbounds float* %tmp22250, i64 1
- %tmp22252 = getelementptr inbounds float* %tmp22251, i64 1
- %tmp22253 = getelementptr inbounds float* %tmp22252, i64 1
- %tmp22254 = getelementptr inbounds float* %tmp22253, i64 1
- %tmp22255 = getelementptr inbounds float* %tmp22254, i64 1
- %tmp22256 = getelementptr inbounds float* %tmp22255, i64 1
- %tmp22257 = getelementptr inbounds float* %tmp22256, i64 1
- %tmp22258 = getelementptr inbounds float* %tmp22257, i64 1
- %tmp22259 = getelementptr inbounds float* %tmp22258, i64 1
- %tmp22260 = getelementptr inbounds float* %tmp22259, i64 1
- %tmp22261 = getelementptr inbounds float* %tmp22260, i64 1
- %tmp22262 = getelementptr inbounds float* %tmp22261, i64 1
- %tmp22263 = getelementptr inbounds float* %tmp22262, i64 1
- %tmp22264 = getelementptr inbounds float* %tmp22263, i64 1
- %tmp22265 = getelementptr inbounds float* %tmp22264, i64 1
- %tmp22266 = getelementptr inbounds float* %tmp22265, i64 1
- %tmp22267 = getelementptr inbounds float* %tmp22266, i64 1
- %tmp22268 = getelementptr inbounds float* %tmp22267, i64 1
- %tmp22269 = getelementptr inbounds float* %tmp22268, i64 1
- %tmp22270 = getelementptr inbounds float* %tmp22269, i64 1
- %tmp22271 = getelementptr inbounds float* %tmp22270, i64 1
- %tmp22272 = getelementptr inbounds float* %tmp22271, i64 1
- %tmp22273 = getelementptr inbounds float* %tmp22272, i64 1
- %tmp22274 = getelementptr inbounds float* %tmp22273, i64 1
- %tmp22275 = getelementptr inbounds float* %tmp22274, i64 1
- %tmp22276 = getelementptr inbounds float* %tmp22275, i64 1
- %tmp22277 = getelementptr inbounds float* %tmp22276, i64 1
- %tmp22278 = getelementptr inbounds float* %tmp22277, i64 1
- %tmp22279 = getelementptr inbounds float* %tmp22278, i64 1
- %tmp22280 = getelementptr inbounds float* %tmp22279, i64 1
- %tmp22281 = getelementptr inbounds float* %tmp22280, i64 1
- %tmp22282 = getelementptr inbounds float* %tmp22281, i64 1
- %tmp22283 = getelementptr inbounds float* %tmp22282, i64 1
- %tmp22284 = getelementptr inbounds float* %tmp22283, i64 1
- %tmp22285 = getelementptr inbounds float* %tmp22284, i64 1
- %tmp22286 = getelementptr inbounds float* %tmp22285, i64 1
- %tmp22287 = getelementptr inbounds float* %tmp22286, i64 1
- %tmp22288 = getelementptr inbounds float* %tmp22287, i64 1
- %tmp22289 = getelementptr inbounds float* %tmp22288, i64 1
- %tmp22290 = getelementptr inbounds float* %tmp22289, i64 1
- %tmp22291 = getelementptr inbounds float* %tmp22290, i64 1
- %tmp22292 = getelementptr inbounds float* %tmp22291, i64 1
- %tmp22293 = getelementptr inbounds float* %tmp22292, i64 1
- %tmp22294 = getelementptr inbounds float* %tmp22293, i64 1
- %tmp22295 = getelementptr inbounds float* %tmp22294, i64 1
- %tmp22296 = getelementptr inbounds float* %tmp22295, i64 1
- %tmp22297 = getelementptr inbounds float* %tmp22296, i64 1
- %tmp22298 = getelementptr inbounds float* %tmp22297, i64 1
- %tmp22299 = getelementptr inbounds float* %tmp22298, i64 1
- %tmp22300 = getelementptr inbounds float* %tmp22299, i64 1
- %tmp22301 = getelementptr inbounds float* %tmp22300, i64 1
- %tmp22302 = getelementptr inbounds float* %tmp22301, i64 1
- %tmp22303 = getelementptr inbounds float* %tmp22302, i64 1
- %tmp22304 = getelementptr inbounds float* %tmp22303, i64 1
- %tmp22305 = getelementptr inbounds float* %tmp22304, i64 1
- %tmp22306 = getelementptr inbounds float* %tmp22305, i64 1
- %tmp22307 = getelementptr inbounds float* %tmp22306, i64 1
- %tmp22308 = getelementptr inbounds float* %tmp22307, i64 1
- %tmp22309 = getelementptr inbounds float* %tmp22308, i64 1
- %tmp22310 = getelementptr inbounds float* %tmp22309, i64 1
- %tmp22311 = getelementptr inbounds float* %tmp22310, i64 1
- %tmp22312 = getelementptr inbounds float* %tmp22311, i64 1
- %tmp22313 = getelementptr inbounds float* %tmp22312, i64 1
- %tmp22314 = getelementptr inbounds float* %tmp22313, i64 1
- %tmp22315 = getelementptr inbounds float* %tmp22314, i64 1
- %tmp22316 = getelementptr inbounds float* %tmp22315, i64 1
- %tmp22317 = getelementptr inbounds float* %tmp22316, i64 1
- %tmp22318 = getelementptr inbounds float* %tmp22317, i64 1
- %tmp22319 = getelementptr inbounds float* %tmp22318, i64 1
- %tmp22320 = getelementptr inbounds float* %tmp22319, i64 1
- %tmp22321 = getelementptr inbounds float* %tmp22320, i64 1
- %tmp22322 = getelementptr inbounds float* %tmp22321, i64 1
- %tmp22323 = getelementptr inbounds float* %tmp22322, i64 1
- %tmp22324 = getelementptr inbounds float* %tmp22323, i64 1
- %tmp22325 = getelementptr inbounds float* %tmp22324, i64 1
- %tmp22326 = getelementptr inbounds float* %tmp22325, i64 1
- %tmp22327 = getelementptr inbounds float* %tmp22326, i64 1
- %tmp22328 = getelementptr inbounds float* %tmp22327, i64 1
- %tmp22329 = getelementptr inbounds float* %tmp22328, i64 1
- %tmp22330 = getelementptr inbounds float* %tmp22329, i64 1
- %tmp22331 = getelementptr inbounds float* %tmp22330, i64 1
- %tmp22332 = getelementptr inbounds float* %tmp22331, i64 1
- %tmp22333 = getelementptr inbounds float* %tmp22332, i64 1
- %tmp22334 = getelementptr inbounds float* %tmp22333, i64 1
- %tmp22335 = getelementptr inbounds float* %tmp22334, i64 1
- %tmp22336 = getelementptr inbounds float* %tmp22335, i64 1
- %tmp22337 = getelementptr inbounds float* %tmp22336, i64 1
- %tmp22338 = getelementptr inbounds float* %tmp22337, i64 1
- %tmp22339 = getelementptr inbounds float* %tmp22338, i64 1
- %tmp22340 = getelementptr inbounds float* %tmp22339, i64 1
- %tmp22341 = getelementptr inbounds float* %tmp22340, i64 1
- %tmp22342 = getelementptr inbounds float* %tmp22341, i64 1
- %tmp22343 = getelementptr inbounds float* %tmp22342, i64 1
- %tmp22344 = getelementptr inbounds float* %tmp22343, i64 1
- %tmp22345 = getelementptr inbounds float* %tmp22344, i64 1
- %tmp22346 = getelementptr inbounds float* %tmp22345, i64 1
- %tmp22347 = getelementptr inbounds float* %tmp22346, i64 1
- %tmp22348 = getelementptr inbounds float* %tmp22347, i64 1
- %tmp22349 = getelementptr inbounds float* %tmp22348, i64 1
- %tmp22350 = getelementptr inbounds float* %tmp22349, i64 1
- %tmp22351 = getelementptr inbounds float* %tmp22350, i64 1
- %tmp22352 = getelementptr inbounds float* %tmp22351, i64 1
- %tmp22353 = getelementptr inbounds float* %tmp22352, i64 1
- %tmp22354 = getelementptr inbounds float* %tmp22353, i64 1
- %tmp22355 = getelementptr inbounds float* %tmp22354, i64 1
- %tmp22356 = getelementptr inbounds float* %tmp22355, i64 1
- %tmp22357 = getelementptr inbounds float* %tmp22356, i64 1
- %tmp22358 = getelementptr inbounds float* %tmp22357, i64 1
- %tmp22359 = getelementptr inbounds float* %tmp22358, i64 1
- %tmp22360 = getelementptr inbounds float* %tmp22359, i64 1
- %tmp22361 = getelementptr inbounds float* %tmp22360, i64 1
- %tmp22362 = getelementptr inbounds float* %tmp22361, i64 1
- %tmp22363 = getelementptr inbounds float* %tmp22362, i64 1
- %tmp22364 = getelementptr inbounds float* %tmp22363, i64 1
- %tmp22365 = getelementptr inbounds float* %tmp22364, i64 1
- %tmp22366 = getelementptr inbounds float* %tmp22365, i64 1
- %tmp22367 = getelementptr inbounds float* %tmp22366, i64 1
- %tmp22368 = getelementptr inbounds float* %tmp22367, i64 1
- %tmp22369 = getelementptr inbounds float* %tmp22368, i64 1
- %tmp22370 = getelementptr inbounds float* %tmp22369, i64 1
- %tmp22371 = getelementptr inbounds float* %tmp22370, i64 1
- %tmp22372 = getelementptr inbounds float* %tmp22371, i64 1
- %tmp22373 = getelementptr inbounds float* %tmp22372, i64 1
- %tmp22374 = getelementptr inbounds float* %tmp22373, i64 1
- %tmp22375 = getelementptr inbounds float* %tmp22374, i64 1
- %tmp22376 = getelementptr inbounds float* %tmp22375, i64 1
- %tmp22377 = getelementptr inbounds float* %tmp22376, i64 1
- %tmp22378 = getelementptr inbounds float* %tmp22377, i64 1
- %tmp22379 = getelementptr inbounds float* %tmp22378, i64 1
- %tmp22380 = getelementptr inbounds float* %tmp22379, i64 1
- %tmp22381 = getelementptr inbounds float* %tmp22380, i64 1
- %tmp22382 = getelementptr inbounds float* %tmp22381, i64 1
- %tmp22383 = getelementptr inbounds float* %tmp22382, i64 1
- %tmp22384 = getelementptr inbounds float* %tmp22383, i64 1
- %tmp22385 = getelementptr inbounds float* %tmp22384, i64 1
- %tmp22386 = getelementptr inbounds float* %tmp22385, i64 1
- %tmp22387 = getelementptr inbounds float* %tmp22386, i64 1
- %tmp22388 = getelementptr inbounds float* %tmp22387, i64 1
- %tmp22389 = getelementptr inbounds float* %tmp22388, i64 1
- %tmp22390 = getelementptr inbounds float* %tmp22389, i64 1
- %tmp22391 = getelementptr inbounds float* %tmp22390, i64 1
- %tmp22392 = getelementptr inbounds float* %tmp22391, i64 1
- %tmp22393 = getelementptr inbounds float* %tmp22392, i64 1
- %tmp22394 = getelementptr inbounds float* %tmp22393, i64 1
- %tmp22395 = getelementptr inbounds float* %tmp22394, i64 1
- %tmp22396 = getelementptr inbounds float* %tmp22395, i64 1
- %tmp22397 = getelementptr inbounds float* %tmp22396, i64 1
- %tmp22398 = getelementptr inbounds float* %tmp22397, i64 1
- %tmp22399 = getelementptr inbounds float* %tmp22398, i64 1
- %tmp22400 = getelementptr inbounds float* %tmp22399, i64 1
- %tmp22401 = getelementptr inbounds float* %tmp22400, i64 1
- %tmp22402 = getelementptr inbounds float* %tmp22401, i64 1
- %tmp22403 = getelementptr inbounds float* %tmp22402, i64 1
- %tmp22404 = getelementptr inbounds float* %tmp22403, i64 1
- %tmp22405 = getelementptr inbounds float* %tmp22404, i64 1
- %tmp22406 = getelementptr inbounds float* %tmp22405, i64 1
- %tmp22407 = getelementptr inbounds float* %tmp22406, i64 1
- %tmp22408 = getelementptr inbounds float* %tmp22407, i64 1
- %tmp22409 = getelementptr inbounds float* %tmp22408, i64 1
- %tmp22410 = getelementptr inbounds float* %tmp22409, i64 1
- %tmp22411 = getelementptr inbounds float* %tmp22410, i64 1
- %tmp22412 = getelementptr inbounds float* %tmp22411, i64 1
- %tmp22413 = getelementptr inbounds float* %tmp22412, i64 1
- %tmp22414 = getelementptr inbounds float* %tmp22413, i64 1
- %tmp22415 = getelementptr inbounds float* %tmp22414, i64 1
- %tmp22416 = getelementptr inbounds float* %tmp22415, i64 1
- %tmp22417 = getelementptr inbounds float* %tmp22416, i64 1
- %tmp22418 = getelementptr inbounds float* %tmp22417, i64 1
- %tmp22419 = getelementptr inbounds float* %tmp22418, i64 1
- %tmp22420 = getelementptr inbounds float* %tmp22419, i64 1
- %tmp22421 = getelementptr inbounds float* %tmp22420, i64 1
- %tmp22422 = getelementptr inbounds float* %tmp22421, i64 1
- %tmp22423 = getelementptr inbounds float* %tmp22422, i64 1
- %tmp22424 = getelementptr inbounds float* %tmp22423, i64 1
- %tmp22425 = getelementptr inbounds float* %tmp22424, i64 1
- %tmp22426 = getelementptr inbounds float* %tmp22425, i64 1
- %tmp22427 = getelementptr inbounds float* %tmp22426, i64 1
- %tmp22428 = getelementptr inbounds float* %tmp22427, i64 1
- %tmp22429 = getelementptr inbounds float* %tmp22428, i64 1
- %tmp22430 = getelementptr inbounds float* %tmp22429, i64 1
- %tmp22431 = getelementptr inbounds float* %tmp22430, i64 1
- %tmp22432 = getelementptr inbounds float* %tmp22431, i64 1
- %tmp22433 = getelementptr inbounds float* %tmp22432, i64 1
- %tmp22434 = getelementptr inbounds float* %tmp22433, i64 1
- %tmp22435 = getelementptr inbounds float* %tmp22434, i64 1
- %tmp22436 = getelementptr inbounds float* %tmp22435, i64 1
- %tmp22437 = getelementptr inbounds float* %tmp22436, i64 1
- %tmp22438 = getelementptr inbounds float* %tmp22437, i64 1
- %tmp22439 = getelementptr inbounds float* %tmp22438, i64 1
- %tmp22440 = getelementptr inbounds float* %tmp22439, i64 1
- %tmp22441 = getelementptr inbounds float* %tmp22440, i64 1
- %tmp22442 = getelementptr inbounds float* %tmp22441, i64 1
- %tmp22443 = getelementptr inbounds float* %tmp22442, i64 1
- %tmp22444 = getelementptr inbounds float* %tmp22443, i64 1
- %tmp22445 = getelementptr inbounds float* %tmp22444, i64 1
- %tmp22446 = getelementptr inbounds float* %tmp22445, i64 1
- %tmp22447 = getelementptr inbounds float* %tmp22446, i64 1
- %tmp22448 = getelementptr inbounds float* %tmp22447, i64 1
- %tmp22449 = getelementptr inbounds float* %tmp22448, i64 1
- %tmp22450 = getelementptr inbounds float* %tmp22449, i64 1
- %tmp22451 = getelementptr inbounds float* %tmp22450, i64 1
- %tmp22452 = getelementptr inbounds float* %tmp22451, i64 1
- %tmp22453 = getelementptr inbounds float* %tmp22452, i64 1
- %tmp22454 = getelementptr inbounds float* %tmp22453, i64 1
- %tmp22455 = getelementptr inbounds float* %tmp22454, i64 1
- %tmp22456 = getelementptr inbounds float* %tmp22455, i64 1
- %tmp22457 = getelementptr inbounds float* %tmp22456, i64 1
- %tmp22458 = getelementptr inbounds float* %tmp22457, i64 1
- %tmp22459 = getelementptr inbounds float* %tmp22458, i64 1
- %tmp22460 = getelementptr inbounds float* %tmp22459, i64 1
- %tmp22461 = getelementptr inbounds float* %tmp22460, i64 1
- %tmp22462 = getelementptr inbounds float* %tmp22461, i64 1
- %tmp22463 = getelementptr inbounds float* %tmp22462, i64 1
- %tmp22464 = getelementptr inbounds float* %tmp22463, i64 1
- %tmp22465 = getelementptr inbounds float* %tmp22464, i64 1
- %tmp22466 = getelementptr inbounds float* %tmp22465, i64 1
- %tmp22467 = getelementptr inbounds float* %tmp22466, i64 1
- %tmp22468 = getelementptr inbounds float* %tmp22467, i64 1
- %tmp22469 = getelementptr inbounds float* %tmp22468, i64 1
- %tmp22470 = getelementptr inbounds float* %tmp22469, i64 1
- %tmp22471 = getelementptr inbounds float* %tmp22470, i64 1
- %tmp22472 = getelementptr inbounds float* %tmp22471, i64 1
- %tmp22473 = getelementptr inbounds float* %tmp22472, i64 1
- %tmp22474 = getelementptr inbounds float* %tmp22473, i64 1
- %tmp22475 = getelementptr inbounds float* %tmp22474, i64 1
- %tmp22476 = getelementptr inbounds float* %tmp22475, i64 1
- %tmp22477 = getelementptr inbounds float* %tmp22476, i64 1
- %tmp22478 = getelementptr inbounds float* %tmp22477, i64 1
- %tmp22479 = getelementptr inbounds float* %tmp22478, i64 1
- %tmp22480 = getelementptr inbounds float* %tmp22479, i64 1
- %tmp22481 = getelementptr inbounds float* %tmp22480, i64 1
- %tmp22482 = getelementptr inbounds float* %tmp22481, i64 1
- %tmp22483 = getelementptr inbounds float* %tmp22482, i64 1
- %tmp22484 = getelementptr inbounds float* %tmp22483, i64 1
- %tmp22485 = getelementptr inbounds float* %tmp22484, i64 1
- %tmp22486 = getelementptr inbounds float* %tmp22485, i64 1
- %tmp22487 = getelementptr inbounds float* %tmp22486, i64 1
- %tmp22488 = getelementptr inbounds float* %tmp22487, i64 1
- %tmp22489 = getelementptr inbounds float* %tmp22488, i64 1
- %tmp22490 = getelementptr inbounds float* %tmp22489, i64 1
- %tmp22491 = getelementptr inbounds float* %tmp22490, i64 1
- %tmp22492 = getelementptr inbounds float* %tmp22491, i64 1
- %tmp22493 = getelementptr inbounds float* %tmp22492, i64 1
- %tmp22494 = getelementptr inbounds float* %tmp22493, i64 1
- %tmp22495 = getelementptr inbounds float* %tmp22494, i64 1
- %tmp22496 = getelementptr inbounds float* %tmp22495, i64 1
- %tmp22497 = getelementptr inbounds float* %tmp22496, i64 1
- %tmp22498 = getelementptr inbounds float* %tmp22497, i64 1
- %tmp22499 = getelementptr inbounds float* %tmp22498, i64 1
- %tmp22500 = getelementptr inbounds float* %tmp22499, i64 1
- %tmp22501 = getelementptr inbounds float* %tmp22500, i64 1
- %tmp22502 = getelementptr inbounds float* %tmp22501, i64 1
- %tmp22503 = getelementptr inbounds float* %tmp22502, i64 1
- %tmp22504 = getelementptr inbounds float* %tmp22503, i64 1
- %tmp22505 = getelementptr inbounds float* %tmp22504, i64 1
- %tmp22506 = getelementptr inbounds float* %tmp22505, i64 1
- %tmp22507 = getelementptr inbounds float* %tmp22506, i64 1
- %tmp22508 = getelementptr inbounds float* %tmp22507, i64 1
- %tmp22509 = getelementptr inbounds float* %tmp22508, i64 1
- %tmp22510 = getelementptr inbounds float* %tmp22509, i64 1
- %tmp22511 = getelementptr inbounds float* %tmp22510, i64 1
- %tmp22512 = getelementptr inbounds float* %tmp22511, i64 1
- %tmp22513 = getelementptr inbounds float* %tmp22512, i64 1
- %tmp22514 = getelementptr inbounds float* %tmp22513, i64 1
- %tmp22515 = getelementptr inbounds float* %tmp22514, i64 1
- %tmp22516 = getelementptr inbounds float* %tmp22515, i64 1
- %tmp22517 = getelementptr inbounds float* %tmp22516, i64 1
- %tmp22518 = getelementptr inbounds float* %tmp22517, i64 1
- %tmp22519 = getelementptr inbounds float* %tmp22518, i64 1
- %tmp22520 = getelementptr inbounds float* %tmp22519, i64 1
- %tmp22521 = getelementptr inbounds float* %tmp22520, i64 1
- %tmp22522 = getelementptr inbounds float* %tmp22521, i64 1
- %tmp22523 = getelementptr inbounds float* %tmp22522, i64 1
- %tmp22524 = getelementptr inbounds float* %tmp22523, i64 1
- %tmp22525 = getelementptr inbounds float* %tmp22524, i64 1
- %tmp22526 = getelementptr inbounds float* %tmp22525, i64 1
- %tmp22527 = getelementptr inbounds float* %tmp22526, i64 1
- %tmp22528 = getelementptr inbounds float* %tmp22527, i64 1
- %tmp22529 = getelementptr inbounds float* %tmp22528, i64 1
- %tmp22530 = getelementptr inbounds float* %tmp22529, i64 1
- %tmp22531 = getelementptr inbounds float* %tmp22530, i64 1
- %tmp22532 = getelementptr inbounds float* %tmp22531, i64 1
- %tmp22533 = getelementptr inbounds float* %tmp22532, i64 1
- %tmp22534 = getelementptr inbounds float* %tmp22533, i64 1
- %tmp22535 = getelementptr inbounds float* %tmp22534, i64 1
- %tmp22536 = getelementptr inbounds float* %tmp22535, i64 1
- %tmp22537 = getelementptr inbounds float* %tmp22536, i64 1
- %tmp22538 = getelementptr inbounds float* %tmp22537, i64 1
- %tmp22539 = getelementptr inbounds float* %tmp22538, i64 1
- %tmp22540 = getelementptr inbounds float* %tmp22539, i64 1
- %tmp22541 = getelementptr inbounds float* %tmp22540, i64 1
- %tmp22542 = getelementptr inbounds float* %tmp22541, i64 1
- %tmp22543 = getelementptr inbounds float* %tmp22542, i64 1
- %tmp22544 = getelementptr inbounds float* %tmp22543, i64 1
- %tmp22545 = getelementptr inbounds float* %tmp22544, i64 1
- %tmp22546 = getelementptr inbounds float* %tmp22545, i64 1
- %tmp22547 = getelementptr inbounds float* %tmp22546, i64 1
- %tmp22548 = getelementptr inbounds float* %tmp22547, i64 1
- %tmp22549 = getelementptr inbounds float* %tmp22548, i64 1
- %tmp22550 = getelementptr inbounds float* %tmp22549, i64 1
- %tmp22551 = getelementptr inbounds float* %tmp22550, i64 1
- %tmp22552 = getelementptr inbounds float* %tmp22551, i64 1
- %tmp22553 = getelementptr inbounds float* %tmp22552, i64 1
- %tmp22554 = getelementptr inbounds float* %tmp22553, i64 1
- %tmp22555 = getelementptr inbounds float* %tmp22554, i64 1
- %tmp22556 = getelementptr inbounds float* %tmp22555, i64 1
- %tmp22557 = getelementptr inbounds float* %tmp22556, i64 1
- %tmp22558 = getelementptr inbounds float* %tmp22557, i64 1
- %tmp22559 = getelementptr inbounds float* %tmp22558, i64 1
- %tmp22560 = getelementptr inbounds float* %tmp22559, i64 1
- %tmp22561 = getelementptr inbounds float* %tmp22560, i64 1
- %tmp22562 = getelementptr inbounds float* %tmp22561, i64 1
- %tmp22563 = getelementptr inbounds float* %tmp22562, i64 1
- %tmp22564 = getelementptr inbounds float* %tmp22563, i64 1
- %tmp22565 = getelementptr inbounds float* %tmp22564, i64 1
- %tmp22566 = getelementptr inbounds float* %tmp22565, i64 1
- %tmp22567 = getelementptr inbounds float* %tmp22566, i64 1
- %tmp22568 = getelementptr inbounds float* %tmp22567, i64 1
- %tmp22569 = getelementptr inbounds float* %tmp22568, i64 1
- %tmp22570 = getelementptr inbounds float* %tmp22569, i64 1
- %tmp22571 = getelementptr inbounds float* %tmp22570, i64 1
- %tmp22572 = getelementptr inbounds float* %tmp22571, i64 1
- %tmp22573 = getelementptr inbounds float* %tmp22572, i64 1
- %tmp22574 = getelementptr inbounds float* %tmp22573, i64 1
- %tmp22575 = getelementptr inbounds float* %tmp22574, i64 1
- %tmp22576 = getelementptr inbounds float* %tmp22575, i64 1
- %tmp22577 = getelementptr inbounds float* %tmp22576, i64 1
- %tmp22578 = getelementptr inbounds float* %tmp22577, i64 1
- %tmp22579 = getelementptr inbounds float* %tmp22578, i64 1
- %tmp22580 = getelementptr inbounds float* %tmp22579, i64 1
- %tmp22581 = getelementptr inbounds float* %tmp22580, i64 1
- %tmp22582 = getelementptr inbounds float* %tmp22581, i64 1
- %tmp22583 = getelementptr inbounds float* %tmp22582, i64 1
- %tmp22584 = getelementptr inbounds float* %tmp22583, i64 1
- %tmp22585 = getelementptr inbounds float* %tmp22584, i64 1
- %tmp22586 = getelementptr inbounds float* %tmp22585, i64 1
- %tmp22587 = getelementptr inbounds float* %tmp22586, i64 1
- %tmp22588 = getelementptr inbounds float* %tmp22587, i64 1
- %tmp22589 = getelementptr inbounds float* %tmp22588, i64 1
- %tmp22590 = getelementptr inbounds float* %tmp22589, i64 1
- %tmp22591 = getelementptr inbounds float* %tmp22590, i64 1
- %tmp22592 = getelementptr inbounds float* %tmp22591, i64 1
- %tmp22593 = getelementptr inbounds float* %tmp22592, i64 1
- %tmp22594 = getelementptr inbounds float* %tmp22593, i64 1
- %tmp22595 = getelementptr inbounds float* %tmp22594, i64 1
- %tmp22596 = getelementptr inbounds float* %tmp22595, i64 1
- %tmp22597 = getelementptr inbounds float* %tmp22596, i64 1
- %tmp22598 = getelementptr inbounds float* %tmp22597, i64 1
- %tmp22599 = getelementptr inbounds float* %tmp22598, i64 1
- %tmp22600 = getelementptr inbounds float* %tmp22599, i64 1
- %tmp22601 = getelementptr inbounds float* %tmp22600, i64 1
- %tmp22602 = getelementptr inbounds float* %tmp22601, i64 1
- %tmp22603 = getelementptr inbounds float* %tmp22602, i64 1
- %tmp22604 = getelementptr inbounds float* %tmp22603, i64 1
- %tmp22605 = getelementptr inbounds float* %tmp22604, i64 1
- %tmp22606 = getelementptr inbounds float* %tmp22605, i64 1
- %tmp22607 = getelementptr inbounds float* %tmp22606, i64 1
- %tmp22608 = getelementptr inbounds float* %tmp22607, i64 1
- %tmp22609 = getelementptr inbounds float* %tmp22608, i64 1
- %tmp22610 = getelementptr inbounds float* %tmp22609, i64 1
- %tmp22611 = getelementptr inbounds float* %tmp22610, i64 1
- %tmp22612 = getelementptr inbounds float* %tmp22611, i64 1
- %tmp22613 = getelementptr inbounds float* %tmp22612, i64 1
- %tmp22614 = getelementptr inbounds float* %tmp22613, i64 1
- %tmp22615 = getelementptr inbounds float* %tmp22614, i64 1
- %tmp22616 = getelementptr inbounds float* %tmp22615, i64 1
- %tmp22617 = getelementptr inbounds float* %tmp22616, i64 1
- %tmp22618 = getelementptr inbounds float* %tmp22617, i64 1
- %tmp22619 = getelementptr inbounds float* %tmp22618, i64 1
- %tmp22620 = getelementptr inbounds float* %tmp22619, i64 1
- %tmp22621 = getelementptr inbounds float* %tmp22620, i64 1
- %tmp22622 = getelementptr inbounds float* %tmp22621, i64 1
- %tmp22623 = getelementptr inbounds float* %tmp22622, i64 1
- %tmp22624 = getelementptr inbounds float* %tmp22623, i64 1
- %tmp22625 = getelementptr inbounds float* %tmp22624, i64 1
- %tmp22626 = getelementptr inbounds float* %tmp22625, i64 1
- %tmp22627 = getelementptr inbounds float* %tmp22626, i64 1
- %tmp22628 = getelementptr inbounds float* %tmp22627, i64 1
- %tmp22629 = getelementptr inbounds float* %tmp22628, i64 1
- %tmp22630 = getelementptr inbounds float* %tmp22629, i64 1
- %tmp22631 = getelementptr inbounds float* %tmp22630, i64 1
- %tmp22632 = getelementptr inbounds float* %tmp22631, i64 1
- %tmp22633 = getelementptr inbounds float* %tmp22632, i64 1
- %tmp22634 = getelementptr inbounds float* %tmp22633, i64 1
- %tmp22635 = getelementptr inbounds float* %tmp22634, i64 1
- %tmp22636 = getelementptr inbounds float* %tmp22635, i64 1
- %tmp22637 = getelementptr inbounds float* %tmp22636, i64 1
- %tmp22638 = getelementptr inbounds float* %tmp22637, i64 1
- %tmp22639 = getelementptr inbounds float* %tmp22638, i64 1
- %tmp22640 = getelementptr inbounds float* %tmp22639, i64 1
- %tmp22641 = getelementptr inbounds float* %tmp22640, i64 1
- %tmp22642 = getelementptr inbounds float* %tmp22641, i64 1
- %tmp22643 = getelementptr inbounds float* %tmp22642, i64 1
- %tmp22644 = getelementptr inbounds float* %tmp22643, i64 1
- %tmp22645 = getelementptr inbounds float* %tmp22644, i64 1
- %tmp22646 = getelementptr inbounds float* %tmp22645, i64 1
- %tmp22647 = getelementptr inbounds float* %tmp22646, i64 1
- %tmp22648 = getelementptr inbounds float* %tmp22647, i64 1
- %tmp22649 = getelementptr inbounds float* %tmp22648, i64 1
- %tmp22650 = getelementptr inbounds float* %tmp22649, i64 1
- %tmp22651 = getelementptr inbounds float* %tmp22650, i64 1
- %tmp22652 = getelementptr inbounds float* %tmp22651, i64 1
- %tmp22653 = getelementptr inbounds float* %tmp22652, i64 1
- %tmp22654 = getelementptr inbounds float* %tmp22653, i64 1
- %tmp22655 = getelementptr inbounds float* %tmp22654, i64 1
- %tmp22656 = getelementptr inbounds float* %tmp22655, i64 1
- %tmp22657 = getelementptr inbounds float* %tmp22656, i64 1
- %tmp22658 = getelementptr inbounds float* %tmp22657, i64 1
- %tmp22659 = getelementptr inbounds float* %tmp22658, i64 1
- %tmp22660 = getelementptr inbounds float* %tmp22659, i64 1
- %tmp22661 = getelementptr inbounds float* %tmp22660, i64 1
- %tmp22662 = getelementptr inbounds float* %tmp22661, i64 1
- %tmp22663 = getelementptr inbounds float* %tmp22662, i64 1
- %tmp22664 = getelementptr inbounds float* %tmp22663, i64 1
- %tmp22665 = getelementptr inbounds float* %tmp22664, i64 1
- %tmp22666 = getelementptr inbounds float* %tmp22665, i64 1
- %tmp22667 = getelementptr inbounds float* %tmp22666, i64 1
- %tmp22668 = getelementptr inbounds float* %tmp22667, i64 1
- %tmp22669 = getelementptr inbounds float* %tmp22668, i64 1
- %tmp22670 = getelementptr inbounds float* %tmp22669, i64 1
- %tmp22671 = getelementptr inbounds float* %tmp22670, i64 1
- %tmp22672 = getelementptr inbounds float* %tmp22671, i64 1
- %tmp22673 = getelementptr inbounds float* %tmp22672, i64 1
- %tmp22674 = getelementptr inbounds float* %tmp22673, i64 1
- %tmp22675 = getelementptr inbounds float* %tmp22674, i64 1
- %tmp22676 = getelementptr inbounds float* %tmp22675, i64 1
- %tmp22677 = getelementptr inbounds float* %tmp22676, i64 1
- %tmp22678 = getelementptr inbounds float* %tmp22677, i64 1
- %tmp22679 = getelementptr inbounds float* %tmp22678, i64 1
- %tmp22680 = getelementptr inbounds float* %tmp22679, i64 1
- %tmp22681 = getelementptr inbounds float* %tmp22680, i64 1
- %tmp22682 = getelementptr inbounds float* %tmp22681, i64 1
- %tmp22683 = getelementptr inbounds float* %tmp22682, i64 1
- %tmp22684 = getelementptr inbounds float* %tmp22683, i64 1
- %tmp22685 = getelementptr inbounds float* %tmp22684, i64 1
- %tmp22686 = getelementptr inbounds float* %tmp22685, i64 1
- %tmp22687 = getelementptr inbounds float* %tmp22686, i64 1
- %tmp22688 = getelementptr inbounds float* %tmp22687, i64 1
- %tmp22689 = getelementptr inbounds float* %tmp22688, i64 1
- %tmp22690 = getelementptr inbounds float* %tmp22689, i64 1
- %tmp22691 = getelementptr inbounds float* %tmp22690, i64 1
- %tmp22692 = getelementptr inbounds float* %tmp22691, i64 1
- %tmp22693 = getelementptr inbounds float* %tmp22692, i64 1
- %tmp22694 = getelementptr inbounds float* %tmp22693, i64 1
- %tmp22695 = getelementptr inbounds float* %tmp22694, i64 1
- %tmp22696 = getelementptr inbounds float* %tmp22695, i64 1
- %tmp22697 = getelementptr inbounds float* %tmp22696, i64 1
- %tmp22698 = getelementptr inbounds float* %tmp22697, i64 1
- %tmp22699 = getelementptr inbounds float* %tmp22698, i64 1
- %tmp22700 = getelementptr inbounds float* %tmp22699, i64 1
- %tmp22701 = getelementptr inbounds float* %tmp22700, i64 1
- %tmp22702 = getelementptr inbounds float* %tmp22701, i64 1
- %tmp22703 = getelementptr inbounds float* %tmp22702, i64 1
- %tmp22704 = getelementptr inbounds float* %tmp22703, i64 1
- %tmp22705 = getelementptr inbounds float* %tmp22704, i64 1
- %tmp22706 = getelementptr inbounds float* %tmp22705, i64 1
- %tmp22707 = getelementptr inbounds float* %tmp22706, i64 1
- %tmp22708 = getelementptr inbounds float* %tmp22707, i64 1
- %tmp22709 = getelementptr inbounds float* %tmp22708, i64 1
- %tmp22710 = getelementptr inbounds float* %tmp22709, i64 1
- %tmp22711 = getelementptr inbounds float* %tmp22710, i64 1
- %tmp22712 = getelementptr inbounds float* %tmp22711, i64 1
- %tmp22713 = getelementptr inbounds float* %tmp22712, i64 1
- %tmp22714 = getelementptr inbounds float* %tmp22713, i64 1
- %tmp22715 = getelementptr inbounds float* %tmp22714, i64 1
- %tmp22716 = getelementptr inbounds float* %tmp22715, i64 1
- %tmp22717 = getelementptr inbounds float* %tmp22716, i64 1
- %tmp22718 = getelementptr inbounds float* %tmp22717, i64 1
- %tmp22719 = getelementptr inbounds float* %tmp22718, i64 1
- %tmp22720 = getelementptr inbounds float* %tmp22719, i64 1
- %tmp22721 = getelementptr inbounds float* %tmp22720, i64 1
- %tmp22722 = getelementptr inbounds float* %tmp22721, i64 1
- %tmp22723 = getelementptr inbounds float* %tmp22722, i64 1
- %tmp22724 = getelementptr inbounds float* %tmp22723, i64 1
- %tmp22725 = getelementptr inbounds float* %tmp22724, i64 1
- %tmp22726 = getelementptr inbounds float* %tmp22725, i64 1
- %tmp22727 = getelementptr inbounds float* %tmp22726, i64 1
- %tmp22728 = getelementptr inbounds float* %tmp22727, i64 1
- %tmp22729 = getelementptr inbounds float* %tmp22728, i64 1
- %tmp22730 = getelementptr inbounds float* %tmp22729, i64 1
- %tmp22731 = getelementptr inbounds float* %tmp22730, i64 1
- %tmp22732 = getelementptr inbounds float* %tmp22731, i64 1
- %tmp22733 = getelementptr inbounds float* %tmp22732, i64 1
- %tmp22734 = getelementptr inbounds float* %tmp22733, i64 1
- %tmp22735 = getelementptr inbounds float* %tmp22734, i64 1
- %tmp22736 = getelementptr inbounds float* %tmp22735, i64 1
- %tmp22737 = getelementptr inbounds float* %tmp22736, i64 1
- %tmp22738 = getelementptr inbounds float* %tmp22737, i64 1
- %tmp22739 = getelementptr inbounds float* %tmp22738, i64 1
- %tmp22740 = getelementptr inbounds float* %tmp22739, i64 1
- %tmp22741 = getelementptr inbounds float* %tmp22740, i64 1
- %tmp22742 = getelementptr inbounds float* %tmp22741, i64 1
- %tmp22743 = getelementptr inbounds float* %tmp22742, i64 1
- %tmp22744 = getelementptr inbounds float* %tmp22743, i64 1
- %tmp22745 = getelementptr inbounds float* %tmp22744, i64 1
- %tmp22746 = getelementptr inbounds float* %tmp22745, i64 1
- %tmp22747 = getelementptr inbounds float* %tmp22746, i64 1
- %tmp22748 = getelementptr inbounds float* %tmp22747, i64 1
- %tmp22749 = getelementptr inbounds float* %tmp22748, i64 1
- %tmp22750 = getelementptr inbounds float* %tmp22749, i64 1
- %tmp22751 = getelementptr inbounds float* %tmp22750, i64 1
- %tmp22752 = getelementptr inbounds float* %tmp22751, i64 1
- %tmp22753 = getelementptr inbounds float* %tmp22752, i64 1
- %tmp22754 = getelementptr inbounds float* %tmp22753, i64 1
- %tmp22755 = getelementptr inbounds float* %tmp22754, i64 1
- %tmp22756 = getelementptr inbounds float* %tmp22755, i64 1
- %tmp22757 = getelementptr inbounds float* %tmp22756, i64 1
- %tmp22758 = getelementptr inbounds float* %tmp22757, i64 1
- %tmp22759 = getelementptr inbounds float* %tmp22758, i64 1
- %tmp22760 = getelementptr inbounds float* %tmp22759, i64 1
- %tmp22761 = getelementptr inbounds float* %tmp22760, i64 1
- %tmp22762 = getelementptr inbounds float* %tmp22761, i64 1
- %tmp22763 = getelementptr inbounds float* %tmp22762, i64 1
- %tmp22764 = getelementptr inbounds float* %tmp22763, i64 1
- %tmp22765 = getelementptr inbounds float* %tmp22764, i64 1
- %tmp22766 = getelementptr inbounds float* %tmp22765, i64 1
- %tmp22767 = getelementptr inbounds float* %tmp22766, i64 1
- %tmp22768 = getelementptr inbounds float* %tmp22767, i64 1
- %tmp22769 = getelementptr inbounds float* %tmp22768, i64 1
- %tmp22770 = getelementptr inbounds float* %tmp22769, i64 1
- %tmp22771 = getelementptr inbounds float* %tmp22770, i64 1
- %tmp22772 = getelementptr inbounds float* %tmp22771, i64 1
- %tmp22773 = getelementptr inbounds float* %tmp22772, i64 1
- %tmp22774 = getelementptr inbounds float* %tmp22773, i64 1
- %tmp22775 = getelementptr inbounds float* %tmp22774, i64 1
- %tmp22776 = getelementptr inbounds float* %tmp22775, i64 1
- %tmp22777 = getelementptr inbounds float* %tmp22776, i64 1
- %tmp22778 = getelementptr inbounds float* %tmp22777, i64 1
- %tmp22779 = getelementptr inbounds float* %tmp22778, i64 1
- %tmp22780 = getelementptr inbounds float* %tmp22779, i64 1
- %tmp22781 = getelementptr inbounds float* %tmp22780, i64 1
- %tmp22782 = getelementptr inbounds float* %tmp22781, i64 1
- %tmp22783 = getelementptr inbounds float* %tmp22782, i64 1
- %tmp22784 = getelementptr inbounds float* %tmp22783, i64 1
- %tmp22785 = getelementptr inbounds float* %tmp22784, i64 1
- %tmp22786 = getelementptr inbounds float* %tmp22785, i64 1
- %tmp22787 = getelementptr inbounds float* %tmp22786, i64 1
- %tmp22788 = getelementptr inbounds float* %tmp22787, i64 1
- %tmp22789 = getelementptr inbounds float* %tmp22788, i64 1
- %tmp22790 = getelementptr inbounds float* %tmp22789, i64 1
- %tmp22791 = getelementptr inbounds float* %tmp22790, i64 1
- %tmp22792 = getelementptr inbounds float* %tmp22791, i64 1
- %tmp22793 = getelementptr inbounds float* %tmp22792, i64 1
- %tmp22794 = getelementptr inbounds float* %tmp22793, i64 1
- %tmp22795 = getelementptr inbounds float* %tmp22794, i64 1
- %tmp22796 = getelementptr inbounds float* %tmp22795, i64 1
- %tmp22797 = getelementptr inbounds float* %tmp22796, i64 1
- %tmp22798 = getelementptr inbounds float* %tmp22797, i64 1
- %tmp22799 = getelementptr inbounds float* %tmp22798, i64 1
- %tmp22800 = getelementptr inbounds float* %tmp22799, i64 1
- %tmp22801 = getelementptr inbounds float* %tmp22800, i64 1
- %tmp22802 = getelementptr inbounds float* %tmp22801, i64 1
- %tmp22803 = getelementptr inbounds float* %tmp22802, i64 1
- %tmp22804 = getelementptr inbounds float* %tmp22803, i64 1
- %tmp22805 = getelementptr inbounds float* %tmp22804, i64 1
- %tmp22806 = getelementptr inbounds float* %tmp22805, i64 1
- %tmp22807 = getelementptr inbounds float* %tmp22806, i64 1
- %tmp22808 = getelementptr inbounds float* %tmp22807, i64 1
- %tmp22809 = getelementptr inbounds float* %tmp22808, i64 1
- %tmp22810 = getelementptr inbounds float* %tmp22809, i64 1
- %tmp22811 = getelementptr inbounds float* %tmp22810, i64 1
- %tmp22812 = getelementptr inbounds float* %tmp22811, i64 1
- %tmp22813 = getelementptr inbounds float* %tmp22812, i64 1
- %tmp22814 = getelementptr inbounds float* %tmp22813, i64 1
- %tmp22815 = getelementptr inbounds float* %tmp22814, i64 1
- %tmp22816 = getelementptr inbounds float* %tmp22815, i64 1
- %tmp22817 = getelementptr inbounds float* %tmp22816, i64 1
- %tmp22818 = getelementptr inbounds float* %tmp22817, i64 1
- %tmp22819 = getelementptr inbounds float* %tmp22818, i64 1
- %tmp22820 = getelementptr inbounds float* %tmp22819, i64 1
- %tmp22821 = getelementptr inbounds float* %tmp22820, i64 1
- %tmp22822 = getelementptr inbounds float* %tmp22821, i64 1
- %tmp22823 = getelementptr inbounds float* %tmp22822, i64 1
- %tmp22824 = getelementptr inbounds float* %tmp22823, i64 1
- %tmp22825 = getelementptr inbounds float* %tmp22824, i64 1
- %tmp22826 = getelementptr inbounds float* %tmp22825, i64 1
- %tmp22827 = getelementptr inbounds float* %tmp22826, i64 1
- %tmp22828 = getelementptr inbounds float* %tmp22827, i64 1
- %tmp22829 = getelementptr inbounds float* %tmp22828, i64 1
- %tmp22830 = getelementptr inbounds float* %tmp22829, i64 1
- %tmp22831 = getelementptr inbounds float* %tmp22830, i64 1
- %tmp22832 = getelementptr inbounds float* %tmp22831, i64 1
- %tmp22833 = getelementptr inbounds float* %tmp22832, i64 1
- %tmp22834 = getelementptr inbounds float* %tmp22833, i64 1
- %tmp22835 = getelementptr inbounds float* %tmp22834, i64 1
- %tmp22836 = getelementptr inbounds float* %tmp22835, i64 1
- %tmp22837 = getelementptr inbounds float* %tmp22836, i64 1
- %tmp22838 = getelementptr inbounds float* %tmp22837, i64 1
- %tmp22839 = getelementptr inbounds float* %tmp22838, i64 1
- %tmp22840 = getelementptr inbounds float* %tmp22839, i64 1
- %tmp22841 = getelementptr inbounds float* %tmp22840, i64 1
- %tmp22842 = getelementptr inbounds float* %tmp22841, i64 1
- %tmp22843 = getelementptr inbounds float* %tmp22842, i64 1
- %tmp22844 = getelementptr inbounds float* %tmp22843, i64 1
- %tmp22845 = getelementptr inbounds float* %tmp22844, i64 1
- %tmp22846 = getelementptr inbounds float* %tmp22845, i64 1
- %tmp22847 = getelementptr inbounds float* %tmp22846, i64 1
- %tmp22848 = getelementptr inbounds float* %tmp22847, i64 1
- %tmp22849 = getelementptr inbounds float* %tmp22848, i64 1
- %tmp22850 = getelementptr inbounds float* %tmp22849, i64 1
- %tmp22851 = getelementptr inbounds float* %tmp22850, i64 1
- %tmp22852 = getelementptr inbounds float* %tmp22851, i64 1
- %tmp22853 = getelementptr inbounds float* %tmp22852, i64 1
- %tmp22854 = getelementptr inbounds float* %tmp22853, i64 1
- %tmp22855 = getelementptr inbounds float* %tmp22854, i64 1
- %tmp22856 = getelementptr inbounds float* %tmp22855, i64 1
- %tmp22857 = getelementptr inbounds float* %tmp22856, i64 1
- %tmp22858 = getelementptr inbounds float* %tmp22857, i64 1
- %tmp22859 = getelementptr inbounds float* %tmp22858, i64 1
- %tmp22860 = getelementptr inbounds float* %tmp22859, i64 1
- %tmp22861 = getelementptr inbounds float* %tmp22860, i64 1
- %tmp22862 = getelementptr inbounds float* %tmp22861, i64 1
- %tmp22863 = getelementptr inbounds float* %tmp22862, i64 1
- %tmp22864 = getelementptr inbounds float* %tmp22863, i64 1
- %tmp22865 = getelementptr inbounds float* %tmp22864, i64 1
- %tmp22866 = getelementptr inbounds float* %tmp22865, i64 1
- %tmp22867 = getelementptr inbounds float* %tmp22866, i64 1
- %tmp22868 = getelementptr inbounds float* %tmp22867, i64 1
- %tmp22869 = getelementptr inbounds float* %tmp22868, i64 1
- %tmp22870 = getelementptr inbounds float* %tmp22869, i64 1
- %tmp22871 = getelementptr inbounds float* %tmp22870, i64 1
- %tmp22872 = getelementptr inbounds float* %tmp22871, i64 1
- %tmp22873 = getelementptr inbounds float* %tmp22872, i64 1
- %tmp22874 = getelementptr inbounds float* %tmp22873, i64 1
- %tmp22875 = getelementptr inbounds float* %tmp22874, i64 1
- %tmp22876 = getelementptr inbounds float* %tmp22875, i64 1
- %tmp22877 = getelementptr inbounds float* %tmp22876, i64 1
- %tmp22878 = getelementptr inbounds float* %tmp22877, i64 1
- %tmp22879 = getelementptr inbounds float* %tmp22878, i64 1
- %tmp22880 = getelementptr inbounds float* %tmp22879, i64 1
- %tmp22881 = getelementptr inbounds float* %tmp22880, i64 1
- %tmp22882 = getelementptr inbounds float* %tmp22881, i64 1
- %tmp22883 = getelementptr inbounds float* %tmp22882, i64 1
- %tmp22884 = getelementptr inbounds float* %tmp22883, i64 1
- %tmp22885 = getelementptr inbounds float* %tmp22884, i64 1
- %tmp22886 = getelementptr inbounds float* %tmp22885, i64 1
- %tmp22887 = getelementptr inbounds float* %tmp22886, i64 1
- %tmp22888 = getelementptr inbounds float* %tmp22887, i64 1
- %tmp22889 = getelementptr inbounds float* %tmp22888, i64 1
- %tmp22890 = getelementptr inbounds float* %tmp22889, i64 1
- %tmp22891 = getelementptr inbounds float* %tmp22890, i64 1
- %tmp22892 = getelementptr inbounds float* %tmp22891, i64 1
- %tmp22893 = getelementptr inbounds float* %tmp22892, i64 1
- %tmp22894 = getelementptr inbounds float* %tmp22893, i64 1
- %tmp22895 = getelementptr inbounds float* %tmp22894, i64 1
- %tmp22896 = getelementptr inbounds float* %tmp22895, i64 1
- %tmp22897 = getelementptr inbounds float* %tmp22896, i64 1
- %tmp22898 = getelementptr inbounds float* %tmp22897, i64 1
- %tmp22899 = getelementptr inbounds float* %tmp22898, i64 1
- %tmp22900 = getelementptr inbounds float* %tmp22899, i64 1
- %tmp22901 = getelementptr inbounds float* %tmp22900, i64 1
- %tmp22902 = getelementptr inbounds float* %tmp22901, i64 1
- %tmp22903 = getelementptr inbounds float* %tmp22902, i64 1
- %tmp22904 = getelementptr inbounds float* %tmp22903, i64 1
- %tmp22905 = getelementptr inbounds float* %tmp22904, i64 1
- %tmp22906 = getelementptr inbounds float* %tmp22905, i64 1
- %tmp22907 = getelementptr inbounds float* %tmp22906, i64 1
- %tmp22908 = getelementptr inbounds float* %tmp22907, i64 1
- %tmp22909 = getelementptr inbounds float* %tmp22908, i64 1
- %tmp22910 = getelementptr inbounds float* %tmp22909, i64 1
- %tmp22911 = getelementptr inbounds float* %tmp22910, i64 1
- %tmp22912 = getelementptr inbounds float* %tmp22911, i64 1
- %tmp22913 = getelementptr inbounds float* %tmp22912, i64 1
- %tmp22914 = getelementptr inbounds float* %tmp22913, i64 1
- %tmp22915 = getelementptr inbounds float* %tmp22914, i64 1
- %tmp22916 = getelementptr inbounds float* %tmp22915, i64 1
- %tmp22917 = getelementptr inbounds float* %tmp22916, i64 1
- %tmp22918 = getelementptr inbounds float* %tmp22917, i64 1
- %tmp22919 = getelementptr inbounds float* %tmp22918, i64 1
- %tmp22920 = getelementptr inbounds float* %tmp22919, i64 1
- %tmp22921 = getelementptr inbounds float* %tmp22920, i64 1
- %tmp22922 = getelementptr inbounds float* %tmp22921, i64 1
- %tmp22923 = getelementptr inbounds float* %tmp22922, i64 1
- %tmp22924 = getelementptr inbounds float* %tmp22923, i64 1
- %tmp22925 = getelementptr inbounds float* %tmp22924, i64 1
- %tmp22926 = getelementptr inbounds float* %tmp22925, i64 1
- %tmp22927 = getelementptr inbounds float* %tmp22926, i64 1
- %tmp22928 = getelementptr inbounds float* %tmp22927, i64 1
- %tmp22929 = getelementptr inbounds float* %tmp22928, i64 1
- %tmp22930 = getelementptr inbounds float* %tmp22929, i64 1
- %tmp22931 = getelementptr inbounds float* %tmp22930, i64 1
- %tmp22932 = getelementptr inbounds float* %tmp22931, i64 1
- %tmp22933 = getelementptr inbounds float* %tmp22932, i64 1
- %tmp22934 = getelementptr inbounds float* %tmp22933, i64 1
- %tmp22935 = getelementptr inbounds float* %tmp22934, i64 1
- %tmp22936 = getelementptr inbounds float* %tmp22935, i64 1
- %tmp22937 = getelementptr inbounds float* %tmp22936, i64 1
- %tmp22938 = getelementptr inbounds float* %tmp22937, i64 1
- %tmp22939 = getelementptr inbounds float* %tmp22938, i64 1
- %tmp22940 = getelementptr inbounds float* %tmp22939, i64 1
- %tmp22941 = getelementptr inbounds float* %tmp22940, i64 1
- %tmp22942 = getelementptr inbounds float* %tmp22941, i64 1
- %tmp22943 = getelementptr inbounds float* %tmp22942, i64 1
- %tmp22944 = getelementptr inbounds float* %tmp22943, i64 1
- %tmp22945 = getelementptr inbounds float* %tmp22944, i64 1
- %tmp22946 = getelementptr inbounds float* %tmp22945, i64 1
- %tmp22947 = getelementptr inbounds float* %tmp22946, i64 1
- %tmp22948 = getelementptr inbounds float* %tmp22947, i64 1
- %tmp22949 = getelementptr inbounds float* %tmp22948, i64 1
- %tmp22950 = getelementptr inbounds float* %tmp22949, i64 1
- %tmp22951 = getelementptr inbounds float* %tmp22950, i64 1
- %tmp22952 = getelementptr inbounds float* %tmp22951, i64 1
- %tmp22953 = getelementptr inbounds float* %tmp22952, i64 1
- %tmp22954 = getelementptr inbounds float* %tmp22953, i64 1
- %tmp22955 = getelementptr inbounds float* %tmp22954, i64 1
- %tmp22956 = getelementptr inbounds float* %tmp22955, i64 1
- %tmp22957 = getelementptr inbounds float* %tmp22956, i64 1
- %tmp22958 = getelementptr inbounds float* %tmp22957, i64 1
- %tmp22959 = getelementptr inbounds float* %tmp22958, i64 1
- %tmp22960 = getelementptr inbounds float* %tmp22959, i64 1
- %tmp22961 = getelementptr inbounds float* %tmp22960, i64 1
- %tmp22962 = getelementptr inbounds float* %tmp22961, i64 1
- %tmp22963 = getelementptr inbounds float* %tmp22962, i64 1
- %tmp22964 = getelementptr inbounds float* %tmp22963, i64 1
- %tmp22965 = getelementptr inbounds float* %tmp22964, i64 1
- %tmp22966 = getelementptr inbounds float* %tmp22965, i64 1
- %tmp22967 = getelementptr inbounds float* %tmp22966, i64 1
- %tmp22968 = getelementptr inbounds float* %tmp22967, i64 1
- %tmp22969 = getelementptr inbounds float* %tmp22968, i64 1
- %tmp22970 = getelementptr inbounds float* %tmp22969, i64 1
- %tmp22971 = getelementptr inbounds float* %tmp22970, i64 1
- %tmp22972 = getelementptr inbounds float* %tmp22971, i64 1
- %tmp22973 = getelementptr inbounds float* %tmp22972, i64 1
- %tmp22974 = getelementptr inbounds float* %tmp22973, i64 1
- %tmp22975 = getelementptr inbounds float* %tmp22974, i64 1
- %tmp22976 = getelementptr inbounds float* %tmp22975, i64 1
- %tmp22977 = getelementptr inbounds float* %tmp22976, i64 1
- %tmp22978 = getelementptr inbounds float* %tmp22977, i64 1
- %tmp22979 = getelementptr inbounds float* %tmp22978, i64 1
- %tmp22980 = getelementptr inbounds float* %tmp22979, i64 1
- %tmp22981 = getelementptr inbounds float* %tmp22980, i64 1
- %tmp22982 = getelementptr inbounds float* %tmp22981, i64 1
- %tmp22983 = getelementptr inbounds float* %tmp22982, i64 1
- %tmp22984 = getelementptr inbounds float* %tmp22983, i64 1
- %tmp22985 = getelementptr inbounds float* %tmp22984, i64 1
- %tmp22986 = getelementptr inbounds float* %tmp22985, i64 1
- %tmp22987 = getelementptr inbounds float* %tmp22986, i64 1
- %tmp22988 = getelementptr inbounds float* %tmp22987, i64 1
- %tmp22989 = getelementptr inbounds float* %tmp22988, i64 1
- %tmp22990 = getelementptr inbounds float* %tmp22989, i64 1
- %tmp22991 = getelementptr inbounds float* %tmp22990, i64 1
- %tmp22992 = getelementptr inbounds float* %tmp22991, i64 1
- %tmp22993 = getelementptr inbounds float* %tmp22992, i64 1
- %tmp22994 = getelementptr inbounds float* %tmp22993, i64 1
- %tmp22995 = getelementptr inbounds float* %tmp22994, i64 1
- %tmp22996 = getelementptr inbounds float* %tmp22995, i64 1
- %tmp22997 = getelementptr inbounds float* %tmp22996, i64 1
- %tmp22998 = getelementptr inbounds float* %tmp22997, i64 1
- %tmp22999 = getelementptr inbounds float* %tmp22998, i64 1
- %tmp23000 = getelementptr inbounds float* %tmp22999, i64 1
- %tmp23001 = getelementptr inbounds float* %tmp23000, i64 1
- %tmp23002 = getelementptr inbounds float* %tmp23001, i64 1
- %tmp23003 = getelementptr inbounds float* %tmp23002, i64 1
- %tmp23004 = getelementptr inbounds float* %tmp23003, i64 1
- %tmp23005 = getelementptr inbounds float* %tmp23004, i64 1
- %tmp23006 = getelementptr inbounds float* %tmp23005, i64 1
- %tmp23007 = getelementptr inbounds float* %tmp23006, i64 1
- %tmp23008 = getelementptr inbounds float* %tmp23007, i64 1
- %tmp23009 = getelementptr inbounds float* %tmp23008, i64 1
- %tmp23010 = getelementptr inbounds float* %tmp23009, i64 1
- %tmp23011 = getelementptr inbounds float* %tmp23010, i64 1
- %tmp23012 = getelementptr inbounds float* %tmp23011, i64 1
- %tmp23013 = getelementptr inbounds float* %tmp23012, i64 1
- %tmp23014 = getelementptr inbounds float* %tmp23013, i64 1
- %tmp23015 = getelementptr inbounds float* %tmp23014, i64 1
- %tmp23016 = getelementptr inbounds float* %tmp23015, i64 1
- %tmp23017 = getelementptr inbounds float* %tmp23016, i64 1
- %tmp23018 = getelementptr inbounds float* %tmp23017, i64 1
- %tmp23019 = getelementptr inbounds float* %tmp23018, i64 1
- %tmp23020 = getelementptr inbounds float* %tmp23019, i64 1
- %tmp23021 = getelementptr inbounds float* %tmp23020, i64 1
- %tmp23022 = getelementptr inbounds float* %tmp23021, i64 1
- %tmp23023 = getelementptr inbounds float* %tmp23022, i64 1
- %tmp23024 = getelementptr inbounds float* %tmp23023, i64 1
- %tmp23025 = getelementptr inbounds float* %tmp23024, i64 1
- %tmp23026 = getelementptr inbounds float* %tmp23025, i64 1
- %tmp23027 = getelementptr inbounds float* %tmp23026, i64 1
- %tmp23028 = getelementptr inbounds float* %tmp23027, i64 1
- %tmp23029 = getelementptr inbounds float* %tmp23028, i64 1
- %tmp23030 = getelementptr inbounds float* %tmp23029, i64 1
- %tmp23031 = getelementptr inbounds float* %tmp23030, i64 1
- %tmp23032 = getelementptr inbounds float* %tmp23031, i64 1
- %tmp23033 = getelementptr inbounds float* %tmp23032, i64 1
- %tmp23034 = getelementptr inbounds float* %tmp23033, i64 1
- %tmp23035 = getelementptr inbounds float* %tmp23034, i64 1
- %tmp23036 = getelementptr inbounds float* %tmp23035, i64 1
- %tmp23037 = getelementptr inbounds float* %tmp23036, i64 1
- %tmp23038 = getelementptr inbounds float* %tmp23037, i64 1
- %tmp23039 = getelementptr inbounds float* %tmp23038, i64 1
- %tmp23040 = getelementptr inbounds float* %tmp23039, i64 1
- %tmp23041 = getelementptr inbounds float* %tmp23040, i64 1
- %tmp23042 = getelementptr inbounds float* %tmp23041, i64 1
- %tmp23043 = getelementptr inbounds float* %tmp23042, i64 1
- %tmp23044 = getelementptr inbounds float* %tmp23043, i64 1
- %tmp23045 = getelementptr inbounds float* %tmp23044, i64 1
- %tmp23046 = getelementptr inbounds float* %tmp23045, i64 1
- %tmp23047 = getelementptr inbounds float* %tmp23046, i64 1
- %tmp23048 = getelementptr inbounds float* %tmp23047, i64 1
- %tmp23049 = getelementptr inbounds float* %tmp23048, i64 1
- %tmp23050 = getelementptr inbounds float* %tmp23049, i64 1
- %tmp23051 = getelementptr inbounds float* %tmp23050, i64 1
- %tmp23052 = getelementptr inbounds float* %tmp23051, i64 1
- %tmp23053 = getelementptr inbounds float* %tmp23052, i64 1
- %tmp23054 = getelementptr inbounds float* %tmp23053, i64 1
- %tmp23055 = getelementptr inbounds float* %tmp23054, i64 1
- %tmp23056 = getelementptr inbounds float* %tmp23055, i64 1
- %tmp23057 = getelementptr inbounds float* %tmp23056, i64 1
- %tmp23058 = getelementptr inbounds float* %tmp23057, i64 1
- %tmp23059 = getelementptr inbounds float* %tmp23058, i64 1
- %tmp23060 = getelementptr inbounds float* %tmp23059, i64 1
- %tmp23061 = getelementptr inbounds float* %tmp23060, i64 1
- %tmp23062 = getelementptr inbounds float* %tmp23061, i64 1
- %tmp23063 = getelementptr inbounds float* %tmp23062, i64 1
- %tmp23064 = getelementptr inbounds float* %tmp23063, i64 1
- %tmp23065 = getelementptr inbounds float* %tmp23064, i64 1
- %tmp23066 = getelementptr inbounds float* %tmp23065, i64 1
- %tmp23067 = getelementptr inbounds float* %tmp23066, i64 1
- %tmp23068 = getelementptr inbounds float* %tmp23067, i64 1
- %tmp23069 = getelementptr inbounds float* %tmp23068, i64 1
- %tmp23070 = getelementptr inbounds float* %tmp23069, i64 1
- %tmp23071 = getelementptr inbounds float* %tmp23070, i64 1
- %tmp23072 = getelementptr inbounds float* %tmp23071, i64 1
- %tmp23073 = getelementptr inbounds float* %tmp23072, i64 1
- %tmp23074 = getelementptr inbounds float* %tmp23073, i64 1
- %tmp23075 = getelementptr inbounds float* %tmp23074, i64 1
- %tmp23076 = getelementptr inbounds float* %tmp23075, i64 1
- %tmp23077 = getelementptr inbounds float* %tmp23076, i64 1
- %tmp23078 = getelementptr inbounds float* %tmp23077, i64 1
- %tmp23079 = getelementptr inbounds float* %tmp23078, i64 1
- %tmp23080 = getelementptr inbounds float* %tmp23079, i64 1
- %tmp23081 = getelementptr inbounds float* %tmp23080, i64 1
- %tmp23082 = getelementptr inbounds float* %tmp23081, i64 1
- %tmp23083 = getelementptr inbounds float* %tmp23082, i64 1
- %tmp23084 = getelementptr inbounds float* %tmp23083, i64 1
- %tmp23085 = getelementptr inbounds float* %tmp23084, i64 1
- %tmp23086 = getelementptr inbounds float* %tmp23085, i64 1
- %tmp23087 = getelementptr inbounds float* %tmp23086, i64 1
- %tmp23088 = getelementptr inbounds float* %tmp23087, i64 1
- %tmp23089 = getelementptr inbounds float* %tmp23088, i64 1
- %tmp23090 = getelementptr inbounds float* %tmp23089, i64 1
- %tmp23091 = getelementptr inbounds float* %tmp23090, i64 1
- %tmp23092 = getelementptr inbounds float* %tmp23091, i64 1
- %tmp23093 = getelementptr inbounds float* %tmp23092, i64 1
- %tmp23094 = getelementptr inbounds float* %tmp23093, i64 1
- %tmp23095 = getelementptr inbounds float* %tmp23094, i64 1
- %tmp23096 = getelementptr inbounds float* %tmp23095, i64 1
- %tmp23097 = getelementptr inbounds float* %tmp23096, i64 1
- %tmp23098 = getelementptr inbounds float* %tmp23097, i64 1
- %tmp23099 = getelementptr inbounds float* %tmp23098, i64 1
- %tmp23100 = getelementptr inbounds float* %tmp23099, i64 1
- %tmp23101 = getelementptr inbounds float* %tmp23100, i64 1
- %tmp23102 = getelementptr inbounds float* %tmp23101, i64 1
- %tmp23103 = getelementptr inbounds float* %tmp23102, i64 1
- %tmp23104 = getelementptr inbounds float* %tmp23103, i64 1
- %tmp23105 = getelementptr inbounds float* %tmp23104, i64 1
- %tmp23106 = getelementptr inbounds float* %tmp23105, i64 1
- %tmp23107 = getelementptr inbounds float* %tmp23106, i64 1
- %tmp23108 = getelementptr inbounds float* %tmp23107, i64 1
- %tmp23109 = getelementptr inbounds float* %tmp23108, i64 1
- %tmp23110 = getelementptr inbounds float* %tmp23109, i64 1
- %tmp23111 = getelementptr inbounds float* %tmp23110, i64 1
- %tmp23112 = getelementptr inbounds float* %tmp23111, i64 1
- %tmp23113 = getelementptr inbounds float* %tmp23112, i64 1
- %tmp23114 = getelementptr inbounds float* %tmp23113, i64 1
- %tmp23115 = getelementptr inbounds float* %tmp23114, i64 1
- %tmp23116 = getelementptr inbounds float* %tmp23115, i64 1
- %tmp23117 = getelementptr inbounds float* %tmp23116, i64 1
- %tmp23118 = getelementptr inbounds float* %tmp23117, i64 1
- %tmp23119 = getelementptr inbounds float* %tmp23118, i64 1
- %tmp23120 = getelementptr inbounds float* %tmp23119, i64 1
- %tmp23121 = getelementptr inbounds float* %tmp23120, i64 1
- %tmp23122 = getelementptr inbounds float* %tmp23121, i64 1
- %tmp23123 = getelementptr inbounds float* %tmp23122, i64 1
- %tmp23124 = getelementptr inbounds float* %tmp23123, i64 1
- %tmp23125 = getelementptr inbounds float* %tmp23124, i64 1
- %tmp23126 = getelementptr inbounds float* %tmp23125, i64 1
- %tmp23127 = getelementptr inbounds float* %tmp23126, i64 1
- %tmp23128 = getelementptr inbounds float* %tmp23127, i64 1
- %tmp23129 = getelementptr inbounds float* %tmp23128, i64 1
- %tmp23130 = getelementptr inbounds float* %tmp23129, i64 1
- %tmp23131 = getelementptr inbounds float* %tmp23130, i64 1
- %tmp23132 = getelementptr inbounds float* %tmp23131, i64 1
- %tmp23133 = getelementptr inbounds float* %tmp23132, i64 1
- %tmp23134 = getelementptr inbounds float* %tmp23133, i64 1
- %tmp23135 = getelementptr inbounds float* %tmp23134, i64 1
- %tmp23136 = getelementptr inbounds float* %tmp23135, i64 1
- %tmp23137 = getelementptr inbounds float* %tmp23136, i64 1
- %tmp23138 = getelementptr inbounds float* %tmp23137, i64 1
- %tmp23139 = getelementptr inbounds float* %tmp23138, i64 1
- %tmp23140 = getelementptr inbounds float* %tmp23139, i64 1
- %tmp23141 = getelementptr inbounds float* %tmp23140, i64 1
- %tmp23142 = getelementptr inbounds float* %tmp23141, i64 1
- %tmp23143 = getelementptr inbounds float* %tmp23142, i64 1
- %tmp23144 = getelementptr inbounds float* %tmp23143, i64 1
- %tmp23145 = getelementptr inbounds float* %tmp23144, i64 1
- %tmp23146 = getelementptr inbounds float* %tmp23145, i64 1
- %tmp23147 = getelementptr inbounds float* %tmp23146, i64 1
- %tmp23148 = getelementptr inbounds float* %tmp23147, i64 1
- %tmp23149 = getelementptr inbounds float* %tmp23148, i64 1
- %tmp23150 = getelementptr inbounds float* %tmp23149, i64 1
- %tmp23151 = getelementptr inbounds float* %tmp23150, i64 1
- %tmp23152 = getelementptr inbounds float* %tmp23151, i64 1
- %tmp23153 = getelementptr inbounds float* %tmp23152, i64 1
- %tmp23154 = getelementptr inbounds float* %tmp23153, i64 1
- %tmp23155 = getelementptr inbounds float* %tmp23154, i64 1
- %tmp23156 = getelementptr inbounds float* %tmp23155, i64 1
- %tmp23157 = getelementptr inbounds float* %tmp23156, i64 1
- %tmp23158 = getelementptr inbounds float* %tmp23157, i64 1
- %tmp23159 = getelementptr inbounds float* %tmp23158, i64 1
- %tmp23160 = getelementptr inbounds float* %tmp23159, i64 1
- %tmp23161 = getelementptr inbounds float* %tmp23160, i64 1
- %tmp23162 = getelementptr inbounds float* %tmp23161, i64 1
- %tmp23163 = getelementptr inbounds float* %tmp23162, i64 1
- %tmp23164 = getelementptr inbounds float* %tmp23163, i64 1
- %tmp23165 = getelementptr inbounds float* %tmp23164, i64 1
- %tmp23166 = getelementptr inbounds float* %tmp23165, i64 1
- %tmp23167 = getelementptr inbounds float* %tmp23166, i64 1
- %tmp23168 = getelementptr inbounds float* %tmp23167, i64 1
- %tmp23169 = getelementptr inbounds float* %tmp23168, i64 1
- %tmp23170 = getelementptr inbounds float* %tmp23169, i64 1
- %tmp23171 = getelementptr inbounds float* %tmp23170, i64 1
- %tmp23172 = getelementptr inbounds float* %tmp23171, i64 1
- %tmp23173 = getelementptr inbounds float* %tmp23172, i64 1
- %tmp23174 = getelementptr inbounds float* %tmp23173, i64 1
- %tmp23175 = getelementptr inbounds float* %tmp23174, i64 1
- %tmp23176 = getelementptr inbounds float* %tmp23175, i64 1
- %tmp23177 = getelementptr inbounds float* %tmp23176, i64 1
- %tmp23178 = getelementptr inbounds float* %tmp23177, i64 1
- %tmp23179 = getelementptr inbounds float* %tmp23178, i64 1
- %tmp23180 = getelementptr inbounds float* %tmp23179, i64 1
- %tmp23181 = getelementptr inbounds float* %tmp23180, i64 1
- %tmp23182 = getelementptr inbounds float* %tmp23181, i64 1
- %tmp23183 = getelementptr inbounds float* %tmp23182, i64 1
- %tmp23184 = getelementptr inbounds float* %tmp23183, i64 1
- %tmp23185 = getelementptr inbounds float* %tmp23184, i64 1
- %tmp23186 = getelementptr inbounds float* %tmp23185, i64 1
- %tmp23187 = getelementptr inbounds float* %tmp23186, i64 1
- %tmp23188 = getelementptr inbounds float* %tmp23187, i64 1
- %tmp23189 = getelementptr inbounds float* %tmp23188, i64 1
- %tmp23190 = getelementptr inbounds float* %tmp23189, i64 1
- %tmp23191 = getelementptr inbounds float* %tmp23190, i64 1
- %tmp23192 = getelementptr inbounds float* %tmp23191, i64 1
- %tmp23193 = getelementptr inbounds float* %tmp23192, i64 1
- %tmp23194 = getelementptr inbounds float* %tmp23193, i64 1
- %tmp23195 = getelementptr inbounds float* %tmp23194, i64 1
- %tmp23196 = getelementptr inbounds float* %tmp23195, i64 1
- %tmp23197 = getelementptr inbounds float* %tmp23196, i64 1
- %tmp23198 = getelementptr inbounds float* %tmp23197, i64 1
- %tmp23199 = getelementptr inbounds float* %tmp23198, i64 1
- %tmp23200 = getelementptr inbounds float* %tmp23199, i64 1
- %tmp23201 = getelementptr inbounds float* %tmp23200, i64 1
- %tmp23202 = getelementptr inbounds float* %tmp23201, i64 1
- %tmp23203 = getelementptr inbounds float* %tmp23202, i64 1
- %tmp23204 = getelementptr inbounds float* %tmp23203, i64 1
- %tmp23205 = getelementptr inbounds float* %tmp23204, i64 1
- %tmp23206 = getelementptr inbounds float* %tmp23205, i64 1
- %tmp23207 = getelementptr inbounds float* %tmp23206, i64 1
- %tmp23208 = getelementptr inbounds float* %tmp23207, i64 1
- %tmp23209 = getelementptr inbounds float* %tmp23208, i64 1
- %tmp23210 = getelementptr inbounds float* %tmp23209, i64 1
- %tmp23211 = getelementptr inbounds float* %tmp23210, i64 1
- %tmp23212 = getelementptr inbounds float* %tmp23211, i64 1
- %tmp23213 = getelementptr inbounds float* %tmp23212, i64 1
- %tmp23214 = getelementptr inbounds float* %tmp23213, i64 1
- %tmp23215 = getelementptr inbounds float* %tmp23214, i64 1
- %tmp23216 = getelementptr inbounds float* %tmp23215, i64 1
- %tmp23217 = getelementptr inbounds float* %tmp23216, i64 1
- %tmp23218 = getelementptr inbounds float* %tmp23217, i64 1
- %tmp23219 = getelementptr inbounds float* %tmp23218, i64 1
- %tmp23220 = getelementptr inbounds float* %tmp23219, i64 1
- %tmp23221 = getelementptr inbounds float* %tmp23220, i64 1
- %tmp23222 = getelementptr inbounds float* %tmp23221, i64 1
- %tmp23223 = getelementptr inbounds float* %tmp23222, i64 1
- %tmp23224 = getelementptr inbounds float* %tmp23223, i64 1
- %tmp23225 = getelementptr inbounds float* %tmp23224, i64 1
- %tmp23226 = getelementptr inbounds float* %tmp23225, i64 1
- %tmp23227 = getelementptr inbounds float* %tmp23226, i64 1
- %tmp23228 = getelementptr inbounds float* %tmp23227, i64 1
- %tmp23229 = getelementptr inbounds float* %tmp23228, i64 1
- %tmp23230 = getelementptr inbounds float* %tmp23229, i64 1
- %tmp23231 = getelementptr inbounds float* %tmp23230, i64 1
- %tmp23232 = getelementptr inbounds float* %tmp23231, i64 1
- %tmp23233 = getelementptr inbounds float* %tmp23232, i64 1
- %tmp23234 = getelementptr inbounds float* %tmp23233, i64 1
- %tmp23235 = getelementptr inbounds float* %tmp23234, i64 1
- %tmp23236 = getelementptr inbounds float* %tmp23235, i64 1
- %tmp23237 = getelementptr inbounds float* %tmp23236, i64 1
- %tmp23238 = getelementptr inbounds float* %tmp23237, i64 1
- %tmp23239 = getelementptr inbounds float* %tmp23238, i64 1
- %tmp23240 = getelementptr inbounds float* %tmp23239, i64 1
- %tmp23241 = getelementptr inbounds float* %tmp23240, i64 1
- %tmp23242 = getelementptr inbounds float* %tmp23241, i64 1
- %tmp23243 = getelementptr inbounds float* %tmp23242, i64 1
- %tmp23244 = getelementptr inbounds float* %tmp23243, i64 1
- %tmp23245 = getelementptr inbounds float* %tmp23244, i64 1
- %tmp23246 = getelementptr inbounds float* %tmp23245, i64 1
- %tmp23247 = getelementptr inbounds float* %tmp23246, i64 1
- %tmp23248 = getelementptr inbounds float* %tmp23247, i64 1
- %tmp23249 = getelementptr inbounds float* %tmp23248, i64 1
- %tmp23250 = getelementptr inbounds float* %tmp23249, i64 1
- %tmp23251 = getelementptr inbounds float* %tmp23250, i64 1
- %tmp23252 = getelementptr inbounds float* %tmp23251, i64 1
- %tmp23253 = getelementptr inbounds float* %tmp23252, i64 1
- %tmp23254 = getelementptr inbounds float* %tmp23253, i64 1
- %tmp23255 = getelementptr inbounds float* %tmp23254, i64 1
- %tmp23256 = getelementptr inbounds float* %tmp23255, i64 1
- %tmp23257 = getelementptr inbounds float* %tmp23256, i64 1
- %tmp23258 = getelementptr inbounds float* %tmp23257, i64 1
- %tmp23259 = getelementptr inbounds float* %tmp23258, i64 1
- %tmp23260 = getelementptr inbounds float* %tmp23259, i64 1
- %tmp23261 = getelementptr inbounds float* %tmp23260, i64 1
- %tmp23262 = getelementptr inbounds float* %tmp23261, i64 1
- %tmp23263 = getelementptr inbounds float* %tmp23262, i64 1
- %tmp23264 = getelementptr inbounds float* %tmp23263, i64 1
- %tmp23265 = getelementptr inbounds float* %tmp23264, i64 1
- %tmp23266 = getelementptr inbounds float* %tmp23265, i64 1
- %tmp23267 = getelementptr inbounds float* %tmp23266, i64 1
- %tmp23268 = getelementptr inbounds float* %tmp23267, i64 1
- %tmp23269 = getelementptr inbounds float* %tmp23268, i64 1
- %tmp23270 = getelementptr inbounds float* %tmp23269, i64 1
- %tmp23271 = getelementptr inbounds float* %tmp23270, i64 1
- %tmp23272 = getelementptr inbounds float* %tmp23271, i64 1
- %tmp23273 = getelementptr inbounds float* %tmp23272, i64 1
- %tmp23274 = getelementptr inbounds float* %tmp23273, i64 1
- %tmp23275 = getelementptr inbounds float* %tmp23274, i64 1
- %tmp23276 = getelementptr inbounds float* %tmp23275, i64 1
- %tmp23277 = getelementptr inbounds float* %tmp23276, i64 1
- %tmp23278 = getelementptr inbounds float* %tmp23277, i64 1
- %tmp23279 = getelementptr inbounds float* %tmp23278, i64 1
- %tmp23280 = getelementptr inbounds float* %tmp23279, i64 1
- %tmp23281 = getelementptr inbounds float* %tmp23280, i64 1
- %tmp23282 = getelementptr inbounds float* %tmp23281, i64 1
- %tmp23283 = getelementptr inbounds float* %tmp23282, i64 1
- %tmp23284 = getelementptr inbounds float* %tmp23283, i64 1
- %tmp23285 = getelementptr inbounds float* %tmp23284, i64 1
- %tmp23286 = getelementptr inbounds float* %tmp23285, i64 1
- %tmp23287 = getelementptr inbounds float* %tmp23286, i64 1
- %tmp23288 = getelementptr inbounds float* %tmp23287, i64 1
- %tmp23289 = getelementptr inbounds float* %tmp23288, i64 1
- %tmp23290 = getelementptr inbounds float* %tmp23289, i64 1
- %tmp23291 = getelementptr inbounds float* %tmp23290, i64 1
- %tmp23292 = getelementptr inbounds float* %tmp23291, i64 1
- %tmp23293 = getelementptr inbounds float* %tmp23292, i64 1
- %tmp23294 = getelementptr inbounds float* %tmp23293, i64 1
- %tmp23295 = getelementptr inbounds float* %tmp23294, i64 1
- %tmp23296 = getelementptr inbounds float* %tmp23295, i64 1
- %tmp23297 = getelementptr inbounds float* %tmp23296, i64 1
- %tmp23298 = getelementptr inbounds float* %tmp23297, i64 1
- %tmp23299 = getelementptr inbounds float* %tmp23298, i64 1
- %tmp23300 = getelementptr inbounds float* %tmp23299, i64 1
- %tmp23301 = getelementptr inbounds float* %tmp23300, i64 1
- %tmp23302 = getelementptr inbounds float* %tmp23301, i64 1
- %tmp23303 = getelementptr inbounds float* %tmp23302, i64 1
- %tmp23304 = getelementptr inbounds float* %tmp23303, i64 1
- %tmp23305 = getelementptr inbounds float* %tmp23304, i64 1
- %tmp23306 = getelementptr inbounds float* %tmp23305, i64 1
- %tmp23307 = getelementptr inbounds float* %tmp23306, i64 1
- %tmp23308 = getelementptr inbounds float* %tmp23307, i64 1
- %tmp23309 = getelementptr inbounds float* %tmp23308, i64 1
- %tmp23310 = getelementptr inbounds float* %tmp23309, i64 1
- %tmp23311 = getelementptr inbounds float* %tmp23310, i64 1
- %tmp23312 = getelementptr inbounds float* %tmp23311, i64 1
- %tmp23313 = getelementptr inbounds float* %tmp23312, i64 1
- %tmp23314 = getelementptr inbounds float* %tmp23313, i64 1
- %tmp23315 = getelementptr inbounds float* %tmp23314, i64 1
- %tmp23316 = getelementptr inbounds float* %tmp23315, i64 1
- %tmp23317 = getelementptr inbounds float* %tmp23316, i64 1
- %tmp23318 = getelementptr inbounds float* %tmp23317, i64 1
- %tmp23319 = getelementptr inbounds float* %tmp23318, i64 1
- %tmp23320 = getelementptr inbounds float* %tmp23319, i64 1
- %tmp23321 = getelementptr inbounds float* %tmp23320, i64 1
- %tmp23322 = getelementptr inbounds float* %tmp23321, i64 1
- %tmp23323 = getelementptr inbounds float* %tmp23322, i64 1
- %tmp23324 = getelementptr inbounds float* %tmp23323, i64 1
- %tmp23325 = getelementptr inbounds float* %tmp23324, i64 1
- %tmp23326 = getelementptr inbounds float* %tmp23325, i64 1
- %tmp23327 = getelementptr inbounds float* %tmp23326, i64 1
- %tmp23328 = getelementptr inbounds float* %tmp23327, i64 1
- %tmp23329 = getelementptr inbounds float* %tmp23328, i64 1
- %tmp23330 = getelementptr inbounds float* %tmp23329, i64 1
- %tmp23331 = getelementptr inbounds float* %tmp23330, i64 1
- %tmp23332 = getelementptr inbounds float* %tmp23331, i64 1
- %tmp23333 = getelementptr inbounds float* %tmp23332, i64 1
- %tmp23334 = getelementptr inbounds float* %tmp23333, i64 1
- %tmp23335 = getelementptr inbounds float* %tmp23334, i64 1
- %tmp23336 = getelementptr inbounds float* %tmp23335, i64 1
- %tmp23337 = getelementptr inbounds float* %tmp23336, i64 1
- %tmp23338 = getelementptr inbounds float* %tmp23337, i64 1
- %tmp23339 = getelementptr inbounds float* %tmp23338, i64 1
- %tmp23340 = getelementptr inbounds float* %tmp23339, i64 1
- %tmp23341 = getelementptr inbounds float* %tmp23340, i64 1
- %tmp23342 = getelementptr inbounds float* %tmp23341, i64 1
- %tmp23343 = getelementptr inbounds float* %tmp23342, i64 1
- %tmp23344 = getelementptr inbounds float* %tmp23343, i64 1
- %tmp23345 = getelementptr inbounds float* %tmp23344, i64 1
- %tmp23346 = getelementptr inbounds float* %tmp23345, i64 1
- %tmp23347 = getelementptr inbounds float* %tmp23346, i64 1
- %tmp23348 = getelementptr inbounds float* %tmp23347, i64 1
- %tmp23349 = getelementptr inbounds float* %tmp23348, i64 1
- %tmp23350 = getelementptr inbounds float* %tmp23349, i64 1
- %tmp23351 = getelementptr inbounds float* %tmp23350, i64 1
- %tmp23352 = getelementptr inbounds float* %tmp23351, i64 1
- %tmp23353 = getelementptr inbounds float* %tmp23352, i64 1
- %tmp23354 = getelementptr inbounds float* %tmp23353, i64 1
- %tmp23355 = getelementptr inbounds float* %tmp23354, i64 1
- %tmp23356 = getelementptr inbounds float* %tmp23355, i64 1
- %tmp23357 = getelementptr inbounds float* %tmp23356, i64 1
- %tmp23358 = getelementptr inbounds float* %tmp23357, i64 1
- %tmp23359 = getelementptr inbounds float* %tmp23358, i64 1
- %tmp23360 = getelementptr inbounds float* %tmp23359, i64 1
- %tmp23361 = getelementptr inbounds float* %tmp23360, i64 1
- %tmp23362 = getelementptr inbounds float* %tmp23361, i64 1
- %tmp23363 = getelementptr inbounds float* %tmp23362, i64 1
- %tmp23364 = getelementptr inbounds float* %tmp23363, i64 1
- %tmp23365 = getelementptr inbounds float* %tmp23364, i64 1
- %tmp23366 = getelementptr inbounds float* %tmp23365, i64 1
- %tmp23367 = getelementptr inbounds float* %tmp23366, i64 1
- %tmp23368 = getelementptr inbounds float* %tmp23367, i64 1
- %tmp23369 = getelementptr inbounds float* %tmp23368, i64 1
- %tmp23370 = getelementptr inbounds float* %tmp23369, i64 1
- %tmp23371 = getelementptr inbounds float* %tmp23370, i64 1
- %tmp23372 = getelementptr inbounds float* %tmp23371, i64 1
- %tmp23373 = getelementptr inbounds float* %tmp23372, i64 1
- %tmp23374 = getelementptr inbounds float* %tmp23373, i64 1
- %tmp23375 = getelementptr inbounds float* %tmp23374, i64 1
- %tmp23376 = getelementptr inbounds float* %tmp23375, i64 1
- %tmp23377 = getelementptr inbounds float* %tmp23376, i64 1
- %tmp23378 = getelementptr inbounds float* %tmp23377, i64 1
- %tmp23379 = getelementptr inbounds float* %tmp23378, i64 1
- %tmp23380 = getelementptr inbounds float* %tmp23379, i64 1
- %tmp23381 = getelementptr inbounds float* %tmp23380, i64 1
- %tmp23382 = getelementptr inbounds float* %tmp23381, i64 1
- %tmp23383 = getelementptr inbounds float* %tmp23382, i64 1
- %tmp23384 = getelementptr inbounds float* %tmp23383, i64 1
- %tmp23385 = getelementptr inbounds float* %tmp23384, i64 1
- %tmp23386 = getelementptr inbounds float* %tmp23385, i64 1
- %tmp23387 = getelementptr inbounds float* %tmp23386, i64 1
- %tmp23388 = getelementptr inbounds float* %tmp23387, i64 1
- %tmp23389 = getelementptr inbounds float* %tmp23388, i64 1
- %tmp23390 = getelementptr inbounds float* %tmp23389, i64 1
- %tmp23391 = getelementptr inbounds float* %tmp23390, i64 1
- %tmp23392 = getelementptr inbounds float* %tmp23391, i64 1
- %tmp23393 = getelementptr inbounds float* %tmp23392, i64 1
- %tmp23394 = getelementptr inbounds float* %tmp23393, i64 1
- %tmp23395 = getelementptr inbounds float* %tmp23394, i64 1
- %tmp23396 = getelementptr inbounds float* %tmp23395, i64 1
- %tmp23397 = getelementptr inbounds float* %tmp23396, i64 1
- %tmp23398 = getelementptr inbounds float* %tmp23397, i64 1
- %tmp23399 = getelementptr inbounds float* %tmp23398, i64 1
- %tmp23400 = getelementptr inbounds float* %tmp23399, i64 1
- %tmp23401 = getelementptr inbounds float* %tmp23400, i64 1
- %tmp23402 = getelementptr inbounds float* %tmp23401, i64 1
- %tmp23403 = getelementptr inbounds float* %tmp23402, i64 1
- %tmp23404 = getelementptr inbounds float* %tmp23403, i64 1
- %tmp23405 = getelementptr inbounds float* %tmp23404, i64 1
- %tmp23406 = getelementptr inbounds float* %tmp23405, i64 1
- %tmp23407 = getelementptr inbounds float* %tmp23406, i64 1
- %tmp23408 = getelementptr inbounds float* %tmp23407, i64 1
- %tmp23409 = getelementptr inbounds float* %tmp23408, i64 1
- %tmp23410 = getelementptr inbounds float* %tmp23409, i64 1
- %tmp23411 = getelementptr inbounds float* %tmp23410, i64 1
- %tmp23412 = getelementptr inbounds float* %tmp23411, i64 1
- %tmp23413 = getelementptr inbounds float* %tmp23412, i64 1
- %tmp23414 = getelementptr inbounds float* %tmp23413, i64 1
- %tmp23415 = getelementptr inbounds float* %tmp23414, i64 1
- %tmp23416 = getelementptr inbounds float* %tmp23415, i64 1
- %tmp23417 = getelementptr inbounds float* %tmp23416, i64 1
- %tmp23418 = getelementptr inbounds float* %tmp23417, i64 1
- %tmp23419 = getelementptr inbounds float* %tmp23418, i64 1
- %tmp23420 = getelementptr inbounds float* %tmp23419, i64 1
- %tmp23421 = getelementptr inbounds float* %tmp23420, i64 1
- %tmp23422 = getelementptr inbounds float* %tmp23421, i64 1
- %tmp23423 = getelementptr inbounds float* %tmp23422, i64 1
- %tmp23424 = getelementptr inbounds float* %tmp23423, i64 1
- %tmp23425 = getelementptr inbounds float* %tmp23424, i64 1
- %tmp23426 = getelementptr inbounds float* %tmp23425, i64 1
- %tmp23427 = getelementptr inbounds float* %tmp23426, i64 1
- %tmp23428 = getelementptr inbounds float* %tmp23427, i64 1
- %tmp23429 = getelementptr inbounds float* %tmp23428, i64 1
- %tmp23430 = getelementptr inbounds float* %tmp23429, i64 1
- %tmp23431 = getelementptr inbounds float* %tmp23430, i64 1
- %tmp23432 = getelementptr inbounds float* %tmp23431, i64 1
- %tmp23433 = getelementptr inbounds float* %tmp23432, i64 1
- %tmp23434 = getelementptr inbounds float* %tmp23433, i64 1
- %tmp23435 = getelementptr inbounds float* %tmp23434, i64 1
- %tmp23436 = getelementptr inbounds float* %tmp23435, i64 1
- %tmp23437 = getelementptr inbounds float* %tmp23436, i64 1
- %tmp23438 = getelementptr inbounds float* %tmp23437, i64 1
- %tmp23439 = getelementptr inbounds float* %tmp23438, i64 1
- %tmp23440 = getelementptr inbounds float* %tmp23439, i64 1
- %tmp23441 = getelementptr inbounds float* %tmp23440, i64 1
- %tmp23442 = getelementptr inbounds float* %tmp23441, i64 1
- %tmp23443 = getelementptr inbounds float* %tmp23442, i64 1
- %tmp23444 = getelementptr inbounds float* %tmp23443, i64 1
- %tmp23445 = getelementptr inbounds float* %tmp23444, i64 1
- %tmp23446 = getelementptr inbounds float* %tmp23445, i64 1
- %tmp23447 = getelementptr inbounds float* %tmp23446, i64 1
- %tmp23448 = getelementptr inbounds float* %tmp23447, i64 1
- %tmp23449 = getelementptr inbounds float* %tmp23448, i64 1
- %tmp23450 = getelementptr inbounds float* %tmp23449, i64 1
- %tmp23451 = getelementptr inbounds float* %tmp23450, i64 1
- %tmp23452 = getelementptr inbounds float* %tmp23451, i64 1
- %tmp23453 = getelementptr inbounds float* %tmp23452, i64 1
- %tmp23454 = getelementptr inbounds float* %tmp23453, i64 1
- %tmp23455 = getelementptr inbounds float* %tmp23454, i64 1
- %tmp23456 = getelementptr inbounds float* %tmp23455, i64 1
- %tmp23457 = getelementptr inbounds float* %tmp23456, i64 1
- %tmp23458 = getelementptr inbounds float* %tmp23457, i64 1
- %tmp23459 = getelementptr inbounds float* %tmp23458, i64 1
- %tmp23460 = getelementptr inbounds float* %tmp23459, i64 1
- %tmp23461 = getelementptr inbounds float* %tmp23460, i64 1
- %tmp23462 = getelementptr inbounds float* %tmp23461, i64 1
- %tmp23463 = getelementptr inbounds float* %tmp23462, i64 1
- %tmp23464 = getelementptr inbounds float* %tmp23463, i64 1
- %tmp23465 = getelementptr inbounds float* %tmp23464, i64 1
- %tmp23466 = getelementptr inbounds float* %tmp23465, i64 1
- %tmp23467 = getelementptr inbounds float* %tmp23466, i64 1
- %tmp23468 = getelementptr inbounds float* %tmp23467, i64 1
- %tmp23469 = getelementptr inbounds float* %tmp23468, i64 1
- %tmp23470 = getelementptr inbounds float* %tmp23469, i64 1
- %tmp23471 = getelementptr inbounds float* %tmp23470, i64 1
- %tmp23472 = getelementptr inbounds float* %tmp23471, i64 1
- %tmp23473 = getelementptr inbounds float* %tmp23472, i64 1
- %tmp23474 = getelementptr inbounds float* %tmp23473, i64 1
- %tmp23475 = getelementptr inbounds float* %tmp23474, i64 1
- %tmp23476 = getelementptr inbounds float* %tmp23475, i64 1
- %tmp23477 = getelementptr inbounds float* %tmp23476, i64 1
- %tmp23478 = getelementptr inbounds float* %tmp23477, i64 1
- %tmp23479 = getelementptr inbounds float* %tmp23478, i64 1
- %tmp23480 = getelementptr inbounds float* %tmp23479, i64 1
- %tmp23481 = getelementptr inbounds float* %tmp23480, i64 1
- %tmp23482 = getelementptr inbounds float* %tmp23481, i64 1
- %tmp23483 = getelementptr inbounds float* %tmp23482, i64 1
- %tmp23484 = getelementptr inbounds float* %tmp23483, i64 1
- %tmp23485 = getelementptr inbounds float* %tmp23484, i64 1
- %tmp23486 = getelementptr inbounds float* %tmp23485, i64 1
- %tmp23487 = getelementptr inbounds float* %tmp23486, i64 1
- %tmp23488 = getelementptr inbounds float* %tmp23487, i64 1
- %tmp23489 = getelementptr inbounds float* %tmp23488, i64 1
- %tmp23490 = getelementptr inbounds float* %tmp23489, i64 1
- %tmp23491 = getelementptr inbounds float* %tmp23490, i64 1
- %tmp23492 = getelementptr inbounds float* %tmp23491, i64 1
- %tmp23493 = getelementptr inbounds float* %tmp23492, i64 1
- %tmp23494 = getelementptr inbounds float* %tmp23493, i64 1
- %tmp23495 = getelementptr inbounds float* %tmp23494, i64 1
- %tmp23496 = getelementptr inbounds float* %tmp23495, i64 1
- %tmp23497 = getelementptr inbounds float* %tmp23496, i64 1
- %tmp23498 = getelementptr inbounds float* %tmp23497, i64 1
- %tmp23499 = getelementptr inbounds float* %tmp23498, i64 1
- %tmp23500 = getelementptr inbounds float* %tmp23499, i64 1
- %tmp23501 = getelementptr inbounds float* %tmp23500, i64 1
- %tmp23502 = getelementptr inbounds float* %tmp23501, i64 1
- %tmp23503 = getelementptr inbounds float* %tmp23502, i64 1
- %tmp23504 = getelementptr inbounds float* %tmp23503, i64 1
- %tmp23505 = getelementptr inbounds float* %tmp23504, i64 1
- %tmp23506 = getelementptr inbounds float* %tmp23505, i64 1
- %tmp23507 = getelementptr inbounds float* %tmp23506, i64 1
- %tmp23508 = getelementptr inbounds float* %tmp23507, i64 1
- %tmp23509 = getelementptr inbounds float* %tmp23508, i64 1
- %tmp23510 = getelementptr inbounds float* %tmp23509, i64 1
- %tmp23511 = getelementptr inbounds float* %tmp23510, i64 1
- %tmp23512 = getelementptr inbounds float* %tmp23511, i64 1
- %tmp23513 = getelementptr inbounds float* %tmp23512, i64 1
- %tmp23514 = getelementptr inbounds float* %tmp23513, i64 1
- %tmp23515 = getelementptr inbounds float* %tmp23514, i64 1
- %tmp23516 = getelementptr inbounds float* %tmp23515, i64 1
- %tmp23517 = getelementptr inbounds float* %tmp23516, i64 1
- %tmp23518 = getelementptr inbounds float* %tmp23517, i64 1
- %tmp23519 = getelementptr inbounds float* %tmp23518, i64 1
- %tmp23520 = getelementptr inbounds float* %tmp23519, i64 1
- %tmp23521 = getelementptr inbounds float* %tmp23520, i64 1
- %tmp23522 = getelementptr inbounds float* %tmp23521, i64 1
- %tmp23523 = getelementptr inbounds float* %tmp23522, i64 1
- %tmp23524 = getelementptr inbounds float* %tmp23523, i64 1
- %tmp23525 = getelementptr inbounds float* %tmp23524, i64 1
- %tmp23526 = getelementptr inbounds float* %tmp23525, i64 1
- %tmp23527 = getelementptr inbounds float* %tmp23526, i64 1
- %tmp23528 = getelementptr inbounds float* %tmp23527, i64 1
- %tmp23529 = getelementptr inbounds float* %tmp23528, i64 1
- %tmp23530 = getelementptr inbounds float* %tmp23529, i64 1
- %tmp23531 = getelementptr inbounds float* %tmp23530, i64 1
- %tmp23532 = getelementptr inbounds float* %tmp23531, i64 1
- %tmp23533 = getelementptr inbounds float* %tmp23532, i64 1
- %tmp23534 = getelementptr inbounds float* %tmp23533, i64 1
- %tmp23535 = getelementptr inbounds float* %tmp23534, i64 1
- %tmp23536 = getelementptr inbounds float* %tmp23535, i64 1
- %tmp23537 = getelementptr inbounds float* %tmp23536, i64 1
- %tmp23538 = getelementptr inbounds float* %tmp23537, i64 1
- %tmp23539 = getelementptr inbounds float* %tmp23538, i64 1
- %tmp23540 = getelementptr inbounds float* %tmp23539, i64 1
- %tmp23541 = getelementptr inbounds float* %tmp23540, i64 1
- %tmp23542 = getelementptr inbounds float* %tmp23541, i64 1
- %tmp23543 = getelementptr inbounds float* %tmp23542, i64 1
- %tmp23544 = getelementptr inbounds float* %tmp23543, i64 1
- %tmp23545 = getelementptr inbounds float* %tmp23544, i64 1
- %tmp23546 = getelementptr inbounds float* %tmp23545, i64 1
- %tmp23547 = getelementptr inbounds float* %tmp23546, i64 1
- %tmp23548 = getelementptr inbounds float* %tmp23547, i64 1
- %tmp23549 = getelementptr inbounds float* %tmp23548, i64 1
- %tmp23550 = getelementptr inbounds float* %tmp23549, i64 1
- %tmp23551 = getelementptr inbounds float* %tmp23550, i64 1
- %tmp23552 = getelementptr inbounds float* %tmp23551, i64 1
- %tmp23553 = getelementptr inbounds float* %tmp23552, i64 1
- %tmp23554 = getelementptr inbounds float* %tmp23553, i64 1
- %tmp23555 = getelementptr inbounds float* %tmp23554, i64 1
- %tmp23556 = getelementptr inbounds float* %tmp23555, i64 1
- %tmp23557 = getelementptr inbounds float* %tmp23556, i64 1
- %tmp23558 = getelementptr inbounds float* %tmp23557, i64 1
- %tmp23559 = getelementptr inbounds float* %tmp23558, i64 1
- %tmp23560 = getelementptr inbounds float* %tmp23559, i64 1
- %tmp23561 = getelementptr inbounds float* %tmp23560, i64 1
- %tmp23562 = getelementptr inbounds float* %tmp23561, i64 1
- %tmp23563 = getelementptr inbounds float* %tmp23562, i64 1
- %tmp23564 = getelementptr inbounds float* %tmp23563, i64 1
- %tmp23565 = getelementptr inbounds float* %tmp23564, i64 1
- %tmp23566 = getelementptr inbounds float* %tmp23565, i64 1
- %tmp23567 = getelementptr inbounds float* %tmp23566, i64 1
- %tmp23568 = getelementptr inbounds float* %tmp23567, i64 1
- %tmp23569 = getelementptr inbounds float* %tmp23568, i64 1
- %tmp23570 = getelementptr inbounds float* %tmp23569, i64 1
- %tmp23571 = getelementptr inbounds float* %tmp23570, i64 1
- %tmp23572 = getelementptr inbounds float* %tmp23571, i64 1
- %tmp23573 = getelementptr inbounds float* %tmp23572, i64 1
- %tmp23574 = getelementptr inbounds float* %tmp23573, i64 1
- %tmp23575 = getelementptr inbounds float* %tmp23574, i64 1
- %tmp23576 = getelementptr inbounds float* %tmp23575, i64 1
- %tmp23577 = getelementptr inbounds float* %tmp23576, i64 1
- %tmp23578 = getelementptr inbounds float* %tmp23577, i64 1
- %tmp23579 = getelementptr inbounds float* %tmp23578, i64 1
- %tmp23580 = getelementptr inbounds float* %tmp23579, i64 1
- %tmp23581 = getelementptr inbounds float* %tmp23580, i64 1
- %tmp23582 = getelementptr inbounds float* %tmp23581, i64 1
- %tmp23583 = getelementptr inbounds float* %tmp23582, i64 1
- %tmp23584 = getelementptr inbounds float* %tmp23583, i64 1
- %tmp23585 = getelementptr inbounds float* %tmp23584, i64 1
- %tmp23586 = getelementptr inbounds float* %tmp23585, i64 1
- %tmp23587 = getelementptr inbounds float* %tmp23586, i64 1
- %tmp23588 = getelementptr inbounds float* %tmp23587, i64 1
- %tmp23589 = getelementptr inbounds float* %tmp23588, i64 1
- %tmp23590 = getelementptr inbounds float* %tmp23589, i64 1
- %tmp23591 = getelementptr inbounds float* %tmp23590, i64 1
- %tmp23592 = getelementptr inbounds float* %tmp23591, i64 1
- %tmp23593 = getelementptr inbounds float* %tmp23592, i64 1
- %tmp23594 = getelementptr inbounds float* %tmp23593, i64 1
- %tmp23595 = getelementptr inbounds float* %tmp23594, i64 1
- %tmp23596 = getelementptr inbounds float* %tmp23595, i64 1
- %tmp23597 = getelementptr inbounds float* %tmp23596, i64 1
- %tmp23598 = getelementptr inbounds float* %tmp23597, i64 1
- %tmp23599 = getelementptr inbounds float* %tmp23598, i64 1
- %tmp23600 = getelementptr inbounds float* %tmp23599, i64 1
- %tmp23601 = getelementptr inbounds float* %tmp23600, i64 1
- %tmp23602 = getelementptr inbounds float* %tmp23601, i64 1
- %tmp23603 = getelementptr inbounds float* %tmp23602, i64 1
- %tmp23604 = getelementptr inbounds float* %tmp23603, i64 1
- %tmp23605 = getelementptr inbounds float* %tmp23604, i64 1
- %tmp23606 = getelementptr inbounds float* %tmp23605, i64 1
- %tmp23607 = getelementptr inbounds float* %tmp23606, i64 1
- %tmp23608 = getelementptr inbounds float* %tmp23607, i64 1
- %tmp23609 = getelementptr inbounds float* %tmp23608, i64 1
- %tmp23610 = getelementptr inbounds float* %tmp23609, i64 1
- %tmp23611 = getelementptr inbounds float* %tmp23610, i64 1
- %tmp23612 = getelementptr inbounds float* %tmp23611, i64 1
- %tmp23613 = getelementptr inbounds float* %tmp23612, i64 1
- %tmp23614 = getelementptr inbounds float* %tmp23613, i64 1
- %tmp23615 = getelementptr inbounds float* %tmp23614, i64 1
- %tmp23616 = getelementptr inbounds float* %tmp23615, i64 1
- %tmp23617 = getelementptr inbounds float* %tmp23616, i64 1
- %tmp23618 = getelementptr inbounds float* %tmp23617, i64 1
- %tmp23619 = getelementptr inbounds float* %tmp23618, i64 1
- %tmp23620 = getelementptr inbounds float* %tmp23619, i64 1
- %tmp23621 = getelementptr inbounds float* %tmp23620, i64 1
- %tmp23622 = getelementptr inbounds float* %tmp23621, i64 1
- %tmp23623 = getelementptr inbounds float* %tmp23622, i64 1
- %tmp23624 = getelementptr inbounds float* %tmp23623, i64 1
- %tmp23625 = getelementptr inbounds float* %tmp23624, i64 1
- %tmp23626 = getelementptr inbounds float* %tmp23625, i64 1
- %tmp23627 = getelementptr inbounds float* %tmp23626, i64 1
- %tmp23628 = getelementptr inbounds float* %tmp23627, i64 1
- %tmp23629 = getelementptr inbounds float* %tmp23628, i64 1
- %tmp23630 = getelementptr inbounds float* %tmp23629, i64 1
- %tmp23631 = getelementptr inbounds float* %tmp23630, i64 1
- %tmp23632 = getelementptr inbounds float* %tmp23631, i64 1
- %tmp23633 = getelementptr inbounds float* %tmp23632, i64 1
- %tmp23634 = getelementptr inbounds float* %tmp23633, i64 1
- %tmp23635 = getelementptr inbounds float* %tmp23634, i64 1
- %tmp23636 = getelementptr inbounds float* %tmp23635, i64 1
- %tmp23637 = getelementptr inbounds float* %tmp23636, i64 1
- %tmp23638 = getelementptr inbounds float* %tmp23637, i64 1
- %tmp23639 = getelementptr inbounds float* %tmp23638, i64 1
- %tmp23640 = getelementptr inbounds float* %tmp23639, i64 1
- %tmp23641 = getelementptr inbounds float* %tmp23640, i64 1
- %tmp23642 = getelementptr inbounds float* %tmp23641, i64 1
- %tmp23643 = getelementptr inbounds float* %tmp23642, i64 1
- %tmp23644 = getelementptr inbounds float* %tmp23643, i64 1
- %tmp23645 = getelementptr inbounds float* %tmp23644, i64 1
- %tmp23646 = getelementptr inbounds float* %tmp23645, i64 1
- %tmp23647 = getelementptr inbounds float* %tmp23646, i64 1
- %tmp23648 = getelementptr inbounds float* %tmp23647, i64 1
- %tmp23649 = getelementptr inbounds float* %tmp23648, i64 1
- %tmp23650 = getelementptr inbounds float* %tmp23649, i64 1
- %tmp23651 = getelementptr inbounds float* %tmp23650, i64 1
- %tmp23652 = getelementptr inbounds float* %tmp23651, i64 1
- %tmp23653 = getelementptr inbounds float* %tmp23652, i64 1
- %tmp23654 = getelementptr inbounds float* %tmp23653, i64 1
- %tmp23655 = getelementptr inbounds float* %tmp23654, i64 1
- %tmp23656 = getelementptr inbounds float* %tmp23655, i64 1
- %tmp23657 = getelementptr inbounds float* %tmp23656, i64 1
- %tmp23658 = getelementptr inbounds float* %tmp23657, i64 1
- %tmp23659 = getelementptr inbounds float* %tmp23658, i64 1
- %tmp23660 = getelementptr inbounds float* %tmp23659, i64 1
- %tmp23661 = getelementptr inbounds float* %tmp23660, i64 1
- %tmp23662 = getelementptr inbounds float* %tmp23661, i64 1
- %tmp23663 = getelementptr inbounds float* %tmp23662, i64 1
- %tmp23664 = getelementptr inbounds float* %tmp23663, i64 1
- %tmp23665 = getelementptr inbounds float* %tmp23664, i64 1
- %tmp23666 = getelementptr inbounds float* %tmp23665, i64 1
- %tmp23667 = getelementptr inbounds float* %tmp23666, i64 1
- %tmp23668 = getelementptr inbounds float* %tmp23667, i64 1
- %tmp23669 = getelementptr inbounds float* %tmp23668, i64 1
- %tmp23670 = getelementptr inbounds float* %tmp23669, i64 1
- %tmp23671 = getelementptr inbounds float* %tmp23670, i64 1
- %tmp23672 = getelementptr inbounds float* %tmp23671, i64 1
- %tmp23673 = getelementptr inbounds float* %tmp23672, i64 1
- %tmp23674 = getelementptr inbounds float* %tmp23673, i64 1
- %tmp23675 = getelementptr inbounds float* %tmp23674, i64 1
- %tmp23676 = getelementptr inbounds float* %tmp23675, i64 1
- %tmp23677 = getelementptr inbounds float* %tmp23676, i64 1
- %tmp23678 = getelementptr inbounds float* %tmp23677, i64 1
- %tmp23679 = getelementptr inbounds float* %tmp23678, i64 1
- %tmp23680 = getelementptr inbounds float* %tmp23679, i64 1
- %tmp23681 = getelementptr inbounds float* %tmp23680, i64 1
- %tmp23682 = getelementptr inbounds float* %tmp23681, i64 1
- %tmp23683 = getelementptr inbounds float* %tmp23682, i64 1
- %tmp23684 = getelementptr inbounds float* %tmp23683, i64 1
- %tmp23685 = getelementptr inbounds float* %tmp23684, i64 1
- %tmp23686 = getelementptr inbounds float* %tmp23685, i64 1
- %tmp23687 = getelementptr inbounds float* %tmp23686, i64 1
- %tmp23688 = getelementptr inbounds float* %tmp23687, i64 1
- %tmp23689 = getelementptr inbounds float* %tmp23688, i64 1
- %tmp23690 = getelementptr inbounds float* %tmp23689, i64 1
- %tmp23691 = getelementptr inbounds float* %tmp23690, i64 1
- %tmp23692 = getelementptr inbounds float* %tmp23691, i64 1
- %tmp23693 = getelementptr inbounds float* %tmp23692, i64 1
- %tmp23694 = getelementptr inbounds float* %tmp23693, i64 1
- %tmp23695 = getelementptr inbounds float* %tmp23694, i64 1
- %tmp23696 = getelementptr inbounds float* %tmp23695, i64 1
- %tmp23697 = getelementptr inbounds float* %tmp23696, i64 1
- %tmp23698 = getelementptr inbounds float* %tmp23697, i64 1
- %tmp23699 = getelementptr inbounds float* %tmp23698, i64 1
- %tmp23700 = getelementptr inbounds float* %tmp23699, i64 1
- %tmp23701 = getelementptr inbounds float* %tmp23700, i64 1
- %tmp23702 = getelementptr inbounds float* %tmp23701, i64 1
- %tmp23703 = getelementptr inbounds float* %tmp23702, i64 1
- %tmp23704 = getelementptr inbounds float* %tmp23703, i64 1
- %tmp23705 = getelementptr inbounds float* %tmp23704, i64 1
- %tmp23706 = getelementptr inbounds float* %tmp23705, i64 1
- %tmp23707 = getelementptr inbounds float* %tmp23706, i64 1
- %tmp23708 = getelementptr inbounds float* %tmp23707, i64 1
- %tmp23709 = getelementptr inbounds float* %tmp23708, i64 1
- %tmp23710 = getelementptr inbounds float* %tmp23709, i64 1
- %tmp23711 = getelementptr inbounds float* %tmp23710, i64 1
- %tmp23712 = getelementptr inbounds float* %tmp23711, i64 1
- %tmp23713 = getelementptr inbounds float* %tmp23712, i64 1
- %tmp23714 = getelementptr inbounds float* %tmp23713, i64 1
- %tmp23715 = getelementptr inbounds float* %tmp23714, i64 1
- %tmp23716 = getelementptr inbounds float* %tmp23715, i64 1
- %tmp23717 = getelementptr inbounds float* %tmp23716, i64 1
- %tmp23718 = getelementptr inbounds float* %tmp23717, i64 1
- %tmp23719 = getelementptr inbounds float* %tmp23718, i64 1
- %tmp23720 = getelementptr inbounds float* %tmp23719, i64 1
- %tmp23721 = getelementptr inbounds float* %tmp23720, i64 1
- %tmp23722 = getelementptr inbounds float* %tmp23721, i64 1
- %tmp23723 = getelementptr inbounds float* %tmp23722, i64 1
- %tmp23724 = getelementptr inbounds float* %tmp23723, i64 1
- %tmp23725 = getelementptr inbounds float* %tmp23724, i64 1
- %tmp23726 = getelementptr inbounds float* %tmp23725, i64 1
- %tmp23727 = getelementptr inbounds float* %tmp23726, i64 1
- %tmp23728 = getelementptr inbounds float* %tmp23727, i64 1
- %tmp23729 = getelementptr inbounds float* %tmp23728, i64 1
- %tmp23730 = getelementptr inbounds float* %tmp23729, i64 1
- %tmp23731 = getelementptr inbounds float* %tmp23730, i64 1
- %tmp23732 = getelementptr inbounds float* %tmp23731, i64 1
- %tmp23733 = getelementptr inbounds float* %tmp23732, i64 1
- %tmp23734 = getelementptr inbounds float* %tmp23733, i64 1
- %tmp23735 = getelementptr inbounds float* %tmp23734, i64 1
- %tmp23736 = getelementptr inbounds float* %tmp23735, i64 1
- %tmp23737 = getelementptr inbounds float* %tmp23736, i64 1
- %tmp23738 = getelementptr inbounds float* %tmp23737, i64 1
- %tmp23739 = getelementptr inbounds float* %tmp23738, i64 1
- %tmp23740 = getelementptr inbounds float* %tmp23739, i64 1
- %tmp23741 = getelementptr inbounds float* %tmp23740, i64 1
- %tmp23742 = getelementptr inbounds float* %tmp23741, i64 1
- %tmp23743 = getelementptr inbounds float* %tmp23742, i64 1
- %tmp23744 = getelementptr inbounds float* %tmp23743, i64 1
- %tmp23745 = getelementptr inbounds float* %tmp23744, i64 1
- %tmp23746 = getelementptr inbounds float* %tmp23745, i64 1
- %tmp23747 = getelementptr inbounds float* %tmp23746, i64 1
- %tmp23748 = getelementptr inbounds float* %tmp23747, i64 1
- %tmp23749 = getelementptr inbounds float* %tmp23748, i64 1
- %tmp23750 = getelementptr inbounds float* %tmp23749, i64 1
- %tmp23751 = getelementptr inbounds float* %tmp23750, i64 1
- %tmp23752 = getelementptr inbounds float* %tmp23751, i64 1
- %tmp23753 = getelementptr inbounds float* %tmp23752, i64 1
- %tmp23754 = getelementptr inbounds float* %tmp23753, i64 1
- %tmp23755 = getelementptr inbounds float* %tmp23754, i64 1
- %tmp23756 = getelementptr inbounds float* %tmp23755, i64 1
- %tmp23757 = getelementptr inbounds float* %tmp23756, i64 1
- %tmp23758 = getelementptr inbounds float* %tmp23757, i64 1
- %tmp23759 = getelementptr inbounds float* %tmp23758, i64 1
- %tmp23760 = getelementptr inbounds float* %tmp23759, i64 1
- %tmp23761 = getelementptr inbounds float* %tmp23760, i64 1
- %tmp23762 = getelementptr inbounds float* %tmp23761, i64 1
- %tmp23763 = getelementptr inbounds float* %tmp23762, i64 1
- %tmp23764 = getelementptr inbounds float* %tmp23763, i64 1
- %tmp23765 = getelementptr inbounds float* %tmp23764, i64 1
- %tmp23766 = getelementptr inbounds float* %tmp23765, i64 1
- %tmp23767 = getelementptr inbounds float* %tmp23766, i64 1
- %tmp23768 = getelementptr inbounds float* %tmp23767, i64 1
- %tmp23769 = getelementptr inbounds float* %tmp23768, i64 1
- %tmp23770 = getelementptr inbounds float* %tmp23769, i64 1
- %tmp23771 = getelementptr inbounds float* %tmp23770, i64 1
- %tmp23772 = getelementptr inbounds float* %tmp23771, i64 1
- %tmp23773 = getelementptr inbounds float* %tmp23772, i64 1
- %tmp23774 = getelementptr inbounds float* %tmp23773, i64 1
- %tmp23775 = getelementptr inbounds float* %tmp23774, i64 1
- %tmp23776 = getelementptr inbounds float* %tmp23775, i64 1
- %tmp23777 = getelementptr inbounds float* %tmp23776, i64 1
- %tmp23778 = getelementptr inbounds float* %tmp23777, i64 1
- %tmp23779 = getelementptr inbounds float* %tmp23778, i64 1
- %tmp23780 = getelementptr inbounds float* %tmp23779, i64 1
- %tmp23781 = getelementptr inbounds float* %tmp23780, i64 1
- %tmp23782 = getelementptr inbounds float* %tmp23781, i64 1
- %tmp23783 = getelementptr inbounds float* %tmp23782, i64 1
- %tmp23784 = getelementptr inbounds float* %tmp23783, i64 1
- %tmp23785 = getelementptr inbounds float* %tmp23784, i64 1
- %tmp23786 = getelementptr inbounds float* %tmp23785, i64 1
- %tmp23787 = getelementptr inbounds float* %tmp23786, i64 1
- %tmp23788 = getelementptr inbounds float* %tmp23787, i64 1
- %tmp23789 = getelementptr inbounds float* %tmp23788, i64 1
- %tmp23790 = getelementptr inbounds float* %tmp23789, i64 1
- %tmp23791 = getelementptr inbounds float* %tmp23790, i64 1
- %tmp23792 = getelementptr inbounds float* %tmp23791, i64 1
- %tmp23793 = getelementptr inbounds float* %tmp23792, i64 1
- %tmp23794 = getelementptr inbounds float* %tmp23793, i64 1
- %tmp23795 = getelementptr inbounds float* %tmp23794, i64 1
- %tmp23796 = getelementptr inbounds float* %tmp23795, i64 1
- %tmp23797 = getelementptr inbounds float* %tmp23796, i64 1
- %tmp23798 = getelementptr inbounds float* %tmp23797, i64 1
- %tmp23799 = getelementptr inbounds float* %tmp23798, i64 1
- %tmp23800 = getelementptr inbounds float* %tmp23799, i64 1
- %tmp23801 = getelementptr inbounds float* %tmp23800, i64 1
- %tmp23802 = getelementptr inbounds float* %tmp23801, i64 1
- %tmp23803 = getelementptr inbounds float* %tmp23802, i64 1
- %tmp23804 = getelementptr inbounds float* %tmp23803, i64 1
- %tmp23805 = getelementptr inbounds float* %tmp23804, i64 1
- %tmp23806 = getelementptr inbounds float* %tmp23805, i64 1
- %tmp23807 = getelementptr inbounds float* %tmp23806, i64 1
- %tmp23808 = getelementptr inbounds float* %tmp23807, i64 1
- %tmp23809 = getelementptr inbounds float* %tmp23808, i64 1
- %tmp23810 = getelementptr inbounds float* %tmp23809, i64 1
- %tmp23811 = getelementptr inbounds float* %tmp23810, i64 1
- %tmp23812 = getelementptr inbounds float* %tmp23811, i64 1
- %tmp23813 = getelementptr inbounds float* %tmp23812, i64 1
- %tmp23814 = getelementptr inbounds float* %tmp23813, i64 1
- %tmp23815 = getelementptr inbounds float* %tmp23814, i64 1
- %tmp23816 = getelementptr inbounds float* %tmp23815, i64 1
- %tmp23817 = getelementptr inbounds float* %tmp23816, i64 1
- %tmp23818 = getelementptr inbounds float* %tmp23817, i64 1
- %tmp23819 = getelementptr inbounds float* %tmp23818, i64 1
- %tmp23820 = getelementptr inbounds float* %tmp23819, i64 1
- %tmp23821 = getelementptr inbounds float* %tmp23820, i64 1
- %tmp23822 = getelementptr inbounds float* %tmp23821, i64 1
- %tmp23823 = getelementptr inbounds float* %tmp23822, i64 1
- %tmp23824 = getelementptr inbounds float* %tmp23823, i64 1
- %tmp23825 = getelementptr inbounds float* %tmp23824, i64 1
- %tmp23826 = getelementptr inbounds float* %tmp23825, i64 1
- %tmp23827 = getelementptr inbounds float* %tmp23826, i64 1
- %tmp23828 = getelementptr inbounds float* %tmp23827, i64 1
- %tmp23829 = getelementptr inbounds float* %tmp23828, i64 1
- %tmp23830 = getelementptr inbounds float* %tmp23829, i64 1
- %tmp23831 = getelementptr inbounds float* %tmp23830, i64 1
- %tmp23832 = getelementptr inbounds float* %tmp23831, i64 1
- %tmp23833 = getelementptr inbounds float* %tmp23832, i64 1
- %tmp23834 = getelementptr inbounds float* %tmp23833, i64 1
- %tmp23835 = getelementptr inbounds float* %tmp23834, i64 1
- %tmp23836 = getelementptr inbounds float* %tmp23835, i64 1
- %tmp23837 = getelementptr inbounds float* %tmp23836, i64 1
- %tmp23838 = getelementptr inbounds float* %tmp23837, i64 1
- %tmp23839 = getelementptr inbounds float* %tmp23838, i64 1
- %tmp23840 = getelementptr inbounds float* %tmp23839, i64 1
- %tmp23841 = getelementptr inbounds float* %tmp23840, i64 1
- %tmp23842 = getelementptr inbounds float* %tmp23841, i64 1
- %tmp23843 = getelementptr inbounds float* %tmp23842, i64 1
- %tmp23844 = getelementptr inbounds float* %tmp23843, i64 1
- %tmp23845 = getelementptr inbounds float* %tmp23844, i64 1
- %tmp23846 = getelementptr inbounds float* %tmp23845, i64 1
- %tmp23847 = getelementptr inbounds float* %tmp23846, i64 1
- %tmp23848 = getelementptr inbounds float* %tmp23847, i64 1
- %tmp23849 = getelementptr inbounds float* %tmp23848, i64 1
- %tmp23850 = getelementptr inbounds float* %tmp23849, i64 1
- %tmp23851 = getelementptr inbounds float* %tmp23850, i64 1
- %tmp23852 = getelementptr inbounds float* %tmp23851, i64 1
- %tmp23853 = getelementptr inbounds float* %tmp23852, i64 1
- %tmp23854 = getelementptr inbounds float* %tmp23853, i64 1
- %tmp23855 = getelementptr inbounds float* %tmp23854, i64 1
- %tmp23856 = getelementptr inbounds float* %tmp23855, i64 1
- %tmp23857 = getelementptr inbounds float* %tmp23856, i64 1
- %tmp23858 = getelementptr inbounds float* %tmp23857, i64 1
- %tmp23859 = getelementptr inbounds float* %tmp23858, i64 1
- %tmp23860 = getelementptr inbounds float* %tmp23859, i64 1
- %tmp23861 = getelementptr inbounds float* %tmp23860, i64 1
- %tmp23862 = getelementptr inbounds float* %tmp23861, i64 1
- %tmp23863 = getelementptr inbounds float* %tmp23862, i64 1
- %tmp23864 = getelementptr inbounds float* %tmp23863, i64 1
- %tmp23865 = getelementptr inbounds float* %tmp23864, i64 1
- %tmp23866 = getelementptr inbounds float* %tmp23865, i64 1
- %tmp23867 = getelementptr inbounds float* %tmp23866, i64 1
- %tmp23868 = getelementptr inbounds float* %tmp23867, i64 1
- %tmp23869 = getelementptr inbounds float* %tmp23868, i64 1
- %tmp23870 = getelementptr inbounds float* %tmp23869, i64 1
- %tmp23871 = getelementptr inbounds float* %tmp23870, i64 1
- %tmp23872 = getelementptr inbounds float* %tmp23871, i64 1
- %tmp23873 = getelementptr inbounds float* %tmp23872, i64 1
- %tmp23874 = getelementptr inbounds float* %tmp23873, i64 1
- %tmp23875 = getelementptr inbounds float* %tmp23874, i64 1
- %tmp23876 = getelementptr inbounds float* %tmp23875, i64 1
- %tmp23877 = getelementptr inbounds float* %tmp23876, i64 1
- %tmp23878 = getelementptr inbounds float* %tmp23877, i64 1
- %tmp23879 = getelementptr inbounds float* %tmp23878, i64 1
- %tmp23880 = getelementptr inbounds float* %tmp23879, i64 1
- %tmp23881 = getelementptr inbounds float* %tmp23880, i64 1
- %tmp23882 = getelementptr inbounds float* %tmp23881, i64 1
- %tmp23883 = getelementptr inbounds float* %tmp23882, i64 1
- %tmp23884 = getelementptr inbounds float* %tmp23883, i64 1
- %tmp23885 = getelementptr inbounds float* %tmp23884, i64 1
- %tmp23886 = getelementptr inbounds float* %tmp23885, i64 1
- %tmp23887 = getelementptr inbounds float* %tmp23886, i64 1
- %tmp23888 = getelementptr inbounds float* %tmp23887, i64 1
- %tmp23889 = getelementptr inbounds float* %tmp23888, i64 1
- %tmp23890 = getelementptr inbounds float* %tmp23889, i64 1
- %tmp23891 = getelementptr inbounds float* %tmp23890, i64 1
- %tmp23892 = getelementptr inbounds float* %tmp23891, i64 1
- %tmp23893 = getelementptr inbounds float* %tmp23892, i64 1
- %tmp23894 = getelementptr inbounds float* %tmp23893, i64 1
- %tmp23895 = getelementptr inbounds float* %tmp23894, i64 1
- %tmp23896 = getelementptr inbounds float* %tmp23895, i64 1
- %tmp23897 = getelementptr inbounds float* %tmp23896, i64 1
- %tmp23898 = getelementptr inbounds float* %tmp23897, i64 1
- %tmp23899 = getelementptr inbounds float* %tmp23898, i64 1
- %tmp23900 = getelementptr inbounds float* %tmp23899, i64 1
- %tmp23901 = getelementptr inbounds float* %tmp23900, i64 1
- %tmp23902 = getelementptr inbounds float* %tmp23901, i64 1
- %tmp23903 = getelementptr inbounds float* %tmp23902, i64 1
- %tmp23904 = getelementptr inbounds float* %tmp23903, i64 1
- %tmp23905 = getelementptr inbounds float* %tmp23904, i64 1
- %tmp23906 = getelementptr inbounds float* %tmp23905, i64 1
- %tmp23907 = getelementptr inbounds float* %tmp23906, i64 1
- %tmp23908 = getelementptr inbounds float* %tmp23907, i64 1
- %tmp23909 = getelementptr inbounds float* %tmp23908, i64 1
- %tmp23910 = getelementptr inbounds float* %tmp23909, i64 1
- %tmp23911 = getelementptr inbounds float* %tmp23910, i64 1
- %tmp23912 = getelementptr inbounds float* %tmp23911, i64 1
- %tmp23913 = getelementptr inbounds float* %tmp23912, i64 1
- %tmp23914 = getelementptr inbounds float* %tmp23913, i64 1
- %tmp23915 = getelementptr inbounds float* %tmp23914, i64 1
- %tmp23916 = getelementptr inbounds float* %tmp23915, i64 1
- %tmp23917 = getelementptr inbounds float* %tmp23916, i64 1
- %tmp23918 = getelementptr inbounds float* %tmp23917, i64 1
- %tmp23919 = getelementptr inbounds float* %tmp23918, i64 1
- %tmp23920 = getelementptr inbounds float* %tmp23919, i64 1
- %tmp23921 = getelementptr inbounds float* %tmp23920, i64 1
- %tmp23922 = getelementptr inbounds float* %tmp23921, i64 1
- %tmp23923 = getelementptr inbounds float* %tmp23922, i64 1
- %tmp23924 = getelementptr inbounds float* %tmp23923, i64 1
- %tmp23925 = getelementptr inbounds float* %tmp23924, i64 1
- %tmp23926 = getelementptr inbounds float* %tmp23925, i64 1
- %tmp23927 = getelementptr inbounds float* %tmp23926, i64 1
- %tmp23928 = getelementptr inbounds float* %tmp23927, i64 1
- %tmp23929 = getelementptr inbounds float* %tmp23928, i64 1
- %tmp23930 = getelementptr inbounds float* %tmp23929, i64 1
- %tmp23931 = getelementptr inbounds float* %tmp23930, i64 1
- %tmp23932 = getelementptr inbounds float* %tmp23931, i64 1
- %tmp23933 = getelementptr inbounds float* %tmp23932, i64 1
- %tmp23934 = getelementptr inbounds float* %tmp23933, i64 1
- %tmp23935 = getelementptr inbounds float* %tmp23934, i64 1
- %tmp23936 = getelementptr inbounds float* %tmp23935, i64 1
- %tmp23937 = getelementptr inbounds float* %tmp23936, i64 1
- %tmp23938 = getelementptr inbounds float* %tmp23937, i64 1
- %tmp23939 = getelementptr inbounds float* %tmp23938, i64 1
- %tmp23940 = getelementptr inbounds float* %tmp23939, i64 1
- %tmp23941 = getelementptr inbounds float* %tmp23940, i64 1
- %tmp23942 = getelementptr inbounds float* %tmp23941, i64 1
- %tmp23943 = getelementptr inbounds float* %tmp23942, i64 1
- %tmp23944 = getelementptr inbounds float* %tmp23943, i64 1
- %tmp23945 = getelementptr inbounds float* %tmp23944, i64 1
- %tmp23946 = getelementptr inbounds float* %tmp23945, i64 1
- %tmp23947 = getelementptr inbounds float* %tmp23946, i64 1
- %tmp23948 = getelementptr inbounds float* %tmp23947, i64 1
- %tmp23949 = getelementptr inbounds float* %tmp23948, i64 1
- %tmp23950 = getelementptr inbounds float* %tmp23949, i64 1
- %tmp23951 = getelementptr inbounds float* %tmp23950, i64 1
- %tmp23952 = getelementptr inbounds float* %tmp23951, i64 1
- %tmp23953 = getelementptr inbounds float* %tmp23952, i64 1
- %tmp23954 = getelementptr inbounds float* %tmp23953, i64 1
- %tmp23955 = getelementptr inbounds float* %tmp23954, i64 1
- %tmp23956 = getelementptr inbounds float* %tmp23955, i64 1
- %tmp23957 = getelementptr inbounds float* %tmp23956, i64 1
- %tmp23958 = getelementptr inbounds float* %tmp23957, i64 1
- %tmp23959 = getelementptr inbounds float* %tmp23958, i64 1
- %tmp23960 = getelementptr inbounds float* %tmp23959, i64 1
- %tmp23961 = getelementptr inbounds float* %tmp23960, i64 1
- %tmp23962 = getelementptr inbounds float* %tmp23961, i64 1
- %tmp23963 = getelementptr inbounds float* %tmp23962, i64 1
- %tmp23964 = getelementptr inbounds float* %tmp23963, i64 1
- %tmp23965 = getelementptr inbounds float* %tmp23964, i64 1
- %tmp23966 = getelementptr inbounds float* %tmp23965, i64 1
- %tmp23967 = getelementptr inbounds float* %tmp23966, i64 1
- %tmp23968 = getelementptr inbounds float* %tmp23967, i64 1
- %tmp23969 = getelementptr inbounds float* %tmp23968, i64 1
- %tmp23970 = getelementptr inbounds float* %tmp23969, i64 1
- %tmp23971 = getelementptr inbounds float* %tmp23970, i64 1
- %tmp23972 = getelementptr inbounds float* %tmp23971, i64 1
- %tmp23973 = getelementptr inbounds float* %tmp23972, i64 1
- %tmp23974 = getelementptr inbounds float* %tmp23973, i64 1
- %tmp23975 = getelementptr inbounds float* %tmp23974, i64 1
- %tmp23976 = getelementptr inbounds float* %tmp23975, i64 1
- %tmp23977 = getelementptr inbounds float* %tmp23976, i64 1
- %tmp23978 = getelementptr inbounds float* %tmp23977, i64 1
- %tmp23979 = getelementptr inbounds float* %tmp23978, i64 1
- %tmp23980 = getelementptr inbounds float* %tmp23979, i64 1
- %tmp23981 = getelementptr inbounds float* %tmp23980, i64 1
- %tmp23982 = getelementptr inbounds float* %tmp23981, i64 1
- %tmp23983 = getelementptr inbounds float* %tmp23982, i64 1
- %tmp23984 = getelementptr inbounds float* %tmp23983, i64 1
- %tmp23985 = getelementptr inbounds float* %tmp23984, i64 1
- %tmp23986 = getelementptr inbounds float* %tmp23985, i64 1
- %tmp23987 = getelementptr inbounds float* %tmp23986, i64 1
- %tmp23988 = getelementptr inbounds float* %tmp23987, i64 1
- %tmp23989 = getelementptr inbounds float* %tmp23988, i64 1
- %tmp23990 = getelementptr inbounds float* %tmp23989, i64 1
- %tmp23991 = getelementptr inbounds float* %tmp23990, i64 1
- %tmp23992 = getelementptr inbounds float* %tmp23991, i64 1
- %tmp23993 = getelementptr inbounds float* %tmp23992, i64 1
- %tmp23994 = getelementptr inbounds float* %tmp23993, i64 1
- %tmp23995 = getelementptr inbounds float* %tmp23994, i64 1
- %tmp23996 = getelementptr inbounds float* %tmp23995, i64 1
- %tmp23997 = getelementptr inbounds float* %tmp23996, i64 1
- %tmp23998 = getelementptr inbounds float* %tmp23997, i64 1
- %tmp23999 = getelementptr inbounds float* %tmp23998, i64 1
- %tmp24000 = getelementptr inbounds float* %tmp23999, i64 1
- %tmp24001 = getelementptr inbounds float* %tmp24000, i64 1
- %tmp24002 = getelementptr inbounds float* %tmp24001, i64 1
- %tmp24003 = getelementptr inbounds float* %tmp24002, i64 1
- %tmp24004 = getelementptr inbounds float* %tmp24003, i64 1
- %tmp24005 = getelementptr inbounds float* %tmp24004, i64 1
- %tmp24006 = getelementptr inbounds float* %tmp24005, i64 1
- %tmp24007 = getelementptr inbounds float* %tmp24006, i64 1
- %tmp24008 = getelementptr inbounds float* %tmp24007, i64 1
- %tmp24009 = getelementptr inbounds float* %tmp24008, i64 1
- %tmp24010 = getelementptr inbounds float* %tmp24009, i64 1
- %tmp24011 = getelementptr inbounds float* %tmp24010, i64 1
- %tmp24012 = getelementptr inbounds float* %tmp24011, i64 1
- %tmp24013 = getelementptr inbounds float* %tmp24012, i64 1
- %tmp24014 = getelementptr inbounds float* %tmp24013, i64 1
- %tmp24015 = getelementptr inbounds float* %tmp24014, i64 1
- %tmp24016 = getelementptr inbounds float* %tmp24015, i64 1
- %tmp24017 = getelementptr inbounds float* %tmp24016, i64 1
- %tmp24018 = getelementptr inbounds float* %tmp24017, i64 1
- %tmp24019 = getelementptr inbounds float* %tmp24018, i64 1
- %tmp24020 = getelementptr inbounds float* %tmp24019, i64 1
- %tmp24021 = getelementptr inbounds float* %tmp24020, i64 1
- %tmp24022 = getelementptr inbounds float* %tmp24021, i64 1
- %tmp24023 = getelementptr inbounds float* %tmp24022, i64 1
- %tmp24024 = getelementptr inbounds float* %tmp24023, i64 1
- %tmp24025 = getelementptr inbounds float* %tmp24024, i64 1
- %tmp24026 = getelementptr inbounds float* %tmp24025, i64 1
- %tmp24027 = getelementptr inbounds float* %tmp24026, i64 1
- %tmp24028 = getelementptr inbounds float* %tmp24027, i64 1
- %tmp24029 = getelementptr inbounds float* %tmp24028, i64 1
- %tmp24030 = getelementptr inbounds float* %tmp24029, i64 1
- %tmp24031 = getelementptr inbounds float* %tmp24030, i64 1
- %tmp24032 = getelementptr inbounds float* %tmp24031, i64 1
- %tmp24033 = getelementptr inbounds float* %tmp24032, i64 1
- %tmp24034 = getelementptr inbounds float* %tmp24033, i64 1
- %tmp24035 = getelementptr inbounds float* %tmp24034, i64 1
- %tmp24036 = getelementptr inbounds float* %tmp24035, i64 1
- %tmp24037 = getelementptr inbounds float* %tmp24036, i64 1
- %tmp24038 = getelementptr inbounds float* %tmp24037, i64 1
- %tmp24039 = getelementptr inbounds float* %tmp24038, i64 1
- %tmp24040 = getelementptr inbounds float* %tmp24039, i64 1
- %tmp24041 = getelementptr inbounds float* %tmp24040, i64 1
- %tmp24042 = getelementptr inbounds float* %tmp24041, i64 1
- %tmp24043 = getelementptr inbounds float* %tmp24042, i64 1
- %tmp24044 = getelementptr inbounds float* %tmp24043, i64 1
- %tmp24045 = getelementptr inbounds float* %tmp24044, i64 1
- %tmp24046 = getelementptr inbounds float* %tmp24045, i64 1
- %tmp24047 = getelementptr inbounds float* %tmp24046, i64 1
- %tmp24048 = getelementptr inbounds float* %tmp24047, i64 1
- %tmp24049 = getelementptr inbounds float* %tmp24048, i64 1
- %tmp24050 = getelementptr inbounds float* %tmp24049, i64 1
- %tmp24051 = getelementptr inbounds float* %tmp24050, i64 1
- %tmp24052 = getelementptr inbounds float* %tmp24051, i64 1
- %tmp24053 = getelementptr inbounds float* %tmp24052, i64 1
- %tmp24054 = getelementptr inbounds float* %tmp24053, i64 1
- %tmp24055 = getelementptr inbounds float* %tmp24054, i64 1
- %tmp24056 = getelementptr inbounds float* %tmp24055, i64 1
- %tmp24057 = getelementptr inbounds float* %tmp24056, i64 1
- %tmp24058 = getelementptr inbounds float* %tmp24057, i64 1
- %tmp24059 = getelementptr inbounds float* %tmp24058, i64 1
- %tmp24060 = getelementptr inbounds float* %tmp24059, i64 1
- %tmp24061 = getelementptr inbounds float* %tmp24060, i64 1
- %tmp24062 = getelementptr inbounds float* %tmp24061, i64 1
- %tmp24063 = getelementptr inbounds float* %tmp24062, i64 1
- %tmp24064 = getelementptr inbounds float* %tmp24063, i64 1
- %tmp24065 = getelementptr inbounds float* %tmp24064, i64 1
- %tmp24066 = getelementptr inbounds float* %tmp24065, i64 1
- %tmp24067 = getelementptr inbounds float* %tmp24066, i64 1
- %tmp24068 = getelementptr inbounds float* %tmp24067, i64 1
- %tmp24069 = getelementptr inbounds float* %tmp24068, i64 1
- %tmp24070 = getelementptr inbounds float* %tmp24069, i64 1
- %tmp24071 = getelementptr inbounds float* %tmp24070, i64 1
- %tmp24072 = getelementptr inbounds float* %tmp24071, i64 1
- %tmp24073 = getelementptr inbounds float* %tmp24072, i64 1
- %tmp24074 = getelementptr inbounds float* %tmp24073, i64 1
- %tmp24075 = getelementptr inbounds float* %tmp24074, i64 1
- %tmp24076 = getelementptr inbounds float* %tmp24075, i64 1
- %tmp24077 = getelementptr inbounds float* %tmp24076, i64 1
- %tmp24078 = getelementptr inbounds float* %tmp24077, i64 1
- %tmp24079 = getelementptr inbounds float* %tmp24078, i64 1
- %tmp24080 = getelementptr inbounds float* %tmp24079, i64 1
- %tmp24081 = getelementptr inbounds float* %tmp24080, i64 1
- %tmp24082 = getelementptr inbounds float* %tmp24081, i64 1
- %tmp24083 = getelementptr inbounds float* %tmp24082, i64 1
- %tmp24084 = getelementptr inbounds float* %tmp24083, i64 1
- %tmp24085 = getelementptr inbounds float* %tmp24084, i64 1
- %tmp24086 = getelementptr inbounds float* %tmp24085, i64 1
- %tmp24087 = getelementptr inbounds float* %tmp24086, i64 1
- %tmp24088 = getelementptr inbounds float* %tmp24087, i64 1
- %tmp24089 = getelementptr inbounds float* %tmp24088, i64 1
- %tmp24090 = getelementptr inbounds float* %tmp24089, i64 1
- %tmp24091 = getelementptr inbounds float* %tmp24090, i64 1
- %tmp24092 = getelementptr inbounds float* %tmp24091, i64 1
- %tmp24093 = getelementptr inbounds float* %tmp24092, i64 1
- %tmp24094 = getelementptr inbounds float* %tmp24093, i64 1
- %tmp24095 = getelementptr inbounds float* %tmp24094, i64 1
- %tmp24096 = getelementptr inbounds float* %tmp24095, i64 1
- %tmp24097 = getelementptr inbounds float* %tmp24096, i64 1
- %tmp24098 = getelementptr inbounds float* %tmp24097, i64 1
- %tmp24099 = getelementptr inbounds float* %tmp24098, i64 1
- %tmp24100 = getelementptr inbounds float* %tmp24099, i64 1
- %tmp24101 = getelementptr inbounds float* %tmp24100, i64 1
- %tmp24102 = getelementptr inbounds float* %tmp24101, i64 1
- %tmp24103 = getelementptr inbounds float* %tmp24102, i64 1
- %tmp24104 = getelementptr inbounds float* %tmp24103, i64 1
- %tmp24105 = getelementptr inbounds float* %tmp24104, i64 1
- %tmp24106 = getelementptr inbounds float* %tmp24105, i64 1
- %tmp24107 = getelementptr inbounds float* %tmp24106, i64 1
- %tmp24108 = getelementptr inbounds float* %tmp24107, i64 1
- %tmp24109 = getelementptr inbounds float* %tmp24108, i64 1
- %tmp24110 = getelementptr inbounds float* %tmp24109, i64 1
- %tmp24111 = getelementptr inbounds float* %tmp24110, i64 1
- %tmp24112 = getelementptr inbounds float* %tmp24111, i64 1
- %tmp24113 = getelementptr inbounds float* %tmp24112, i64 1
- %tmp24114 = getelementptr inbounds float* %tmp24113, i64 1
- %tmp24115 = getelementptr inbounds float* %tmp24114, i64 1
- %tmp24116 = getelementptr inbounds float* %tmp24115, i64 1
- %tmp24117 = getelementptr inbounds float* %tmp24116, i64 1
- %tmp24118 = getelementptr inbounds float* %tmp24117, i64 1
- %tmp24119 = getelementptr inbounds float* %tmp24118, i64 1
- %tmp24120 = getelementptr inbounds float* %tmp24119, i64 1
- %tmp24121 = getelementptr inbounds float* %tmp24120, i64 1
- %tmp24122 = getelementptr inbounds float* %tmp24121, i64 1
- %tmp24123 = getelementptr inbounds float* %tmp24122, i64 1
- %tmp24124 = getelementptr inbounds float* %tmp24123, i64 1
- %tmp24125 = getelementptr inbounds float* %tmp24124, i64 1
- %tmp24126 = getelementptr inbounds float* %tmp24125, i64 1
- %tmp24127 = getelementptr inbounds float* %tmp24126, i64 1
- %tmp24128 = getelementptr inbounds float* %tmp24127, i64 1
- %tmp24129 = getelementptr inbounds float* %tmp24128, i64 1
- %tmp24130 = getelementptr inbounds float* %tmp24129, i64 1
- %tmp24131 = getelementptr inbounds float* %tmp24130, i64 1
- %tmp24132 = getelementptr inbounds float* %tmp24131, i64 1
- %tmp24133 = getelementptr inbounds float* %tmp24132, i64 1
- %tmp24134 = getelementptr inbounds float* %tmp24133, i64 1
- %tmp24135 = getelementptr inbounds float* %tmp24134, i64 1
- %tmp24136 = getelementptr inbounds float* %tmp24135, i64 1
- %tmp24137 = getelementptr inbounds float* %tmp24136, i64 1
- %tmp24138 = getelementptr inbounds float* %tmp24137, i64 1
- %tmp24139 = getelementptr inbounds float* %tmp24138, i64 1
- %tmp24140 = getelementptr inbounds float* %tmp24139, i64 1
- %tmp24141 = getelementptr inbounds float* %tmp24140, i64 1
- %tmp24142 = getelementptr inbounds float* %tmp24141, i64 1
- %tmp24143 = getelementptr inbounds float* %tmp24142, i64 1
- %tmp24144 = getelementptr inbounds float* %tmp24143, i64 1
- %tmp24145 = getelementptr inbounds float* %tmp24144, i64 1
- %tmp24146 = getelementptr inbounds float* %tmp24145, i64 1
- %tmp24147 = getelementptr inbounds float* %tmp24146, i64 1
- %tmp24148 = getelementptr inbounds float* %tmp24147, i64 1
- %tmp24149 = getelementptr inbounds float* %tmp24148, i64 1
- %tmp24150 = getelementptr inbounds float* %tmp24149, i64 1
- %tmp24151 = getelementptr inbounds float* %tmp24150, i64 1
- %tmp24152 = getelementptr inbounds float* %tmp24151, i64 1
- %tmp24153 = getelementptr inbounds float* %tmp24152, i64 1
- %tmp24154 = getelementptr inbounds float* %tmp24153, i64 1
- %tmp24155 = getelementptr inbounds float* %tmp24154, i64 1
- %tmp24156 = getelementptr inbounds float* %tmp24155, i64 1
- %tmp24157 = getelementptr inbounds float* %tmp24156, i64 1
- %tmp24158 = getelementptr inbounds float* %tmp24157, i64 1
- %tmp24159 = getelementptr inbounds float* %tmp24158, i64 1
- %tmp24160 = getelementptr inbounds float* %tmp24159, i64 1
- %tmp24161 = getelementptr inbounds float* %tmp24160, i64 1
- %tmp24162 = getelementptr inbounds float* %tmp24161, i64 1
- %tmp24163 = getelementptr inbounds float* %tmp24162, i64 1
- %tmp24164 = getelementptr inbounds float* %tmp24163, i64 1
- %tmp24165 = getelementptr inbounds float* %tmp24164, i64 1
- %tmp24166 = getelementptr inbounds float* %tmp24165, i64 1
- %tmp24167 = getelementptr inbounds float* %tmp24166, i64 1
- %tmp24168 = getelementptr inbounds float* %tmp24167, i64 1
- %tmp24169 = getelementptr inbounds float* %tmp24168, i64 1
- %tmp24170 = getelementptr inbounds float* %tmp24169, i64 1
- %tmp24171 = getelementptr inbounds float* %tmp24170, i64 1
- %tmp24172 = getelementptr inbounds float* %tmp24171, i64 1
- %tmp24173 = getelementptr inbounds float* %tmp24172, i64 1
- %tmp24174 = getelementptr inbounds float* %tmp24173, i64 1
- %tmp24175 = getelementptr inbounds float* %tmp24174, i64 1
- %tmp24176 = getelementptr inbounds float* %tmp24175, i64 1
- %tmp24177 = getelementptr inbounds float* %tmp24176, i64 1
- %tmp24178 = getelementptr inbounds float* %tmp24177, i64 1
- %tmp24179 = getelementptr inbounds float* %tmp24178, i64 1
- %tmp24180 = getelementptr inbounds float* %tmp24179, i64 1
- %tmp24181 = getelementptr inbounds float* %tmp24180, i64 1
- %tmp24182 = getelementptr inbounds float* %tmp24181, i64 1
- %tmp24183 = getelementptr inbounds float* %tmp24182, i64 1
- %tmp24184 = getelementptr inbounds float* %tmp24183, i64 1
- %tmp24185 = getelementptr inbounds float* %tmp24184, i64 1
- %tmp24186 = getelementptr inbounds float* %tmp24185, i64 1
- %tmp24187 = getelementptr inbounds float* %tmp24186, i64 1
- %tmp24188 = getelementptr inbounds float* %tmp24187, i64 1
- %tmp24189 = getelementptr inbounds float* %tmp24188, i64 1
- %tmp24190 = getelementptr inbounds float* %tmp24189, i64 1
- %tmp24191 = getelementptr inbounds float* %tmp24190, i64 1
- %tmp24192 = getelementptr inbounds float* %tmp24191, i64 1
- %tmp24193 = getelementptr inbounds float* %tmp24192, i64 1
- %tmp24194 = getelementptr inbounds float* %tmp24193, i64 1
- %tmp24195 = getelementptr inbounds float* %tmp24194, i64 1
- %tmp24196 = getelementptr inbounds float* %tmp24195, i64 1
- %tmp24197 = getelementptr inbounds float* %tmp24196, i64 1
- %tmp24198 = getelementptr inbounds float* %tmp24197, i64 1
- %tmp24199 = getelementptr inbounds float* %tmp24198, i64 1
- %tmp24200 = getelementptr inbounds float* %tmp24199, i64 1
- %tmp24201 = getelementptr inbounds float* %tmp24200, i64 1
- %tmp24202 = getelementptr inbounds float* %tmp24201, i64 1
- %tmp24203 = getelementptr inbounds float* %tmp24202, i64 1
- %tmp24204 = getelementptr inbounds float* %tmp24203, i64 1
- %tmp24205 = getelementptr inbounds float* %tmp24204, i64 1
- %tmp24206 = getelementptr inbounds float* %tmp24205, i64 1
- %tmp24207 = getelementptr inbounds float* %tmp24206, i64 1
- %tmp24208 = getelementptr inbounds float* %tmp24207, i64 1
- %tmp24209 = getelementptr inbounds float* %tmp24208, i64 1
- %tmp24210 = getelementptr inbounds float* %tmp24209, i64 1
- %tmp24211 = getelementptr inbounds float* %tmp24210, i64 1
- %tmp24212 = getelementptr inbounds float* %tmp24211, i64 1
- %tmp24213 = getelementptr inbounds float* %tmp24212, i64 1
- %tmp24214 = getelementptr inbounds float* %tmp24213, i64 1
- %tmp24215 = getelementptr inbounds float* %tmp24214, i64 1
- %tmp24216 = getelementptr inbounds float* %tmp24215, i64 1
- %tmp24217 = getelementptr inbounds float* %tmp24216, i64 1
- %tmp24218 = getelementptr inbounds float* %tmp24217, i64 1
- %tmp24219 = getelementptr inbounds float* %tmp24218, i64 1
- %tmp24220 = getelementptr inbounds float* %tmp24219, i64 1
- %tmp24221 = getelementptr inbounds float* %tmp24220, i64 1
- %tmp24222 = getelementptr inbounds float* %tmp24221, i64 1
- %tmp24223 = getelementptr inbounds float* %tmp24222, i64 1
- %tmp24224 = getelementptr inbounds float* %tmp24223, i64 1
- %tmp24225 = getelementptr inbounds float* %tmp24224, i64 1
- %tmp24226 = getelementptr inbounds float* %tmp24225, i64 1
- %tmp24227 = getelementptr inbounds float* %tmp24226, i64 1
- %tmp24228 = getelementptr inbounds float* %tmp24227, i64 1
- %tmp24229 = getelementptr inbounds float* %tmp24228, i64 1
- %tmp24230 = getelementptr inbounds float* %tmp24229, i64 1
- %tmp24231 = getelementptr inbounds float* %tmp24230, i64 1
- %tmp24232 = getelementptr inbounds float* %tmp24231, i64 1
- %tmp24233 = getelementptr inbounds float* %tmp24232, i64 1
- %tmp24234 = getelementptr inbounds float* %tmp24233, i64 1
- %tmp24235 = getelementptr inbounds float* %tmp24234, i64 1
- %tmp24236 = getelementptr inbounds float* %tmp24235, i64 1
- %tmp24237 = getelementptr inbounds float* %tmp24236, i64 1
- %tmp24238 = getelementptr inbounds float* %tmp24237, i64 1
- %tmp24239 = getelementptr inbounds float* %tmp24238, i64 1
- %tmp24240 = getelementptr inbounds float* %tmp24239, i64 1
- %tmp24241 = getelementptr inbounds float* %tmp24240, i64 1
- %tmp24242 = getelementptr inbounds float* %tmp24241, i64 1
- %tmp24243 = getelementptr inbounds float* %tmp24242, i64 1
- %tmp24244 = getelementptr inbounds float* %tmp24243, i64 1
- %tmp24245 = getelementptr inbounds float* %tmp24244, i64 1
- %tmp24246 = getelementptr inbounds float* %tmp24245, i64 1
- %tmp24247 = getelementptr inbounds float* %tmp24246, i64 1
- %tmp24248 = getelementptr inbounds float* %tmp24247, i64 1
- %tmp24249 = getelementptr inbounds float* %tmp24248, i64 1
- %tmp24250 = getelementptr inbounds float* %tmp24249, i64 1
- %tmp24251 = getelementptr inbounds float* %tmp24250, i64 1
- %tmp24252 = getelementptr inbounds float* %tmp24251, i64 1
- %tmp24253 = getelementptr inbounds float* %tmp24252, i64 1
- %tmp24254 = getelementptr inbounds float* %tmp24253, i64 1
- %tmp24255 = getelementptr inbounds float* %tmp24254, i64 1
- %tmp24256 = getelementptr inbounds float* %tmp24255, i64 1
- %tmp24257 = getelementptr inbounds float* %tmp24256, i64 1
- %tmp24258 = getelementptr inbounds float* %tmp24257, i64 1
- %tmp24259 = getelementptr inbounds float* %tmp24258, i64 1
- %tmp24260 = getelementptr inbounds float* %tmp24259, i64 1
- %tmp24261 = getelementptr inbounds float* %tmp24260, i64 1
- %tmp24262 = getelementptr inbounds float* %tmp24261, i64 1
- %tmp24263 = getelementptr inbounds float* %tmp24262, i64 1
- %tmp24264 = getelementptr inbounds float* %tmp24263, i64 1
- %tmp24265 = getelementptr inbounds float* %tmp24264, i64 1
- %tmp24266 = getelementptr inbounds float* %tmp24265, i64 1
- %tmp24267 = getelementptr inbounds float* %tmp24266, i64 1
- %tmp24268 = getelementptr inbounds float* %tmp24267, i64 1
- %tmp24269 = getelementptr inbounds float* %tmp24268, i64 1
- %tmp24270 = getelementptr inbounds float* %tmp24269, i64 1
- %tmp24271 = getelementptr inbounds float* %tmp24270, i64 1
- %tmp24272 = getelementptr inbounds float* %tmp24271, i64 1
- %tmp24273 = getelementptr inbounds float* %tmp24272, i64 1
- %tmp24274 = getelementptr inbounds float* %tmp24273, i64 1
- %tmp24275 = getelementptr inbounds float* %tmp24274, i64 1
- %tmp24276 = getelementptr inbounds float* %tmp24275, i64 1
- %tmp24277 = getelementptr inbounds float* %tmp24276, i64 1
- %tmp24278 = getelementptr inbounds float* %tmp24277, i64 1
- %tmp24279 = getelementptr inbounds float* %tmp24278, i64 1
- %tmp24280 = getelementptr inbounds float* %tmp24279, i64 1
- %tmp24281 = getelementptr inbounds float* %tmp24280, i64 1
- %tmp24282 = getelementptr inbounds float* %tmp24281, i64 1
- %tmp24283 = getelementptr inbounds float* %tmp24282, i64 1
- %tmp24284 = getelementptr inbounds float* %tmp24283, i64 1
- %tmp24285 = getelementptr inbounds float* %tmp24284, i64 1
- %tmp24286 = getelementptr inbounds float* %tmp24285, i64 1
- %tmp24287 = getelementptr inbounds float* %tmp24286, i64 1
- %tmp24288 = getelementptr inbounds float* %tmp24287, i64 1
- %tmp24289 = getelementptr inbounds float* %tmp24288, i64 1
- %tmp24290 = getelementptr inbounds float* %tmp24289, i64 1
- %tmp24291 = getelementptr inbounds float* %tmp24290, i64 1
- %tmp24292 = getelementptr inbounds float* %tmp24291, i64 1
- %tmp24293 = getelementptr inbounds float* %tmp24292, i64 1
- %tmp24294 = getelementptr inbounds float* %tmp24293, i64 1
- %tmp24295 = getelementptr inbounds float* %tmp24294, i64 1
- %tmp24296 = getelementptr inbounds float* %tmp24295, i64 1
- %tmp24297 = getelementptr inbounds float* %tmp24296, i64 1
- %tmp24298 = getelementptr inbounds float* %tmp24297, i64 1
- %tmp24299 = getelementptr inbounds float* %tmp24298, i64 1
- %tmp24300 = getelementptr inbounds float* %tmp24299, i64 1
- %tmp24301 = getelementptr inbounds float* %tmp24300, i64 1
- %tmp24302 = getelementptr inbounds float* %tmp24301, i64 1
- %tmp24303 = getelementptr inbounds float* %tmp24302, i64 1
- %tmp24304 = getelementptr inbounds float* %tmp24303, i64 1
- %tmp24305 = getelementptr inbounds float* %tmp24304, i64 1
- %tmp24306 = getelementptr inbounds float* %tmp24305, i64 1
- %tmp24307 = getelementptr inbounds float* %tmp24306, i64 1
- %tmp24308 = getelementptr inbounds float* %tmp24307, i64 1
- %tmp24309 = getelementptr inbounds float* %tmp24308, i64 1
- %tmp24310 = getelementptr inbounds float* %tmp24309, i64 1
- %tmp24311 = getelementptr inbounds float* %tmp24310, i64 1
- %tmp24312 = getelementptr inbounds float* %tmp24311, i64 1
- %tmp24313 = getelementptr inbounds float* %tmp24312, i64 1
- %tmp24314 = getelementptr inbounds float* %tmp24313, i64 1
- %tmp24315 = getelementptr inbounds float* %tmp24314, i64 1
- %tmp24316 = getelementptr inbounds float* %tmp24315, i64 1
- %tmp24317 = getelementptr inbounds float* %tmp24316, i64 1
- %tmp24318 = getelementptr inbounds float* %tmp24317, i64 1
- %tmp24319 = getelementptr inbounds float* %tmp24318, i64 1
- %tmp24320 = getelementptr inbounds float* %tmp24319, i64 1
- %tmp24321 = getelementptr inbounds float* %tmp24320, i64 1
- %tmp24322 = getelementptr inbounds float* %tmp24321, i64 1
- %tmp24323 = getelementptr inbounds float* %tmp24322, i64 1
- %tmp24324 = getelementptr inbounds float* %tmp24323, i64 1
- %tmp24325 = getelementptr inbounds float* %tmp24324, i64 1
- %tmp24326 = getelementptr inbounds float* %tmp24325, i64 1
- %tmp24327 = getelementptr inbounds float* %tmp24326, i64 1
- %tmp24328 = getelementptr inbounds float* %tmp24327, i64 1
- %tmp24329 = getelementptr inbounds float* %tmp24328, i64 1
- %tmp24330 = getelementptr inbounds float* %tmp24329, i64 1
- %tmp24331 = getelementptr inbounds float* %tmp24330, i64 1
- %tmp24332 = getelementptr inbounds float* %tmp24331, i64 1
- %tmp24333 = getelementptr inbounds float* %tmp24332, i64 1
- %tmp24334 = getelementptr inbounds float* %tmp24333, i64 1
- %tmp24335 = getelementptr inbounds float* %tmp24334, i64 1
- %tmp24336 = getelementptr inbounds float* %tmp24335, i64 1
- %tmp24337 = getelementptr inbounds float* %tmp24336, i64 1
- %tmp24338 = getelementptr inbounds float* %tmp24337, i64 1
- %tmp24339 = getelementptr inbounds float* %tmp24338, i64 1
- %tmp24340 = getelementptr inbounds float* %tmp24339, i64 1
- %tmp24341 = getelementptr inbounds float* %tmp24340, i64 1
- %tmp24342 = getelementptr inbounds float* %tmp24341, i64 1
- %tmp24343 = getelementptr inbounds float* %tmp24342, i64 1
- %tmp24344 = getelementptr inbounds float* %tmp24343, i64 1
- %tmp24345 = getelementptr inbounds float* %tmp24344, i64 1
- %tmp24346 = getelementptr inbounds float* %tmp24345, i64 1
- %tmp24347 = getelementptr inbounds float* %tmp24346, i64 1
- %tmp24348 = getelementptr inbounds float* %tmp24347, i64 1
- %tmp24349 = getelementptr inbounds float* %tmp24348, i64 1
- %tmp24350 = getelementptr inbounds float* %tmp24349, i64 1
- %tmp24351 = getelementptr inbounds float* %tmp24350, i64 1
- %tmp24352 = getelementptr inbounds float* %tmp24351, i64 1
- %tmp24353 = getelementptr inbounds float* %tmp24352, i64 1
- %tmp24354 = getelementptr inbounds float* %tmp24353, i64 1
- %tmp24355 = getelementptr inbounds float* %tmp24354, i64 1
- %tmp24356 = getelementptr inbounds float* %tmp24355, i64 1
- %tmp24357 = getelementptr inbounds float* %tmp24356, i64 1
- %tmp24358 = getelementptr inbounds float* %tmp24357, i64 1
- %tmp24359 = getelementptr inbounds float* %tmp24358, i64 1
- %tmp24360 = getelementptr inbounds float* %tmp24359, i64 1
- %tmp24361 = getelementptr inbounds float* %tmp24360, i64 1
- %tmp24362 = getelementptr inbounds float* %tmp24361, i64 1
- %tmp24363 = getelementptr inbounds float* %tmp24362, i64 1
- %tmp24364 = getelementptr inbounds float* %tmp24363, i64 1
- %tmp24365 = getelementptr inbounds float* %tmp24364, i64 1
- %tmp24366 = getelementptr inbounds float* %tmp24365, i64 1
- %tmp24367 = getelementptr inbounds float* %tmp24366, i64 1
- %tmp24368 = getelementptr inbounds float* %tmp24367, i64 1
- %tmp24369 = getelementptr inbounds float* %tmp24368, i64 1
- %tmp24370 = getelementptr inbounds float* %tmp24369, i64 1
- %tmp24371 = getelementptr inbounds float* %tmp24370, i64 1
- %tmp24372 = getelementptr inbounds float* %tmp24371, i64 1
- %tmp24373 = getelementptr inbounds float* %tmp24372, i64 1
- %tmp24374 = getelementptr inbounds float* %tmp24373, i64 1
- %tmp24375 = getelementptr inbounds float* %tmp24374, i64 1
- %tmp24376 = getelementptr inbounds float* %tmp24375, i64 1
- %tmp24377 = getelementptr inbounds float* %tmp24376, i64 1
- %tmp24378 = getelementptr inbounds float* %tmp24377, i64 1
- %tmp24379 = getelementptr inbounds float* %tmp24378, i64 1
- %tmp24380 = getelementptr inbounds float* %tmp24379, i64 1
- %tmp24381 = getelementptr inbounds float* %tmp24380, i64 1
- %tmp24382 = getelementptr inbounds float* %tmp24381, i64 1
- %tmp24383 = getelementptr inbounds float* %tmp24382, i64 1
- %tmp24384 = getelementptr inbounds float* %tmp24383, i64 1
- %tmp24385 = getelementptr inbounds float* %tmp24384, i64 1
- %tmp24386 = getelementptr inbounds float* %tmp24385, i64 1
- %tmp24387 = getelementptr inbounds float* %tmp24386, i64 1
- %tmp24388 = getelementptr inbounds float* %tmp24387, i64 1
- %tmp24389 = getelementptr inbounds float* %tmp24388, i64 1
- %tmp24390 = getelementptr inbounds float* %tmp24389, i64 1
- %tmp24391 = getelementptr inbounds float* %tmp24390, i64 1
- %tmp24392 = getelementptr inbounds float* %tmp24391, i64 1
- %tmp24393 = getelementptr inbounds float* %tmp24392, i64 1
- %tmp24394 = getelementptr inbounds float* %tmp24393, i64 1
- %tmp24395 = getelementptr inbounds float* %tmp24394, i64 1
- %tmp24396 = getelementptr inbounds float* %tmp24395, i64 1
- %tmp24397 = getelementptr inbounds float* %tmp24396, i64 1
- %tmp24398 = getelementptr inbounds float* %tmp24397, i64 1
- %tmp24399 = getelementptr inbounds float* %tmp24398, i64 1
- %tmp24400 = getelementptr inbounds float* %tmp24399, i64 1
- %tmp24401 = getelementptr inbounds float* %tmp24400, i64 1
- %tmp24402 = getelementptr inbounds float* %tmp24401, i64 1
- %tmp24403 = getelementptr inbounds float* %tmp24402, i64 1
- %tmp24404 = getelementptr inbounds float* %tmp24403, i64 1
- %tmp24405 = getelementptr inbounds float* %tmp24404, i64 1
- %tmp24406 = getelementptr inbounds float* %tmp24405, i64 1
- %tmp24407 = getelementptr inbounds float* %tmp24406, i64 1
- %tmp24408 = getelementptr inbounds float* %tmp24407, i64 1
- %tmp24409 = getelementptr inbounds float* %tmp24408, i64 1
- %tmp24410 = getelementptr inbounds float* %tmp24409, i64 1
- %tmp24411 = getelementptr inbounds float* %tmp24410, i64 1
- %tmp24412 = getelementptr inbounds float* %tmp24411, i64 1
- %tmp24413 = getelementptr inbounds float* %tmp24412, i64 1
- %tmp24414 = getelementptr inbounds float* %tmp24413, i64 1
- %tmp24415 = getelementptr inbounds float* %tmp24414, i64 1
- %tmp24416 = getelementptr inbounds float* %tmp24415, i64 1
- %tmp24417 = getelementptr inbounds float* %tmp24416, i64 1
- %tmp24418 = getelementptr inbounds float* %tmp24417, i64 1
- %tmp24419 = getelementptr inbounds float* %tmp24418, i64 1
- %tmp24420 = getelementptr inbounds float* %tmp24419, i64 1
- %tmp24421 = getelementptr inbounds float* %tmp24420, i64 1
- %tmp24422 = getelementptr inbounds float* %tmp24421, i64 1
- %tmp24423 = getelementptr inbounds float* %tmp24422, i64 1
- %tmp24424 = getelementptr inbounds float* %tmp24423, i64 1
- %tmp24425 = getelementptr inbounds float* %tmp24424, i64 1
- %tmp24426 = getelementptr inbounds float* %tmp24425, i64 1
- %tmp24427 = getelementptr inbounds float* %tmp24426, i64 1
- %tmp24428 = getelementptr inbounds float* %tmp24427, i64 1
- %tmp24429 = getelementptr inbounds float* %tmp24428, i64 1
- %tmp24430 = getelementptr inbounds float* %tmp24429, i64 1
- %tmp24431 = getelementptr inbounds float* %tmp24430, i64 1
- %tmp24432 = getelementptr inbounds float* %tmp24431, i64 1
- %tmp24433 = getelementptr inbounds float* %tmp24432, i64 1
- %tmp24434 = getelementptr inbounds float* %tmp24433, i64 1
- %tmp24435 = getelementptr inbounds float* %tmp24434, i64 1
- %tmp24436 = getelementptr inbounds float* %tmp24435, i64 1
- %tmp24437 = getelementptr inbounds float* %tmp24436, i64 1
- %tmp24438 = getelementptr inbounds float* %tmp24437, i64 1
- %tmp24439 = getelementptr inbounds float* %tmp24438, i64 1
- %tmp24440 = getelementptr inbounds float* %tmp24439, i64 1
- %tmp24441 = getelementptr inbounds float* %tmp24440, i64 1
- %tmp24442 = getelementptr inbounds float* %tmp24441, i64 1
- %tmp24443 = getelementptr inbounds float* %tmp24442, i64 1
- %tmp24444 = getelementptr inbounds float* %tmp24443, i64 1
- %tmp24445 = getelementptr inbounds float* %tmp24444, i64 1
- %tmp24446 = getelementptr inbounds float* %tmp24445, i64 1
- %tmp24447 = getelementptr inbounds float* %tmp24446, i64 1
- %tmp24448 = getelementptr inbounds float* %tmp24447, i64 1
- %tmp24449 = getelementptr inbounds float* %tmp24448, i64 1
- %tmp24450 = getelementptr inbounds float* %tmp24449, i64 1
- %tmp24451 = getelementptr inbounds float* %tmp24450, i64 1
- %tmp24452 = getelementptr inbounds float* %tmp24451, i64 1
- %tmp24453 = getelementptr inbounds float* %tmp24452, i64 1
- %tmp24454 = getelementptr inbounds float* %tmp24453, i64 1
- %tmp24455 = getelementptr inbounds float* %tmp24454, i64 1
- %tmp24456 = getelementptr inbounds float* %tmp24455, i64 1
- %tmp24457 = getelementptr inbounds float* %tmp24456, i64 1
- %tmp24458 = getelementptr inbounds float* %tmp24457, i64 1
- %tmp24459 = getelementptr inbounds float* %tmp24458, i64 1
- %tmp24460 = getelementptr inbounds float* %tmp24459, i64 1
- %tmp24461 = getelementptr inbounds float* %tmp24460, i64 1
- %tmp24462 = getelementptr inbounds float* %tmp24461, i64 1
- %tmp24463 = getelementptr inbounds float* %tmp24462, i64 1
- %tmp24464 = getelementptr inbounds float* %tmp24463, i64 1
- %tmp24465 = getelementptr inbounds float* %tmp24464, i64 1
- %tmp24466 = getelementptr inbounds float* %tmp24465, i64 1
- %tmp24467 = getelementptr inbounds float* %tmp24466, i64 1
- %tmp24468 = getelementptr inbounds float* %tmp24467, i64 1
- %tmp24469 = getelementptr inbounds float* %tmp24468, i64 1
- %tmp24470 = getelementptr inbounds float* %tmp24469, i64 1
- %tmp24471 = getelementptr inbounds float* %tmp24470, i64 1
- %tmp24472 = getelementptr inbounds float* %tmp24471, i64 1
- %tmp24473 = getelementptr inbounds float* %tmp24472, i64 1
- %tmp24474 = getelementptr inbounds float* %tmp24473, i64 1
- %tmp24475 = getelementptr inbounds float* %tmp24474, i64 1
- %tmp24476 = getelementptr inbounds float* %tmp24475, i64 1
- %tmp24477 = getelementptr inbounds float* %tmp24476, i64 1
- %tmp24478 = getelementptr inbounds float* %tmp24477, i64 1
- %tmp24479 = getelementptr inbounds float* %tmp24478, i64 1
- %tmp24480 = getelementptr inbounds float* %tmp24479, i64 1
- %tmp24481 = getelementptr inbounds float* %tmp24480, i64 1
- %tmp24482 = getelementptr inbounds float* %tmp24481, i64 1
- %tmp24483 = getelementptr inbounds float* %tmp24482, i64 1
- %tmp24484 = getelementptr inbounds float* %tmp24483, i64 1
- %tmp24485 = getelementptr inbounds float* %tmp24484, i64 1
- %tmp24486 = getelementptr inbounds float* %tmp24485, i64 1
- %tmp24487 = getelementptr inbounds float* %tmp24486, i64 1
- %tmp24488 = getelementptr inbounds float* %tmp24487, i64 1
- %tmp24489 = getelementptr inbounds float* %tmp24488, i64 1
- %tmp24490 = getelementptr inbounds float* %tmp24489, i64 1
- %tmp24491 = getelementptr inbounds float* %tmp24490, i64 1
- %tmp24492 = getelementptr inbounds float* %tmp24491, i64 1
- %tmp24493 = getelementptr inbounds float* %tmp24492, i64 1
- %tmp24494 = getelementptr inbounds float* %tmp24493, i64 1
- %tmp24495 = getelementptr inbounds float* %tmp24494, i64 1
- %tmp24496 = getelementptr inbounds float* %tmp24495, i64 1
- %tmp24497 = getelementptr inbounds float* %tmp24496, i64 1
- %tmp24498 = getelementptr inbounds float* %tmp24497, i64 1
- %tmp24499 = getelementptr inbounds float* %tmp24498, i64 1
- %tmp24500 = getelementptr inbounds float* %tmp24499, i64 1
- %tmp24501 = getelementptr inbounds float* %tmp24500, i64 1
- %tmp24502 = getelementptr inbounds float* %tmp24501, i64 1
- %tmp24503 = getelementptr inbounds float* %tmp24502, i64 1
- %tmp24504 = getelementptr inbounds float* %tmp24503, i64 1
- %tmp24505 = getelementptr inbounds float* %tmp24504, i64 1
- %tmp24506 = getelementptr inbounds float* %tmp24505, i64 1
- %tmp24507 = getelementptr inbounds float* %tmp24506, i64 1
- %tmp24508 = getelementptr inbounds float* %tmp24507, i64 1
- %tmp24509 = getelementptr inbounds float* %tmp24508, i64 1
- %tmp24510 = getelementptr inbounds float* %tmp24509, i64 1
- %tmp24511 = getelementptr inbounds float* %tmp24510, i64 1
- %tmp24512 = getelementptr inbounds float* %tmp24511, i64 1
- %tmp24513 = getelementptr inbounds float* %tmp24512, i64 1
- %tmp24514 = getelementptr inbounds float* %tmp24513, i64 1
- %tmp24515 = getelementptr inbounds float* %tmp24514, i64 1
- %tmp24516 = getelementptr inbounds float* %tmp24515, i64 1
- %tmp24517 = getelementptr inbounds float* %tmp24516, i64 1
- %tmp24518 = getelementptr inbounds float* %tmp24517, i64 1
- %tmp24519 = getelementptr inbounds float* %tmp24518, i64 1
- %tmp24520 = getelementptr inbounds float* %tmp24519, i64 1
- %tmp24521 = getelementptr inbounds float* %tmp24520, i64 1
- %tmp24522 = getelementptr inbounds float* %tmp24521, i64 1
- %tmp24523 = getelementptr inbounds float* %tmp24522, i64 1
- %tmp24524 = getelementptr inbounds float* %tmp24523, i64 1
- %tmp24525 = getelementptr inbounds float* %tmp24524, i64 1
- %tmp24526 = getelementptr inbounds float* %tmp24525, i64 1
- %tmp24527 = getelementptr inbounds float* %tmp24526, i64 1
- %tmp24528 = getelementptr inbounds float* %tmp24527, i64 1
- %tmp24529 = getelementptr inbounds float* %tmp24528, i64 1
- %tmp24530 = getelementptr inbounds float* %tmp24529, i64 1
- %tmp24531 = getelementptr inbounds float* %tmp24530, i64 1
- %tmp24532 = getelementptr inbounds float* %tmp24531, i64 1
- %tmp24533 = getelementptr inbounds float* %tmp24532, i64 1
- %tmp24534 = getelementptr inbounds float* %tmp24533, i64 1
- %tmp24535 = getelementptr inbounds float* %tmp24534, i64 1
- %tmp24536 = getelementptr inbounds float* %tmp24535, i64 1
- %tmp24537 = getelementptr inbounds float* %tmp24536, i64 1
- %tmp24538 = getelementptr inbounds float* %tmp24537, i64 1
- %tmp24539 = getelementptr inbounds float* %tmp24538, i64 1
- %tmp24540 = getelementptr inbounds float* %tmp24539, i64 1
- %tmp24541 = getelementptr inbounds float* %tmp24540, i64 1
- %tmp24542 = getelementptr inbounds float* %tmp24541, i64 1
- %tmp24543 = getelementptr inbounds float* %tmp24542, i64 1
- %tmp24544 = getelementptr inbounds float* %tmp24543, i64 1
- %tmp24545 = getelementptr inbounds float* %tmp24544, i64 1
- %tmp24546 = getelementptr inbounds float* %tmp24545, i64 1
- %tmp24547 = getelementptr inbounds float* %tmp24546, i64 1
- %tmp24548 = getelementptr inbounds float* %tmp24547, i64 1
- %tmp24549 = getelementptr inbounds float* %tmp24548, i64 1
- %tmp24550 = getelementptr inbounds float* %tmp24549, i64 1
- %tmp24551 = getelementptr inbounds float* %tmp24550, i64 1
- %tmp24552 = getelementptr inbounds float* %tmp24551, i64 1
- %tmp24553 = getelementptr inbounds float* %tmp24552, i64 1
- %tmp24554 = getelementptr inbounds float* %tmp24553, i64 1
- %tmp24555 = getelementptr inbounds float* %tmp24554, i64 1
- %tmp24556 = getelementptr inbounds float* %tmp24555, i64 1
- %tmp24557 = getelementptr inbounds float* %tmp24556, i64 1
- %tmp24558 = getelementptr inbounds float* %tmp24557, i64 1
- %tmp24559 = getelementptr inbounds float* %tmp24558, i64 1
- %tmp24560 = getelementptr inbounds float* %tmp24559, i64 1
- %tmp24561 = getelementptr inbounds float* %tmp24560, i64 1
- %tmp24562 = getelementptr inbounds float* %tmp24561, i64 1
- %tmp24563 = getelementptr inbounds float* %tmp24562, i64 1
- %tmp24564 = getelementptr inbounds float* %tmp24563, i64 1
- %tmp24565 = getelementptr inbounds float* %tmp24564, i64 1
- %tmp24566 = getelementptr inbounds float* %tmp24565, i64 1
- %tmp24567 = getelementptr inbounds float* %tmp24566, i64 1
- %tmp24568 = getelementptr inbounds float* %tmp24567, i64 1
- %tmp24569 = getelementptr inbounds float* %tmp24568, i64 1
- %tmp24570 = getelementptr inbounds float* %tmp24569, i64 1
- %tmp24571 = getelementptr inbounds float* %tmp24570, i64 1
- %tmp24572 = getelementptr inbounds float* %tmp24571, i64 1
- %tmp24573 = getelementptr inbounds float* %tmp24572, i64 1
- %tmp24574 = getelementptr inbounds float* %tmp24573, i64 1
- %tmp24575 = getelementptr inbounds float* %tmp24574, i64 1
- %tmp24576 = getelementptr inbounds float* %tmp24575, i64 1
- %tmp24577 = getelementptr inbounds float* %tmp24576, i64 1
- %tmp24578 = getelementptr inbounds float* %tmp24577, i64 1
- %tmp24579 = getelementptr inbounds float* %tmp24578, i64 1
- %tmp24580 = getelementptr inbounds float* %tmp24579, i64 1
- %tmp24581 = getelementptr inbounds float* %tmp24580, i64 1
- %tmp24582 = getelementptr inbounds float* %tmp24581, i64 1
- %tmp24583 = getelementptr inbounds float* %tmp24582, i64 1
- %tmp24584 = getelementptr inbounds float* %tmp24583, i64 1
- %tmp24585 = getelementptr inbounds float* %tmp24584, i64 1
- %tmp24586 = getelementptr inbounds float* %tmp24585, i64 1
- %tmp24587 = getelementptr inbounds float* %tmp24586, i64 1
- %tmp24588 = getelementptr inbounds float* %tmp24587, i64 1
- %tmp24589 = getelementptr inbounds float* %tmp24588, i64 1
- %tmp24590 = getelementptr inbounds float* %tmp24589, i64 1
- %tmp24591 = getelementptr inbounds float* %tmp24590, i64 1
- %tmp24592 = getelementptr inbounds float* %tmp24591, i64 1
- %tmp24593 = getelementptr inbounds float* %tmp24592, i64 1
- %tmp24594 = getelementptr inbounds float* %tmp24593, i64 1
- %tmp24595 = getelementptr inbounds float* %tmp24594, i64 1
- %tmp24596 = getelementptr inbounds float* %tmp24595, i64 1
- %tmp24597 = getelementptr inbounds float* %tmp24596, i64 1
- %tmp24598 = getelementptr inbounds float* %tmp24597, i64 1
- %tmp24599 = getelementptr inbounds float* %tmp24598, i64 1
- %tmp24600 = getelementptr inbounds float* %tmp24599, i64 1
- %tmp24601 = getelementptr inbounds float* %tmp24600, i64 1
- %tmp24602 = getelementptr inbounds float* %tmp24601, i64 1
- %tmp24603 = getelementptr inbounds float* %tmp24602, i64 1
- %tmp24604 = getelementptr inbounds float* %tmp24603, i64 1
- %tmp24605 = getelementptr inbounds float* %tmp24604, i64 1
- %tmp24606 = getelementptr inbounds float* %tmp24605, i64 1
- %tmp24607 = getelementptr inbounds float* %tmp24606, i64 1
- %tmp24608 = getelementptr inbounds float* %tmp24607, i64 1
- %tmp24609 = getelementptr inbounds float* %tmp24608, i64 1
- %tmp24610 = getelementptr inbounds float* %tmp24609, i64 1
- %tmp24611 = getelementptr inbounds float* %tmp24610, i64 1
- %tmp24612 = getelementptr inbounds float* %tmp24611, i64 1
- %tmp24613 = getelementptr inbounds float* %tmp24612, i64 1
- %tmp24614 = getelementptr inbounds float* %tmp24613, i64 1
- %tmp24615 = getelementptr inbounds float* %tmp24614, i64 1
- %tmp24616 = getelementptr inbounds float* %tmp24615, i64 1
- %tmp24617 = getelementptr inbounds float* %tmp24616, i64 1
- %tmp24618 = getelementptr inbounds float* %tmp24617, i64 1
- %tmp24619 = getelementptr inbounds float* %tmp24618, i64 1
- %tmp24620 = getelementptr inbounds float* %tmp24619, i64 1
- %tmp24621 = getelementptr inbounds float* %tmp24620, i64 1
- %tmp24622 = getelementptr inbounds float* %tmp24621, i64 1
- %tmp24623 = getelementptr inbounds float* %tmp24622, i64 1
- %tmp24624 = getelementptr inbounds float* %tmp24623, i64 1
- %tmp24625 = getelementptr inbounds float* %tmp24624, i64 1
- %tmp24626 = getelementptr inbounds float* %tmp24625, i64 1
- %tmp24627 = getelementptr inbounds float* %tmp24626, i64 1
- %tmp24628 = getelementptr inbounds float* %tmp24627, i64 1
- %tmp24629 = getelementptr inbounds float* %tmp24628, i64 1
- %tmp24630 = getelementptr inbounds float* %tmp24629, i64 1
- %tmp24631 = getelementptr inbounds float* %tmp24630, i64 1
- %tmp24632 = getelementptr inbounds float* %tmp24631, i64 1
- %tmp24633 = getelementptr inbounds float* %tmp24632, i64 1
- %tmp24634 = getelementptr inbounds float* %tmp24633, i64 1
- %tmp24635 = getelementptr inbounds float* %tmp24634, i64 1
- %tmp24636 = getelementptr inbounds float* %tmp24635, i64 1
- %tmp24637 = getelementptr inbounds float* %tmp24636, i64 1
- %tmp24638 = getelementptr inbounds float* %tmp24637, i64 1
- %tmp24639 = getelementptr inbounds float* %tmp24638, i64 1
- %tmp24640 = getelementptr inbounds float* %tmp24639, i64 1
- %tmp24641 = getelementptr inbounds float* %tmp24640, i64 1
- %tmp24642 = getelementptr inbounds float* %tmp24641, i64 1
- %tmp24643 = getelementptr inbounds float* %tmp24642, i64 1
- %tmp24644 = getelementptr inbounds float* %tmp24643, i64 1
- %tmp24645 = getelementptr inbounds float* %tmp24644, i64 1
- %tmp24646 = getelementptr inbounds float* %tmp24645, i64 1
- %tmp24647 = getelementptr inbounds float* %tmp24646, i64 1
- %tmp24648 = getelementptr inbounds float* %tmp24647, i64 1
- %tmp24649 = getelementptr inbounds float* %tmp24648, i64 1
- %tmp24650 = getelementptr inbounds float* %tmp24649, i64 1
- %tmp24651 = getelementptr inbounds float* %tmp24650, i64 1
- %tmp24652 = getelementptr inbounds float* %tmp24651, i64 1
- %tmp24653 = getelementptr inbounds float* %tmp24652, i64 1
- %tmp24654 = getelementptr inbounds float* %tmp24653, i64 1
- %tmp24655 = getelementptr inbounds float* %tmp24654, i64 1
- %tmp24656 = getelementptr inbounds float* %tmp24655, i64 1
- %tmp24657 = getelementptr inbounds float* %tmp24656, i64 1
- %tmp24658 = getelementptr inbounds float* %tmp24657, i64 1
- %tmp24659 = getelementptr inbounds float* %tmp24658, i64 1
- %tmp24660 = getelementptr inbounds float* %tmp24659, i64 1
- %tmp24661 = getelementptr inbounds float* %tmp24660, i64 1
- %tmp24662 = getelementptr inbounds float* %tmp24661, i64 1
- %tmp24663 = getelementptr inbounds float* %tmp24662, i64 1
- %tmp24664 = getelementptr inbounds float* %tmp24663, i64 1
- %tmp24665 = getelementptr inbounds float* %tmp24664, i64 1
- %tmp24666 = getelementptr inbounds float* %tmp24665, i64 1
- %tmp24667 = getelementptr inbounds float* %tmp24666, i64 1
- %tmp24668 = getelementptr inbounds float* %tmp24667, i64 1
- %tmp24669 = getelementptr inbounds float* %tmp24668, i64 1
- %tmp24670 = getelementptr inbounds float* %tmp24669, i64 1
- %tmp24671 = getelementptr inbounds float* %tmp24670, i64 1
- %tmp24672 = getelementptr inbounds float* %tmp24671, i64 1
- %tmp24673 = getelementptr inbounds float* %tmp24672, i64 1
- %tmp24674 = getelementptr inbounds float* %tmp24673, i64 1
- %tmp24675 = getelementptr inbounds float* %tmp24674, i64 1
- %tmp24676 = getelementptr inbounds float* %tmp24675, i64 1
- %tmp24677 = getelementptr inbounds float* %tmp24676, i64 1
- %tmp24678 = getelementptr inbounds float* %tmp24677, i64 1
- %tmp24679 = getelementptr inbounds float* %tmp24678, i64 1
- %tmp24680 = getelementptr inbounds float* %tmp24679, i64 1
- %tmp24681 = getelementptr inbounds float* %tmp24680, i64 1
- %tmp24682 = getelementptr inbounds float* %tmp24681, i64 1
- %tmp24683 = getelementptr inbounds float* %tmp24682, i64 1
- %tmp24684 = getelementptr inbounds float* %tmp24683, i64 1
- %tmp24685 = getelementptr inbounds float* %tmp24684, i64 1
- %tmp24686 = getelementptr inbounds float* %tmp24685, i64 1
- %tmp24687 = getelementptr inbounds float* %tmp24686, i64 1
- %tmp24688 = getelementptr inbounds float* %tmp24687, i64 1
- %tmp24689 = getelementptr inbounds float* %tmp24688, i64 1
- %tmp24690 = getelementptr inbounds float* %tmp24689, i64 1
- %tmp24691 = getelementptr inbounds float* %tmp24690, i64 1
- %tmp24692 = getelementptr inbounds float* %tmp24691, i64 1
- %tmp24693 = getelementptr inbounds float* %tmp24692, i64 1
- %tmp24694 = getelementptr inbounds float* %tmp24693, i64 1
- %tmp24695 = getelementptr inbounds float* %tmp24694, i64 1
- %tmp24696 = getelementptr inbounds float* %tmp24695, i64 1
- %tmp24697 = getelementptr inbounds float* %tmp24696, i64 1
- %tmp24698 = getelementptr inbounds float* %tmp24697, i64 1
- %tmp24699 = getelementptr inbounds float* %tmp24698, i64 1
- %tmp24700 = getelementptr inbounds float* %tmp24699, i64 1
- %tmp24701 = getelementptr inbounds float* %tmp24700, i64 1
- %tmp24702 = getelementptr inbounds float* %tmp24701, i64 1
- %tmp24703 = getelementptr inbounds float* %tmp24702, i64 1
- %tmp24704 = getelementptr inbounds float* %tmp24703, i64 1
- %tmp24705 = getelementptr inbounds float* %tmp24704, i64 1
- %tmp24706 = getelementptr inbounds float* %tmp24705, i64 1
- %tmp24707 = getelementptr inbounds float* %tmp24706, i64 1
- %tmp24708 = getelementptr inbounds float* %tmp24707, i64 1
- %tmp24709 = getelementptr inbounds float* %tmp24708, i64 1
- %tmp24710 = getelementptr inbounds float* %tmp24709, i64 1
- %tmp24711 = getelementptr inbounds float* %tmp24710, i64 1
- %tmp24712 = getelementptr inbounds float* %tmp24711, i64 1
- %tmp24713 = getelementptr inbounds float* %tmp24712, i64 1
- %tmp24714 = getelementptr inbounds float* %tmp24713, i64 1
- %tmp24715 = getelementptr inbounds float* %tmp24714, i64 1
- %tmp24716 = getelementptr inbounds float* %tmp24715, i64 1
- %tmp24717 = getelementptr inbounds float* %tmp24716, i64 1
- %tmp24718 = getelementptr inbounds float* %tmp24717, i64 1
- %tmp24719 = getelementptr inbounds float* %tmp24718, i64 1
- %tmp24720 = getelementptr inbounds float* %tmp24719, i64 1
- %tmp24721 = getelementptr inbounds float* %tmp24720, i64 1
- %tmp24722 = getelementptr inbounds float* %tmp24721, i64 1
- %tmp24723 = getelementptr inbounds float* %tmp24722, i64 1
- %tmp24724 = getelementptr inbounds float* %tmp24723, i64 1
- %tmp24725 = getelementptr inbounds float* %tmp24724, i64 1
- %tmp24726 = getelementptr inbounds float* %tmp24725, i64 1
- %tmp24727 = getelementptr inbounds float* %tmp24726, i64 1
- %tmp24728 = getelementptr inbounds float* %tmp24727, i64 1
- %tmp24729 = getelementptr inbounds float* %tmp24728, i64 1
- %tmp24730 = getelementptr inbounds float* %tmp24729, i64 1
- %tmp24731 = getelementptr inbounds float* %tmp24730, i64 1
- %tmp24732 = getelementptr inbounds float* %tmp24731, i64 1
- %tmp24733 = getelementptr inbounds float* %tmp24732, i64 1
- %tmp24734 = getelementptr inbounds float* %tmp24733, i64 1
- %tmp24735 = getelementptr inbounds float* %tmp24734, i64 1
- %tmp24736 = getelementptr inbounds float* %tmp24735, i64 1
- %tmp24737 = getelementptr inbounds float* %tmp24736, i64 1
- %tmp24738 = getelementptr inbounds float* %tmp24737, i64 1
- %tmp24739 = getelementptr inbounds float* %tmp24738, i64 1
- %tmp24740 = getelementptr inbounds float* %tmp24739, i64 1
- %tmp24741 = getelementptr inbounds float* %tmp24740, i64 1
- %tmp24742 = getelementptr inbounds float* %tmp24741, i64 1
- %tmp24743 = getelementptr inbounds float* %tmp24742, i64 1
- %tmp24744 = getelementptr inbounds float* %tmp24743, i64 1
- %tmp24745 = getelementptr inbounds float* %tmp24744, i64 1
- %tmp24746 = getelementptr inbounds float* %tmp24745, i64 1
- %tmp24747 = getelementptr inbounds float* %tmp24746, i64 1
- %tmp24748 = getelementptr inbounds float* %tmp24747, i64 1
- %tmp24749 = getelementptr inbounds float* %tmp24748, i64 1
- %tmp24750 = getelementptr inbounds float* %tmp24749, i64 1
- %tmp24751 = getelementptr inbounds float* %tmp24750, i64 1
- %tmp24752 = getelementptr inbounds float* %tmp24751, i64 1
- %tmp24753 = getelementptr inbounds float* %tmp24752, i64 1
- %tmp24754 = getelementptr inbounds float* %tmp24753, i64 1
- %tmp24755 = getelementptr inbounds float* %tmp24754, i64 1
- %tmp24756 = getelementptr inbounds float* %tmp24755, i64 1
- %tmp24757 = getelementptr inbounds float* %tmp24756, i64 1
- %tmp24758 = getelementptr inbounds float* %tmp24757, i64 1
- %tmp24759 = getelementptr inbounds float* %tmp24758, i64 1
- %tmp24760 = getelementptr inbounds float* %tmp24759, i64 1
- %tmp24761 = getelementptr inbounds float* %tmp24760, i64 1
- %tmp24762 = getelementptr inbounds float* %tmp24761, i64 1
- %tmp24763 = getelementptr inbounds float* %tmp24762, i64 1
- %tmp24764 = getelementptr inbounds float* %tmp24763, i64 1
- %tmp24765 = getelementptr inbounds float* %tmp24764, i64 1
- %tmp24766 = getelementptr inbounds float* %tmp24765, i64 1
- %tmp24767 = getelementptr inbounds float* %tmp24766, i64 1
- %tmp24768 = getelementptr inbounds float* %tmp24767, i64 1
- %tmp24769 = getelementptr inbounds float* %tmp24768, i64 1
- %tmp24770 = getelementptr inbounds float* %tmp24769, i64 1
- %tmp24771 = getelementptr inbounds float* %tmp24770, i64 1
- %tmp24772 = getelementptr inbounds float* %tmp24771, i64 1
- %tmp24773 = getelementptr inbounds float* %tmp24772, i64 1
- %tmp24774 = getelementptr inbounds float* %tmp24773, i64 1
- %tmp24775 = getelementptr inbounds float* %tmp24774, i64 1
- %tmp24776 = getelementptr inbounds float* %tmp24775, i64 1
- %tmp24777 = getelementptr inbounds float* %tmp24776, i64 1
- %tmp24778 = getelementptr inbounds float* %tmp24777, i64 1
- %tmp24779 = getelementptr inbounds float* %tmp24778, i64 1
- %tmp24780 = getelementptr inbounds float* %tmp24779, i64 1
- %tmp24781 = getelementptr inbounds float* %tmp24780, i64 1
- %tmp24782 = getelementptr inbounds float* %tmp24781, i64 1
- %tmp24783 = getelementptr inbounds float* %tmp24782, i64 1
- %tmp24784 = getelementptr inbounds float* %tmp24783, i64 1
- %tmp24785 = getelementptr inbounds float* %tmp24784, i64 1
- %tmp24786 = getelementptr inbounds float* %tmp24785, i64 1
- %tmp24787 = getelementptr inbounds float* %tmp24786, i64 1
- %tmp24788 = getelementptr inbounds float* %tmp24787, i64 1
- %tmp24789 = getelementptr inbounds float* %tmp24788, i64 1
- %tmp24790 = getelementptr inbounds float* %tmp24789, i64 1
- %tmp24791 = getelementptr inbounds float* %tmp24790, i64 1
- %tmp24792 = getelementptr inbounds float* %tmp24791, i64 1
- %tmp24793 = getelementptr inbounds float* %tmp24792, i64 1
- %tmp24794 = getelementptr inbounds float* %tmp24793, i64 1
- %tmp24795 = getelementptr inbounds float* %tmp24794, i64 1
- %tmp24796 = getelementptr inbounds float* %tmp24795, i64 1
- %tmp24797 = getelementptr inbounds float* %tmp24796, i64 1
- %tmp24798 = getelementptr inbounds float* %tmp24797, i64 1
- %tmp24799 = getelementptr inbounds float* %tmp24798, i64 1
- %tmp24800 = getelementptr inbounds float* %tmp24799, i64 1
- %tmp24801 = getelementptr inbounds float* %tmp24800, i64 1
- %tmp24802 = getelementptr inbounds float* %tmp24801, i64 1
- %tmp24803 = getelementptr inbounds float* %tmp24802, i64 1
- %tmp24804 = getelementptr inbounds float* %tmp24803, i64 1
- %tmp24805 = getelementptr inbounds float* %tmp24804, i64 1
- %tmp24806 = getelementptr inbounds float* %tmp24805, i64 1
- %tmp24807 = getelementptr inbounds float* %tmp24806, i64 1
- %tmp24808 = getelementptr inbounds float* %tmp24807, i64 1
- %tmp24809 = getelementptr inbounds float* %tmp24808, i64 1
- %tmp24810 = getelementptr inbounds float* %tmp24809, i64 1
- %tmp24811 = getelementptr inbounds float* %tmp24810, i64 1
- %tmp24812 = getelementptr inbounds float* %tmp24811, i64 1
- %tmp24813 = getelementptr inbounds float* %tmp24812, i64 1
- %tmp24814 = getelementptr inbounds float* %tmp24813, i64 1
- %tmp24815 = getelementptr inbounds float* %tmp24814, i64 1
- %tmp24816 = getelementptr inbounds float* %tmp24815, i64 1
- %tmp24817 = getelementptr inbounds float* %tmp24816, i64 1
- %tmp24818 = getelementptr inbounds float* %tmp24817, i64 1
- %tmp24819 = getelementptr inbounds float* %tmp24818, i64 1
- %tmp24820 = getelementptr inbounds float* %tmp24819, i64 1
- %tmp24821 = getelementptr inbounds float* %tmp24820, i64 1
- %tmp24822 = getelementptr inbounds float* %tmp24821, i64 1
- %tmp24823 = getelementptr inbounds float* %tmp24822, i64 1
- %tmp24824 = getelementptr inbounds float* %tmp24823, i64 1
- %tmp24825 = getelementptr inbounds float* %tmp24824, i64 1
- %tmp24826 = getelementptr inbounds float* %tmp24825, i64 1
- %tmp24827 = getelementptr inbounds float* %tmp24826, i64 1
- %tmp24828 = getelementptr inbounds float* %tmp24827, i64 1
- %tmp24829 = getelementptr inbounds float* %tmp24828, i64 1
- %tmp24830 = getelementptr inbounds float* %tmp24829, i64 1
- %tmp24831 = getelementptr inbounds float* %tmp24830, i64 1
- %tmp24832 = getelementptr inbounds float* %tmp24831, i64 1
- %tmp24833 = getelementptr inbounds float* %tmp24832, i64 1
- %tmp24834 = getelementptr inbounds float* %tmp24833, i64 1
- %tmp24835 = getelementptr inbounds float* %tmp24834, i64 1
- %tmp24836 = getelementptr inbounds float* %tmp24835, i64 1
- %tmp24837 = getelementptr inbounds float* %tmp24836, i64 1
- %tmp24838 = getelementptr inbounds float* %tmp24837, i64 1
- %tmp24839 = getelementptr inbounds float* %tmp24838, i64 1
- %tmp24840 = getelementptr inbounds float* %tmp24839, i64 1
- %tmp24841 = getelementptr inbounds float* %tmp24840, i64 1
- %tmp24842 = getelementptr inbounds float* %tmp24841, i64 1
- %tmp24843 = getelementptr inbounds float* %tmp24842, i64 1
- %tmp24844 = getelementptr inbounds float* %tmp24843, i64 1
- %tmp24845 = getelementptr inbounds float* %tmp24844, i64 1
- %tmp24846 = getelementptr inbounds float* %tmp24845, i64 1
- %tmp24847 = getelementptr inbounds float* %tmp24846, i64 1
- %tmp24848 = getelementptr inbounds float* %tmp24847, i64 1
- %tmp24849 = getelementptr inbounds float* %tmp24848, i64 1
- %tmp24850 = getelementptr inbounds float* %tmp24849, i64 1
- %tmp24851 = getelementptr inbounds float* %tmp24850, i64 1
- %tmp24852 = getelementptr inbounds float* %tmp24851, i64 1
- %tmp24853 = getelementptr inbounds float* %tmp24852, i64 1
- %tmp24854 = getelementptr inbounds float* %tmp24853, i64 1
- %tmp24855 = getelementptr inbounds float* %tmp24854, i64 1
- %tmp24856 = getelementptr inbounds float* %tmp24855, i64 1
- %tmp24857 = getelementptr inbounds float* %tmp24856, i64 1
- %tmp24858 = getelementptr inbounds float* %tmp24857, i64 1
- %tmp24859 = getelementptr inbounds float* %tmp24858, i64 1
- %tmp24860 = getelementptr inbounds float* %tmp24859, i64 1
- %tmp24861 = getelementptr inbounds float* %tmp24860, i64 1
- %tmp24862 = getelementptr inbounds float* %tmp24861, i64 1
- %tmp24863 = getelementptr inbounds float* %tmp24862, i64 1
- %tmp24864 = getelementptr inbounds float* %tmp24863, i64 1
- %tmp24865 = getelementptr inbounds float* %tmp24864, i64 1
- %tmp24866 = getelementptr inbounds float* %tmp24865, i64 1
- %tmp24867 = getelementptr inbounds float* %tmp24866, i64 1
- %tmp24868 = getelementptr inbounds float* %tmp24867, i64 1
- %tmp24869 = getelementptr inbounds float* %tmp24868, i64 1
- %tmp24870 = getelementptr inbounds float* %tmp24869, i64 1
- %tmp24871 = getelementptr inbounds float* %tmp24870, i64 1
- %tmp24872 = getelementptr inbounds float* %tmp24871, i64 1
- %tmp24873 = getelementptr inbounds float* %tmp24872, i64 1
- %tmp24874 = getelementptr inbounds float* %tmp24873, i64 1
- %tmp24875 = getelementptr inbounds float* %tmp24874, i64 1
- %tmp24876 = getelementptr inbounds float* %tmp24875, i64 1
- %tmp24877 = getelementptr inbounds float* %tmp24876, i64 1
- %tmp24878 = getelementptr inbounds float* %tmp24877, i64 1
- %tmp24879 = getelementptr inbounds float* %tmp24878, i64 1
- %tmp24880 = getelementptr inbounds float* %tmp24879, i64 1
- %tmp24881 = getelementptr inbounds float* %tmp24880, i64 1
- %tmp24882 = getelementptr inbounds float* %tmp24881, i64 1
- %tmp24883 = getelementptr inbounds float* %tmp24882, i64 1
- %tmp24884 = getelementptr inbounds float* %tmp24883, i64 1
- %tmp24885 = getelementptr inbounds float* %tmp24884, i64 1
- %tmp24886 = getelementptr inbounds float* %tmp24885, i64 1
- %tmp24887 = getelementptr inbounds float* %tmp24886, i64 1
- %tmp24888 = getelementptr inbounds float* %tmp24887, i64 1
- %tmp24889 = getelementptr inbounds float* %tmp24888, i64 1
- %tmp24890 = getelementptr inbounds float* %tmp24889, i64 1
- %tmp24891 = getelementptr inbounds float* %tmp24890, i64 1
- %tmp24892 = getelementptr inbounds float* %tmp24891, i64 1
- %tmp24893 = getelementptr inbounds float* %tmp24892, i64 1
- %tmp24894 = getelementptr inbounds float* %tmp24893, i64 1
- %tmp24895 = getelementptr inbounds float* %tmp24894, i64 1
- %tmp24896 = getelementptr inbounds float* %tmp24895, i64 1
- %tmp24897 = getelementptr inbounds float* %tmp24896, i64 1
- %tmp24898 = getelementptr inbounds float* %tmp24897, i64 1
- %tmp24899 = getelementptr inbounds float* %tmp24898, i64 1
- %tmp24900 = getelementptr inbounds float* %tmp24899, i64 1
- %tmp24901 = getelementptr inbounds float* %tmp24900, i64 1
- %tmp24902 = getelementptr inbounds float* %tmp24901, i64 1
- %tmp24903 = getelementptr inbounds float* %tmp24902, i64 1
- %tmp24904 = getelementptr inbounds float* %tmp24903, i64 1
- %tmp24905 = getelementptr inbounds float* %tmp24904, i64 1
- %tmp24906 = getelementptr inbounds float* %tmp24905, i64 1
- %tmp24907 = getelementptr inbounds float* %tmp24906, i64 1
- %tmp24908 = getelementptr inbounds float* %tmp24907, i64 1
- %tmp24909 = getelementptr inbounds float* %tmp24908, i64 1
- %tmp24910 = getelementptr inbounds float* %tmp24909, i64 1
- %tmp24911 = getelementptr inbounds float* %tmp24910, i64 1
- %tmp24912 = getelementptr inbounds float* %tmp24911, i64 1
- %tmp24913 = getelementptr inbounds float* %tmp24912, i64 1
- %tmp24914 = getelementptr inbounds float* %tmp24913, i64 1
- %tmp24915 = getelementptr inbounds float* %tmp24914, i64 1
- %tmp24916 = getelementptr inbounds float* %tmp24915, i64 1
- %tmp24917 = getelementptr inbounds float* %tmp24916, i64 1
- %tmp24918 = getelementptr inbounds float* %tmp24917, i64 1
- %tmp24919 = getelementptr inbounds float* %tmp24918, i64 1
- %tmp24920 = getelementptr inbounds float* %tmp24919, i64 1
- %tmp24921 = getelementptr inbounds float* %tmp24920, i64 1
- %tmp24922 = getelementptr inbounds float* %tmp24921, i64 1
- %tmp24923 = getelementptr inbounds float* %tmp24922, i64 1
- %tmp24924 = getelementptr inbounds float* %tmp24923, i64 1
- %tmp24925 = getelementptr inbounds float* %tmp24924, i64 1
- %tmp24926 = getelementptr inbounds float* %tmp24925, i64 1
- %tmp24927 = getelementptr inbounds float* %tmp24926, i64 1
- %tmp24928 = getelementptr inbounds float* %tmp24927, i64 1
- %tmp24929 = getelementptr inbounds float* %tmp24928, i64 1
- %tmp24930 = getelementptr inbounds float* %tmp24929, i64 1
- %tmp24931 = getelementptr inbounds float* %tmp24930, i64 1
- %tmp24932 = getelementptr inbounds float* %tmp24931, i64 1
- %tmp24933 = getelementptr inbounds float* %tmp24932, i64 1
- %tmp24934 = getelementptr inbounds float* %tmp24933, i64 1
- %tmp24935 = getelementptr inbounds float* %tmp24934, i64 1
- %tmp24936 = getelementptr inbounds float* %tmp24935, i64 1
- %tmp24937 = getelementptr inbounds float* %tmp24936, i64 1
- %tmp24938 = getelementptr inbounds float* %tmp24937, i64 1
- %tmp24939 = getelementptr inbounds float* %tmp24938, i64 1
- %tmp24940 = getelementptr inbounds float* %tmp24939, i64 1
- %tmp24941 = getelementptr inbounds float* %tmp24940, i64 1
- %tmp24942 = getelementptr inbounds float* %tmp24941, i64 1
- %tmp24943 = getelementptr inbounds float* %tmp24942, i64 1
- %tmp24944 = getelementptr inbounds float* %tmp24943, i64 1
- %tmp24945 = getelementptr inbounds float* %tmp24944, i64 1
- %tmp24946 = getelementptr inbounds float* %tmp24945, i64 1
+ %tmp = getelementptr inbounds float, float* null, i64 1
+ %tmp3 = getelementptr inbounds float, float* %tmp, i64 1
+ %tmp4 = getelementptr inbounds float, float* %tmp3, i64 1
+ %tmp5 = getelementptr inbounds float, float* %tmp4, i64 1
+ %tmp6 = getelementptr inbounds float, float* %tmp5, i64 1
+ %tmp7 = getelementptr inbounds float, float* %tmp6, i64 1
+ %tmp8 = getelementptr inbounds float, float* %tmp7, i64 1
+ %tmp9 = getelementptr inbounds float, float* %tmp8, i64 1
+ %tmp10 = getelementptr inbounds float, float* %tmp9, i64 1
+ %tmp11 = getelementptr inbounds float, float* %tmp10, i64 1
+ %tmp12 = getelementptr inbounds float, float* %tmp11, i64 1
+ %tmp13 = getelementptr inbounds float, float* %tmp12, i64 1
+ %tmp14 = getelementptr inbounds float, float* %tmp13, i64 1
+ %tmp15 = getelementptr inbounds float, float* %tmp14, i64 1
+ %tmp16 = getelementptr inbounds float, float* %tmp15, i64 1
+ %tmp17 = getelementptr inbounds float, float* %tmp16, i64 1
+ %tmp18 = getelementptr inbounds float, float* %tmp17, i64 1
+ %tmp19 = getelementptr inbounds float, float* %tmp18, i64 1
+ %tmp20 = getelementptr inbounds float, float* %tmp19, i64 1
+ %tmp21 = getelementptr inbounds float, float* %tmp20, i64 1
+ %tmp22 = getelementptr inbounds float, float* %tmp21, i64 1
+ %tmp23 = getelementptr inbounds float, float* %tmp22, i64 1
+ %tmp24 = getelementptr inbounds float, float* %tmp23, i64 1
+ %tmp25 = getelementptr inbounds float, float* %tmp24, i64 1
+ %tmp26 = getelementptr inbounds float, float* %tmp25, i64 1
+ %tmp27 = getelementptr inbounds float, float* %tmp26, i64 1
+ %tmp28 = getelementptr inbounds float, float* %tmp27, i64 1
+ %tmp29 = getelementptr inbounds float, float* %tmp28, i64 1
+ %tmp30 = getelementptr inbounds float, float* %tmp29, i64 1
+ %tmp31 = getelementptr inbounds float, float* %tmp30, i64 1
+ %tmp32 = getelementptr inbounds float, float* %tmp31, i64 1
+ %tmp33 = getelementptr inbounds float, float* %tmp32, i64 1
+ %tmp34 = getelementptr inbounds float, float* %tmp33, i64 1
+ %tmp35 = getelementptr inbounds float, float* %tmp34, i64 1
+ %tmp36 = getelementptr inbounds float, float* %tmp35, i64 1
+ %tmp37 = getelementptr inbounds float, float* %tmp36, i64 1
+ %tmp38 = getelementptr inbounds float, float* %tmp37, i64 1
+ %tmp39 = getelementptr inbounds float, float* %tmp38, i64 1
+ %tmp40 = getelementptr inbounds float, float* %tmp39, i64 1
+ %tmp41 = getelementptr inbounds float, float* %tmp40, i64 1
+ %tmp42 = getelementptr inbounds float, float* %tmp41, i64 1
+ %tmp43 = getelementptr inbounds float, float* %tmp42, i64 1
+ %tmp44 = getelementptr inbounds float, float* %tmp43, i64 1
+ %tmp45 = getelementptr inbounds float, float* %tmp44, i64 1
+ %tmp46 = getelementptr inbounds float, float* %tmp45, i64 1
+ %tmp47 = getelementptr inbounds float, float* %tmp46, i64 1
+ %tmp48 = getelementptr inbounds float, float* %tmp47, i64 1
+ %tmp49 = getelementptr inbounds float, float* %tmp48, i64 1
+ %tmp50 = getelementptr inbounds float, float* %tmp49, i64 1
+ %tmp51 = getelementptr inbounds float, float* %tmp50, i64 1
+ %tmp52 = getelementptr inbounds float, float* %tmp51, i64 1
+ %tmp53 = getelementptr inbounds float, float* %tmp52, i64 1
+ %tmp54 = getelementptr inbounds float, float* %tmp53, i64 1
+ %tmp55 = getelementptr inbounds float, float* %tmp54, i64 1
+ %tmp56 = getelementptr inbounds float, float* %tmp55, i64 1
+ %tmp57 = getelementptr inbounds float, float* %tmp56, i64 1
+ %tmp58 = getelementptr inbounds float, float* %tmp57, i64 1
+ %tmp59 = getelementptr inbounds float, float* %tmp58, i64 1
+ %tmp60 = getelementptr inbounds float, float* %tmp59, i64 1
+ %tmp61 = getelementptr inbounds float, float* %tmp60, i64 1
+ %tmp62 = getelementptr inbounds float, float* %tmp61, i64 1
+ %tmp63 = getelementptr inbounds float, float* %tmp62, i64 1
+ %tmp64 = getelementptr inbounds float, float* %tmp63, i64 1
+ %tmp65 = getelementptr inbounds float, float* %tmp64, i64 1
+ %tmp66 = getelementptr inbounds float, float* %tmp65, i64 1
+ %tmp67 = getelementptr inbounds float, float* %tmp66, i64 1
+ %tmp68 = getelementptr inbounds float, float* %tmp67, i64 1
+ %tmp69 = getelementptr inbounds float, float* %tmp68, i64 1
+ %tmp70 = getelementptr inbounds float, float* %tmp69, i64 1
+ %tmp71 = getelementptr inbounds float, float* %tmp70, i64 1
+ %tmp72 = getelementptr inbounds float, float* %tmp71, i64 1
+ %tmp73 = getelementptr inbounds float, float* %tmp72, i64 1
+ %tmp74 = getelementptr inbounds float, float* %tmp73, i64 1
+ %tmp75 = getelementptr inbounds float, float* %tmp74, i64 1
+ %tmp76 = getelementptr inbounds float, float* %tmp75, i64 1
+ %tmp77 = getelementptr inbounds float, float* %tmp76, i64 1
+ %tmp78 = getelementptr inbounds float, float* %tmp77, i64 1
+ %tmp79 = getelementptr inbounds float, float* %tmp78, i64 1
+ %tmp80 = getelementptr inbounds float, float* %tmp79, i64 1
+ %tmp81 = getelementptr inbounds float, float* %tmp80, i64 1
+ %tmp82 = getelementptr inbounds float, float* %tmp81, i64 1
+ %tmp83 = getelementptr inbounds float, float* %tmp82, i64 1
+ %tmp84 = getelementptr inbounds float, float* %tmp83, i64 1
+ %tmp85 = getelementptr inbounds float, float* %tmp84, i64 1
+ %tmp86 = getelementptr inbounds float, float* %tmp85, i64 1
+ %tmp87 = getelementptr inbounds float, float* %tmp86, i64 1
+ %tmp88 = getelementptr inbounds float, float* %tmp87, i64 1
+ %tmp89 = getelementptr inbounds float, float* %tmp88, i64 1
+ %tmp90 = getelementptr inbounds float, float* %tmp89, i64 1
+ %tmp91 = getelementptr inbounds float, float* %tmp90, i64 1
+ %tmp92 = getelementptr inbounds float, float* %tmp91, i64 1
+ %tmp93 = getelementptr inbounds float, float* %tmp92, i64 1
+ %tmp94 = getelementptr inbounds float, float* %tmp93, i64 1
+ %tmp95 = getelementptr inbounds float, float* %tmp94, i64 1
+ %tmp96 = getelementptr inbounds float, float* %tmp95, i64 1
+ %tmp97 = getelementptr inbounds float, float* %tmp96, i64 1
+ %tmp98 = getelementptr inbounds float, float* %tmp97, i64 1
+ %tmp99 = getelementptr inbounds float, float* %tmp98, i64 1
+ %tmp100 = getelementptr inbounds float, float* %tmp99, i64 1
+ %tmp101 = getelementptr inbounds float, float* %tmp100, i64 1
+ %tmp102 = getelementptr inbounds float, float* %tmp101, i64 1
+ %tmp103 = getelementptr inbounds float, float* %tmp102, i64 1
+ %tmp104 = getelementptr inbounds float, float* %tmp103, i64 1
+ %tmp105 = getelementptr inbounds float, float* %tmp104, i64 1
+ %tmp106 = getelementptr inbounds float, float* %tmp105, i64 1
+ %tmp107 = getelementptr inbounds float, float* %tmp106, i64 1
+ %tmp108 = getelementptr inbounds float, float* %tmp107, i64 1
+ %tmp109 = getelementptr inbounds float, float* %tmp108, i64 1
+ %tmp110 = getelementptr inbounds float, float* %tmp109, i64 1
+ %tmp111 = getelementptr inbounds float, float* %tmp110, i64 1
+ %tmp112 = getelementptr inbounds float, float* %tmp111, i64 1
+ %tmp113 = getelementptr inbounds float, float* %tmp112, i64 1
+ %tmp114 = getelementptr inbounds float, float* %tmp113, i64 1
+ %tmp115 = getelementptr inbounds float, float* %tmp114, i64 1
+ %tmp116 = getelementptr inbounds float, float* %tmp115, i64 1
+ %tmp117 = getelementptr inbounds float, float* %tmp116, i64 1
+ %tmp118 = getelementptr inbounds float, float* %tmp117, i64 1
+ %tmp119 = getelementptr inbounds float, float* %tmp118, i64 1
+ %tmp120 = getelementptr inbounds float, float* %tmp119, i64 1
+ %tmp121 = getelementptr inbounds float, float* %tmp120, i64 1
+ %tmp122 = getelementptr inbounds float, float* %tmp121, i64 1
+ %tmp123 = getelementptr inbounds float, float* %tmp122, i64 1
+ %tmp124 = getelementptr inbounds float, float* %tmp123, i64 1
+ %tmp125 = getelementptr inbounds float, float* %tmp124, i64 1
+ %tmp126 = getelementptr inbounds float, float* %tmp125, i64 1
+ %tmp127 = getelementptr inbounds float, float* %tmp126, i64 1
+ %tmp128 = getelementptr inbounds float, float* %tmp127, i64 1
+ %tmp129 = getelementptr inbounds float, float* %tmp128, i64 1
+ %tmp130 = getelementptr inbounds float, float* %tmp129, i64 1
+ %tmp131 = getelementptr inbounds float, float* %tmp130, i64 1
+ %tmp132 = getelementptr inbounds float, float* %tmp131, i64 1
+ %tmp133 = getelementptr inbounds float, float* %tmp132, i64 1
+ %tmp134 = getelementptr inbounds float, float* %tmp133, i64 1
+ %tmp135 = getelementptr inbounds float, float* %tmp134, i64 1
+ %tmp136 = getelementptr inbounds float, float* %tmp135, i64 1
+ %tmp137 = getelementptr inbounds float, float* %tmp136, i64 1
+ %tmp138 = getelementptr inbounds float, float* %tmp137, i64 1
+ %tmp139 = getelementptr inbounds float, float* %tmp138, i64 1
+ %tmp140 = getelementptr inbounds float, float* %tmp139, i64 1
+ %tmp141 = getelementptr inbounds float, float* %tmp140, i64 1
+ %tmp142 = getelementptr inbounds float, float* %tmp141, i64 1
+ %tmp143 = getelementptr inbounds float, float* %tmp142, i64 1
+ %tmp144 = getelementptr inbounds float, float* %tmp143, i64 1
+ %tmp145 = getelementptr inbounds float, float* %tmp144, i64 1
+ %tmp146 = getelementptr inbounds float, float* %tmp145, i64 1
+ %tmp147 = getelementptr inbounds float, float* %tmp146, i64 1
+ %tmp148 = getelementptr inbounds float, float* %tmp147, i64 1
+ %tmp149 = getelementptr inbounds float, float* %tmp148, i64 1
+ %tmp150 = getelementptr inbounds float, float* %tmp149, i64 1
+ %tmp151 = getelementptr inbounds float, float* %tmp150, i64 1
+ %tmp152 = getelementptr inbounds float, float* %tmp151, i64 1
+ %tmp153 = getelementptr inbounds float, float* %tmp152, i64 1
+ %tmp154 = getelementptr inbounds float, float* %tmp153, i64 1
+ %tmp155 = getelementptr inbounds float, float* %tmp154, i64 1
+ %tmp156 = getelementptr inbounds float, float* %tmp155, i64 1
+ %tmp157 = getelementptr inbounds float, float* %tmp156, i64 1
+ %tmp158 = getelementptr inbounds float, float* %tmp157, i64 1
+ %tmp159 = getelementptr inbounds float, float* %tmp158, i64 1
+ %tmp160 = getelementptr inbounds float, float* %tmp159, i64 1
+ %tmp161 = getelementptr inbounds float, float* %tmp160, i64 1
+ %tmp162 = getelementptr inbounds float, float* %tmp161, i64 1
+ %tmp163 = getelementptr inbounds float, float* %tmp162, i64 1
+ %tmp164 = getelementptr inbounds float, float* %tmp163, i64 1
+ %tmp165 = getelementptr inbounds float, float* %tmp164, i64 1
+ %tmp166 = getelementptr inbounds float, float* %tmp165, i64 1
+ %tmp167 = getelementptr inbounds float, float* %tmp166, i64 1
+ %tmp168 = getelementptr inbounds float, float* %tmp167, i64 1
+ %tmp169 = getelementptr inbounds float, float* %tmp168, i64 1
+ %tmp170 = getelementptr inbounds float, float* %tmp169, i64 1
+ %tmp171 = getelementptr inbounds float, float* %tmp170, i64 1
+ %tmp172 = getelementptr inbounds float, float* %tmp171, i64 1
+ %tmp173 = getelementptr inbounds float, float* %tmp172, i64 1
+ %tmp174 = getelementptr inbounds float, float* %tmp173, i64 1
+ %tmp175 = getelementptr inbounds float, float* %tmp174, i64 1
+ %tmp176 = getelementptr inbounds float, float* %tmp175, i64 1
+ %tmp177 = getelementptr inbounds float, float* %tmp176, i64 1
+ %tmp178 = getelementptr inbounds float, float* %tmp177, i64 1
+ %tmp179 = getelementptr inbounds float, float* %tmp178, i64 1
+ %tmp180 = getelementptr inbounds float, float* %tmp179, i64 1
+ %tmp181 = getelementptr inbounds float, float* %tmp180, i64 1
+ %tmp182 = getelementptr inbounds float, float* %tmp181, i64 1
+ %tmp183 = getelementptr inbounds float, float* %tmp182, i64 1
+ %tmp184 = getelementptr inbounds float, float* %tmp183, i64 1
+ %tmp185 = getelementptr inbounds float, float* %tmp184, i64 1
+ %tmp186 = getelementptr inbounds float, float* %tmp185, i64 1
+ %tmp187 = getelementptr inbounds float, float* %tmp186, i64 1
+ %tmp188 = getelementptr inbounds float, float* %tmp187, i64 1
+ %tmp189 = getelementptr inbounds float, float* %tmp188, i64 1
+ %tmp190 = getelementptr inbounds float, float* %tmp189, i64 1
+ %tmp191 = getelementptr inbounds float, float* %tmp190, i64 1
+ %tmp192 = getelementptr inbounds float, float* %tmp191, i64 1
+ %tmp193 = getelementptr inbounds float, float* %tmp192, i64 1
+ %tmp194 = getelementptr inbounds float, float* %tmp193, i64 1
+ %tmp195 = getelementptr inbounds float, float* %tmp194, i64 1
+ %tmp196 = getelementptr inbounds float, float* %tmp195, i64 1
+ %tmp197 = getelementptr inbounds float, float* %tmp196, i64 1
+ %tmp198 = getelementptr inbounds float, float* %tmp197, i64 1
+ %tmp199 = getelementptr inbounds float, float* %tmp198, i64 1
+ %tmp200 = getelementptr inbounds float, float* %tmp199, i64 1
+ %tmp201 = getelementptr inbounds float, float* %tmp200, i64 1
+ %tmp202 = getelementptr inbounds float, float* %tmp201, i64 1
+ %tmp203 = getelementptr inbounds float, float* %tmp202, i64 1
+ %tmp204 = getelementptr inbounds float, float* %tmp203, i64 1
+ %tmp205 = getelementptr inbounds float, float* %tmp204, i64 1
+ %tmp206 = getelementptr inbounds float, float* %tmp205, i64 1
+ %tmp207 = getelementptr inbounds float, float* %tmp206, i64 1
+ %tmp208 = getelementptr inbounds float, float* %tmp207, i64 1
+ %tmp209 = getelementptr inbounds float, float* %tmp208, i64 1
+ %tmp210 = getelementptr inbounds float, float* %tmp209, i64 1
+ %tmp211 = getelementptr inbounds float, float* %tmp210, i64 1
+ %tmp212 = getelementptr inbounds float, float* %tmp211, i64 1
+ %tmp213 = getelementptr inbounds float, float* %tmp212, i64 1
+ %tmp214 = getelementptr inbounds float, float* %tmp213, i64 1
+ %tmp215 = getelementptr inbounds float, float* %tmp214, i64 1
+ %tmp216 = getelementptr inbounds float, float* %tmp215, i64 1
+ %tmp217 = getelementptr inbounds float, float* %tmp216, i64 1
+ %tmp218 = getelementptr inbounds float, float* %tmp217, i64 1
+ %tmp219 = getelementptr inbounds float, float* %tmp218, i64 1
+ %tmp220 = getelementptr inbounds float, float* %tmp219, i64 1
+ %tmp221 = getelementptr inbounds float, float* %tmp220, i64 1
+ %tmp222 = getelementptr inbounds float, float* %tmp221, i64 1
+ %tmp223 = getelementptr inbounds float, float* %tmp222, i64 1
+ %tmp224 = getelementptr inbounds float, float* %tmp223, i64 1
+ %tmp225 = getelementptr inbounds float, float* %tmp224, i64 1
+ %tmp226 = getelementptr inbounds float, float* %tmp225, i64 1
+ %tmp227 = getelementptr inbounds float, float* %tmp226, i64 1
+ %tmp228 = getelementptr inbounds float, float* %tmp227, i64 1
+ %tmp229 = getelementptr inbounds float, float* %tmp228, i64 1
+ %tmp230 = getelementptr inbounds float, float* %tmp229, i64 1
+ %tmp231 = getelementptr inbounds float, float* %tmp230, i64 1
+ %tmp232 = getelementptr inbounds float, float* %tmp231, i64 1
+ %tmp233 = getelementptr inbounds float, float* %tmp232, i64 1
+ %tmp234 = getelementptr inbounds float, float* %tmp233, i64 1
+ %tmp235 = getelementptr inbounds float, float* %tmp234, i64 1
+ %tmp236 = getelementptr inbounds float, float* %tmp235, i64 1
+ %tmp237 = getelementptr inbounds float, float* %tmp236, i64 1
+ %tmp238 = getelementptr inbounds float, float* %tmp237, i64 1
+ %tmp239 = getelementptr inbounds float, float* %tmp238, i64 1
+ %tmp240 = getelementptr inbounds float, float* %tmp239, i64 1
+ %tmp241 = getelementptr inbounds float, float* %tmp240, i64 1
+ %tmp242 = getelementptr inbounds float, float* %tmp241, i64 1
+ %tmp243 = getelementptr inbounds float, float* %tmp242, i64 1
+ %tmp244 = getelementptr inbounds float, float* %tmp243, i64 1
+ %tmp245 = getelementptr inbounds float, float* %tmp244, i64 1
+ %tmp246 = getelementptr inbounds float, float* %tmp245, i64 1
+ %tmp247 = getelementptr inbounds float, float* %tmp246, i64 1
+ %tmp248 = getelementptr inbounds float, float* %tmp247, i64 1
+ %tmp249 = getelementptr inbounds float, float* %tmp248, i64 1
+ %tmp250 = getelementptr inbounds float, float* %tmp249, i64 1
+ %tmp251 = getelementptr inbounds float, float* %tmp250, i64 1
+ %tmp252 = getelementptr inbounds float, float* %tmp251, i64 1
+ %tmp253 = getelementptr inbounds float, float* %tmp252, i64 1
+ %tmp254 = getelementptr inbounds float, float* %tmp253, i64 1
+ %tmp255 = getelementptr inbounds float, float* %tmp254, i64 1
+ %tmp256 = getelementptr inbounds float, float* %tmp255, i64 1
+ %tmp257 = getelementptr inbounds float, float* %tmp256, i64 1
+ %tmp258 = getelementptr inbounds float, float* %tmp257, i64 1
+ %tmp259 = getelementptr inbounds float, float* %tmp258, i64 1
+ %tmp260 = getelementptr inbounds float, float* %tmp259, i64 1
+ %tmp261 = getelementptr inbounds float, float* %tmp260, i64 1
+ %tmp262 = getelementptr inbounds float, float* %tmp261, i64 1
+ %tmp263 = getelementptr inbounds float, float* %tmp262, i64 1
+ %tmp264 = getelementptr inbounds float, float* %tmp263, i64 1
+ %tmp265 = getelementptr inbounds float, float* %tmp264, i64 1
+ %tmp266 = getelementptr inbounds float, float* %tmp265, i64 1
+ %tmp267 = getelementptr inbounds float, float* %tmp266, i64 1
+ %tmp268 = getelementptr inbounds float, float* %tmp267, i64 1
+ %tmp269 = getelementptr inbounds float, float* %tmp268, i64 1
+ %tmp270 = getelementptr inbounds float, float* %tmp269, i64 1
+ %tmp271 = getelementptr inbounds float, float* %tmp270, i64 1
+ %tmp272 = getelementptr inbounds float, float* %tmp271, i64 1
+ %tmp273 = getelementptr inbounds float, float* %tmp272, i64 1
+ %tmp274 = getelementptr inbounds float, float* %tmp273, i64 1
+ %tmp275 = getelementptr inbounds float, float* %tmp274, i64 1
+ %tmp276 = getelementptr inbounds float, float* %tmp275, i64 1
+ %tmp277 = getelementptr inbounds float, float* %tmp276, i64 1
+ %tmp278 = getelementptr inbounds float, float* %tmp277, i64 1
+ %tmp279 = getelementptr inbounds float, float* %tmp278, i64 1
+ %tmp280 = getelementptr inbounds float, float* %tmp279, i64 1
+ %tmp281 = getelementptr inbounds float, float* %tmp280, i64 1
+ %tmp282 = getelementptr inbounds float, float* %tmp281, i64 1
+ %tmp283 = getelementptr inbounds float, float* %tmp282, i64 1
+ %tmp284 = getelementptr inbounds float, float* %tmp283, i64 1
+ %tmp285 = getelementptr inbounds float, float* %tmp284, i64 1
+ %tmp286 = getelementptr inbounds float, float* %tmp285, i64 1
+ %tmp287 = getelementptr inbounds float, float* %tmp286, i64 1
+ %tmp288 = getelementptr inbounds float, float* %tmp287, i64 1
+ %tmp289 = getelementptr inbounds float, float* %tmp288, i64 1
+ %tmp290 = getelementptr inbounds float, float* %tmp289, i64 1
+ %tmp291 = getelementptr inbounds float, float* %tmp290, i64 1
+ %tmp292 = getelementptr inbounds float, float* %tmp291, i64 1
+ %tmp293 = getelementptr inbounds float, float* %tmp292, i64 1
+ %tmp294 = getelementptr inbounds float, float* %tmp293, i64 1
+ %tmp295 = getelementptr inbounds float, float* %tmp294, i64 1
+ %tmp296 = getelementptr inbounds float, float* %tmp295, i64 1
+ %tmp297 = getelementptr inbounds float, float* %tmp296, i64 1
+ %tmp298 = getelementptr inbounds float, float* %tmp297, i64 1
+ %tmp299 = getelementptr inbounds float, float* %tmp298, i64 1
+ %tmp300 = getelementptr inbounds float, float* %tmp299, i64 1
+ %tmp301 = getelementptr inbounds float, float* %tmp300, i64 1
+ %tmp302 = getelementptr inbounds float, float* %tmp301, i64 1
+ %tmp303 = getelementptr inbounds float, float* %tmp302, i64 1
+ %tmp304 = getelementptr inbounds float, float* %tmp303, i64 1
+ %tmp305 = getelementptr inbounds float, float* %tmp304, i64 1
+ %tmp306 = getelementptr inbounds float, float* %tmp305, i64 1
+ %tmp307 = getelementptr inbounds float, float* %tmp306, i64 1
+ %tmp308 = getelementptr inbounds float, float* %tmp307, i64 1
+ %tmp309 = getelementptr inbounds float, float* %tmp308, i64 1
+ %tmp310 = getelementptr inbounds float, float* %tmp309, i64 1
+ %tmp311 = getelementptr inbounds float, float* %tmp310, i64 1
+ %tmp312 = getelementptr inbounds float, float* %tmp311, i64 1
+ %tmp313 = getelementptr inbounds float, float* %tmp312, i64 1
+ %tmp314 = getelementptr inbounds float, float* %tmp313, i64 1
+ %tmp315 = getelementptr inbounds float, float* %tmp314, i64 1
+ %tmp316 = getelementptr inbounds float, float* %tmp315, i64 1
+ %tmp317 = getelementptr inbounds float, float* %tmp316, i64 1
+ %tmp318 = getelementptr inbounds float, float* %tmp317, i64 1
+ %tmp319 = getelementptr inbounds float, float* %tmp318, i64 1
+ %tmp320 = getelementptr inbounds float, float* %tmp319, i64 1
+ %tmp321 = getelementptr inbounds float, float* %tmp320, i64 1
+ %tmp322 = getelementptr inbounds float, float* %tmp321, i64 1
+ %tmp323 = getelementptr inbounds float, float* %tmp322, i64 1
+ %tmp324 = getelementptr inbounds float, float* %tmp323, i64 1
+ %tmp325 = getelementptr inbounds float, float* %tmp324, i64 1
+ %tmp326 = getelementptr inbounds float, float* %tmp325, i64 1
+ %tmp327 = getelementptr inbounds float, float* %tmp326, i64 1
+ %tmp328 = getelementptr inbounds float, float* %tmp327, i64 1
+ %tmp329 = getelementptr inbounds float, float* %tmp328, i64 1
+ %tmp330 = getelementptr inbounds float, float* %tmp329, i64 1
+ %tmp331 = getelementptr inbounds float, float* %tmp330, i64 1
+ %tmp332 = getelementptr inbounds float, float* %tmp331, i64 1
+ %tmp333 = getelementptr inbounds float, float* %tmp332, i64 1
+ %tmp334 = getelementptr inbounds float, float* %tmp333, i64 1
+ %tmp335 = getelementptr inbounds float, float* %tmp334, i64 1
+ %tmp336 = getelementptr inbounds float, float* %tmp335, i64 1
+ %tmp337 = getelementptr inbounds float, float* %tmp336, i64 1
+ %tmp338 = getelementptr inbounds float, float* %tmp337, i64 1
+ %tmp339 = getelementptr inbounds float, float* %tmp338, i64 1
+ %tmp340 = getelementptr inbounds float, float* %tmp339, i64 1
+ %tmp341 = getelementptr inbounds float, float* %tmp340, i64 1
+ %tmp342 = getelementptr inbounds float, float* %tmp341, i64 1
+ %tmp343 = getelementptr inbounds float, float* %tmp342, i64 1
+ %tmp344 = getelementptr inbounds float, float* %tmp343, i64 1
+ %tmp345 = getelementptr inbounds float, float* %tmp344, i64 1
+ %tmp346 = getelementptr inbounds float, float* %tmp345, i64 1
+ %tmp347 = getelementptr inbounds float, float* %tmp346, i64 1
+ %tmp348 = getelementptr inbounds float, float* %tmp347, i64 1
+ %tmp349 = getelementptr inbounds float, float* %tmp348, i64 1
+ %tmp350 = getelementptr inbounds float, float* %tmp349, i64 1
+ %tmp351 = getelementptr inbounds float, float* %tmp350, i64 1
+ %tmp352 = getelementptr inbounds float, float* %tmp351, i64 1
+ %tmp353 = getelementptr inbounds float, float* %tmp352, i64 1
+ %tmp354 = getelementptr inbounds float, float* %tmp353, i64 1
+ %tmp355 = getelementptr inbounds float, float* %tmp354, i64 1
+ %tmp356 = getelementptr inbounds float, float* %tmp355, i64 1
+ %tmp357 = getelementptr inbounds float, float* %tmp356, i64 1
+ %tmp358 = getelementptr inbounds float, float* %tmp357, i64 1
+ %tmp359 = getelementptr inbounds float, float* %tmp358, i64 1
+ %tmp360 = getelementptr inbounds float, float* %tmp359, i64 1
+ %tmp361 = getelementptr inbounds float, float* %tmp360, i64 1
+ %tmp362 = getelementptr inbounds float, float* %tmp361, i64 1
+ %tmp363 = getelementptr inbounds float, float* %tmp362, i64 1
+ %tmp364 = getelementptr inbounds float, float* %tmp363, i64 1
+ %tmp365 = getelementptr inbounds float, float* %tmp364, i64 1
+ %tmp366 = getelementptr inbounds float, float* %tmp365, i64 1
+ %tmp367 = getelementptr inbounds float, float* %tmp366, i64 1
+ %tmp368 = getelementptr inbounds float, float* %tmp367, i64 1
+ %tmp369 = getelementptr inbounds float, float* %tmp368, i64 1
+ %tmp370 = getelementptr inbounds float, float* %tmp369, i64 1
+ %tmp371 = getelementptr inbounds float, float* %tmp370, i64 1
+ %tmp372 = getelementptr inbounds float, float* %tmp371, i64 1
+ %tmp373 = getelementptr inbounds float, float* %tmp372, i64 1
+ %tmp374 = getelementptr inbounds float, float* %tmp373, i64 1
+ %tmp375 = getelementptr inbounds float, float* %tmp374, i64 1
+ %tmp376 = getelementptr inbounds float, float* %tmp375, i64 1
+ %tmp377 = getelementptr inbounds float, float* %tmp376, i64 1
+ %tmp378 = getelementptr inbounds float, float* %tmp377, i64 1
+ %tmp379 = getelementptr inbounds float, float* %tmp378, i64 1
+ %tmp380 = getelementptr inbounds float, float* %tmp379, i64 1
+ %tmp381 = getelementptr inbounds float, float* %tmp380, i64 1
+ %tmp382 = getelementptr inbounds float, float* %tmp381, i64 1
+ %tmp383 = getelementptr inbounds float, float* %tmp382, i64 1
+ %tmp384 = getelementptr inbounds float, float* %tmp383, i64 1
+ %tmp385 = getelementptr inbounds float, float* %tmp384, i64 1
+ %tmp386 = getelementptr inbounds float, float* %tmp385, i64 1
+ %tmp387 = getelementptr inbounds float, float* %tmp386, i64 1
+ %tmp388 = getelementptr inbounds float, float* %tmp387, i64 1
+ %tmp389 = getelementptr inbounds float, float* %tmp388, i64 1
+ %tmp390 = getelementptr inbounds float, float* %tmp389, i64 1
+ %tmp391 = getelementptr inbounds float, float* %tmp390, i64 1
+ %tmp392 = getelementptr inbounds float, float* %tmp391, i64 1
+ %tmp393 = getelementptr inbounds float, float* %tmp392, i64 1
+ %tmp394 = getelementptr inbounds float, float* %tmp393, i64 1
+ %tmp395 = getelementptr inbounds float, float* %tmp394, i64 1
+ %tmp396 = getelementptr inbounds float, float* %tmp395, i64 1
+ %tmp397 = getelementptr inbounds float, float* %tmp396, i64 1
+ %tmp398 = getelementptr inbounds float, float* %tmp397, i64 1
+ %tmp399 = getelementptr inbounds float, float* %tmp398, i64 1
+ %tmp400 = getelementptr inbounds float, float* %tmp399, i64 1
+ %tmp401 = getelementptr inbounds float, float* %tmp400, i64 1
+ %tmp402 = getelementptr inbounds float, float* %tmp401, i64 1
+ %tmp403 = getelementptr inbounds float, float* %tmp402, i64 1
+ %tmp404 = getelementptr inbounds float, float* %tmp403, i64 1
+ %tmp405 = getelementptr inbounds float, float* %tmp404, i64 1
+ %tmp406 = getelementptr inbounds float, float* %tmp405, i64 1
+ %tmp407 = getelementptr inbounds float, float* %tmp406, i64 1
+ %tmp408 = getelementptr inbounds float, float* %tmp407, i64 1
+ %tmp409 = getelementptr inbounds float, float* %tmp408, i64 1
+ %tmp410 = getelementptr inbounds float, float* %tmp409, i64 1
+ %tmp411 = getelementptr inbounds float, float* %tmp410, i64 1
+ %tmp412 = getelementptr inbounds float, float* %tmp411, i64 1
+ %tmp413 = getelementptr inbounds float, float* %tmp412, i64 1
+ %tmp414 = getelementptr inbounds float, float* %tmp413, i64 1
+ %tmp415 = getelementptr inbounds float, float* %tmp414, i64 1
+ %tmp416 = getelementptr inbounds float, float* %tmp415, i64 1
+ %tmp417 = getelementptr inbounds float, float* %tmp416, i64 1
+ %tmp418 = getelementptr inbounds float, float* %tmp417, i64 1
+ %tmp419 = getelementptr inbounds float, float* %tmp418, i64 1
+ %tmp420 = getelementptr inbounds float, float* %tmp419, i64 1
+ %tmp421 = getelementptr inbounds float, float* %tmp420, i64 1
+ %tmp422 = getelementptr inbounds float, float* %tmp421, i64 1
+ %tmp423 = getelementptr inbounds float, float* %tmp422, i64 1
+ %tmp424 = getelementptr inbounds float, float* %tmp423, i64 1
+ %tmp425 = getelementptr inbounds float, float* %tmp424, i64 1
+ %tmp426 = getelementptr inbounds float, float* %tmp425, i64 1
+ %tmp427 = getelementptr inbounds float, float* %tmp426, i64 1
+ %tmp428 = getelementptr inbounds float, float* %tmp427, i64 1
+ %tmp429 = getelementptr inbounds float, float* %tmp428, i64 1
+ %tmp430 = getelementptr inbounds float, float* %tmp429, i64 1
+ %tmp431 = getelementptr inbounds float, float* %tmp430, i64 1
+ %tmp432 = getelementptr inbounds float, float* %tmp431, i64 1
+ %tmp433 = getelementptr inbounds float, float* %tmp432, i64 1
+ %tmp434 = getelementptr inbounds float, float* %tmp433, i64 1
+ %tmp435 = getelementptr inbounds float, float* %tmp434, i64 1
+ %tmp436 = getelementptr inbounds float, float* %tmp435, i64 1
+ %tmp437 = getelementptr inbounds float, float* %tmp436, i64 1
+ %tmp438 = getelementptr inbounds float, float* %tmp437, i64 1
+ %tmp439 = getelementptr inbounds float, float* %tmp438, i64 1
+ %tmp440 = getelementptr inbounds float, float* %tmp439, i64 1
+ %tmp441 = getelementptr inbounds float, float* %tmp440, i64 1
+ %tmp442 = getelementptr inbounds float, float* %tmp441, i64 1
+ %tmp443 = getelementptr inbounds float, float* %tmp442, i64 1
+ %tmp444 = getelementptr inbounds float, float* %tmp443, i64 1
+ %tmp445 = getelementptr inbounds float, float* %tmp444, i64 1
+ %tmp446 = getelementptr inbounds float, float* %tmp445, i64 1
+ %tmp447 = getelementptr inbounds float, float* %tmp446, i64 1
+ %tmp448 = getelementptr inbounds float, float* %tmp447, i64 1
+ %tmp449 = getelementptr inbounds float, float* %tmp448, i64 1
+ %tmp450 = getelementptr inbounds float, float* %tmp449, i64 1
+ %tmp451 = getelementptr inbounds float, float* %tmp450, i64 1
+ %tmp452 = getelementptr inbounds float, float* %tmp451, i64 1
+ %tmp453 = getelementptr inbounds float, float* %tmp452, i64 1
+ %tmp454 = getelementptr inbounds float, float* %tmp453, i64 1
+ %tmp455 = getelementptr inbounds float, float* %tmp454, i64 1
+ %tmp456 = getelementptr inbounds float, float* %tmp455, i64 1
+ %tmp457 = getelementptr inbounds float, float* %tmp456, i64 1
+ %tmp458 = getelementptr inbounds float, float* %tmp457, i64 1
+ %tmp459 = getelementptr inbounds float, float* %tmp458, i64 1
+ %tmp460 = getelementptr inbounds float, float* %tmp459, i64 1
+ %tmp461 = getelementptr inbounds float, float* %tmp460, i64 1
+ %tmp462 = getelementptr inbounds float, float* %tmp461, i64 1
+ %tmp463 = getelementptr inbounds float, float* %tmp462, i64 1
+ %tmp464 = getelementptr inbounds float, float* %tmp463, i64 1
+ %tmp465 = getelementptr inbounds float, float* %tmp464, i64 1
+ %tmp466 = getelementptr inbounds float, float* %tmp465, i64 1
+ %tmp467 = getelementptr inbounds float, float* %tmp466, i64 1
+ %tmp468 = getelementptr inbounds float, float* %tmp467, i64 1
+ %tmp469 = getelementptr inbounds float, float* %tmp468, i64 1
+ %tmp470 = getelementptr inbounds float, float* %tmp469, i64 1
+ %tmp471 = getelementptr inbounds float, float* %tmp470, i64 1
+ %tmp472 = getelementptr inbounds float, float* %tmp471, i64 1
+ %tmp473 = getelementptr inbounds float, float* %tmp472, i64 1
+ %tmp474 = getelementptr inbounds float, float* %tmp473, i64 1
+ %tmp475 = getelementptr inbounds float, float* %tmp474, i64 1
+ %tmp476 = getelementptr inbounds float, float* %tmp475, i64 1
+ %tmp477 = getelementptr inbounds float, float* %tmp476, i64 1
+ %tmp478 = getelementptr inbounds float, float* %tmp477, i64 1
+ %tmp479 = getelementptr inbounds float, float* %tmp478, i64 1
+ %tmp480 = getelementptr inbounds float, float* %tmp479, i64 1
+ %tmp481 = getelementptr inbounds float, float* %tmp480, i64 1
+ %tmp482 = getelementptr inbounds float, float* %tmp481, i64 1
+ %tmp483 = getelementptr inbounds float, float* %tmp482, i64 1
+ %tmp484 = getelementptr inbounds float, float* %tmp483, i64 1
+ %tmp485 = getelementptr inbounds float, float* %tmp484, i64 1
+ %tmp486 = getelementptr inbounds float, float* %tmp485, i64 1
+ %tmp487 = getelementptr inbounds float, float* %tmp486, i64 1
+ %tmp488 = getelementptr inbounds float, float* %tmp487, i64 1
+ %tmp489 = getelementptr inbounds float, float* %tmp488, i64 1
+ %tmp490 = getelementptr inbounds float, float* %tmp489, i64 1
+ %tmp491 = getelementptr inbounds float, float* %tmp490, i64 1
+ %tmp492 = getelementptr inbounds float, float* %tmp491, i64 1
+ %tmp493 = getelementptr inbounds float, float* %tmp492, i64 1
+ %tmp494 = getelementptr inbounds float, float* %tmp493, i64 1
+ %tmp495 = getelementptr inbounds float, float* %tmp494, i64 1
+ %tmp496 = getelementptr inbounds float, float* %tmp495, i64 1
+ %tmp497 = getelementptr inbounds float, float* %tmp496, i64 1
+ %tmp498 = getelementptr inbounds float, float* %tmp497, i64 1
+ %tmp499 = getelementptr inbounds float, float* %tmp498, i64 1
+ %tmp500 = getelementptr inbounds float, float* %tmp499, i64 1
+ %tmp501 = getelementptr inbounds float, float* %tmp500, i64 1
+ %tmp502 = getelementptr inbounds float, float* %tmp501, i64 1
+ %tmp503 = getelementptr inbounds float, float* %tmp502, i64 1
+ %tmp504 = getelementptr inbounds float, float* %tmp503, i64 1
+ %tmp505 = getelementptr inbounds float, float* %tmp504, i64 1
+ %tmp506 = getelementptr inbounds float, float* %tmp505, i64 1
+ %tmp507 = getelementptr inbounds float, float* %tmp506, i64 1
+ %tmp508 = getelementptr inbounds float, float* %tmp507, i64 1
+ %tmp509 = getelementptr inbounds float, float* %tmp508, i64 1
+ %tmp510 = getelementptr inbounds float, float* %tmp509, i64 1
+ %tmp511 = getelementptr inbounds float, float* %tmp510, i64 1
+ %tmp512 = getelementptr inbounds float, float* %tmp511, i64 1
+ %tmp513 = getelementptr inbounds float, float* %tmp512, i64 1
+ %tmp514 = getelementptr inbounds float, float* %tmp513, i64 1
+ %tmp515 = getelementptr inbounds float, float* %tmp514, i64 1
+ %tmp516 = getelementptr inbounds float, float* %tmp515, i64 1
+ %tmp517 = getelementptr inbounds float, float* %tmp516, i64 1
+ %tmp518 = getelementptr inbounds float, float* %tmp517, i64 1
+ %tmp519 = getelementptr inbounds float, float* %tmp518, i64 1
+ %tmp520 = getelementptr inbounds float, float* %tmp519, i64 1
+ %tmp521 = getelementptr inbounds float, float* %tmp520, i64 1
+ %tmp522 = getelementptr inbounds float, float* %tmp521, i64 1
+ %tmp523 = getelementptr inbounds float, float* %tmp522, i64 1
+ %tmp524 = getelementptr inbounds float, float* %tmp523, i64 1
+ %tmp525 = getelementptr inbounds float, float* %tmp524, i64 1
+ %tmp526 = getelementptr inbounds float, float* %tmp525, i64 1
+ %tmp527 = getelementptr inbounds float, float* %tmp526, i64 1
+ %tmp528 = getelementptr inbounds float, float* %tmp527, i64 1
+ %tmp529 = getelementptr inbounds float, float* %tmp528, i64 1
+ %tmp530 = getelementptr inbounds float, float* %tmp529, i64 1
+ %tmp531 = getelementptr inbounds float, float* %tmp530, i64 1
+ %tmp532 = getelementptr inbounds float, float* %tmp531, i64 1
+ %tmp533 = getelementptr inbounds float, float* %tmp532, i64 1
+ %tmp534 = getelementptr inbounds float, float* %tmp533, i64 1
+ %tmp535 = getelementptr inbounds float, float* %tmp534, i64 1
+ %tmp536 = getelementptr inbounds float, float* %tmp535, i64 1
+ %tmp537 = getelementptr inbounds float, float* %tmp536, i64 1
+ %tmp538 = getelementptr inbounds float, float* %tmp537, i64 1
+ %tmp539 = getelementptr inbounds float, float* %tmp538, i64 1
+ %tmp540 = getelementptr inbounds float, float* %tmp539, i64 1
+ %tmp541 = getelementptr inbounds float, float* %tmp540, i64 1
+ %tmp542 = getelementptr inbounds float, float* %tmp541, i64 1
+ %tmp543 = getelementptr inbounds float, float* %tmp542, i64 1
+ %tmp544 = getelementptr inbounds float, float* %tmp543, i64 1
+ %tmp545 = getelementptr inbounds float, float* %tmp544, i64 1
+ %tmp546 = getelementptr inbounds float, float* %tmp545, i64 1
+ %tmp547 = getelementptr inbounds float, float* %tmp546, i64 1
+ %tmp548 = getelementptr inbounds float, float* %tmp547, i64 1
+ %tmp549 = getelementptr inbounds float, float* %tmp548, i64 1
+ %tmp550 = getelementptr inbounds float, float* %tmp549, i64 1
+ %tmp551 = getelementptr inbounds float, float* %tmp550, i64 1
+ %tmp552 = getelementptr inbounds float, float* %tmp551, i64 1
+ %tmp553 = getelementptr inbounds float, float* %tmp552, i64 1
+ %tmp554 = getelementptr inbounds float, float* %tmp553, i64 1
+ %tmp555 = getelementptr inbounds float, float* %tmp554, i64 1
+ %tmp556 = getelementptr inbounds float, float* %tmp555, i64 1
+ %tmp557 = getelementptr inbounds float, float* %tmp556, i64 1
+ %tmp558 = getelementptr inbounds float, float* %tmp557, i64 1
+ %tmp559 = getelementptr inbounds float, float* %tmp558, i64 1
+ %tmp560 = getelementptr inbounds float, float* %tmp559, i64 1
+ %tmp561 = getelementptr inbounds float, float* %tmp560, i64 1
+ %tmp562 = getelementptr inbounds float, float* %tmp561, i64 1
+ %tmp563 = getelementptr inbounds float, float* %tmp562, i64 1
+ %tmp564 = getelementptr inbounds float, float* %tmp563, i64 1
+ %tmp565 = getelementptr inbounds float, float* %tmp564, i64 1
+ %tmp566 = getelementptr inbounds float, float* %tmp565, i64 1
+ %tmp567 = getelementptr inbounds float, float* %tmp566, i64 1
+ %tmp568 = getelementptr inbounds float, float* %tmp567, i64 1
+ %tmp569 = getelementptr inbounds float, float* %tmp568, i64 1
+ %tmp570 = getelementptr inbounds float, float* %tmp569, i64 1
+ %tmp571 = getelementptr inbounds float, float* %tmp570, i64 1
+ %tmp572 = getelementptr inbounds float, float* %tmp571, i64 1
+ %tmp573 = getelementptr inbounds float, float* %tmp572, i64 1
+ %tmp574 = getelementptr inbounds float, float* %tmp573, i64 1
+ %tmp575 = getelementptr inbounds float, float* %tmp574, i64 1
+ %tmp576 = getelementptr inbounds float, float* %tmp575, i64 1
+ %tmp577 = getelementptr inbounds float, float* %tmp576, i64 1
+ %tmp578 = getelementptr inbounds float, float* %tmp577, i64 1
+ %tmp579 = getelementptr inbounds float, float* %tmp578, i64 1
+ %tmp580 = getelementptr inbounds float, float* %tmp579, i64 1
+ %tmp581 = getelementptr inbounds float, float* %tmp580, i64 1
+ %tmp582 = getelementptr inbounds float, float* %tmp581, i64 1
+ %tmp583 = getelementptr inbounds float, float* %tmp582, i64 1
+ %tmp584 = getelementptr inbounds float, float* %tmp583, i64 1
+ %tmp585 = getelementptr inbounds float, float* %tmp584, i64 1
+ %tmp586 = getelementptr inbounds float, float* %tmp585, i64 1
+ %tmp587 = getelementptr inbounds float, float* %tmp586, i64 1
+ %tmp588 = getelementptr inbounds float, float* %tmp587, i64 1
+ %tmp589 = getelementptr inbounds float, float* %tmp588, i64 1
+ %tmp590 = getelementptr inbounds float, float* %tmp589, i64 1
+ %tmp591 = getelementptr inbounds float, float* %tmp590, i64 1
+ %tmp592 = getelementptr inbounds float, float* %tmp591, i64 1
+ %tmp593 = getelementptr inbounds float, float* %tmp592, i64 1
+ %tmp594 = getelementptr inbounds float, float* %tmp593, i64 1
+ %tmp595 = getelementptr inbounds float, float* %tmp594, i64 1
+ %tmp596 = getelementptr inbounds float, float* %tmp595, i64 1
+ %tmp597 = getelementptr inbounds float, float* %tmp596, i64 1
+ %tmp598 = getelementptr inbounds float, float* %tmp597, i64 1
+ %tmp599 = getelementptr inbounds float, float* %tmp598, i64 1
+ %tmp600 = getelementptr inbounds float, float* %tmp599, i64 1
+ %tmp601 = getelementptr inbounds float, float* %tmp600, i64 1
+ %tmp602 = getelementptr inbounds float, float* %tmp601, i64 1
+ %tmp603 = getelementptr inbounds float, float* %tmp602, i64 1
+ %tmp604 = getelementptr inbounds float, float* %tmp603, i64 1
+ %tmp605 = getelementptr inbounds float, float* %tmp604, i64 1
+ %tmp606 = getelementptr inbounds float, float* %tmp605, i64 1
+ %tmp607 = getelementptr inbounds float, float* %tmp606, i64 1
+ %tmp608 = getelementptr inbounds float, float* %tmp607, i64 1
+ %tmp609 = getelementptr inbounds float, float* %tmp608, i64 1
+ %tmp610 = getelementptr inbounds float, float* %tmp609, i64 1
+ %tmp611 = getelementptr inbounds float, float* %tmp610, i64 1
+ %tmp612 = getelementptr inbounds float, float* %tmp611, i64 1
+ %tmp613 = getelementptr inbounds float, float* %tmp612, i64 1
+ %tmp614 = getelementptr inbounds float, float* %tmp613, i64 1
+ %tmp615 = getelementptr inbounds float, float* %tmp614, i64 1
+ %tmp616 = getelementptr inbounds float, float* %tmp615, i64 1
+ %tmp617 = getelementptr inbounds float, float* %tmp616, i64 1
+ %tmp618 = getelementptr inbounds float, float* %tmp617, i64 1
+ %tmp619 = getelementptr inbounds float, float* %tmp618, i64 1
+ %tmp620 = getelementptr inbounds float, float* %tmp619, i64 1
+ %tmp621 = getelementptr inbounds float, float* %tmp620, i64 1
+ %tmp622 = getelementptr inbounds float, float* %tmp621, i64 1
+ %tmp623 = getelementptr inbounds float, float* %tmp622, i64 1
+ %tmp624 = getelementptr inbounds float, float* %tmp623, i64 1
+ %tmp625 = getelementptr inbounds float, float* %tmp624, i64 1
+ %tmp626 = getelementptr inbounds float, float* %tmp625, i64 1
+ %tmp627 = getelementptr inbounds float, float* %tmp626, i64 1
+ %tmp628 = getelementptr inbounds float, float* %tmp627, i64 1
+ %tmp629 = getelementptr inbounds float, float* %tmp628, i64 1
+ %tmp630 = getelementptr inbounds float, float* %tmp629, i64 1
+ %tmp631 = getelementptr inbounds float, float* %tmp630, i64 1
+ %tmp632 = getelementptr inbounds float, float* %tmp631, i64 1
+ %tmp633 = getelementptr inbounds float, float* %tmp632, i64 1
+ %tmp634 = getelementptr inbounds float, float* %tmp633, i64 1
+ %tmp635 = getelementptr inbounds float, float* %tmp634, i64 1
+ %tmp636 = getelementptr inbounds float, float* %tmp635, i64 1
+ %tmp637 = getelementptr inbounds float, float* %tmp636, i64 1
+ %tmp638 = getelementptr inbounds float, float* %tmp637, i64 1
+ %tmp639 = getelementptr inbounds float, float* %tmp638, i64 1
+ %tmp640 = getelementptr inbounds float, float* %tmp639, i64 1
+ %tmp641 = getelementptr inbounds float, float* %tmp640, i64 1
+ %tmp642 = getelementptr inbounds float, float* %tmp641, i64 1
+ %tmp643 = getelementptr inbounds float, float* %tmp642, i64 1
+ %tmp644 = getelementptr inbounds float, float* %tmp643, i64 1
+ %tmp645 = getelementptr inbounds float, float* %tmp644, i64 1
+ %tmp646 = getelementptr inbounds float, float* %tmp645, i64 1
+ %tmp647 = getelementptr inbounds float, float* %tmp646, i64 1
+ %tmp648 = getelementptr inbounds float, float* %tmp647, i64 1
+ %tmp649 = getelementptr inbounds float, float* %tmp648, i64 1
+ %tmp650 = getelementptr inbounds float, float* %tmp649, i64 1
+ %tmp651 = getelementptr inbounds float, float* %tmp650, i64 1
+ %tmp652 = getelementptr inbounds float, float* %tmp651, i64 1
+ %tmp653 = getelementptr inbounds float, float* %tmp652, i64 1
+ %tmp654 = getelementptr inbounds float, float* %tmp653, i64 1
+ %tmp655 = getelementptr inbounds float, float* %tmp654, i64 1
+ %tmp656 = getelementptr inbounds float, float* %tmp655, i64 1
+ %tmp657 = getelementptr inbounds float, float* %tmp656, i64 1
+ %tmp658 = getelementptr inbounds float, float* %tmp657, i64 1
+ %tmp659 = getelementptr inbounds float, float* %tmp658, i64 1
+ %tmp660 = getelementptr inbounds float, float* %tmp659, i64 1
+ %tmp661 = getelementptr inbounds float, float* %tmp660, i64 1
+ %tmp662 = getelementptr inbounds float, float* %tmp661, i64 1
+ %tmp663 = getelementptr inbounds float, float* %tmp662, i64 1
+ %tmp664 = getelementptr inbounds float, float* %tmp663, i64 1
+ %tmp665 = getelementptr inbounds float, float* %tmp664, i64 1
+ %tmp666 = getelementptr inbounds float, float* %tmp665, i64 1
+ %tmp667 = getelementptr inbounds float, float* %tmp666, i64 1
+ %tmp668 = getelementptr inbounds float, float* %tmp667, i64 1
+ %tmp669 = getelementptr inbounds float, float* %tmp668, i64 1
+ %tmp670 = getelementptr inbounds float, float* %tmp669, i64 1
+ %tmp671 = getelementptr inbounds float, float* %tmp670, i64 1
+ %tmp672 = getelementptr inbounds float, float* %tmp671, i64 1
+ %tmp673 = getelementptr inbounds float, float* %tmp672, i64 1
+ %tmp674 = getelementptr inbounds float, float* %tmp673, i64 1
+ %tmp675 = getelementptr inbounds float, float* %tmp674, i64 1
+ %tmp676 = getelementptr inbounds float, float* %tmp675, i64 1
+ %tmp677 = getelementptr inbounds float, float* %tmp676, i64 1
+ %tmp678 = getelementptr inbounds float, float* %tmp677, i64 1
+ %tmp679 = getelementptr inbounds float, float* %tmp678, i64 1
+ %tmp680 = getelementptr inbounds float, float* %tmp679, i64 1
+ %tmp681 = getelementptr inbounds float, float* %tmp680, i64 1
+ %tmp682 = getelementptr inbounds float, float* %tmp681, i64 1
+ %tmp683 = getelementptr inbounds float, float* %tmp682, i64 1
+ %tmp684 = getelementptr inbounds float, float* %tmp683, i64 1
+ %tmp685 = getelementptr inbounds float, float* %tmp684, i64 1
+ %tmp686 = getelementptr inbounds float, float* %tmp685, i64 1
+ %tmp687 = getelementptr inbounds float, float* %tmp686, i64 1
+ %tmp688 = getelementptr inbounds float, float* %tmp687, i64 1
+ %tmp689 = getelementptr inbounds float, float* %tmp688, i64 1
+ %tmp690 = getelementptr inbounds float, float* %tmp689, i64 1
+ %tmp691 = getelementptr inbounds float, float* %tmp690, i64 1
+ %tmp692 = getelementptr inbounds float, float* %tmp691, i64 1
+ %tmp693 = getelementptr inbounds float, float* %tmp692, i64 1
+ %tmp694 = getelementptr inbounds float, float* %tmp693, i64 1
+ %tmp695 = getelementptr inbounds float, float* %tmp694, i64 1
+ %tmp696 = getelementptr inbounds float, float* %tmp695, i64 1
+ %tmp697 = getelementptr inbounds float, float* %tmp696, i64 1
+ %tmp698 = getelementptr inbounds float, float* %tmp697, i64 1
+ %tmp699 = getelementptr inbounds float, float* %tmp698, i64 1
+ %tmp700 = getelementptr inbounds float, float* %tmp699, i64 1
+ %tmp701 = getelementptr inbounds float, float* %tmp700, i64 1
+ %tmp702 = getelementptr inbounds float, float* %tmp701, i64 1
+ %tmp703 = getelementptr inbounds float, float* %tmp702, i64 1
+ %tmp704 = getelementptr inbounds float, float* %tmp703, i64 1
+ %tmp705 = getelementptr inbounds float, float* %tmp704, i64 1
+ %tmp706 = getelementptr inbounds float, float* %tmp705, i64 1
+ %tmp707 = getelementptr inbounds float, float* %tmp706, i64 1
+ %tmp708 = getelementptr inbounds float, float* %tmp707, i64 1
+ %tmp709 = getelementptr inbounds float, float* %tmp708, i64 1
+ %tmp710 = getelementptr inbounds float, float* %tmp709, i64 1
+ %tmp711 = getelementptr inbounds float, float* %tmp710, i64 1
+ %tmp712 = getelementptr inbounds float, float* %tmp711, i64 1
+ %tmp713 = getelementptr inbounds float, float* %tmp712, i64 1
+ %tmp714 = getelementptr inbounds float, float* %tmp713, i64 1
+ %tmp715 = getelementptr inbounds float, float* %tmp714, i64 1
+ %tmp716 = getelementptr inbounds float, float* %tmp715, i64 1
+ %tmp717 = getelementptr inbounds float, float* %tmp716, i64 1
+ %tmp718 = getelementptr inbounds float, float* %tmp717, i64 1
+ %tmp719 = getelementptr inbounds float, float* %tmp718, i64 1
+ %tmp720 = getelementptr inbounds float, float* %tmp719, i64 1
+ %tmp721 = getelementptr inbounds float, float* %tmp720, i64 1
+ %tmp722 = getelementptr inbounds float, float* %tmp721, i64 1
+ %tmp723 = getelementptr inbounds float, float* %tmp722, i64 1
+ %tmp724 = getelementptr inbounds float, float* %tmp723, i64 1
+ %tmp725 = getelementptr inbounds float, float* %tmp724, i64 1
+ %tmp726 = getelementptr inbounds float, float* %tmp725, i64 1
+ %tmp727 = getelementptr inbounds float, float* %tmp726, i64 1
+ %tmp728 = getelementptr inbounds float, float* %tmp727, i64 1
+ %tmp729 = getelementptr inbounds float, float* %tmp728, i64 1
+ %tmp730 = getelementptr inbounds float, float* %tmp729, i64 1
+ %tmp731 = getelementptr inbounds float, float* %tmp730, i64 1
+ %tmp732 = getelementptr inbounds float, float* %tmp731, i64 1
+ %tmp733 = getelementptr inbounds float, float* %tmp732, i64 1
+ %tmp734 = getelementptr inbounds float, float* %tmp733, i64 1
+ %tmp735 = getelementptr inbounds float, float* %tmp734, i64 1
+ %tmp736 = getelementptr inbounds float, float* %tmp735, i64 1
+ %tmp737 = getelementptr inbounds float, float* %tmp736, i64 1
+ %tmp738 = getelementptr inbounds float, float* %tmp737, i64 1
+ %tmp739 = getelementptr inbounds float, float* %tmp738, i64 1
+ %tmp740 = getelementptr inbounds float, float* %tmp739, i64 1
+ %tmp741 = getelementptr inbounds float, float* %tmp740, i64 1
+ %tmp742 = getelementptr inbounds float, float* %tmp741, i64 1
+ %tmp743 = getelementptr inbounds float, float* %tmp742, i64 1
+ %tmp744 = getelementptr inbounds float, float* %tmp743, i64 1
+ %tmp745 = getelementptr inbounds float, float* %tmp744, i64 1
+ %tmp746 = getelementptr inbounds float, float* %tmp745, i64 1
+ %tmp747 = getelementptr inbounds float, float* %tmp746, i64 1
+ %tmp748 = getelementptr inbounds float, float* %tmp747, i64 1
+ %tmp749 = getelementptr inbounds float, float* %tmp748, i64 1
+ %tmp750 = getelementptr inbounds float, float* %tmp749, i64 1
+ %tmp751 = getelementptr inbounds float, float* %tmp750, i64 1
+ %tmp752 = getelementptr inbounds float, float* %tmp751, i64 1
+ %tmp753 = getelementptr inbounds float, float* %tmp752, i64 1
+ %tmp754 = getelementptr inbounds float, float* %tmp753, i64 1
+ %tmp755 = getelementptr inbounds float, float* %tmp754, i64 1
+ %tmp756 = getelementptr inbounds float, float* %tmp755, i64 1
+ %tmp757 = getelementptr inbounds float, float* %tmp756, i64 1
+ %tmp758 = getelementptr inbounds float, float* %tmp757, i64 1
+ %tmp759 = getelementptr inbounds float, float* %tmp758, i64 1
+ %tmp760 = getelementptr inbounds float, float* %tmp759, i64 1
+ %tmp761 = getelementptr inbounds float, float* %tmp760, i64 1
+ %tmp762 = getelementptr inbounds float, float* %tmp761, i64 1
+ %tmp763 = getelementptr inbounds float, float* %tmp762, i64 1
+ %tmp764 = getelementptr inbounds float, float* %tmp763, i64 1
+ %tmp765 = getelementptr inbounds float, float* %tmp764, i64 1
+ %tmp766 = getelementptr inbounds float, float* %tmp765, i64 1
+ %tmp767 = getelementptr inbounds float, float* %tmp766, i64 1
+ %tmp768 = getelementptr inbounds float, float* %tmp767, i64 1
+ %tmp769 = getelementptr inbounds float, float* %tmp768, i64 1
+ %tmp770 = getelementptr inbounds float, float* %tmp769, i64 1
+ %tmp771 = getelementptr inbounds float, float* %tmp770, i64 1
+ %tmp772 = getelementptr inbounds float, float* %tmp771, i64 1
+ %tmp773 = getelementptr inbounds float, float* %tmp772, i64 1
+ %tmp774 = getelementptr inbounds float, float* %tmp773, i64 1
+ %tmp775 = getelementptr inbounds float, float* %tmp774, i64 1
+ %tmp776 = getelementptr inbounds float, float* %tmp775, i64 1
+ %tmp777 = getelementptr inbounds float, float* %tmp776, i64 1
+ %tmp778 = getelementptr inbounds float, float* %tmp777, i64 1
+ %tmp779 = getelementptr inbounds float, float* %tmp778, i64 1
+ %tmp780 = getelementptr inbounds float, float* %tmp779, i64 1
+ %tmp781 = getelementptr inbounds float, float* %tmp780, i64 1
+ %tmp782 = getelementptr inbounds float, float* %tmp781, i64 1
+ %tmp783 = getelementptr inbounds float, float* %tmp782, i64 1
+ %tmp784 = getelementptr inbounds float, float* %tmp783, i64 1
+ %tmp785 = getelementptr inbounds float, float* %tmp784, i64 1
+ %tmp786 = getelementptr inbounds float, float* %tmp785, i64 1
+ %tmp787 = getelementptr inbounds float, float* %tmp786, i64 1
+ %tmp788 = getelementptr inbounds float, float* %tmp787, i64 1
+ %tmp789 = getelementptr inbounds float, float* %tmp788, i64 1
+ %tmp790 = getelementptr inbounds float, float* %tmp789, i64 1
+ %tmp791 = getelementptr inbounds float, float* %tmp790, i64 1
+ %tmp792 = getelementptr inbounds float, float* %tmp791, i64 1
+ %tmp793 = getelementptr inbounds float, float* %tmp792, i64 1
+ %tmp794 = getelementptr inbounds float, float* %tmp793, i64 1
+ %tmp795 = getelementptr inbounds float, float* %tmp794, i64 1
+ %tmp796 = getelementptr inbounds float, float* %tmp795, i64 1
+ %tmp797 = getelementptr inbounds float, float* %tmp796, i64 1
+ %tmp798 = getelementptr inbounds float, float* %tmp797, i64 1
+ %tmp799 = getelementptr inbounds float, float* %tmp798, i64 1
+ %tmp800 = getelementptr inbounds float, float* %tmp799, i64 1
+ %tmp801 = getelementptr inbounds float, float* %tmp800, i64 1
+ %tmp802 = getelementptr inbounds float, float* %tmp801, i64 1
+ %tmp803 = getelementptr inbounds float, float* %tmp802, i64 1
+ %tmp804 = getelementptr inbounds float, float* %tmp803, i64 1
+ %tmp805 = getelementptr inbounds float, float* %tmp804, i64 1
+ %tmp806 = getelementptr inbounds float, float* %tmp805, i64 1
+ %tmp807 = getelementptr inbounds float, float* %tmp806, i64 1
+ %tmp808 = getelementptr inbounds float, float* %tmp807, i64 1
+ %tmp809 = getelementptr inbounds float, float* %tmp808, i64 1
+ %tmp810 = getelementptr inbounds float, float* %tmp809, i64 1
+ %tmp811 = getelementptr inbounds float, float* %tmp810, i64 1
+ %tmp812 = getelementptr inbounds float, float* %tmp811, i64 1
+ %tmp813 = getelementptr inbounds float, float* %tmp812, i64 1
+ %tmp814 = getelementptr inbounds float, float* %tmp813, i64 1
+ %tmp815 = getelementptr inbounds float, float* %tmp814, i64 1
+ %tmp816 = getelementptr inbounds float, float* %tmp815, i64 1
+ %tmp817 = getelementptr inbounds float, float* %tmp816, i64 1
+ %tmp818 = getelementptr inbounds float, float* %tmp817, i64 1
+ %tmp819 = getelementptr inbounds float, float* %tmp818, i64 1
+ %tmp820 = getelementptr inbounds float, float* %tmp819, i64 1
+ %tmp821 = getelementptr inbounds float, float* %tmp820, i64 1
+ %tmp822 = getelementptr inbounds float, float* %tmp821, i64 1
+ %tmp823 = getelementptr inbounds float, float* %tmp822, i64 1
+ %tmp824 = getelementptr inbounds float, float* %tmp823, i64 1
+ %tmp825 = getelementptr inbounds float, float* %tmp824, i64 1
+ %tmp826 = getelementptr inbounds float, float* %tmp825, i64 1
+ %tmp827 = getelementptr inbounds float, float* %tmp826, i64 1
+ %tmp828 = getelementptr inbounds float, float* %tmp827, i64 1
+ %tmp829 = getelementptr inbounds float, float* %tmp828, i64 1
+ %tmp830 = getelementptr inbounds float, float* %tmp829, i64 1
+ %tmp831 = getelementptr inbounds float, float* %tmp830, i64 1
+ %tmp832 = getelementptr inbounds float, float* %tmp831, i64 1
+ %tmp833 = getelementptr inbounds float, float* %tmp832, i64 1
+ %tmp834 = getelementptr inbounds float, float* %tmp833, i64 1
+ %tmp835 = getelementptr inbounds float, float* %tmp834, i64 1
+ %tmp836 = getelementptr inbounds float, float* %tmp835, i64 1
+ %tmp837 = getelementptr inbounds float, float* %tmp836, i64 1
+ %tmp838 = getelementptr inbounds float, float* %tmp837, i64 1
+ %tmp839 = getelementptr inbounds float, float* %tmp838, i64 1
+ %tmp840 = getelementptr inbounds float, float* %tmp839, i64 1
+ %tmp841 = getelementptr inbounds float, float* %tmp840, i64 1
+ %tmp842 = getelementptr inbounds float, float* %tmp841, i64 1
+ %tmp843 = getelementptr inbounds float, float* %tmp842, i64 1
+ %tmp844 = getelementptr inbounds float, float* %tmp843, i64 1
+ %tmp845 = getelementptr inbounds float, float* %tmp844, i64 1
+ %tmp846 = getelementptr inbounds float, float* %tmp845, i64 1
+ %tmp847 = getelementptr inbounds float, float* %tmp846, i64 1
+ %tmp848 = getelementptr inbounds float, float* %tmp847, i64 1
+ %tmp849 = getelementptr inbounds float, float* %tmp848, i64 1
+ %tmp850 = getelementptr inbounds float, float* %tmp849, i64 1
+ %tmp851 = getelementptr inbounds float, float* %tmp850, i64 1
+ %tmp852 = getelementptr inbounds float, float* %tmp851, i64 1
+ %tmp853 = getelementptr inbounds float, float* %tmp852, i64 1
+ %tmp854 = getelementptr inbounds float, float* %tmp853, i64 1
+ %tmp855 = getelementptr inbounds float, float* %tmp854, i64 1
+ %tmp856 = getelementptr inbounds float, float* %tmp855, i64 1
+ %tmp857 = getelementptr inbounds float, float* %tmp856, i64 1
+ %tmp858 = getelementptr inbounds float, float* %tmp857, i64 1
+ %tmp859 = getelementptr inbounds float, float* %tmp858, i64 1
+ %tmp860 = getelementptr inbounds float, float* %tmp859, i64 1
+ %tmp861 = getelementptr inbounds float, float* %tmp860, i64 1
+ %tmp862 = getelementptr inbounds float, float* %tmp861, i64 1
+ %tmp863 = getelementptr inbounds float, float* %tmp862, i64 1
+ %tmp864 = getelementptr inbounds float, float* %tmp863, i64 1
+ %tmp865 = getelementptr inbounds float, float* %tmp864, i64 1
+ %tmp866 = getelementptr inbounds float, float* %tmp865, i64 1
+ %tmp867 = getelementptr inbounds float, float* %tmp866, i64 1
+ %tmp868 = getelementptr inbounds float, float* %tmp867, i64 1
+ %tmp869 = getelementptr inbounds float, float* %tmp868, i64 1
+ %tmp870 = getelementptr inbounds float, float* %tmp869, i64 1
+ %tmp871 = getelementptr inbounds float, float* %tmp870, i64 1
+ %tmp872 = getelementptr inbounds float, float* %tmp871, i64 1
+ %tmp873 = getelementptr inbounds float, float* %tmp872, i64 1
+ %tmp874 = getelementptr inbounds float, float* %tmp873, i64 1
+ %tmp875 = getelementptr inbounds float, float* %tmp874, i64 1
+ %tmp876 = getelementptr inbounds float, float* %tmp875, i64 1
+ %tmp877 = getelementptr inbounds float, float* %tmp876, i64 1
+ %tmp878 = getelementptr inbounds float, float* %tmp877, i64 1
+ %tmp879 = getelementptr inbounds float, float* %tmp878, i64 1
+ %tmp880 = getelementptr inbounds float, float* %tmp879, i64 1
+ %tmp881 = getelementptr inbounds float, float* %tmp880, i64 1
+ %tmp882 = getelementptr inbounds float, float* %tmp881, i64 1
+ %tmp883 = getelementptr inbounds float, float* %tmp882, i64 1
+ %tmp884 = getelementptr inbounds float, float* %tmp883, i64 1
+ %tmp885 = getelementptr inbounds float, float* %tmp884, i64 1
+ %tmp886 = getelementptr inbounds float, float* %tmp885, i64 1
+ %tmp887 = getelementptr inbounds float, float* %tmp886, i64 1
+ %tmp888 = getelementptr inbounds float, float* %tmp887, i64 1
+ %tmp889 = getelementptr inbounds float, float* %tmp888, i64 1
+ %tmp890 = getelementptr inbounds float, float* %tmp889, i64 1
+ %tmp891 = getelementptr inbounds float, float* %tmp890, i64 1
+ %tmp892 = getelementptr inbounds float, float* %tmp891, i64 1
+ %tmp893 = getelementptr inbounds float, float* %tmp892, i64 1
+ %tmp894 = getelementptr inbounds float, float* %tmp893, i64 1
+ %tmp895 = getelementptr inbounds float, float* %tmp894, i64 1
+ %tmp896 = getelementptr inbounds float, float* %tmp895, i64 1
+ %tmp897 = getelementptr inbounds float, float* %tmp896, i64 1
+ %tmp898 = getelementptr inbounds float, float* %tmp897, i64 1
+ %tmp899 = getelementptr inbounds float, float* %tmp898, i64 1
+ %tmp900 = getelementptr inbounds float, float* %tmp899, i64 1
+ %tmp901 = getelementptr inbounds float, float* %tmp900, i64 1
+ %tmp902 = getelementptr inbounds float, float* %tmp901, i64 1
+ %tmp903 = getelementptr inbounds float, float* %tmp902, i64 1
+ %tmp904 = getelementptr inbounds float, float* %tmp903, i64 1
+ %tmp905 = getelementptr inbounds float, float* %tmp904, i64 1
+ %tmp906 = getelementptr inbounds float, float* %tmp905, i64 1
+ %tmp907 = getelementptr inbounds float, float* %tmp906, i64 1
+ %tmp908 = getelementptr inbounds float, float* %tmp907, i64 1
+ %tmp909 = getelementptr inbounds float, float* %tmp908, i64 1
+ %tmp910 = getelementptr inbounds float, float* %tmp909, i64 1
+ %tmp911 = getelementptr inbounds float, float* %tmp910, i64 1
+ %tmp912 = getelementptr inbounds float, float* %tmp911, i64 1
+ %tmp913 = getelementptr inbounds float, float* %tmp912, i64 1
+ %tmp914 = getelementptr inbounds float, float* %tmp913, i64 1
+ %tmp915 = getelementptr inbounds float, float* %tmp914, i64 1
+ %tmp916 = getelementptr inbounds float, float* %tmp915, i64 1
+ %tmp917 = getelementptr inbounds float, float* %tmp916, i64 1
+ %tmp918 = getelementptr inbounds float, float* %tmp917, i64 1
+ %tmp919 = getelementptr inbounds float, float* %tmp918, i64 1
+ %tmp920 = getelementptr inbounds float, float* %tmp919, i64 1
+ %tmp921 = getelementptr inbounds float, float* %tmp920, i64 1
+ %tmp922 = getelementptr inbounds float, float* %tmp921, i64 1
+ %tmp923 = getelementptr inbounds float, float* %tmp922, i64 1
+ %tmp924 = getelementptr inbounds float, float* %tmp923, i64 1
+ %tmp925 = getelementptr inbounds float, float* %tmp924, i64 1
+ %tmp926 = getelementptr inbounds float, float* %tmp925, i64 1
+ %tmp927 = getelementptr inbounds float, float* %tmp926, i64 1
+ %tmp928 = getelementptr inbounds float, float* %tmp927, i64 1
+ %tmp929 = getelementptr inbounds float, float* %tmp928, i64 1
+ %tmp930 = getelementptr inbounds float, float* %tmp929, i64 1
+ %tmp931 = getelementptr inbounds float, float* %tmp930, i64 1
+ %tmp932 = getelementptr inbounds float, float* %tmp931, i64 1
+ %tmp933 = getelementptr inbounds float, float* %tmp932, i64 1
+ %tmp934 = getelementptr inbounds float, float* %tmp933, i64 1
+ %tmp935 = getelementptr inbounds float, float* %tmp934, i64 1
+ %tmp936 = getelementptr inbounds float, float* %tmp935, i64 1
+ %tmp937 = getelementptr inbounds float, float* %tmp936, i64 1
+ %tmp938 = getelementptr inbounds float, float* %tmp937, i64 1
+ %tmp939 = getelementptr inbounds float, float* %tmp938, i64 1
+ %tmp940 = getelementptr inbounds float, float* %tmp939, i64 1
+ %tmp941 = getelementptr inbounds float, float* %tmp940, i64 1
+ %tmp942 = getelementptr inbounds float, float* %tmp941, i64 1
+ %tmp943 = getelementptr inbounds float, float* %tmp942, i64 1
+ %tmp944 = getelementptr inbounds float, float* %tmp943, i64 1
+ %tmp945 = getelementptr inbounds float, float* %tmp944, i64 1
+ %tmp946 = getelementptr inbounds float, float* %tmp945, i64 1
+ %tmp947 = getelementptr inbounds float, float* %tmp946, i64 1
+ %tmp948 = getelementptr inbounds float, float* %tmp947, i64 1
+ %tmp949 = getelementptr inbounds float, float* %tmp948, i64 1
+ %tmp950 = getelementptr inbounds float, float* %tmp949, i64 1
+ %tmp951 = getelementptr inbounds float, float* %tmp950, i64 1
+ %tmp952 = getelementptr inbounds float, float* %tmp951, i64 1
+ %tmp953 = getelementptr inbounds float, float* %tmp952, i64 1
+ %tmp954 = getelementptr inbounds float, float* %tmp953, i64 1
+ %tmp955 = getelementptr inbounds float, float* %tmp954, i64 1
+ %tmp956 = getelementptr inbounds float, float* %tmp955, i64 1
+ %tmp957 = getelementptr inbounds float, float* %tmp956, i64 1
+ %tmp958 = getelementptr inbounds float, float* %tmp957, i64 1
+ %tmp959 = getelementptr inbounds float, float* %tmp958, i64 1
+ %tmp960 = getelementptr inbounds float, float* %tmp959, i64 1
+ %tmp961 = getelementptr inbounds float, float* %tmp960, i64 1
+ %tmp962 = getelementptr inbounds float, float* %tmp961, i64 1
+ %tmp963 = getelementptr inbounds float, float* %tmp962, i64 1
+ %tmp964 = getelementptr inbounds float, float* %tmp963, i64 1
+ %tmp965 = getelementptr inbounds float, float* %tmp964, i64 1
+ %tmp966 = getelementptr inbounds float, float* %tmp965, i64 1
+ %tmp967 = getelementptr inbounds float, float* %tmp966, i64 1
+ %tmp968 = getelementptr inbounds float, float* %tmp967, i64 1
+ %tmp969 = getelementptr inbounds float, float* %tmp968, i64 1
+ %tmp970 = getelementptr inbounds float, float* %tmp969, i64 1
+ %tmp971 = getelementptr inbounds float, float* %tmp970, i64 1
+ %tmp972 = getelementptr inbounds float, float* %tmp971, i64 1
+ %tmp973 = getelementptr inbounds float, float* %tmp972, i64 1
+ %tmp974 = getelementptr inbounds float, float* %tmp973, i64 1
+ %tmp975 = getelementptr inbounds float, float* %tmp974, i64 1
+ %tmp976 = getelementptr inbounds float, float* %tmp975, i64 1
+ %tmp977 = getelementptr inbounds float, float* %tmp976, i64 1
+ %tmp978 = getelementptr inbounds float, float* %tmp977, i64 1
+ %tmp979 = getelementptr inbounds float, float* %tmp978, i64 1
+ %tmp980 = getelementptr inbounds float, float* %tmp979, i64 1
+ %tmp981 = getelementptr inbounds float, float* %tmp980, i64 1
+ %tmp982 = getelementptr inbounds float, float* %tmp981, i64 1
+ %tmp983 = getelementptr inbounds float, float* %tmp982, i64 1
+ %tmp984 = getelementptr inbounds float, float* %tmp983, i64 1
+ %tmp985 = getelementptr inbounds float, float* %tmp984, i64 1
+ %tmp986 = getelementptr inbounds float, float* %tmp985, i64 1
+ %tmp987 = getelementptr inbounds float, float* %tmp986, i64 1
+ %tmp988 = getelementptr inbounds float, float* %tmp987, i64 1
+ %tmp989 = getelementptr inbounds float, float* %tmp988, i64 1
+ %tmp990 = getelementptr inbounds float, float* %tmp989, i64 1
+ %tmp991 = getelementptr inbounds float, float* %tmp990, i64 1
+ %tmp992 = getelementptr inbounds float, float* %tmp991, i64 1
+ %tmp993 = getelementptr inbounds float, float* %tmp992, i64 1
+ %tmp994 = getelementptr inbounds float, float* %tmp993, i64 1
+ %tmp995 = getelementptr inbounds float, float* %tmp994, i64 1
+ %tmp996 = getelementptr inbounds float, float* %tmp995, i64 1
+ %tmp997 = getelementptr inbounds float, float* %tmp996, i64 1
+ %tmp998 = getelementptr inbounds float, float* %tmp997, i64 1
+ %tmp999 = getelementptr inbounds float, float* %tmp998, i64 1
+ %tmp1000 = getelementptr inbounds float, float* %tmp999, i64 1
+ %tmp1001 = getelementptr inbounds float, float* %tmp1000, i64 1
+ %tmp1002 = getelementptr inbounds float, float* %tmp1001, i64 1
+ %tmp1003 = getelementptr inbounds float, float* %tmp1002, i64 1
+ %tmp1004 = getelementptr inbounds float, float* %tmp1003, i64 1
+ %tmp1005 = getelementptr inbounds float, float* %tmp1004, i64 1
+ %tmp1006 = getelementptr inbounds float, float* %tmp1005, i64 1
+ %tmp1007 = getelementptr inbounds float, float* %tmp1006, i64 1
+ %tmp1008 = getelementptr inbounds float, float* %tmp1007, i64 1
+ %tmp1009 = getelementptr inbounds float, float* %tmp1008, i64 1
+ %tmp1010 = getelementptr inbounds float, float* %tmp1009, i64 1
+ %tmp1011 = getelementptr inbounds float, float* %tmp1010, i64 1
+ %tmp1012 = getelementptr inbounds float, float* %tmp1011, i64 1
+ %tmp1013 = getelementptr inbounds float, float* %tmp1012, i64 1
+ %tmp1014 = getelementptr inbounds float, float* %tmp1013, i64 1
+ %tmp1015 = getelementptr inbounds float, float* %tmp1014, i64 1
+ %tmp1016 = getelementptr inbounds float, float* %tmp1015, i64 1
+ %tmp1017 = getelementptr inbounds float, float* %tmp1016, i64 1
+ %tmp1018 = getelementptr inbounds float, float* %tmp1017, i64 1
+ %tmp1019 = getelementptr inbounds float, float* %tmp1018, i64 1
+ %tmp1020 = getelementptr inbounds float, float* %tmp1019, i64 1
+ %tmp1021 = getelementptr inbounds float, float* %tmp1020, i64 1
+ %tmp1022 = getelementptr inbounds float, float* %tmp1021, i64 1
+ %tmp1023 = getelementptr inbounds float, float* %tmp1022, i64 1
+ %tmp1024 = getelementptr inbounds float, float* %tmp1023, i64 1
+ %tmp1025 = getelementptr inbounds float, float* %tmp1024, i64 1
+ %tmp1026 = getelementptr inbounds float, float* %tmp1025, i64 1
+ %tmp1027 = getelementptr inbounds float, float* %tmp1026, i64 1
+ %tmp1028 = getelementptr inbounds float, float* %tmp1027, i64 1
+ %tmp1029 = getelementptr inbounds float, float* %tmp1028, i64 1
+ %tmp1030 = getelementptr inbounds float, float* %tmp1029, i64 1
+ %tmp1031 = getelementptr inbounds float, float* %tmp1030, i64 1
+ %tmp1032 = getelementptr inbounds float, float* %tmp1031, i64 1
+ %tmp1033 = getelementptr inbounds float, float* %tmp1032, i64 1
+ %tmp1034 = getelementptr inbounds float, float* %tmp1033, i64 1
+ %tmp1035 = getelementptr inbounds float, float* %tmp1034, i64 1
+ %tmp1036 = getelementptr inbounds float, float* %tmp1035, i64 1
+ %tmp1037 = getelementptr inbounds float, float* %tmp1036, i64 1
+ %tmp1038 = getelementptr inbounds float, float* %tmp1037, i64 1
+ %tmp1039 = getelementptr inbounds float, float* %tmp1038, i64 1
+ %tmp1040 = getelementptr inbounds float, float* %tmp1039, i64 1
+ %tmp1041 = getelementptr inbounds float, float* %tmp1040, i64 1
+ %tmp1042 = getelementptr inbounds float, float* %tmp1041, i64 1
+ %tmp1043 = getelementptr inbounds float, float* %tmp1042, i64 1
+ %tmp1044 = getelementptr inbounds float, float* %tmp1043, i64 1
+ %tmp1045 = getelementptr inbounds float, float* %tmp1044, i64 1
+ %tmp1046 = getelementptr inbounds float, float* %tmp1045, i64 1
+ %tmp1047 = getelementptr inbounds float, float* %tmp1046, i64 1
+ %tmp1048 = getelementptr inbounds float, float* %tmp1047, i64 1
+ %tmp1049 = getelementptr inbounds float, float* %tmp1048, i64 1
+ %tmp1050 = getelementptr inbounds float, float* %tmp1049, i64 1
+ %tmp1051 = getelementptr inbounds float, float* %tmp1050, i64 1
+ %tmp1052 = getelementptr inbounds float, float* %tmp1051, i64 1
+ %tmp1053 = getelementptr inbounds float, float* %tmp1052, i64 1
+ %tmp1054 = getelementptr inbounds float, float* %tmp1053, i64 1
+ %tmp1055 = getelementptr inbounds float, float* %tmp1054, i64 1
+ %tmp1056 = getelementptr inbounds float, float* %tmp1055, i64 1
+ %tmp1057 = getelementptr inbounds float, float* %tmp1056, i64 1
+ %tmp1058 = getelementptr inbounds float, float* %tmp1057, i64 1
+ %tmp1059 = getelementptr inbounds float, float* %tmp1058, i64 1
+ %tmp1060 = getelementptr inbounds float, float* %tmp1059, i64 1
+ %tmp1061 = getelementptr inbounds float, float* %tmp1060, i64 1
+ %tmp1062 = getelementptr inbounds float, float* %tmp1061, i64 1
+ %tmp1063 = getelementptr inbounds float, float* %tmp1062, i64 1
+ %tmp1064 = getelementptr inbounds float, float* %tmp1063, i64 1
+ %tmp1065 = getelementptr inbounds float, float* %tmp1064, i64 1
+ %tmp1066 = getelementptr inbounds float, float* %tmp1065, i64 1
+ %tmp1067 = getelementptr inbounds float, float* %tmp1066, i64 1
+ %tmp1068 = getelementptr inbounds float, float* %tmp1067, i64 1
+ %tmp1069 = getelementptr inbounds float, float* %tmp1068, i64 1
+ %tmp1070 = getelementptr inbounds float, float* %tmp1069, i64 1
+ %tmp1071 = getelementptr inbounds float, float* %tmp1070, i64 1
+ %tmp1072 = getelementptr inbounds float, float* %tmp1071, i64 1
+ %tmp1073 = getelementptr inbounds float, float* %tmp1072, i64 1
+ %tmp1074 = getelementptr inbounds float, float* %tmp1073, i64 1
+ %tmp1075 = getelementptr inbounds float, float* %tmp1074, i64 1
+ %tmp1076 = getelementptr inbounds float, float* %tmp1075, i64 1
+ %tmp1077 = getelementptr inbounds float, float* %tmp1076, i64 1
+ %tmp1078 = getelementptr inbounds float, float* %tmp1077, i64 1
+ %tmp1079 = getelementptr inbounds float, float* %tmp1078, i64 1
+ %tmp1080 = getelementptr inbounds float, float* %tmp1079, i64 1
+ %tmp1081 = getelementptr inbounds float, float* %tmp1080, i64 1
+ %tmp1082 = getelementptr inbounds float, float* %tmp1081, i64 1
+ %tmp1083 = getelementptr inbounds float, float* %tmp1082, i64 1
+ %tmp1084 = getelementptr inbounds float, float* %tmp1083, i64 1
+ %tmp1085 = getelementptr inbounds float, float* %tmp1084, i64 1
+ %tmp1086 = getelementptr inbounds float, float* %tmp1085, i64 1
+ %tmp1087 = getelementptr inbounds float, float* %tmp1086, i64 1
+ %tmp1088 = getelementptr inbounds float, float* %tmp1087, i64 1
+ %tmp1089 = getelementptr inbounds float, float* %tmp1088, i64 1
+ %tmp1090 = getelementptr inbounds float, float* %tmp1089, i64 1
+ %tmp1091 = getelementptr inbounds float, float* %tmp1090, i64 1
+ %tmp1092 = getelementptr inbounds float, float* %tmp1091, i64 1
+ %tmp1093 = getelementptr inbounds float, float* %tmp1092, i64 1
+ %tmp1094 = getelementptr inbounds float, float* %tmp1093, i64 1
+ %tmp1095 = getelementptr inbounds float, float* %tmp1094, i64 1
+ %tmp1096 = getelementptr inbounds float, float* %tmp1095, i64 1
+ %tmp1097 = getelementptr inbounds float, float* %tmp1096, i64 1
+ %tmp1098 = getelementptr inbounds float, float* %tmp1097, i64 1
+ %tmp1099 = getelementptr inbounds float, float* %tmp1098, i64 1
+ %tmp1100 = getelementptr inbounds float, float* %tmp1099, i64 1
+ %tmp1101 = getelementptr inbounds float, float* %tmp1100, i64 1
+ %tmp1102 = getelementptr inbounds float, float* %tmp1101, i64 1
+ %tmp1103 = getelementptr inbounds float, float* %tmp1102, i64 1
+ %tmp1104 = getelementptr inbounds float, float* %tmp1103, i64 1
+ %tmp1105 = getelementptr inbounds float, float* %tmp1104, i64 1
+ %tmp1106 = getelementptr inbounds float, float* %tmp1105, i64 1
+ %tmp1107 = getelementptr inbounds float, float* %tmp1106, i64 1
+ %tmp1108 = getelementptr inbounds float, float* %tmp1107, i64 1
+ %tmp1109 = getelementptr inbounds float, float* %tmp1108, i64 1
+ %tmp1110 = getelementptr inbounds float, float* %tmp1109, i64 1
+ %tmp1111 = getelementptr inbounds float, float* %tmp1110, i64 1
+ %tmp1112 = getelementptr inbounds float, float* %tmp1111, i64 1
+ %tmp1113 = getelementptr inbounds float, float* %tmp1112, i64 1
+ %tmp1114 = getelementptr inbounds float, float* %tmp1113, i64 1
+ %tmp1115 = getelementptr inbounds float, float* %tmp1114, i64 1
+ %tmp1116 = getelementptr inbounds float, float* %tmp1115, i64 1
+ %tmp1117 = getelementptr inbounds float, float* %tmp1116, i64 1
+ %tmp1118 = getelementptr inbounds float, float* %tmp1117, i64 1
+ %tmp1119 = getelementptr inbounds float, float* %tmp1118, i64 1
+ %tmp1120 = getelementptr inbounds float, float* %tmp1119, i64 1
+ %tmp1121 = getelementptr inbounds float, float* %tmp1120, i64 1
+ %tmp1122 = getelementptr inbounds float, float* %tmp1121, i64 1
+ %tmp1123 = getelementptr inbounds float, float* %tmp1122, i64 1
+ %tmp1124 = getelementptr inbounds float, float* %tmp1123, i64 1
+ %tmp1125 = getelementptr inbounds float, float* %tmp1124, i64 1
+ %tmp1126 = getelementptr inbounds float, float* %tmp1125, i64 1
+ %tmp1127 = getelementptr inbounds float, float* %tmp1126, i64 1
+ %tmp1128 = getelementptr inbounds float, float* %tmp1127, i64 1
+ %tmp1129 = getelementptr inbounds float, float* %tmp1128, i64 1
+ %tmp1130 = getelementptr inbounds float, float* %tmp1129, i64 1
+ %tmp1131 = getelementptr inbounds float, float* %tmp1130, i64 1
+ %tmp1132 = getelementptr inbounds float, float* %tmp1131, i64 1
+ %tmp1133 = getelementptr inbounds float, float* %tmp1132, i64 1
+ %tmp1134 = getelementptr inbounds float, float* %tmp1133, i64 1
+ %tmp1135 = getelementptr inbounds float, float* %tmp1134, i64 1
+ %tmp1136 = getelementptr inbounds float, float* %tmp1135, i64 1
+ %tmp1137 = getelementptr inbounds float, float* %tmp1136, i64 1
+ %tmp1138 = getelementptr inbounds float, float* %tmp1137, i64 1
+ %tmp1139 = getelementptr inbounds float, float* %tmp1138, i64 1
+ %tmp1140 = getelementptr inbounds float, float* %tmp1139, i64 1
+ %tmp1141 = getelementptr inbounds float, float* %tmp1140, i64 1
+ %tmp1142 = getelementptr inbounds float, float* %tmp1141, i64 1
+ %tmp1143 = getelementptr inbounds float, float* %tmp1142, i64 1
+ %tmp1144 = getelementptr inbounds float, float* %tmp1143, i64 1
+ %tmp1145 = getelementptr inbounds float, float* %tmp1144, i64 1
+ %tmp1146 = getelementptr inbounds float, float* %tmp1145, i64 1
+ %tmp1147 = getelementptr inbounds float, float* %tmp1146, i64 1
+ %tmp1148 = getelementptr inbounds float, float* %tmp1147, i64 1
+ %tmp1149 = getelementptr inbounds float, float* %tmp1148, i64 1
+ %tmp1150 = getelementptr inbounds float, float* %tmp1149, i64 1
+ %tmp1151 = getelementptr inbounds float, float* %tmp1150, i64 1
+ %tmp1152 = getelementptr inbounds float, float* %tmp1151, i64 1
+ %tmp1153 = getelementptr inbounds float, float* %tmp1152, i64 1
+ %tmp1154 = getelementptr inbounds float, float* %tmp1153, i64 1
+ %tmp1155 = getelementptr inbounds float, float* %tmp1154, i64 1
+ %tmp1156 = getelementptr inbounds float, float* %tmp1155, i64 1
+ %tmp1157 = getelementptr inbounds float, float* %tmp1156, i64 1
+ %tmp1158 = getelementptr inbounds float, float* %tmp1157, i64 1
+ %tmp1159 = getelementptr inbounds float, float* %tmp1158, i64 1
+ %tmp1160 = getelementptr inbounds float, float* %tmp1159, i64 1
+ %tmp1161 = getelementptr inbounds float, float* %tmp1160, i64 1
+ %tmp1162 = getelementptr inbounds float, float* %tmp1161, i64 1
+ %tmp1163 = getelementptr inbounds float, float* %tmp1162, i64 1
+ %tmp1164 = getelementptr inbounds float, float* %tmp1163, i64 1
+ %tmp1165 = getelementptr inbounds float, float* %tmp1164, i64 1
+ %tmp1166 = getelementptr inbounds float, float* %tmp1165, i64 1
+ %tmp1167 = getelementptr inbounds float, float* %tmp1166, i64 1
+ %tmp1168 = getelementptr inbounds float, float* %tmp1167, i64 1
+ %tmp1169 = getelementptr inbounds float, float* %tmp1168, i64 1
+ %tmp1170 = getelementptr inbounds float, float* %tmp1169, i64 1
+ %tmp1171 = getelementptr inbounds float, float* %tmp1170, i64 1
+ %tmp1172 = getelementptr inbounds float, float* %tmp1171, i64 1
+ %tmp1173 = getelementptr inbounds float, float* %tmp1172, i64 1
+ %tmp1174 = getelementptr inbounds float, float* %tmp1173, i64 1
+ %tmp1175 = getelementptr inbounds float, float* %tmp1174, i64 1
+ %tmp1176 = getelementptr inbounds float, float* %tmp1175, i64 1
+ %tmp1177 = getelementptr inbounds float, float* %tmp1176, i64 1
+ %tmp1178 = getelementptr inbounds float, float* %tmp1177, i64 1
+ %tmp1179 = getelementptr inbounds float, float* %tmp1178, i64 1
+ %tmp1180 = getelementptr inbounds float, float* %tmp1179, i64 1
+ %tmp1181 = getelementptr inbounds float, float* %tmp1180, i64 1
+ %tmp1182 = getelementptr inbounds float, float* %tmp1181, i64 1
+ %tmp1183 = getelementptr inbounds float, float* %tmp1182, i64 1
+ %tmp1184 = getelementptr inbounds float, float* %tmp1183, i64 1
+ %tmp1185 = getelementptr inbounds float, float* %tmp1184, i64 1
+ %tmp1186 = getelementptr inbounds float, float* %tmp1185, i64 1
+ %tmp1187 = getelementptr inbounds float, float* %tmp1186, i64 1
+ %tmp1188 = getelementptr inbounds float, float* %tmp1187, i64 1
+ %tmp1189 = getelementptr inbounds float, float* %tmp1188, i64 1
+ %tmp1190 = getelementptr inbounds float, float* %tmp1189, i64 1
+ %tmp1191 = getelementptr inbounds float, float* %tmp1190, i64 1
+ %tmp1192 = getelementptr inbounds float, float* %tmp1191, i64 1
+ %tmp1193 = getelementptr inbounds float, float* %tmp1192, i64 1
+ %tmp1194 = getelementptr inbounds float, float* %tmp1193, i64 1
+ %tmp1195 = getelementptr inbounds float, float* %tmp1194, i64 1
+ %tmp1196 = getelementptr inbounds float, float* %tmp1195, i64 1
+ %tmp1197 = getelementptr inbounds float, float* %tmp1196, i64 1
+ %tmp1198 = getelementptr inbounds float, float* %tmp1197, i64 1
+ %tmp1199 = getelementptr inbounds float, float* %tmp1198, i64 1
+ %tmp1200 = getelementptr inbounds float, float* %tmp1199, i64 1
+ %tmp1201 = getelementptr inbounds float, float* %tmp1200, i64 1
+ %tmp1202 = getelementptr inbounds float, float* %tmp1201, i64 1
+ %tmp1203 = getelementptr inbounds float, float* %tmp1202, i64 1
+ %tmp1204 = getelementptr inbounds float, float* %tmp1203, i64 1
+ %tmp1205 = getelementptr inbounds float, float* %tmp1204, i64 1
+ %tmp1206 = getelementptr inbounds float, float* %tmp1205, i64 1
+ %tmp1207 = getelementptr inbounds float, float* %tmp1206, i64 1
+ %tmp1208 = getelementptr inbounds float, float* %tmp1207, i64 1
+ %tmp1209 = getelementptr inbounds float, float* %tmp1208, i64 1
+ %tmp1210 = getelementptr inbounds float, float* %tmp1209, i64 1
+ %tmp1211 = getelementptr inbounds float, float* %tmp1210, i64 1
+ %tmp1212 = getelementptr inbounds float, float* %tmp1211, i64 1
+ %tmp1213 = getelementptr inbounds float, float* %tmp1212, i64 1
+ %tmp1214 = getelementptr inbounds float, float* %tmp1213, i64 1
+ %tmp1215 = getelementptr inbounds float, float* %tmp1214, i64 1
+ %tmp1216 = getelementptr inbounds float, float* %tmp1215, i64 1
+ %tmp1217 = getelementptr inbounds float, float* %tmp1216, i64 1
+ %tmp1218 = getelementptr inbounds float, float* %tmp1217, i64 1
+ %tmp1219 = getelementptr inbounds float, float* %tmp1218, i64 1
+ %tmp1220 = getelementptr inbounds float, float* %tmp1219, i64 1
+ %tmp1221 = getelementptr inbounds float, float* %tmp1220, i64 1
+ %tmp1222 = getelementptr inbounds float, float* %tmp1221, i64 1
+ %tmp1223 = getelementptr inbounds float, float* %tmp1222, i64 1
+ %tmp1224 = getelementptr inbounds float, float* %tmp1223, i64 1
+ %tmp1225 = getelementptr inbounds float, float* %tmp1224, i64 1
+ %tmp1226 = getelementptr inbounds float, float* %tmp1225, i64 1
+ %tmp1227 = getelementptr inbounds float, float* %tmp1226, i64 1
+ %tmp1228 = getelementptr inbounds float, float* %tmp1227, i64 1
+ %tmp1229 = getelementptr inbounds float, float* %tmp1228, i64 1
+ %tmp1230 = getelementptr inbounds float, float* %tmp1229, i64 1
+ %tmp1231 = getelementptr inbounds float, float* %tmp1230, i64 1
+ %tmp1232 = getelementptr inbounds float, float* %tmp1231, i64 1
+ %tmp1233 = getelementptr inbounds float, float* %tmp1232, i64 1
+ %tmp1234 = getelementptr inbounds float, float* %tmp1233, i64 1
+ %tmp1235 = getelementptr inbounds float, float* %tmp1234, i64 1
+ %tmp1236 = getelementptr inbounds float, float* %tmp1235, i64 1
+ %tmp1237 = getelementptr inbounds float, float* %tmp1236, i64 1
+ %tmp1238 = getelementptr inbounds float, float* %tmp1237, i64 1
+ %tmp1239 = getelementptr inbounds float, float* %tmp1238, i64 1
+ %tmp1240 = getelementptr inbounds float, float* %tmp1239, i64 1
+ %tmp1241 = getelementptr inbounds float, float* %tmp1240, i64 1
+ %tmp1242 = getelementptr inbounds float, float* %tmp1241, i64 1
+ %tmp1243 = getelementptr inbounds float, float* %tmp1242, i64 1
+ %tmp1244 = getelementptr inbounds float, float* %tmp1243, i64 1
+ %tmp1245 = getelementptr inbounds float, float* %tmp1244, i64 1
+ %tmp1246 = getelementptr inbounds float, float* %tmp1245, i64 1
+ %tmp1247 = getelementptr inbounds float, float* %tmp1246, i64 1
+ %tmp1248 = getelementptr inbounds float, float* %tmp1247, i64 1
+ %tmp1249 = getelementptr inbounds float, float* %tmp1248, i64 1
+ %tmp1250 = getelementptr inbounds float, float* %tmp1249, i64 1
+ %tmp1251 = getelementptr inbounds float, float* %tmp1250, i64 1
+ %tmp1252 = getelementptr inbounds float, float* %tmp1251, i64 1
+ %tmp1253 = getelementptr inbounds float, float* %tmp1252, i64 1
+ %tmp1254 = getelementptr inbounds float, float* %tmp1253, i64 1
+ %tmp1255 = getelementptr inbounds float, float* %tmp1254, i64 1
+ %tmp1256 = getelementptr inbounds float, float* %tmp1255, i64 1
+ %tmp1257 = getelementptr inbounds float, float* %tmp1256, i64 1
+ %tmp1258 = getelementptr inbounds float, float* %tmp1257, i64 1
+ %tmp1259 = getelementptr inbounds float, float* %tmp1258, i64 1
+ %tmp1260 = getelementptr inbounds float, float* %tmp1259, i64 1
+ %tmp1261 = getelementptr inbounds float, float* %tmp1260, i64 1
+ %tmp1262 = getelementptr inbounds float, float* %tmp1261, i64 1
+ %tmp1263 = getelementptr inbounds float, float* %tmp1262, i64 1
+ %tmp1264 = getelementptr inbounds float, float* %tmp1263, i64 1
+ %tmp1265 = getelementptr inbounds float, float* %tmp1264, i64 1
+ %tmp1266 = getelementptr inbounds float, float* %tmp1265, i64 1
+ %tmp1267 = getelementptr inbounds float, float* %tmp1266, i64 1
+ %tmp1268 = getelementptr inbounds float, float* %tmp1267, i64 1
+ %tmp1269 = getelementptr inbounds float, float* %tmp1268, i64 1
+ %tmp1270 = getelementptr inbounds float, float* %tmp1269, i64 1
+ %tmp1271 = getelementptr inbounds float, float* %tmp1270, i64 1
+ %tmp1272 = getelementptr inbounds float, float* %tmp1271, i64 1
+ %tmp1273 = getelementptr inbounds float, float* %tmp1272, i64 1
+ %tmp1274 = getelementptr inbounds float, float* %tmp1273, i64 1
+ %tmp1275 = getelementptr inbounds float, float* %tmp1274, i64 1
+ %tmp1276 = getelementptr inbounds float, float* %tmp1275, i64 1
+ %tmp1277 = getelementptr inbounds float, float* %tmp1276, i64 1
+ %tmp1278 = getelementptr inbounds float, float* %tmp1277, i64 1
+ %tmp1279 = getelementptr inbounds float, float* %tmp1278, i64 1
+ %tmp1280 = getelementptr inbounds float, float* %tmp1279, i64 1
+ %tmp1281 = getelementptr inbounds float, float* %tmp1280, i64 1
+ %tmp1282 = getelementptr inbounds float, float* %tmp1281, i64 1
+ %tmp1283 = getelementptr inbounds float, float* %tmp1282, i64 1
+ %tmp1284 = getelementptr inbounds float, float* %tmp1283, i64 1
+ %tmp1285 = getelementptr inbounds float, float* %tmp1284, i64 1
+ %tmp1286 = getelementptr inbounds float, float* %tmp1285, i64 1
+ %tmp1287 = getelementptr inbounds float, float* %tmp1286, i64 1
+ %tmp1288 = getelementptr inbounds float, float* %tmp1287, i64 1
+ %tmp1289 = getelementptr inbounds float, float* %tmp1288, i64 1
+ %tmp1290 = getelementptr inbounds float, float* %tmp1289, i64 1
+ %tmp1291 = getelementptr inbounds float, float* %tmp1290, i64 1
+ %tmp1292 = getelementptr inbounds float, float* %tmp1291, i64 1
+ %tmp1293 = getelementptr inbounds float, float* %tmp1292, i64 1
+ %tmp1294 = getelementptr inbounds float, float* %tmp1293, i64 1
+ %tmp1295 = getelementptr inbounds float, float* %tmp1294, i64 1
+ %tmp1296 = getelementptr inbounds float, float* %tmp1295, i64 1
+ %tmp1297 = getelementptr inbounds float, float* %tmp1296, i64 1
+ %tmp1298 = getelementptr inbounds float, float* %tmp1297, i64 1
+ %tmp1299 = getelementptr inbounds float, float* %tmp1298, i64 1
+ %tmp1300 = getelementptr inbounds float, float* %tmp1299, i64 1
+ %tmp1301 = getelementptr inbounds float, float* %tmp1300, i64 1
+ %tmp1302 = getelementptr inbounds float, float* %tmp1301, i64 1
+ %tmp1303 = getelementptr inbounds float, float* %tmp1302, i64 1
+ %tmp1304 = getelementptr inbounds float, float* %tmp1303, i64 1
+ %tmp1305 = getelementptr inbounds float, float* %tmp1304, i64 1
+ %tmp1306 = getelementptr inbounds float, float* %tmp1305, i64 1
+ %tmp1307 = getelementptr inbounds float, float* %tmp1306, i64 1
+ %tmp1308 = getelementptr inbounds float, float* %tmp1307, i64 1
+ %tmp1309 = getelementptr inbounds float, float* %tmp1308, i64 1
+ %tmp1310 = getelementptr inbounds float, float* %tmp1309, i64 1
+ %tmp1311 = getelementptr inbounds float, float* %tmp1310, i64 1
+ %tmp1312 = getelementptr inbounds float, float* %tmp1311, i64 1
+ %tmp1313 = getelementptr inbounds float, float* %tmp1312, i64 1
+ %tmp1314 = getelementptr inbounds float, float* %tmp1313, i64 1
+ %tmp1315 = getelementptr inbounds float, float* %tmp1314, i64 1
+ %tmp1316 = getelementptr inbounds float, float* %tmp1315, i64 1
+ %tmp1317 = getelementptr inbounds float, float* %tmp1316, i64 1
+ %tmp1318 = getelementptr inbounds float, float* %tmp1317, i64 1
+ %tmp1319 = getelementptr inbounds float, float* %tmp1318, i64 1
+ %tmp1320 = getelementptr inbounds float, float* %tmp1319, i64 1
+ %tmp1321 = getelementptr inbounds float, float* %tmp1320, i64 1
+ %tmp1322 = getelementptr inbounds float, float* %tmp1321, i64 1
+ %tmp1323 = getelementptr inbounds float, float* %tmp1322, i64 1
+ %tmp1324 = getelementptr inbounds float, float* %tmp1323, i64 1
+ %tmp1325 = getelementptr inbounds float, float* %tmp1324, i64 1
+ %tmp1326 = getelementptr inbounds float, float* %tmp1325, i64 1
+ %tmp1327 = getelementptr inbounds float, float* %tmp1326, i64 1
+ %tmp1328 = getelementptr inbounds float, float* %tmp1327, i64 1
+ %tmp1329 = getelementptr inbounds float, float* %tmp1328, i64 1
+ %tmp1330 = getelementptr inbounds float, float* %tmp1329, i64 1
+ %tmp1331 = getelementptr inbounds float, float* %tmp1330, i64 1
+ %tmp1332 = getelementptr inbounds float, float* %tmp1331, i64 1
+ %tmp1333 = getelementptr inbounds float, float* %tmp1332, i64 1
+ %tmp1334 = getelementptr inbounds float, float* %tmp1333, i64 1
+ %tmp1335 = getelementptr inbounds float, float* %tmp1334, i64 1
+ %tmp1336 = getelementptr inbounds float, float* %tmp1335, i64 1
+ %tmp1337 = getelementptr inbounds float, float* %tmp1336, i64 1
+ %tmp1338 = getelementptr inbounds float, float* %tmp1337, i64 1
+ %tmp1339 = getelementptr inbounds float, float* %tmp1338, i64 1
+ %tmp1340 = getelementptr inbounds float, float* %tmp1339, i64 1
+ %tmp1341 = getelementptr inbounds float, float* %tmp1340, i64 1
+ %tmp1342 = getelementptr inbounds float, float* %tmp1341, i64 1
+ %tmp1343 = getelementptr inbounds float, float* %tmp1342, i64 1
+ %tmp1344 = getelementptr inbounds float, float* %tmp1343, i64 1
+ %tmp1345 = getelementptr inbounds float, float* %tmp1344, i64 1
+ %tmp1346 = getelementptr inbounds float, float* %tmp1345, i64 1
+ %tmp1347 = getelementptr inbounds float, float* %tmp1346, i64 1
+ %tmp1348 = getelementptr inbounds float, float* %tmp1347, i64 1
+ %tmp1349 = getelementptr inbounds float, float* %tmp1348, i64 1
+ %tmp1350 = getelementptr inbounds float, float* %tmp1349, i64 1
+ %tmp1351 = getelementptr inbounds float, float* %tmp1350, i64 1
+ %tmp1352 = getelementptr inbounds float, float* %tmp1351, i64 1
+ %tmp1353 = getelementptr inbounds float, float* %tmp1352, i64 1
+ %tmp1354 = getelementptr inbounds float, float* %tmp1353, i64 1
+ %tmp1355 = getelementptr inbounds float, float* %tmp1354, i64 1
+ %tmp1356 = getelementptr inbounds float, float* %tmp1355, i64 1
+ %tmp1357 = getelementptr inbounds float, float* %tmp1356, i64 1
+ %tmp1358 = getelementptr inbounds float, float* %tmp1357, i64 1
+ %tmp1359 = getelementptr inbounds float, float* %tmp1358, i64 1
+ %tmp1360 = getelementptr inbounds float, float* %tmp1359, i64 1
+ %tmp1361 = getelementptr inbounds float, float* %tmp1360, i64 1
+ %tmp1362 = getelementptr inbounds float, float* %tmp1361, i64 1
+ %tmp1363 = getelementptr inbounds float, float* %tmp1362, i64 1
+ %tmp1364 = getelementptr inbounds float, float* %tmp1363, i64 1
+ %tmp1365 = getelementptr inbounds float, float* %tmp1364, i64 1
+ %tmp1366 = getelementptr inbounds float, float* %tmp1365, i64 1
+ %tmp1367 = getelementptr inbounds float, float* %tmp1366, i64 1
+ %tmp1368 = getelementptr inbounds float, float* %tmp1367, i64 1
+ %tmp1369 = getelementptr inbounds float, float* %tmp1368, i64 1
+ %tmp1370 = getelementptr inbounds float, float* %tmp1369, i64 1
+ %tmp1371 = getelementptr inbounds float, float* %tmp1370, i64 1
+ %tmp1372 = getelementptr inbounds float, float* %tmp1371, i64 1
+ %tmp1373 = getelementptr inbounds float, float* %tmp1372, i64 1
+ %tmp1374 = getelementptr inbounds float, float* %tmp1373, i64 1
+ %tmp1375 = getelementptr inbounds float, float* %tmp1374, i64 1
+ %tmp1376 = getelementptr inbounds float, float* %tmp1375, i64 1
+ %tmp1377 = getelementptr inbounds float, float* %tmp1376, i64 1
+ %tmp1378 = getelementptr inbounds float, float* %tmp1377, i64 1
+ %tmp1379 = getelementptr inbounds float, float* %tmp1378, i64 1
+ %tmp1380 = getelementptr inbounds float, float* %tmp1379, i64 1
+ %tmp1381 = getelementptr inbounds float, float* %tmp1380, i64 1
+ %tmp1382 = getelementptr inbounds float, float* %tmp1381, i64 1
+ %tmp1383 = getelementptr inbounds float, float* %tmp1382, i64 1
+ %tmp1384 = getelementptr inbounds float, float* %tmp1383, i64 1
+ %tmp1385 = getelementptr inbounds float, float* %tmp1384, i64 1
+ %tmp1386 = getelementptr inbounds float, float* %tmp1385, i64 1
+ %tmp1387 = getelementptr inbounds float, float* %tmp1386, i64 1
+ %tmp1388 = getelementptr inbounds float, float* %tmp1387, i64 1
+ %tmp1389 = getelementptr inbounds float, float* %tmp1388, i64 1
+ %tmp1390 = getelementptr inbounds float, float* %tmp1389, i64 1
+ %tmp1391 = getelementptr inbounds float, float* %tmp1390, i64 1
+ %tmp1392 = getelementptr inbounds float, float* %tmp1391, i64 1
+ %tmp1393 = getelementptr inbounds float, float* %tmp1392, i64 1
+ %tmp1394 = getelementptr inbounds float, float* %tmp1393, i64 1
+ %tmp1395 = getelementptr inbounds float, float* %tmp1394, i64 1
+ %tmp1396 = getelementptr inbounds float, float* %tmp1395, i64 1
+ %tmp1397 = getelementptr inbounds float, float* %tmp1396, i64 1
+ %tmp1398 = getelementptr inbounds float, float* %tmp1397, i64 1
+ %tmp1399 = getelementptr inbounds float, float* %tmp1398, i64 1
+ %tmp1400 = getelementptr inbounds float, float* %tmp1399, i64 1
+ %tmp1401 = getelementptr inbounds float, float* %tmp1400, i64 1
+ %tmp1402 = getelementptr inbounds float, float* %tmp1401, i64 1
+ %tmp1403 = getelementptr inbounds float, float* %tmp1402, i64 1
+ %tmp1404 = getelementptr inbounds float, float* %tmp1403, i64 1
+ %tmp1405 = getelementptr inbounds float, float* %tmp1404, i64 1
+ %tmp1406 = getelementptr inbounds float, float* %tmp1405, i64 1
+ %tmp1407 = getelementptr inbounds float, float* %tmp1406, i64 1
+ %tmp1408 = getelementptr inbounds float, float* %tmp1407, i64 1
+ %tmp1409 = getelementptr inbounds float, float* %tmp1408, i64 1
+ %tmp1410 = getelementptr inbounds float, float* %tmp1409, i64 1
+ %tmp1411 = getelementptr inbounds float, float* %tmp1410, i64 1
+ %tmp1412 = getelementptr inbounds float, float* %tmp1411, i64 1
+ %tmp1413 = getelementptr inbounds float, float* %tmp1412, i64 1
+ %tmp1414 = getelementptr inbounds float, float* %tmp1413, i64 1
+ %tmp1415 = getelementptr inbounds float, float* %tmp1414, i64 1
+ %tmp1416 = getelementptr inbounds float, float* %tmp1415, i64 1
+ %tmp1417 = getelementptr inbounds float, float* %tmp1416, i64 1
+ %tmp1418 = getelementptr inbounds float, float* %tmp1417, i64 1
+ %tmp1419 = getelementptr inbounds float, float* %tmp1418, i64 1
+ %tmp1420 = getelementptr inbounds float, float* %tmp1419, i64 1
+ %tmp1421 = getelementptr inbounds float, float* %tmp1420, i64 1
+ %tmp1422 = getelementptr inbounds float, float* %tmp1421, i64 1
+ %tmp1423 = getelementptr inbounds float, float* %tmp1422, i64 1
+ %tmp1424 = getelementptr inbounds float, float* %tmp1423, i64 1
+ %tmp1425 = getelementptr inbounds float, float* %tmp1424, i64 1
+ %tmp1426 = getelementptr inbounds float, float* %tmp1425, i64 1
+ %tmp1427 = getelementptr inbounds float, float* %tmp1426, i64 1
+ %tmp1428 = getelementptr inbounds float, float* %tmp1427, i64 1
+ %tmp1429 = getelementptr inbounds float, float* %tmp1428, i64 1
+ %tmp1430 = getelementptr inbounds float, float* %tmp1429, i64 1
+ %tmp1431 = getelementptr inbounds float, float* %tmp1430, i64 1
+ %tmp1432 = getelementptr inbounds float, float* %tmp1431, i64 1
+ %tmp1433 = getelementptr inbounds float, float* %tmp1432, i64 1
+ %tmp1434 = getelementptr inbounds float, float* %tmp1433, i64 1
+ %tmp1435 = getelementptr inbounds float, float* %tmp1434, i64 1
+ %tmp1436 = getelementptr inbounds float, float* %tmp1435, i64 1
+ %tmp1437 = getelementptr inbounds float, float* %tmp1436, i64 1
+ %tmp1438 = getelementptr inbounds float, float* %tmp1437, i64 1
+ %tmp1439 = getelementptr inbounds float, float* %tmp1438, i64 1
+ %tmp1440 = getelementptr inbounds float, float* %tmp1439, i64 1
+ %tmp1441 = getelementptr inbounds float, float* %tmp1440, i64 1
+ %tmp1442 = getelementptr inbounds float, float* %tmp1441, i64 1
+ %tmp1443 = getelementptr inbounds float, float* %tmp1442, i64 1
+ %tmp1444 = getelementptr inbounds float, float* %tmp1443, i64 1
+ %tmp1445 = getelementptr inbounds float, float* %tmp1444, i64 1
+ %tmp1446 = getelementptr inbounds float, float* %tmp1445, i64 1
+ %tmp1447 = getelementptr inbounds float, float* %tmp1446, i64 1
+ %tmp1448 = getelementptr inbounds float, float* %tmp1447, i64 1
+ %tmp1449 = getelementptr inbounds float, float* %tmp1448, i64 1
+ %tmp1450 = getelementptr inbounds float, float* %tmp1449, i64 1
+ %tmp1451 = getelementptr inbounds float, float* %tmp1450, i64 1
+ %tmp1452 = getelementptr inbounds float, float* %tmp1451, i64 1
+ %tmp1453 = getelementptr inbounds float, float* %tmp1452, i64 1
+ %tmp1454 = getelementptr inbounds float, float* %tmp1453, i64 1
+ %tmp1455 = getelementptr inbounds float, float* %tmp1454, i64 1
+ %tmp1456 = getelementptr inbounds float, float* %tmp1455, i64 1
+ %tmp1457 = getelementptr inbounds float, float* %tmp1456, i64 1
+ %tmp1458 = getelementptr inbounds float, float* %tmp1457, i64 1
+ %tmp1459 = getelementptr inbounds float, float* %tmp1458, i64 1
+ %tmp1460 = getelementptr inbounds float, float* %tmp1459, i64 1
+ %tmp1461 = getelementptr inbounds float, float* %tmp1460, i64 1
+ %tmp1462 = getelementptr inbounds float, float* %tmp1461, i64 1
+ %tmp1463 = getelementptr inbounds float, float* %tmp1462, i64 1
+ %tmp1464 = getelementptr inbounds float, float* %tmp1463, i64 1
+ %tmp1465 = getelementptr inbounds float, float* %tmp1464, i64 1
+ %tmp1466 = getelementptr inbounds float, float* %tmp1465, i64 1
+ %tmp1467 = getelementptr inbounds float, float* %tmp1466, i64 1
+ %tmp1468 = getelementptr inbounds float, float* %tmp1467, i64 1
+ %tmp1469 = getelementptr inbounds float, float* %tmp1468, i64 1
+ %tmp1470 = getelementptr inbounds float, float* %tmp1469, i64 1
+ %tmp1471 = getelementptr inbounds float, float* %tmp1470, i64 1
+ %tmp1472 = getelementptr inbounds float, float* %tmp1471, i64 1
+ %tmp1473 = getelementptr inbounds float, float* %tmp1472, i64 1
+ %tmp1474 = getelementptr inbounds float, float* %tmp1473, i64 1
+ %tmp1475 = getelementptr inbounds float, float* %tmp1474, i64 1
+ %tmp1476 = getelementptr inbounds float, float* %tmp1475, i64 1
+ %tmp1477 = getelementptr inbounds float, float* %tmp1476, i64 1
+ %tmp1478 = getelementptr inbounds float, float* %tmp1477, i64 1
+ %tmp1479 = getelementptr inbounds float, float* %tmp1478, i64 1
+ %tmp1480 = getelementptr inbounds float, float* %tmp1479, i64 1
+ %tmp1481 = getelementptr inbounds float, float* %tmp1480, i64 1
+ %tmp1482 = getelementptr inbounds float, float* %tmp1481, i64 1
+ %tmp1483 = getelementptr inbounds float, float* %tmp1482, i64 1
+ %tmp1484 = getelementptr inbounds float, float* %tmp1483, i64 1
+ %tmp1485 = getelementptr inbounds float, float* %tmp1484, i64 1
+ %tmp1486 = getelementptr inbounds float, float* %tmp1485, i64 1
+ %tmp1487 = getelementptr inbounds float, float* %tmp1486, i64 1
+ %tmp1488 = getelementptr inbounds float, float* %tmp1487, i64 1
+ %tmp1489 = getelementptr inbounds float, float* %tmp1488, i64 1
+ %tmp1490 = getelementptr inbounds float, float* %tmp1489, i64 1
+ %tmp1491 = getelementptr inbounds float, float* %tmp1490, i64 1
+ %tmp1492 = getelementptr inbounds float, float* %tmp1491, i64 1
+ %tmp1493 = getelementptr inbounds float, float* %tmp1492, i64 1
+ %tmp1494 = getelementptr inbounds float, float* %tmp1493, i64 1
+ %tmp1495 = getelementptr inbounds float, float* %tmp1494, i64 1
+ %tmp1496 = getelementptr inbounds float, float* %tmp1495, i64 1
+ %tmp1497 = getelementptr inbounds float, float* %tmp1496, i64 1
+ %tmp1498 = getelementptr inbounds float, float* %tmp1497, i64 1
+ %tmp1499 = getelementptr inbounds float, float* %tmp1498, i64 1
+ %tmp1500 = getelementptr inbounds float, float* %tmp1499, i64 1
+ %tmp1501 = getelementptr inbounds float, float* %tmp1500, i64 1
+ %tmp1502 = getelementptr inbounds float, float* %tmp1501, i64 1
+ %tmp1503 = getelementptr inbounds float, float* %tmp1502, i64 1
+ %tmp1504 = getelementptr inbounds float, float* %tmp1503, i64 1
+ %tmp1505 = getelementptr inbounds float, float* %tmp1504, i64 1
+ %tmp1506 = getelementptr inbounds float, float* %tmp1505, i64 1
+ %tmp1507 = getelementptr inbounds float, float* %tmp1506, i64 1
+ %tmp1508 = getelementptr inbounds float, float* %tmp1507, i64 1
+ %tmp1509 = getelementptr inbounds float, float* %tmp1508, i64 1
+ %tmp1510 = getelementptr inbounds float, float* %tmp1509, i64 1
+ %tmp1511 = getelementptr inbounds float, float* %tmp1510, i64 1
+ %tmp1512 = getelementptr inbounds float, float* %tmp1511, i64 1
+ %tmp1513 = getelementptr inbounds float, float* %tmp1512, i64 1
+ %tmp1514 = getelementptr inbounds float, float* %tmp1513, i64 1
+ %tmp1515 = getelementptr inbounds float, float* %tmp1514, i64 1
+ %tmp1516 = getelementptr inbounds float, float* %tmp1515, i64 1
+ %tmp1517 = getelementptr inbounds float, float* %tmp1516, i64 1
+ %tmp1518 = getelementptr inbounds float, float* %tmp1517, i64 1
+ %tmp1519 = getelementptr inbounds float, float* %tmp1518, i64 1
+ %tmp1520 = getelementptr inbounds float, float* %tmp1519, i64 1
+ %tmp1521 = getelementptr inbounds float, float* %tmp1520, i64 1
+ %tmp1522 = getelementptr inbounds float, float* %tmp1521, i64 1
+ %tmp1523 = getelementptr inbounds float, float* %tmp1522, i64 1
+ %tmp1524 = getelementptr inbounds float, float* %tmp1523, i64 1
+ %tmp1525 = getelementptr inbounds float, float* %tmp1524, i64 1
+ %tmp1526 = getelementptr inbounds float, float* %tmp1525, i64 1
+ %tmp1527 = getelementptr inbounds float, float* %tmp1526, i64 1
+ %tmp1528 = getelementptr inbounds float, float* %tmp1527, i64 1
+ %tmp1529 = getelementptr inbounds float, float* %tmp1528, i64 1
+ %tmp1530 = getelementptr inbounds float, float* %tmp1529, i64 1
+ %tmp1531 = getelementptr inbounds float, float* %tmp1530, i64 1
+ %tmp1532 = getelementptr inbounds float, float* %tmp1531, i64 1
+ %tmp1533 = getelementptr inbounds float, float* %tmp1532, i64 1
+ %tmp1534 = getelementptr inbounds float, float* %tmp1533, i64 1
+ %tmp1535 = getelementptr inbounds float, float* %tmp1534, i64 1
+ %tmp1536 = getelementptr inbounds float, float* %tmp1535, i64 1
+ %tmp1537 = getelementptr inbounds float, float* %tmp1536, i64 1
+ %tmp1538 = getelementptr inbounds float, float* %tmp1537, i64 1
+ %tmp1539 = getelementptr inbounds float, float* %tmp1538, i64 1
+ %tmp1540 = getelementptr inbounds float, float* %tmp1539, i64 1
+ %tmp1541 = getelementptr inbounds float, float* %tmp1540, i64 1
+ %tmp1542 = getelementptr inbounds float, float* %tmp1541, i64 1
+ %tmp1543 = getelementptr inbounds float, float* %tmp1542, i64 1
+ %tmp1544 = getelementptr inbounds float, float* %tmp1543, i64 1
+ %tmp1545 = getelementptr inbounds float, float* %tmp1544, i64 1
+ %tmp1546 = getelementptr inbounds float, float* %tmp1545, i64 1
+ %tmp1547 = getelementptr inbounds float, float* %tmp1546, i64 1
+ %tmp1548 = getelementptr inbounds float, float* %tmp1547, i64 1
+ %tmp1549 = getelementptr inbounds float, float* %tmp1548, i64 1
+ %tmp1550 = getelementptr inbounds float, float* %tmp1549, i64 1
+ %tmp1551 = getelementptr inbounds float, float* %tmp1550, i64 1
+ %tmp1552 = getelementptr inbounds float, float* %tmp1551, i64 1
+ %tmp1553 = getelementptr inbounds float, float* %tmp1552, i64 1
+ %tmp1554 = getelementptr inbounds float, float* %tmp1553, i64 1
+ %tmp1555 = getelementptr inbounds float, float* %tmp1554, i64 1
+ %tmp1556 = getelementptr inbounds float, float* %tmp1555, i64 1
+ %tmp1557 = getelementptr inbounds float, float* %tmp1556, i64 1
+ %tmp1558 = getelementptr inbounds float, float* %tmp1557, i64 1
+ %tmp1559 = getelementptr inbounds float, float* %tmp1558, i64 1
+ %tmp1560 = getelementptr inbounds float, float* %tmp1559, i64 1
+ %tmp1561 = getelementptr inbounds float, float* %tmp1560, i64 1
+ %tmp1562 = getelementptr inbounds float, float* %tmp1561, i64 1
+ %tmp1563 = getelementptr inbounds float, float* %tmp1562, i64 1
+ %tmp1564 = getelementptr inbounds float, float* %tmp1563, i64 1
+ %tmp1565 = getelementptr inbounds float, float* %tmp1564, i64 1
+ %tmp1566 = getelementptr inbounds float, float* %tmp1565, i64 1
+ %tmp1567 = getelementptr inbounds float, float* %tmp1566, i64 1
+ %tmp1568 = getelementptr inbounds float, float* %tmp1567, i64 1
+ %tmp1569 = getelementptr inbounds float, float* %tmp1568, i64 1
+ %tmp1570 = getelementptr inbounds float, float* %tmp1569, i64 1
+ %tmp1571 = getelementptr inbounds float, float* %tmp1570, i64 1
+ %tmp1572 = getelementptr inbounds float, float* %tmp1571, i64 1
+ %tmp1573 = getelementptr inbounds float, float* %tmp1572, i64 1
+ %tmp1574 = getelementptr inbounds float, float* %tmp1573, i64 1
+ %tmp1575 = getelementptr inbounds float, float* %tmp1574, i64 1
+ %tmp1576 = getelementptr inbounds float, float* %tmp1575, i64 1
+ %tmp1577 = getelementptr inbounds float, float* %tmp1576, i64 1
+ %tmp1578 = getelementptr inbounds float, float* %tmp1577, i64 1
+ %tmp1579 = getelementptr inbounds float, float* %tmp1578, i64 1
+ %tmp1580 = getelementptr inbounds float, float* %tmp1579, i64 1
+ %tmp1581 = getelementptr inbounds float, float* %tmp1580, i64 1
+ %tmp1582 = getelementptr inbounds float, float* %tmp1581, i64 1
+ %tmp1583 = getelementptr inbounds float, float* %tmp1582, i64 1
+ %tmp1584 = getelementptr inbounds float, float* %tmp1583, i64 1
+ %tmp1585 = getelementptr inbounds float, float* %tmp1584, i64 1
+ %tmp1586 = getelementptr inbounds float, float* %tmp1585, i64 1
+ %tmp1587 = getelementptr inbounds float, float* %tmp1586, i64 1
+ %tmp1588 = getelementptr inbounds float, float* %tmp1587, i64 1
+ %tmp1589 = getelementptr inbounds float, float* %tmp1588, i64 1
+ %tmp1590 = getelementptr inbounds float, float* %tmp1589, i64 1
+ %tmp1591 = getelementptr inbounds float, float* %tmp1590, i64 1
+ %tmp1592 = getelementptr inbounds float, float* %tmp1591, i64 1
+ %tmp1593 = getelementptr inbounds float, float* %tmp1592, i64 1
+ %tmp1594 = getelementptr inbounds float, float* %tmp1593, i64 1
+ %tmp1595 = getelementptr inbounds float, float* %tmp1594, i64 1
+ %tmp1596 = getelementptr inbounds float, float* %tmp1595, i64 1
+ %tmp1597 = getelementptr inbounds float, float* %tmp1596, i64 1
+ %tmp1598 = getelementptr inbounds float, float* %tmp1597, i64 1
+ %tmp1599 = getelementptr inbounds float, float* %tmp1598, i64 1
+ %tmp1600 = getelementptr inbounds float, float* %tmp1599, i64 1
+ %tmp1601 = getelementptr inbounds float, float* %tmp1600, i64 1
+ %tmp1602 = getelementptr inbounds float, float* %tmp1601, i64 1
+ %tmp1603 = getelementptr inbounds float, float* %tmp1602, i64 1
+ %tmp1604 = getelementptr inbounds float, float* %tmp1603, i64 1
+ %tmp1605 = getelementptr inbounds float, float* %tmp1604, i64 1
+ %tmp1606 = getelementptr inbounds float, float* %tmp1605, i64 1
+ %tmp1607 = getelementptr inbounds float, float* %tmp1606, i64 1
+ %tmp1608 = getelementptr inbounds float, float* %tmp1607, i64 1
+ %tmp1609 = getelementptr inbounds float, float* %tmp1608, i64 1
+ %tmp1610 = getelementptr inbounds float, float* %tmp1609, i64 1
+ %tmp1611 = getelementptr inbounds float, float* %tmp1610, i64 1
+ %tmp1612 = getelementptr inbounds float, float* %tmp1611, i64 1
+ %tmp1613 = getelementptr inbounds float, float* %tmp1612, i64 1
+ %tmp1614 = getelementptr inbounds float, float* %tmp1613, i64 1
+ %tmp1615 = getelementptr inbounds float, float* %tmp1614, i64 1
+ %tmp1616 = getelementptr inbounds float, float* %tmp1615, i64 1
+ %tmp1617 = getelementptr inbounds float, float* %tmp1616, i64 1
+ %tmp1618 = getelementptr inbounds float, float* %tmp1617, i64 1
+ %tmp1619 = getelementptr inbounds float, float* %tmp1618, i64 1
+ %tmp1620 = getelementptr inbounds float, float* %tmp1619, i64 1
+ %tmp1621 = getelementptr inbounds float, float* %tmp1620, i64 1
+ %tmp1622 = getelementptr inbounds float, float* %tmp1621, i64 1
+ %tmp1623 = getelementptr inbounds float, float* %tmp1622, i64 1
+ %tmp1624 = getelementptr inbounds float, float* %tmp1623, i64 1
+ %tmp1625 = getelementptr inbounds float, float* %tmp1624, i64 1
+ %tmp1626 = getelementptr inbounds float, float* %tmp1625, i64 1
+ %tmp1627 = getelementptr inbounds float, float* %tmp1626, i64 1
+ %tmp1628 = getelementptr inbounds float, float* %tmp1627, i64 1
+ %tmp1629 = getelementptr inbounds float, float* %tmp1628, i64 1
+ %tmp1630 = getelementptr inbounds float, float* %tmp1629, i64 1
+ %tmp1631 = getelementptr inbounds float, float* %tmp1630, i64 1
+ %tmp1632 = getelementptr inbounds float, float* %tmp1631, i64 1
+ %tmp1633 = getelementptr inbounds float, float* %tmp1632, i64 1
+ %tmp1634 = getelementptr inbounds float, float* %tmp1633, i64 1
+ %tmp1635 = getelementptr inbounds float, float* %tmp1634, i64 1
+ %tmp1636 = getelementptr inbounds float, float* %tmp1635, i64 1
+ %tmp1637 = getelementptr inbounds float, float* %tmp1636, i64 1
+ %tmp1638 = getelementptr inbounds float, float* %tmp1637, i64 1
+ %tmp1639 = getelementptr inbounds float, float* %tmp1638, i64 1
+ %tmp1640 = getelementptr inbounds float, float* %tmp1639, i64 1
+ %tmp1641 = getelementptr inbounds float, float* %tmp1640, i64 1
+ %tmp1642 = getelementptr inbounds float, float* %tmp1641, i64 1
+ %tmp1643 = getelementptr inbounds float, float* %tmp1642, i64 1
+ %tmp1644 = getelementptr inbounds float, float* %tmp1643, i64 1
+ %tmp1645 = getelementptr inbounds float, float* %tmp1644, i64 1
+ %tmp1646 = getelementptr inbounds float, float* %tmp1645, i64 1
+ %tmp1647 = getelementptr inbounds float, float* %tmp1646, i64 1
+ %tmp1648 = getelementptr inbounds float, float* %tmp1647, i64 1
+ %tmp1649 = getelementptr inbounds float, float* %tmp1648, i64 1
+ %tmp1650 = getelementptr inbounds float, float* %tmp1649, i64 1
+ %tmp1651 = getelementptr inbounds float, float* %tmp1650, i64 1
+ %tmp1652 = getelementptr inbounds float, float* %tmp1651, i64 1
+ %tmp1653 = getelementptr inbounds float, float* %tmp1652, i64 1
+ %tmp1654 = getelementptr inbounds float, float* %tmp1653, i64 1
+ %tmp1655 = getelementptr inbounds float, float* %tmp1654, i64 1
+ %tmp1656 = getelementptr inbounds float, float* %tmp1655, i64 1
+ %tmp1657 = getelementptr inbounds float, float* %tmp1656, i64 1
+ %tmp1658 = getelementptr inbounds float, float* %tmp1657, i64 1
+ %tmp1659 = getelementptr inbounds float, float* %tmp1658, i64 1
+ %tmp1660 = getelementptr inbounds float, float* %tmp1659, i64 1
+ %tmp1661 = getelementptr inbounds float, float* %tmp1660, i64 1
+ %tmp1662 = getelementptr inbounds float, float* %tmp1661, i64 1
+ %tmp1663 = getelementptr inbounds float, float* %tmp1662, i64 1
+ %tmp1664 = getelementptr inbounds float, float* %tmp1663, i64 1
+ %tmp1665 = getelementptr inbounds float, float* %tmp1664, i64 1
+ %tmp1666 = getelementptr inbounds float, float* %tmp1665, i64 1
+ %tmp1667 = getelementptr inbounds float, float* %tmp1666, i64 1
+ %tmp1668 = getelementptr inbounds float, float* %tmp1667, i64 1
+ %tmp1669 = getelementptr inbounds float, float* %tmp1668, i64 1
+ %tmp1670 = getelementptr inbounds float, float* %tmp1669, i64 1
+ %tmp1671 = getelementptr inbounds float, float* %tmp1670, i64 1
+ %tmp1672 = getelementptr inbounds float, float* %tmp1671, i64 1
+ %tmp1673 = getelementptr inbounds float, float* %tmp1672, i64 1
+ %tmp1674 = getelementptr inbounds float, float* %tmp1673, i64 1
+ %tmp1675 = getelementptr inbounds float, float* %tmp1674, i64 1
+ %tmp1676 = getelementptr inbounds float, float* %tmp1675, i64 1
+ %tmp1677 = getelementptr inbounds float, float* %tmp1676, i64 1
+ %tmp1678 = getelementptr inbounds float, float* %tmp1677, i64 1
+ %tmp1679 = getelementptr inbounds float, float* %tmp1678, i64 1
+ %tmp1680 = getelementptr inbounds float, float* %tmp1679, i64 1
+ %tmp1681 = getelementptr inbounds float, float* %tmp1680, i64 1
+ %tmp1682 = getelementptr inbounds float, float* %tmp1681, i64 1
+ %tmp1683 = getelementptr inbounds float, float* %tmp1682, i64 1
+ %tmp1684 = getelementptr inbounds float, float* %tmp1683, i64 1
+ %tmp1685 = getelementptr inbounds float, float* %tmp1684, i64 1
+ %tmp1686 = getelementptr inbounds float, float* %tmp1685, i64 1
+ %tmp1687 = getelementptr inbounds float, float* %tmp1686, i64 1
+ %tmp1688 = getelementptr inbounds float, float* %tmp1687, i64 1
+ %tmp1689 = getelementptr inbounds float, float* %tmp1688, i64 1
+ %tmp1690 = getelementptr inbounds float, float* %tmp1689, i64 1
+ %tmp1691 = getelementptr inbounds float, float* %tmp1690, i64 1
+ %tmp1692 = getelementptr inbounds float, float* %tmp1691, i64 1
+ %tmp1693 = getelementptr inbounds float, float* %tmp1692, i64 1
+ %tmp1694 = getelementptr inbounds float, float* %tmp1693, i64 1
+ %tmp1695 = getelementptr inbounds float, float* %tmp1694, i64 1
+ %tmp1696 = getelementptr inbounds float, float* %tmp1695, i64 1
+ %tmp1697 = getelementptr inbounds float, float* %tmp1696, i64 1
+ %tmp1698 = getelementptr inbounds float, float* %tmp1697, i64 1
+ %tmp1699 = getelementptr inbounds float, float* %tmp1698, i64 1
+ %tmp1700 = getelementptr inbounds float, float* %tmp1699, i64 1
+ %tmp1701 = getelementptr inbounds float, float* %tmp1700, i64 1
+ %tmp1702 = getelementptr inbounds float, float* %tmp1701, i64 1
+ %tmp1703 = getelementptr inbounds float, float* %tmp1702, i64 1
+ %tmp1704 = getelementptr inbounds float, float* %tmp1703, i64 1
+ %tmp1705 = getelementptr inbounds float, float* %tmp1704, i64 1
+ %tmp1706 = getelementptr inbounds float, float* %tmp1705, i64 1
+ %tmp1707 = getelementptr inbounds float, float* %tmp1706, i64 1
+ %tmp1708 = getelementptr inbounds float, float* %tmp1707, i64 1
+ %tmp1709 = getelementptr inbounds float, float* %tmp1708, i64 1
+ %tmp1710 = getelementptr inbounds float, float* %tmp1709, i64 1
+ %tmp1711 = getelementptr inbounds float, float* %tmp1710, i64 1
+ %tmp1712 = getelementptr inbounds float, float* %tmp1711, i64 1
+ %tmp1713 = getelementptr inbounds float, float* %tmp1712, i64 1
+ %tmp1714 = getelementptr inbounds float, float* %tmp1713, i64 1
+ %tmp1715 = getelementptr inbounds float, float* %tmp1714, i64 1
+ %tmp1716 = getelementptr inbounds float, float* %tmp1715, i64 1
+ %tmp1717 = getelementptr inbounds float, float* %tmp1716, i64 1
+ %tmp1718 = getelementptr inbounds float, float* %tmp1717, i64 1
+ %tmp1719 = getelementptr inbounds float, float* %tmp1718, i64 1
+ %tmp1720 = getelementptr inbounds float, float* %tmp1719, i64 1
+ %tmp1721 = getelementptr inbounds float, float* %tmp1720, i64 1
+ %tmp1722 = getelementptr inbounds float, float* %tmp1721, i64 1
+ %tmp1723 = getelementptr inbounds float, float* %tmp1722, i64 1
+ %tmp1724 = getelementptr inbounds float, float* %tmp1723, i64 1
+ %tmp1725 = getelementptr inbounds float, float* %tmp1724, i64 1
+ %tmp1726 = getelementptr inbounds float, float* %tmp1725, i64 1
+ %tmp1727 = getelementptr inbounds float, float* %tmp1726, i64 1
+ %tmp1728 = getelementptr inbounds float, float* %tmp1727, i64 1
+ %tmp1729 = getelementptr inbounds float, float* %tmp1728, i64 1
+ %tmp1730 = getelementptr inbounds float, float* %tmp1729, i64 1
+ %tmp1731 = getelementptr inbounds float, float* %tmp1730, i64 1
+ %tmp1732 = getelementptr inbounds float, float* %tmp1731, i64 1
+ %tmp1733 = getelementptr inbounds float, float* %tmp1732, i64 1
+ %tmp1734 = getelementptr inbounds float, float* %tmp1733, i64 1
+ %tmp1735 = getelementptr inbounds float, float* %tmp1734, i64 1
+ %tmp1736 = getelementptr inbounds float, float* %tmp1735, i64 1
+ %tmp1737 = getelementptr inbounds float, float* %tmp1736, i64 1
+ %tmp1738 = getelementptr inbounds float, float* %tmp1737, i64 1
+ %tmp1739 = getelementptr inbounds float, float* %tmp1738, i64 1
+ %tmp1740 = getelementptr inbounds float, float* %tmp1739, i64 1
+ %tmp1741 = getelementptr inbounds float, float* %tmp1740, i64 1
+ %tmp1742 = getelementptr inbounds float, float* %tmp1741, i64 1
+ %tmp1743 = getelementptr inbounds float, float* %tmp1742, i64 1
+ %tmp1744 = getelementptr inbounds float, float* %tmp1743, i64 1
+ %tmp1745 = getelementptr inbounds float, float* %tmp1744, i64 1
+ %tmp1746 = getelementptr inbounds float, float* %tmp1745, i64 1
+ %tmp1747 = getelementptr inbounds float, float* %tmp1746, i64 1
+ %tmp1748 = getelementptr inbounds float, float* %tmp1747, i64 1
+ %tmp1749 = getelementptr inbounds float, float* %tmp1748, i64 1
+ %tmp1750 = getelementptr inbounds float, float* %tmp1749, i64 1
+ %tmp1751 = getelementptr inbounds float, float* %tmp1750, i64 1
+ %tmp1752 = getelementptr inbounds float, float* %tmp1751, i64 1
+ %tmp1753 = getelementptr inbounds float, float* %tmp1752, i64 1
+ %tmp1754 = getelementptr inbounds float, float* %tmp1753, i64 1
+ %tmp1755 = getelementptr inbounds float, float* %tmp1754, i64 1
+ %tmp1756 = getelementptr inbounds float, float* %tmp1755, i64 1
+ %tmp1757 = getelementptr inbounds float, float* %tmp1756, i64 1
+ %tmp1758 = getelementptr inbounds float, float* %tmp1757, i64 1
+ %tmp1759 = getelementptr inbounds float, float* %tmp1758, i64 1
+ %tmp1760 = getelementptr inbounds float, float* %tmp1759, i64 1
+ %tmp1761 = getelementptr inbounds float, float* %tmp1760, i64 1
+ %tmp1762 = getelementptr inbounds float, float* %tmp1761, i64 1
+ %tmp1763 = getelementptr inbounds float, float* %tmp1762, i64 1
+ %tmp1764 = getelementptr inbounds float, float* %tmp1763, i64 1
+ %tmp1765 = getelementptr inbounds float, float* %tmp1764, i64 1
+ %tmp1766 = getelementptr inbounds float, float* %tmp1765, i64 1
+ %tmp1767 = getelementptr inbounds float, float* %tmp1766, i64 1
+ %tmp1768 = getelementptr inbounds float, float* %tmp1767, i64 1
+ %tmp1769 = getelementptr inbounds float, float* %tmp1768, i64 1
+ %tmp1770 = getelementptr inbounds float, float* %tmp1769, i64 1
+ %tmp1771 = getelementptr inbounds float, float* %tmp1770, i64 1
+ %tmp1772 = getelementptr inbounds float, float* %tmp1771, i64 1
+ %tmp1773 = getelementptr inbounds float, float* %tmp1772, i64 1
+ %tmp1774 = getelementptr inbounds float, float* %tmp1773, i64 1
+ %tmp1775 = getelementptr inbounds float, float* %tmp1774, i64 1
+ %tmp1776 = getelementptr inbounds float, float* %tmp1775, i64 1
+ %tmp1777 = getelementptr inbounds float, float* %tmp1776, i64 1
+ %tmp1778 = getelementptr inbounds float, float* %tmp1777, i64 1
+ %tmp1779 = getelementptr inbounds float, float* %tmp1778, i64 1
+ %tmp1780 = getelementptr inbounds float, float* %tmp1779, i64 1
+ %tmp1781 = getelementptr inbounds float, float* %tmp1780, i64 1
+ %tmp1782 = getelementptr inbounds float, float* %tmp1781, i64 1
+ %tmp1783 = getelementptr inbounds float, float* %tmp1782, i64 1
+ %tmp1784 = getelementptr inbounds float, float* %tmp1783, i64 1
+ %tmp1785 = getelementptr inbounds float, float* %tmp1784, i64 1
+ %tmp1786 = getelementptr inbounds float, float* %tmp1785, i64 1
+ %tmp1787 = getelementptr inbounds float, float* %tmp1786, i64 1
+ %tmp1788 = getelementptr inbounds float, float* %tmp1787, i64 1
+ %tmp1789 = getelementptr inbounds float, float* %tmp1788, i64 1
+ %tmp1790 = getelementptr inbounds float, float* %tmp1789, i64 1
+ %tmp1791 = getelementptr inbounds float, float* %tmp1790, i64 1
+ %tmp1792 = getelementptr inbounds float, float* %tmp1791, i64 1
+ %tmp1793 = getelementptr inbounds float, float* %tmp1792, i64 1
+ %tmp1794 = getelementptr inbounds float, float* %tmp1793, i64 1
+ %tmp1795 = getelementptr inbounds float, float* %tmp1794, i64 1
+ %tmp1796 = getelementptr inbounds float, float* %tmp1795, i64 1
+ %tmp1797 = getelementptr inbounds float, float* %tmp1796, i64 1
+ %tmp1798 = getelementptr inbounds float, float* %tmp1797, i64 1
+ %tmp1799 = getelementptr inbounds float, float* %tmp1798, i64 1
+ %tmp1800 = getelementptr inbounds float, float* %tmp1799, i64 1
+ %tmp1801 = getelementptr inbounds float, float* %tmp1800, i64 1
+ %tmp1802 = getelementptr inbounds float, float* %tmp1801, i64 1
+ %tmp1803 = getelementptr inbounds float, float* %tmp1802, i64 1
+ %tmp1804 = getelementptr inbounds float, float* %tmp1803, i64 1
+ %tmp1805 = getelementptr inbounds float, float* %tmp1804, i64 1
+ %tmp1806 = getelementptr inbounds float, float* %tmp1805, i64 1
+ %tmp1807 = getelementptr inbounds float, float* %tmp1806, i64 1
+ %tmp1808 = getelementptr inbounds float, float* %tmp1807, i64 1
+ %tmp1809 = getelementptr inbounds float, float* %tmp1808, i64 1
+ %tmp1810 = getelementptr inbounds float, float* %tmp1809, i64 1
+ %tmp1811 = getelementptr inbounds float, float* %tmp1810, i64 1
+ %tmp1812 = getelementptr inbounds float, float* %tmp1811, i64 1
+ %tmp1813 = getelementptr inbounds float, float* %tmp1812, i64 1
+ %tmp1814 = getelementptr inbounds float, float* %tmp1813, i64 1
+ %tmp1815 = getelementptr inbounds float, float* %tmp1814, i64 1
+ %tmp1816 = getelementptr inbounds float, float* %tmp1815, i64 1
+ %tmp1817 = getelementptr inbounds float, float* %tmp1816, i64 1
+ %tmp1818 = getelementptr inbounds float, float* %tmp1817, i64 1
+ %tmp1819 = getelementptr inbounds float, float* %tmp1818, i64 1
+ %tmp1820 = getelementptr inbounds float, float* %tmp1819, i64 1
+ %tmp1821 = getelementptr inbounds float, float* %tmp1820, i64 1
+ %tmp1822 = getelementptr inbounds float, float* %tmp1821, i64 1
+ %tmp1823 = getelementptr inbounds float, float* %tmp1822, i64 1
+ %tmp1824 = getelementptr inbounds float, float* %tmp1823, i64 1
+ %tmp1825 = getelementptr inbounds float, float* %tmp1824, i64 1
+ %tmp1826 = getelementptr inbounds float, float* %tmp1825, i64 1
+ %tmp1827 = getelementptr inbounds float, float* %tmp1826, i64 1
+ %tmp1828 = getelementptr inbounds float, float* %tmp1827, i64 1
+ %tmp1829 = getelementptr inbounds float, float* %tmp1828, i64 1
+ %tmp1830 = getelementptr inbounds float, float* %tmp1829, i64 1
+ %tmp1831 = getelementptr inbounds float, float* %tmp1830, i64 1
+ %tmp1832 = getelementptr inbounds float, float* %tmp1831, i64 1
+ %tmp1833 = getelementptr inbounds float, float* %tmp1832, i64 1
+ %tmp1834 = getelementptr inbounds float, float* %tmp1833, i64 1
+ %tmp1835 = getelementptr inbounds float, float* %tmp1834, i64 1
+ %tmp1836 = getelementptr inbounds float, float* %tmp1835, i64 1
+ %tmp1837 = getelementptr inbounds float, float* %tmp1836, i64 1
+ %tmp1838 = getelementptr inbounds float, float* %tmp1837, i64 1
+ %tmp1839 = getelementptr inbounds float, float* %tmp1838, i64 1
+ %tmp1840 = getelementptr inbounds float, float* %tmp1839, i64 1
+ %tmp1841 = getelementptr inbounds float, float* %tmp1840, i64 1
+ %tmp1842 = getelementptr inbounds float, float* %tmp1841, i64 1
+ %tmp1843 = getelementptr inbounds float, float* %tmp1842, i64 1
+ %tmp1844 = getelementptr inbounds float, float* %tmp1843, i64 1
+ %tmp1845 = getelementptr inbounds float, float* %tmp1844, i64 1
+ %tmp1846 = getelementptr inbounds float, float* %tmp1845, i64 1
+ %tmp1847 = getelementptr inbounds float, float* %tmp1846, i64 1
+ %tmp1848 = getelementptr inbounds float, float* %tmp1847, i64 1
+ %tmp1849 = getelementptr inbounds float, float* %tmp1848, i64 1
+ %tmp1850 = getelementptr inbounds float, float* %tmp1849, i64 1
+ %tmp1851 = getelementptr inbounds float, float* %tmp1850, i64 1
+ %tmp1852 = getelementptr inbounds float, float* %tmp1851, i64 1
+ %tmp1853 = getelementptr inbounds float, float* %tmp1852, i64 1
+ %tmp1854 = getelementptr inbounds float, float* %tmp1853, i64 1
+ %tmp1855 = getelementptr inbounds float, float* %tmp1854, i64 1
+ %tmp1856 = getelementptr inbounds float, float* %tmp1855, i64 1
+ %tmp1857 = getelementptr inbounds float, float* %tmp1856, i64 1
+ %tmp1858 = getelementptr inbounds float, float* %tmp1857, i64 1
+ %tmp1859 = getelementptr inbounds float, float* %tmp1858, i64 1
+ %tmp1860 = getelementptr inbounds float, float* %tmp1859, i64 1
+ %tmp1861 = getelementptr inbounds float, float* %tmp1860, i64 1
+ %tmp1862 = getelementptr inbounds float, float* %tmp1861, i64 1
+ %tmp1863 = getelementptr inbounds float, float* %tmp1862, i64 1
+ %tmp1864 = getelementptr inbounds float, float* %tmp1863, i64 1
+ %tmp1865 = getelementptr inbounds float, float* %tmp1864, i64 1
+ %tmp1866 = getelementptr inbounds float, float* %tmp1865, i64 1
+ %tmp1867 = getelementptr inbounds float, float* %tmp1866, i64 1
+ %tmp1868 = getelementptr inbounds float, float* %tmp1867, i64 1
+ %tmp1869 = getelementptr inbounds float, float* %tmp1868, i64 1
+ %tmp1870 = getelementptr inbounds float, float* %tmp1869, i64 1
+ %tmp1871 = getelementptr inbounds float, float* %tmp1870, i64 1
+ %tmp1872 = getelementptr inbounds float, float* %tmp1871, i64 1
+ %tmp1873 = getelementptr inbounds float, float* %tmp1872, i64 1
+ %tmp1874 = getelementptr inbounds float, float* %tmp1873, i64 1
+ %tmp1875 = getelementptr inbounds float, float* %tmp1874, i64 1
+ %tmp1876 = getelementptr inbounds float, float* %tmp1875, i64 1
+ %tmp1877 = getelementptr inbounds float, float* %tmp1876, i64 1
+ %tmp1878 = getelementptr inbounds float, float* %tmp1877, i64 1
+ %tmp1879 = getelementptr inbounds float, float* %tmp1878, i64 1
+ %tmp1880 = getelementptr inbounds float, float* %tmp1879, i64 1
+ %tmp1881 = getelementptr inbounds float, float* %tmp1880, i64 1
+ %tmp1882 = getelementptr inbounds float, float* %tmp1881, i64 1
+ %tmp1883 = getelementptr inbounds float, float* %tmp1882, i64 1
+ %tmp1884 = getelementptr inbounds float, float* %tmp1883, i64 1
+ %tmp1885 = getelementptr inbounds float, float* %tmp1884, i64 1
+ %tmp1886 = getelementptr inbounds float, float* %tmp1885, i64 1
+ %tmp1887 = getelementptr inbounds float, float* %tmp1886, i64 1
+ %tmp1888 = getelementptr inbounds float, float* %tmp1887, i64 1
+ %tmp1889 = getelementptr inbounds float, float* %tmp1888, i64 1
+ %tmp1890 = getelementptr inbounds float, float* %tmp1889, i64 1
+ %tmp1891 = getelementptr inbounds float, float* %tmp1890, i64 1
+ %tmp1892 = getelementptr inbounds float, float* %tmp1891, i64 1
+ %tmp1893 = getelementptr inbounds float, float* %tmp1892, i64 1
+ %tmp1894 = getelementptr inbounds float, float* %tmp1893, i64 1
+ %tmp1895 = getelementptr inbounds float, float* %tmp1894, i64 1
+ %tmp1896 = getelementptr inbounds float, float* %tmp1895, i64 1
+ %tmp1897 = getelementptr inbounds float, float* %tmp1896, i64 1
+ %tmp1898 = getelementptr inbounds float, float* %tmp1897, i64 1
+ %tmp1899 = getelementptr inbounds float, float* %tmp1898, i64 1
+ %tmp1900 = getelementptr inbounds float, float* %tmp1899, i64 1
+ %tmp1901 = getelementptr inbounds float, float* %tmp1900, i64 1
+ %tmp1902 = getelementptr inbounds float, float* %tmp1901, i64 1
+ %tmp1903 = getelementptr inbounds float, float* %tmp1902, i64 1
+ %tmp1904 = getelementptr inbounds float, float* %tmp1903, i64 1
+ %tmp1905 = getelementptr inbounds float, float* %tmp1904, i64 1
+ %tmp1906 = getelementptr inbounds float, float* %tmp1905, i64 1
+ %tmp1907 = getelementptr inbounds float, float* %tmp1906, i64 1
+ %tmp1908 = getelementptr inbounds float, float* %tmp1907, i64 1
+ %tmp1909 = getelementptr inbounds float, float* %tmp1908, i64 1
+ %tmp1910 = getelementptr inbounds float, float* %tmp1909, i64 1
+ %tmp1911 = getelementptr inbounds float, float* %tmp1910, i64 1
+ %tmp1912 = getelementptr inbounds float, float* %tmp1911, i64 1
+ %tmp1913 = getelementptr inbounds float, float* %tmp1912, i64 1
+ %tmp1914 = getelementptr inbounds float, float* %tmp1913, i64 1
+ %tmp1915 = getelementptr inbounds float, float* %tmp1914, i64 1
+ %tmp1916 = getelementptr inbounds float, float* %tmp1915, i64 1
+ %tmp1917 = getelementptr inbounds float, float* %tmp1916, i64 1
+ %tmp1918 = getelementptr inbounds float, float* %tmp1917, i64 1
+ %tmp1919 = getelementptr inbounds float, float* %tmp1918, i64 1
+ %tmp1920 = getelementptr inbounds float, float* %tmp1919, i64 1
+ %tmp1921 = getelementptr inbounds float, float* %tmp1920, i64 1
+ %tmp1922 = getelementptr inbounds float, float* %tmp1921, i64 1
+ %tmp1923 = getelementptr inbounds float, float* %tmp1922, i64 1
+ %tmp1924 = getelementptr inbounds float, float* %tmp1923, i64 1
+ %tmp1925 = getelementptr inbounds float, float* %tmp1924, i64 1
+ %tmp1926 = getelementptr inbounds float, float* %tmp1925, i64 1
+ %tmp1927 = getelementptr inbounds float, float* %tmp1926, i64 1
+ %tmp1928 = getelementptr inbounds float, float* %tmp1927, i64 1
+ %tmp1929 = getelementptr inbounds float, float* %tmp1928, i64 1
+ %tmp1930 = getelementptr inbounds float, float* %tmp1929, i64 1
+ %tmp1931 = getelementptr inbounds float, float* %tmp1930, i64 1
+ %tmp1932 = getelementptr inbounds float, float* %tmp1931, i64 1
+ %tmp1933 = getelementptr inbounds float, float* %tmp1932, i64 1
+ %tmp1934 = getelementptr inbounds float, float* %tmp1933, i64 1
+ %tmp1935 = getelementptr inbounds float, float* %tmp1934, i64 1
+ %tmp1936 = getelementptr inbounds float, float* %tmp1935, i64 1
+ %tmp1937 = getelementptr inbounds float, float* %tmp1936, i64 1
+ %tmp1938 = getelementptr inbounds float, float* %tmp1937, i64 1
+ %tmp1939 = getelementptr inbounds float, float* %tmp1938, i64 1
+ %tmp1940 = getelementptr inbounds float, float* %tmp1939, i64 1
+ %tmp1941 = getelementptr inbounds float, float* %tmp1940, i64 1
+ %tmp1942 = getelementptr inbounds float, float* %tmp1941, i64 1
+ %tmp1943 = getelementptr inbounds float, float* %tmp1942, i64 1
+ %tmp1944 = getelementptr inbounds float, float* %tmp1943, i64 1
+ %tmp1945 = getelementptr inbounds float, float* %tmp1944, i64 1
+ %tmp1946 = getelementptr inbounds float, float* %tmp1945, i64 1
+ %tmp1947 = getelementptr inbounds float, float* %tmp1946, i64 1
+ %tmp1948 = getelementptr inbounds float, float* %tmp1947, i64 1
+ %tmp1949 = getelementptr inbounds float, float* %tmp1948, i64 1
+ %tmp1950 = getelementptr inbounds float, float* %tmp1949, i64 1
+ %tmp1951 = getelementptr inbounds float, float* %tmp1950, i64 1
+ %tmp1952 = getelementptr inbounds float, float* %tmp1951, i64 1
+ %tmp1953 = getelementptr inbounds float, float* %tmp1952, i64 1
+ %tmp1954 = getelementptr inbounds float, float* %tmp1953, i64 1
+ %tmp1955 = getelementptr inbounds float, float* %tmp1954, i64 1
+ %tmp1956 = getelementptr inbounds float, float* %tmp1955, i64 1
+ %tmp1957 = getelementptr inbounds float, float* %tmp1956, i64 1
+ %tmp1958 = getelementptr inbounds float, float* %tmp1957, i64 1
+ %tmp1959 = getelementptr inbounds float, float* %tmp1958, i64 1
+ %tmp1960 = getelementptr inbounds float, float* %tmp1959, i64 1
+ %tmp1961 = getelementptr inbounds float, float* %tmp1960, i64 1
+ %tmp1962 = getelementptr inbounds float, float* %tmp1961, i64 1
+ %tmp1963 = getelementptr inbounds float, float* %tmp1962, i64 1
+ %tmp1964 = getelementptr inbounds float, float* %tmp1963, i64 1
+ %tmp1965 = getelementptr inbounds float, float* %tmp1964, i64 1
+ %tmp1966 = getelementptr inbounds float, float* %tmp1965, i64 1
+ %tmp1967 = getelementptr inbounds float, float* %tmp1966, i64 1
+ %tmp1968 = getelementptr inbounds float, float* %tmp1967, i64 1
+ %tmp1969 = getelementptr inbounds float, float* %tmp1968, i64 1
+ %tmp1970 = getelementptr inbounds float, float* %tmp1969, i64 1
+ %tmp1971 = getelementptr inbounds float, float* %tmp1970, i64 1
+ %tmp1972 = getelementptr inbounds float, float* %tmp1971, i64 1
+ %tmp1973 = getelementptr inbounds float, float* %tmp1972, i64 1
+ %tmp1974 = getelementptr inbounds float, float* %tmp1973, i64 1
+ %tmp1975 = getelementptr inbounds float, float* %tmp1974, i64 1
+ %tmp1976 = getelementptr inbounds float, float* %tmp1975, i64 1
+ %tmp1977 = getelementptr inbounds float, float* %tmp1976, i64 1
+ %tmp1978 = getelementptr inbounds float, float* %tmp1977, i64 1
+ %tmp1979 = getelementptr inbounds float, float* %tmp1978, i64 1
+ %tmp1980 = getelementptr inbounds float, float* %tmp1979, i64 1
+ %tmp1981 = getelementptr inbounds float, float* %tmp1980, i64 1
+ %tmp1982 = getelementptr inbounds float, float* %tmp1981, i64 1
+ %tmp1983 = getelementptr inbounds float, float* %tmp1982, i64 1
+ %tmp1984 = getelementptr inbounds float, float* %tmp1983, i64 1
+ %tmp1985 = getelementptr inbounds float, float* %tmp1984, i64 1
+ %tmp1986 = getelementptr inbounds float, float* %tmp1985, i64 1
+ %tmp1987 = getelementptr inbounds float, float* %tmp1986, i64 1
+ %tmp1988 = getelementptr inbounds float, float* %tmp1987, i64 1
+ %tmp1989 = getelementptr inbounds float, float* %tmp1988, i64 1
+ %tmp1990 = getelementptr inbounds float, float* %tmp1989, i64 1
+ %tmp1991 = getelementptr inbounds float, float* %tmp1990, i64 1
+ %tmp1992 = getelementptr inbounds float, float* %tmp1991, i64 1
+ %tmp1993 = getelementptr inbounds float, float* %tmp1992, i64 1
+ %tmp1994 = getelementptr inbounds float, float* %tmp1993, i64 1
+ %tmp1995 = getelementptr inbounds float, float* %tmp1994, i64 1
+ %tmp1996 = getelementptr inbounds float, float* %tmp1995, i64 1
+ %tmp1997 = getelementptr inbounds float, float* %tmp1996, i64 1
+ %tmp1998 = getelementptr inbounds float, float* %tmp1997, i64 1
+ %tmp1999 = getelementptr inbounds float, float* %tmp1998, i64 1
+ %tmp2000 = getelementptr inbounds float, float* %tmp1999, i64 1
+ %tmp2001 = getelementptr inbounds float, float* %tmp2000, i64 1
+ %tmp2002 = getelementptr inbounds float, float* %tmp2001, i64 1
+ %tmp2003 = getelementptr inbounds float, float* %tmp2002, i64 1
+ %tmp2004 = getelementptr inbounds float, float* %tmp2003, i64 1
+ %tmp2005 = getelementptr inbounds float, float* %tmp2004, i64 1
+ %tmp2006 = getelementptr inbounds float, float* %tmp2005, i64 1
+ %tmp2007 = getelementptr inbounds float, float* %tmp2006, i64 1
+ %tmp2008 = getelementptr inbounds float, float* %tmp2007, i64 1
+ %tmp2009 = getelementptr inbounds float, float* %tmp2008, i64 1
+ %tmp2010 = getelementptr inbounds float, float* %tmp2009, i64 1
+ %tmp2011 = getelementptr inbounds float, float* %tmp2010, i64 1
+ %tmp2012 = getelementptr inbounds float, float* %tmp2011, i64 1
+ %tmp2013 = getelementptr inbounds float, float* %tmp2012, i64 1
+ %tmp2014 = getelementptr inbounds float, float* %tmp2013, i64 1
+ %tmp2015 = getelementptr inbounds float, float* %tmp2014, i64 1
+ %tmp2016 = getelementptr inbounds float, float* %tmp2015, i64 1
+ %tmp2017 = getelementptr inbounds float, float* %tmp2016, i64 1
+ %tmp2018 = getelementptr inbounds float, float* %tmp2017, i64 1
+ %tmp2019 = getelementptr inbounds float, float* %tmp2018, i64 1
+ %tmp2020 = getelementptr inbounds float, float* %tmp2019, i64 1
+ %tmp2021 = getelementptr inbounds float, float* %tmp2020, i64 1
+ %tmp2022 = getelementptr inbounds float, float* %tmp2021, i64 1
+ %tmp2023 = getelementptr inbounds float, float* %tmp2022, i64 1
+ %tmp2024 = getelementptr inbounds float, float* %tmp2023, i64 1
+ %tmp2025 = getelementptr inbounds float, float* %tmp2024, i64 1
+ %tmp2026 = getelementptr inbounds float, float* %tmp2025, i64 1
+ %tmp2027 = getelementptr inbounds float, float* %tmp2026, i64 1
+ %tmp2028 = getelementptr inbounds float, float* %tmp2027, i64 1
+ %tmp2029 = getelementptr inbounds float, float* %tmp2028, i64 1
+ %tmp2030 = getelementptr inbounds float, float* %tmp2029, i64 1
+ %tmp2031 = getelementptr inbounds float, float* %tmp2030, i64 1
+ %tmp2032 = getelementptr inbounds float, float* %tmp2031, i64 1
+ %tmp2033 = getelementptr inbounds float, float* %tmp2032, i64 1
+ %tmp2034 = getelementptr inbounds float, float* %tmp2033, i64 1
+ %tmp2035 = getelementptr inbounds float, float* %tmp2034, i64 1
+ %tmp2036 = getelementptr inbounds float, float* %tmp2035, i64 1
+ %tmp2037 = getelementptr inbounds float, float* %tmp2036, i64 1
+ %tmp2038 = getelementptr inbounds float, float* %tmp2037, i64 1
+ %tmp2039 = getelementptr inbounds float, float* %tmp2038, i64 1
+ %tmp2040 = getelementptr inbounds float, float* %tmp2039, i64 1
+ %tmp2041 = getelementptr inbounds float, float* %tmp2040, i64 1
+ %tmp2042 = getelementptr inbounds float, float* %tmp2041, i64 1
+ %tmp2043 = getelementptr inbounds float, float* %tmp2042, i64 1
+ %tmp2044 = getelementptr inbounds float, float* %tmp2043, i64 1
+ %tmp2045 = getelementptr inbounds float, float* %tmp2044, i64 1
+ %tmp2046 = getelementptr inbounds float, float* %tmp2045, i64 1
+ %tmp2047 = getelementptr inbounds float, float* %tmp2046, i64 1
+ %tmp2048 = getelementptr inbounds float, float* %tmp2047, i64 1
+ %tmp2049 = getelementptr inbounds float, float* %tmp2048, i64 1
+ %tmp2050 = getelementptr inbounds float, float* %tmp2049, i64 1
+ %tmp2051 = getelementptr inbounds float, float* %tmp2050, i64 1
+ %tmp2052 = getelementptr inbounds float, float* %tmp2051, i64 1
+ %tmp2053 = getelementptr inbounds float, float* %tmp2052, i64 1
+ %tmp2054 = getelementptr inbounds float, float* %tmp2053, i64 1
+ %tmp2055 = getelementptr inbounds float, float* %tmp2054, i64 1
+ %tmp2056 = getelementptr inbounds float, float* %tmp2055, i64 1
+ %tmp2057 = getelementptr inbounds float, float* %tmp2056, i64 1
+ %tmp2058 = getelementptr inbounds float, float* %tmp2057, i64 1
+ %tmp2059 = getelementptr inbounds float, float* %tmp2058, i64 1
+ %tmp2060 = getelementptr inbounds float, float* %tmp2059, i64 1
+ %tmp2061 = getelementptr inbounds float, float* %tmp2060, i64 1
+ %tmp2062 = getelementptr inbounds float, float* %tmp2061, i64 1
+ %tmp2063 = getelementptr inbounds float, float* %tmp2062, i64 1
+ %tmp2064 = getelementptr inbounds float, float* %tmp2063, i64 1
+ %tmp2065 = getelementptr inbounds float, float* %tmp2064, i64 1
+ %tmp2066 = getelementptr inbounds float, float* %tmp2065, i64 1
+ %tmp2067 = getelementptr inbounds float, float* %tmp2066, i64 1
+ %tmp2068 = getelementptr inbounds float, float* %tmp2067, i64 1
+ %tmp2069 = getelementptr inbounds float, float* %tmp2068, i64 1
+ %tmp2070 = getelementptr inbounds float, float* %tmp2069, i64 1
+ %tmp2071 = getelementptr inbounds float, float* %tmp2070, i64 1
+ %tmp2072 = getelementptr inbounds float, float* %tmp2071, i64 1
+ %tmp2073 = getelementptr inbounds float, float* %tmp2072, i64 1
+ %tmp2074 = getelementptr inbounds float, float* %tmp2073, i64 1
+ %tmp2075 = getelementptr inbounds float, float* %tmp2074, i64 1
+ %tmp2076 = getelementptr inbounds float, float* %tmp2075, i64 1
+ %tmp2077 = getelementptr inbounds float, float* %tmp2076, i64 1
+ %tmp2078 = getelementptr inbounds float, float* %tmp2077, i64 1
+ %tmp2079 = getelementptr inbounds float, float* %tmp2078, i64 1
+ %tmp2080 = getelementptr inbounds float, float* %tmp2079, i64 1
+ %tmp2081 = getelementptr inbounds float, float* %tmp2080, i64 1
+ %tmp2082 = getelementptr inbounds float, float* %tmp2081, i64 1
+ %tmp2083 = getelementptr inbounds float, float* %tmp2082, i64 1
+ %tmp2084 = getelementptr inbounds float, float* %tmp2083, i64 1
+ %tmp2085 = getelementptr inbounds float, float* %tmp2084, i64 1
+ %tmp2086 = getelementptr inbounds float, float* %tmp2085, i64 1
+ %tmp2087 = getelementptr inbounds float, float* %tmp2086, i64 1
+ %tmp2088 = getelementptr inbounds float, float* %tmp2087, i64 1
+ %tmp2089 = getelementptr inbounds float, float* %tmp2088, i64 1
+ %tmp2090 = getelementptr inbounds float, float* %tmp2089, i64 1
+ %tmp2091 = getelementptr inbounds float, float* %tmp2090, i64 1
+ %tmp2092 = getelementptr inbounds float, float* %tmp2091, i64 1
+ %tmp2093 = getelementptr inbounds float, float* %tmp2092, i64 1
+ %tmp2094 = getelementptr inbounds float, float* %tmp2093, i64 1
+ %tmp2095 = getelementptr inbounds float, float* %tmp2094, i64 1
+ %tmp2096 = getelementptr inbounds float, float* %tmp2095, i64 1
+ %tmp2097 = getelementptr inbounds float, float* %tmp2096, i64 1
+ %tmp2098 = getelementptr inbounds float, float* %tmp2097, i64 1
+ %tmp2099 = getelementptr inbounds float, float* %tmp2098, i64 1
+ %tmp2100 = getelementptr inbounds float, float* %tmp2099, i64 1
+ %tmp2101 = getelementptr inbounds float, float* %tmp2100, i64 1
+ %tmp2102 = getelementptr inbounds float, float* %tmp2101, i64 1
+ %tmp2103 = getelementptr inbounds float, float* %tmp2102, i64 1
+ %tmp2104 = getelementptr inbounds float, float* %tmp2103, i64 1
+ %tmp2105 = getelementptr inbounds float, float* %tmp2104, i64 1
+ %tmp2106 = getelementptr inbounds float, float* %tmp2105, i64 1
+ %tmp2107 = getelementptr inbounds float, float* %tmp2106, i64 1
+ %tmp2108 = getelementptr inbounds float, float* %tmp2107, i64 1
+ %tmp2109 = getelementptr inbounds float, float* %tmp2108, i64 1
+ %tmp2110 = getelementptr inbounds float, float* %tmp2109, i64 1
+ %tmp2111 = getelementptr inbounds float, float* %tmp2110, i64 1
+ %tmp2112 = getelementptr inbounds float, float* %tmp2111, i64 1
+ %tmp2113 = getelementptr inbounds float, float* %tmp2112, i64 1
+ %tmp2114 = getelementptr inbounds float, float* %tmp2113, i64 1
+ %tmp2115 = getelementptr inbounds float, float* %tmp2114, i64 1
+ %tmp2116 = getelementptr inbounds float, float* %tmp2115, i64 1
+ %tmp2117 = getelementptr inbounds float, float* %tmp2116, i64 1
+ %tmp2118 = getelementptr inbounds float, float* %tmp2117, i64 1
+ %tmp2119 = getelementptr inbounds float, float* %tmp2118, i64 1
+ %tmp2120 = getelementptr inbounds float, float* %tmp2119, i64 1
+ %tmp2121 = getelementptr inbounds float, float* %tmp2120, i64 1
+ %tmp2122 = getelementptr inbounds float, float* %tmp2121, i64 1
+ %tmp2123 = getelementptr inbounds float, float* %tmp2122, i64 1
+ %tmp2124 = getelementptr inbounds float, float* %tmp2123, i64 1
+ %tmp2125 = getelementptr inbounds float, float* %tmp2124, i64 1
+ %tmp2126 = getelementptr inbounds float, float* %tmp2125, i64 1
+ %tmp2127 = getelementptr inbounds float, float* %tmp2126, i64 1
+ %tmp2128 = getelementptr inbounds float, float* %tmp2127, i64 1
+ %tmp2129 = getelementptr inbounds float, float* %tmp2128, i64 1
+ %tmp2130 = getelementptr inbounds float, float* %tmp2129, i64 1
+ %tmp2131 = getelementptr inbounds float, float* %tmp2130, i64 1
+ %tmp2132 = getelementptr inbounds float, float* %tmp2131, i64 1
+ %tmp2133 = getelementptr inbounds float, float* %tmp2132, i64 1
+ %tmp2134 = getelementptr inbounds float, float* %tmp2133, i64 1
+ %tmp2135 = getelementptr inbounds float, float* %tmp2134, i64 1
+ %tmp2136 = getelementptr inbounds float, float* %tmp2135, i64 1
+ %tmp2137 = getelementptr inbounds float, float* %tmp2136, i64 1
+ %tmp2138 = getelementptr inbounds float, float* %tmp2137, i64 1
+ %tmp2139 = getelementptr inbounds float, float* %tmp2138, i64 1
+ %tmp2140 = getelementptr inbounds float, float* %tmp2139, i64 1
+ %tmp2141 = getelementptr inbounds float, float* %tmp2140, i64 1
+ %tmp2142 = getelementptr inbounds float, float* %tmp2141, i64 1
+ %tmp2143 = getelementptr inbounds float, float* %tmp2142, i64 1
+ %tmp2144 = getelementptr inbounds float, float* %tmp2143, i64 1
+ %tmp2145 = getelementptr inbounds float, float* %tmp2144, i64 1
+ %tmp2146 = getelementptr inbounds float, float* %tmp2145, i64 1
+ %tmp2147 = getelementptr inbounds float, float* %tmp2146, i64 1
+ %tmp2148 = getelementptr inbounds float, float* %tmp2147, i64 1
+ %tmp2149 = getelementptr inbounds float, float* %tmp2148, i64 1
+ %tmp2150 = getelementptr inbounds float, float* %tmp2149, i64 1
+ %tmp2151 = getelementptr inbounds float, float* %tmp2150, i64 1
+ %tmp2152 = getelementptr inbounds float, float* %tmp2151, i64 1
+ %tmp2153 = getelementptr inbounds float, float* %tmp2152, i64 1
+ %tmp2154 = getelementptr inbounds float, float* %tmp2153, i64 1
+ %tmp2155 = getelementptr inbounds float, float* %tmp2154, i64 1
+ %tmp2156 = getelementptr inbounds float, float* %tmp2155, i64 1
+ %tmp2157 = getelementptr inbounds float, float* %tmp2156, i64 1
+ %tmp2158 = getelementptr inbounds float, float* %tmp2157, i64 1
+ %tmp2159 = getelementptr inbounds float, float* %tmp2158, i64 1
+ %tmp2160 = getelementptr inbounds float, float* %tmp2159, i64 1
+ %tmp2161 = getelementptr inbounds float, float* %tmp2160, i64 1
+ %tmp2162 = getelementptr inbounds float, float* %tmp2161, i64 1
+ %tmp2163 = getelementptr inbounds float, float* %tmp2162, i64 1
+ %tmp2164 = getelementptr inbounds float, float* %tmp2163, i64 1
+ %tmp2165 = getelementptr inbounds float, float* %tmp2164, i64 1
+ %tmp2166 = getelementptr inbounds float, float* %tmp2165, i64 1
+ %tmp2167 = getelementptr inbounds float, float* %tmp2166, i64 1
+ %tmp2168 = getelementptr inbounds float, float* %tmp2167, i64 1
+ %tmp2169 = getelementptr inbounds float, float* %tmp2168, i64 1
+ %tmp2170 = getelementptr inbounds float, float* %tmp2169, i64 1
+ %tmp2171 = getelementptr inbounds float, float* %tmp2170, i64 1
+ %tmp2172 = getelementptr inbounds float, float* %tmp2171, i64 1
+ %tmp2173 = getelementptr inbounds float, float* %tmp2172, i64 1
+ %tmp2174 = getelementptr inbounds float, float* %tmp2173, i64 1
+ %tmp2175 = getelementptr inbounds float, float* %tmp2174, i64 1
+ %tmp2176 = getelementptr inbounds float, float* %tmp2175, i64 1
+ %tmp2177 = getelementptr inbounds float, float* %tmp2176, i64 1
+ %tmp2178 = getelementptr inbounds float, float* %tmp2177, i64 1
+ %tmp2179 = getelementptr inbounds float, float* %tmp2178, i64 1
+ %tmp2180 = getelementptr inbounds float, float* %tmp2179, i64 1
+ %tmp2181 = getelementptr inbounds float, float* %tmp2180, i64 1
+ %tmp2182 = getelementptr inbounds float, float* %tmp2181, i64 1
+ %tmp2183 = getelementptr inbounds float, float* %tmp2182, i64 1
+ %tmp2184 = getelementptr inbounds float, float* %tmp2183, i64 1
+ %tmp2185 = getelementptr inbounds float, float* %tmp2184, i64 1
+ %tmp2186 = getelementptr inbounds float, float* %tmp2185, i64 1
+ %tmp2187 = getelementptr inbounds float, float* %tmp2186, i64 1
+ %tmp2188 = getelementptr inbounds float, float* %tmp2187, i64 1
+ %tmp2189 = getelementptr inbounds float, float* %tmp2188, i64 1
+ %tmp2190 = getelementptr inbounds float, float* %tmp2189, i64 1
+ %tmp2191 = getelementptr inbounds float, float* %tmp2190, i64 1
+ %tmp2192 = getelementptr inbounds float, float* %tmp2191, i64 1
+ %tmp2193 = getelementptr inbounds float, float* %tmp2192, i64 1
+ %tmp2194 = getelementptr inbounds float, float* %tmp2193, i64 1
+ %tmp2195 = getelementptr inbounds float, float* %tmp2194, i64 1
+ %tmp2196 = getelementptr inbounds float, float* %tmp2195, i64 1
+ %tmp2197 = getelementptr inbounds float, float* %tmp2196, i64 1
+ %tmp2198 = getelementptr inbounds float, float* %tmp2197, i64 1
+ %tmp2199 = getelementptr inbounds float, float* %tmp2198, i64 1
+ %tmp2200 = getelementptr inbounds float, float* %tmp2199, i64 1
+ %tmp2201 = getelementptr inbounds float, float* %tmp2200, i64 1
+ %tmp2202 = getelementptr inbounds float, float* %tmp2201, i64 1
+ %tmp2203 = getelementptr inbounds float, float* %tmp2202, i64 1
+ %tmp2204 = getelementptr inbounds float, float* %tmp2203, i64 1
+ %tmp2205 = getelementptr inbounds float, float* %tmp2204, i64 1
+ %tmp2206 = getelementptr inbounds float, float* %tmp2205, i64 1
+ %tmp2207 = getelementptr inbounds float, float* %tmp2206, i64 1
+ %tmp2208 = getelementptr inbounds float, float* %tmp2207, i64 1
+ %tmp2209 = getelementptr inbounds float, float* %tmp2208, i64 1
+ %tmp2210 = getelementptr inbounds float, float* %tmp2209, i64 1
+ %tmp2211 = getelementptr inbounds float, float* %tmp2210, i64 1
+ %tmp2212 = getelementptr inbounds float, float* %tmp2211, i64 1
+ %tmp2213 = getelementptr inbounds float, float* %tmp2212, i64 1
+ %tmp2214 = getelementptr inbounds float, float* %tmp2213, i64 1
+ %tmp2215 = getelementptr inbounds float, float* %tmp2214, i64 1
+ %tmp2216 = getelementptr inbounds float, float* %tmp2215, i64 1
+ %tmp2217 = getelementptr inbounds float, float* %tmp2216, i64 1
+ %tmp2218 = getelementptr inbounds float, float* %tmp2217, i64 1
+ %tmp2219 = getelementptr inbounds float, float* %tmp2218, i64 1
+ %tmp2220 = getelementptr inbounds float, float* %tmp2219, i64 1
+ %tmp2221 = getelementptr inbounds float, float* %tmp2220, i64 1
+ %tmp2222 = getelementptr inbounds float, float* %tmp2221, i64 1
+ %tmp2223 = getelementptr inbounds float, float* %tmp2222, i64 1
+ %tmp2224 = getelementptr inbounds float, float* %tmp2223, i64 1
+ %tmp2225 = getelementptr inbounds float, float* %tmp2224, i64 1
+ %tmp2226 = getelementptr inbounds float, float* %tmp2225, i64 1
+ %tmp2227 = getelementptr inbounds float, float* %tmp2226, i64 1
+ %tmp2228 = getelementptr inbounds float, float* %tmp2227, i64 1
+ %tmp2229 = getelementptr inbounds float, float* %tmp2228, i64 1
+ %tmp2230 = getelementptr inbounds float, float* %tmp2229, i64 1
+ %tmp2231 = getelementptr inbounds float, float* %tmp2230, i64 1
+ %tmp2232 = getelementptr inbounds float, float* %tmp2231, i64 1
+ %tmp2233 = getelementptr inbounds float, float* %tmp2232, i64 1
+ %tmp2234 = getelementptr inbounds float, float* %tmp2233, i64 1
+ %tmp2235 = getelementptr inbounds float, float* %tmp2234, i64 1
+ %tmp2236 = getelementptr inbounds float, float* %tmp2235, i64 1
+ %tmp2237 = getelementptr inbounds float, float* %tmp2236, i64 1
+ %tmp2238 = getelementptr inbounds float, float* %tmp2237, i64 1
+ %tmp2239 = getelementptr inbounds float, float* %tmp2238, i64 1
+ %tmp2240 = getelementptr inbounds float, float* %tmp2239, i64 1
+ %tmp2241 = getelementptr inbounds float, float* %tmp2240, i64 1
+ %tmp2242 = getelementptr inbounds float, float* %tmp2241, i64 1
+ %tmp2243 = getelementptr inbounds float, float* %tmp2242, i64 1
+ %tmp2244 = getelementptr inbounds float, float* %tmp2243, i64 1
+ %tmp2245 = getelementptr inbounds float, float* %tmp2244, i64 1
+ %tmp2246 = getelementptr inbounds float, float* %tmp2245, i64 1
+ %tmp2247 = getelementptr inbounds float, float* %tmp2246, i64 1
+ %tmp2248 = getelementptr inbounds float, float* %tmp2247, i64 1
+ %tmp2249 = getelementptr inbounds float, float* %tmp2248, i64 1
+ %tmp2250 = getelementptr inbounds float, float* %tmp2249, i64 1
+ %tmp2251 = getelementptr inbounds float, float* %tmp2250, i64 1
+ %tmp2252 = getelementptr inbounds float, float* %tmp2251, i64 1
+ %tmp2253 = getelementptr inbounds float, float* %tmp2252, i64 1
+ %tmp2254 = getelementptr inbounds float, float* %tmp2253, i64 1
+ %tmp2255 = getelementptr inbounds float, float* %tmp2254, i64 1
+ %tmp2256 = getelementptr inbounds float, float* %tmp2255, i64 1
+ %tmp2257 = getelementptr inbounds float, float* %tmp2256, i64 1
+ %tmp2258 = getelementptr inbounds float, float* %tmp2257, i64 1
+ %tmp2259 = getelementptr inbounds float, float* %tmp2258, i64 1
+ %tmp2260 = getelementptr inbounds float, float* %tmp2259, i64 1
+ %tmp2261 = getelementptr inbounds float, float* %tmp2260, i64 1
+ %tmp2262 = getelementptr inbounds float, float* %tmp2261, i64 1
+ %tmp2263 = getelementptr inbounds float, float* %tmp2262, i64 1
+ %tmp2264 = getelementptr inbounds float, float* %tmp2263, i64 1
+ %tmp2265 = getelementptr inbounds float, float* %tmp2264, i64 1
+ %tmp2266 = getelementptr inbounds float, float* %tmp2265, i64 1
+ %tmp2267 = getelementptr inbounds float, float* %tmp2266, i64 1
+ %tmp2268 = getelementptr inbounds float, float* %tmp2267, i64 1
+ %tmp2269 = getelementptr inbounds float, float* %tmp2268, i64 1
+ %tmp2270 = getelementptr inbounds float, float* %tmp2269, i64 1
+ %tmp2271 = getelementptr inbounds float, float* %tmp2270, i64 1
+ %tmp2272 = getelementptr inbounds float, float* %tmp2271, i64 1
+ %tmp2273 = getelementptr inbounds float, float* %tmp2272, i64 1
+ %tmp2274 = getelementptr inbounds float, float* %tmp2273, i64 1
+ %tmp2275 = getelementptr inbounds float, float* %tmp2274, i64 1
+ %tmp2276 = getelementptr inbounds float, float* %tmp2275, i64 1
+ %tmp2277 = getelementptr inbounds float, float* %tmp2276, i64 1
+ %tmp2278 = getelementptr inbounds float, float* %tmp2277, i64 1
+ %tmp2279 = getelementptr inbounds float, float* %tmp2278, i64 1
+ %tmp2280 = getelementptr inbounds float, float* %tmp2279, i64 1
+ %tmp2281 = getelementptr inbounds float, float* %tmp2280, i64 1
+ %tmp2282 = getelementptr inbounds float, float* %tmp2281, i64 1
+ %tmp2283 = getelementptr inbounds float, float* %tmp2282, i64 1
+ %tmp2284 = getelementptr inbounds float, float* %tmp2283, i64 1
+ %tmp2285 = getelementptr inbounds float, float* %tmp2284, i64 1
+ %tmp2286 = getelementptr inbounds float, float* %tmp2285, i64 1
+ %tmp2287 = getelementptr inbounds float, float* %tmp2286, i64 1
+ %tmp2288 = getelementptr inbounds float, float* %tmp2287, i64 1
+ %tmp2289 = getelementptr inbounds float, float* %tmp2288, i64 1
+ %tmp2290 = getelementptr inbounds float, float* %tmp2289, i64 1
+ %tmp2291 = getelementptr inbounds float, float* %tmp2290, i64 1
+ %tmp2292 = getelementptr inbounds float, float* %tmp2291, i64 1
+ %tmp2293 = getelementptr inbounds float, float* %tmp2292, i64 1
+ %tmp2294 = getelementptr inbounds float, float* %tmp2293, i64 1
+ %tmp2295 = getelementptr inbounds float, float* %tmp2294, i64 1
+ %tmp2296 = getelementptr inbounds float, float* %tmp2295, i64 1
+ %tmp2297 = getelementptr inbounds float, float* %tmp2296, i64 1
+ %tmp2298 = getelementptr inbounds float, float* %tmp2297, i64 1
+ %tmp2299 = getelementptr inbounds float, float* %tmp2298, i64 1
+ %tmp2300 = getelementptr inbounds float, float* %tmp2299, i64 1
+ %tmp2301 = getelementptr inbounds float, float* %tmp2300, i64 1
+ %tmp2302 = getelementptr inbounds float, float* %tmp2301, i64 1
+ %tmp2303 = getelementptr inbounds float, float* %tmp2302, i64 1
+ %tmp2304 = getelementptr inbounds float, float* %tmp2303, i64 1
+ %tmp2305 = getelementptr inbounds float, float* %tmp2304, i64 1
+ %tmp2306 = getelementptr inbounds float, float* %tmp2305, i64 1
+ %tmp2307 = getelementptr inbounds float, float* %tmp2306, i64 1
+ %tmp2308 = getelementptr inbounds float, float* %tmp2307, i64 1
+ %tmp2309 = getelementptr inbounds float, float* %tmp2308, i64 1
+ %tmp2310 = getelementptr inbounds float, float* %tmp2309, i64 1
+ %tmp2311 = getelementptr inbounds float, float* %tmp2310, i64 1
+ %tmp2312 = getelementptr inbounds float, float* %tmp2311, i64 1
+ %tmp2313 = getelementptr inbounds float, float* %tmp2312, i64 1
+ %tmp2314 = getelementptr inbounds float, float* %tmp2313, i64 1
+ %tmp2315 = getelementptr inbounds float, float* %tmp2314, i64 1
+ %tmp2316 = getelementptr inbounds float, float* %tmp2315, i64 1
+ %tmp2317 = getelementptr inbounds float, float* %tmp2316, i64 1
+ %tmp2318 = getelementptr inbounds float, float* %tmp2317, i64 1
+ %tmp2319 = getelementptr inbounds float, float* %tmp2318, i64 1
+ %tmp2320 = getelementptr inbounds float, float* %tmp2319, i64 1
+ %tmp2321 = getelementptr inbounds float, float* %tmp2320, i64 1
+ %tmp2322 = getelementptr inbounds float, float* %tmp2321, i64 1
+ %tmp2323 = getelementptr inbounds float, float* %tmp2322, i64 1
+ %tmp2324 = getelementptr inbounds float, float* %tmp2323, i64 1
+ %tmp2325 = getelementptr inbounds float, float* %tmp2324, i64 1
+ %tmp2326 = getelementptr inbounds float, float* %tmp2325, i64 1
+ %tmp2327 = getelementptr inbounds float, float* %tmp2326, i64 1
+ %tmp2328 = getelementptr inbounds float, float* %tmp2327, i64 1
+ %tmp2329 = getelementptr inbounds float, float* %tmp2328, i64 1
+ %tmp2330 = getelementptr inbounds float, float* %tmp2329, i64 1
+ %tmp2331 = getelementptr inbounds float, float* %tmp2330, i64 1
+ %tmp2332 = getelementptr inbounds float, float* %tmp2331, i64 1
+ %tmp2333 = getelementptr inbounds float, float* %tmp2332, i64 1
+ %tmp2334 = getelementptr inbounds float, float* %tmp2333, i64 1
+ %tmp2335 = getelementptr inbounds float, float* %tmp2334, i64 1
+ %tmp2336 = getelementptr inbounds float, float* %tmp2335, i64 1
+ %tmp2337 = getelementptr inbounds float, float* %tmp2336, i64 1
+ %tmp2338 = getelementptr inbounds float, float* %tmp2337, i64 1
+ %tmp2339 = getelementptr inbounds float, float* %tmp2338, i64 1
+ %tmp2340 = getelementptr inbounds float, float* %tmp2339, i64 1
+ %tmp2341 = getelementptr inbounds float, float* %tmp2340, i64 1
+ %tmp2342 = getelementptr inbounds float, float* %tmp2341, i64 1
+ %tmp2343 = getelementptr inbounds float, float* %tmp2342, i64 1
+ %tmp2344 = getelementptr inbounds float, float* %tmp2343, i64 1
+ %tmp2345 = getelementptr inbounds float, float* %tmp2344, i64 1
+ %tmp2346 = getelementptr inbounds float, float* %tmp2345, i64 1
+ %tmp2347 = getelementptr inbounds float, float* %tmp2346, i64 1
+ %tmp2348 = getelementptr inbounds float, float* %tmp2347, i64 1
+ %tmp2349 = getelementptr inbounds float, float* %tmp2348, i64 1
+ %tmp2350 = getelementptr inbounds float, float* %tmp2349, i64 1
+ %tmp2351 = getelementptr inbounds float, float* %tmp2350, i64 1
+ %tmp2352 = getelementptr inbounds float, float* %tmp2351, i64 1
+ %tmp2353 = getelementptr inbounds float, float* %tmp2352, i64 1
+ %tmp2354 = getelementptr inbounds float, float* %tmp2353, i64 1
+ %tmp2355 = getelementptr inbounds float, float* %tmp2354, i64 1
+ %tmp2356 = getelementptr inbounds float, float* %tmp2355, i64 1
+ %tmp2357 = getelementptr inbounds float, float* %tmp2356, i64 1
+ %tmp2358 = getelementptr inbounds float, float* %tmp2357, i64 1
+ %tmp2359 = getelementptr inbounds float, float* %tmp2358, i64 1
+ %tmp2360 = getelementptr inbounds float, float* %tmp2359, i64 1
+ %tmp2361 = getelementptr inbounds float, float* %tmp2360, i64 1
+ %tmp2362 = getelementptr inbounds float, float* %tmp2361, i64 1
+ %tmp2363 = getelementptr inbounds float, float* %tmp2362, i64 1
+ %tmp2364 = getelementptr inbounds float, float* %tmp2363, i64 1
+ %tmp2365 = getelementptr inbounds float, float* %tmp2364, i64 1
+ %tmp2366 = getelementptr inbounds float, float* %tmp2365, i64 1
+ %tmp2367 = getelementptr inbounds float, float* %tmp2366, i64 1
+ %tmp2368 = getelementptr inbounds float, float* %tmp2367, i64 1
+ %tmp2369 = getelementptr inbounds float, float* %tmp2368, i64 1
+ %tmp2370 = getelementptr inbounds float, float* %tmp2369, i64 1
+ %tmp2371 = getelementptr inbounds float, float* %tmp2370, i64 1
+ %tmp2372 = getelementptr inbounds float, float* %tmp2371, i64 1
+ %tmp2373 = getelementptr inbounds float, float* %tmp2372, i64 1
+ %tmp2374 = getelementptr inbounds float, float* %tmp2373, i64 1
+ %tmp2375 = getelementptr inbounds float, float* %tmp2374, i64 1
+ %tmp2376 = getelementptr inbounds float, float* %tmp2375, i64 1
+ %tmp2377 = getelementptr inbounds float, float* %tmp2376, i64 1
+ %tmp2378 = getelementptr inbounds float, float* %tmp2377, i64 1
+ %tmp2379 = getelementptr inbounds float, float* %tmp2378, i64 1
+ %tmp2380 = getelementptr inbounds float, float* %tmp2379, i64 1
+ %tmp2381 = getelementptr inbounds float, float* %tmp2380, i64 1
+ %tmp2382 = getelementptr inbounds float, float* %tmp2381, i64 1
+ %tmp2383 = getelementptr inbounds float, float* %tmp2382, i64 1
+ %tmp2384 = getelementptr inbounds float, float* %tmp2383, i64 1
+ %tmp2385 = getelementptr inbounds float, float* %tmp2384, i64 1
+ %tmp2386 = getelementptr inbounds float, float* %tmp2385, i64 1
+ %tmp2387 = getelementptr inbounds float, float* %tmp2386, i64 1
+ %tmp2388 = getelementptr inbounds float, float* %tmp2387, i64 1
+ %tmp2389 = getelementptr inbounds float, float* %tmp2388, i64 1
+ %tmp2390 = getelementptr inbounds float, float* %tmp2389, i64 1
+ %tmp2391 = getelementptr inbounds float, float* %tmp2390, i64 1
+ %tmp2392 = getelementptr inbounds float, float* %tmp2391, i64 1
+ %tmp2393 = getelementptr inbounds float, float* %tmp2392, i64 1
+ %tmp2394 = getelementptr inbounds float, float* %tmp2393, i64 1
+ %tmp2395 = getelementptr inbounds float, float* %tmp2394, i64 1
+ %tmp2396 = getelementptr inbounds float, float* %tmp2395, i64 1
+ %tmp2397 = getelementptr inbounds float, float* %tmp2396, i64 1
+ %tmp2398 = getelementptr inbounds float, float* %tmp2397, i64 1
+ %tmp2399 = getelementptr inbounds float, float* %tmp2398, i64 1
+ %tmp2400 = getelementptr inbounds float, float* %tmp2399, i64 1
+ %tmp2401 = getelementptr inbounds float, float* %tmp2400, i64 1
+ %tmp2402 = getelementptr inbounds float, float* %tmp2401, i64 1
+ %tmp2403 = getelementptr inbounds float, float* %tmp2402, i64 1
+ %tmp2404 = getelementptr inbounds float, float* %tmp2403, i64 1
+ %tmp2405 = getelementptr inbounds float, float* %tmp2404, i64 1
+ %tmp2406 = getelementptr inbounds float, float* %tmp2405, i64 1
+ %tmp2407 = getelementptr inbounds float, float* %tmp2406, i64 1
+ %tmp2408 = getelementptr inbounds float, float* %tmp2407, i64 1
+ %tmp2409 = getelementptr inbounds float, float* %tmp2408, i64 1
+ %tmp2410 = getelementptr inbounds float, float* %tmp2409, i64 1
+ %tmp2411 = getelementptr inbounds float, float* %tmp2410, i64 1
+ %tmp2412 = getelementptr inbounds float, float* %tmp2411, i64 1
+ %tmp2413 = getelementptr inbounds float, float* %tmp2412, i64 1
+ %tmp2414 = getelementptr inbounds float, float* %tmp2413, i64 1
+ %tmp2415 = getelementptr inbounds float, float* %tmp2414, i64 1
+ %tmp2416 = getelementptr inbounds float, float* %tmp2415, i64 1
+ %tmp2417 = getelementptr inbounds float, float* %tmp2416, i64 1
+ %tmp2418 = getelementptr inbounds float, float* %tmp2417, i64 1
+ %tmp2419 = getelementptr inbounds float, float* %tmp2418, i64 1
+ %tmp2420 = getelementptr inbounds float, float* %tmp2419, i64 1
+ %tmp2421 = getelementptr inbounds float, float* %tmp2420, i64 1
+ %tmp2422 = getelementptr inbounds float, float* %tmp2421, i64 1
+ %tmp2423 = getelementptr inbounds float, float* %tmp2422, i64 1
+ %tmp2424 = getelementptr inbounds float, float* %tmp2423, i64 1
+ %tmp2425 = getelementptr inbounds float, float* %tmp2424, i64 1
+ %tmp2426 = getelementptr inbounds float, float* %tmp2425, i64 1
+ %tmp2427 = getelementptr inbounds float, float* %tmp2426, i64 1
+ %tmp2428 = getelementptr inbounds float, float* %tmp2427, i64 1
+ %tmp2429 = getelementptr inbounds float, float* %tmp2428, i64 1
+ %tmp2430 = getelementptr inbounds float, float* %tmp2429, i64 1
+ %tmp2431 = getelementptr inbounds float, float* %tmp2430, i64 1
+ %tmp2432 = getelementptr inbounds float, float* %tmp2431, i64 1
+ %tmp2433 = getelementptr inbounds float, float* %tmp2432, i64 1
+ %tmp2434 = getelementptr inbounds float, float* %tmp2433, i64 1
+ %tmp2435 = getelementptr inbounds float, float* %tmp2434, i64 1
+ %tmp2436 = getelementptr inbounds float, float* %tmp2435, i64 1
+ %tmp2437 = getelementptr inbounds float, float* %tmp2436, i64 1
+ %tmp2438 = getelementptr inbounds float, float* %tmp2437, i64 1
+ %tmp2439 = getelementptr inbounds float, float* %tmp2438, i64 1
+ %tmp2440 = getelementptr inbounds float, float* %tmp2439, i64 1
+ %tmp2441 = getelementptr inbounds float, float* %tmp2440, i64 1
+ %tmp2442 = getelementptr inbounds float, float* %tmp2441, i64 1
+ %tmp2443 = getelementptr inbounds float, float* %tmp2442, i64 1
+ %tmp2444 = getelementptr inbounds float, float* %tmp2443, i64 1
+ %tmp2445 = getelementptr inbounds float, float* %tmp2444, i64 1
+ %tmp2446 = getelementptr inbounds float, float* %tmp2445, i64 1
+ %tmp2447 = getelementptr inbounds float, float* %tmp2446, i64 1
+ %tmp2448 = getelementptr inbounds float, float* %tmp2447, i64 1
+ %tmp2449 = getelementptr inbounds float, float* %tmp2448, i64 1
+ %tmp2450 = getelementptr inbounds float, float* %tmp2449, i64 1
+ %tmp2451 = getelementptr inbounds float, float* %tmp2450, i64 1
+ %tmp2452 = getelementptr inbounds float, float* %tmp2451, i64 1
+ %tmp2453 = getelementptr inbounds float, float* %tmp2452, i64 1
+ %tmp2454 = getelementptr inbounds float, float* %tmp2453, i64 1
+ %tmp2455 = getelementptr inbounds float, float* %tmp2454, i64 1
+ %tmp2456 = getelementptr inbounds float, float* %tmp2455, i64 1
+ %tmp2457 = getelementptr inbounds float, float* %tmp2456, i64 1
+ %tmp2458 = getelementptr inbounds float, float* %tmp2457, i64 1
+ %tmp2459 = getelementptr inbounds float, float* %tmp2458, i64 1
+ %tmp2460 = getelementptr inbounds float, float* %tmp2459, i64 1
+ %tmp2461 = getelementptr inbounds float, float* %tmp2460, i64 1
+ %tmp2462 = getelementptr inbounds float, float* %tmp2461, i64 1
+ %tmp2463 = getelementptr inbounds float, float* %tmp2462, i64 1
+ %tmp2464 = getelementptr inbounds float, float* %tmp2463, i64 1
+ %tmp2465 = getelementptr inbounds float, float* %tmp2464, i64 1
+ %tmp2466 = getelementptr inbounds float, float* %tmp2465, i64 1
+ %tmp2467 = getelementptr inbounds float, float* %tmp2466, i64 1
+ %tmp2468 = getelementptr inbounds float, float* %tmp2467, i64 1
+ %tmp2469 = getelementptr inbounds float, float* %tmp2468, i64 1
+ %tmp2470 = getelementptr inbounds float, float* %tmp2469, i64 1
+ %tmp2471 = getelementptr inbounds float, float* %tmp2470, i64 1
+ %tmp2472 = getelementptr inbounds float, float* %tmp2471, i64 1
+ %tmp2473 = getelementptr inbounds float, float* %tmp2472, i64 1
+ %tmp2474 = getelementptr inbounds float, float* %tmp2473, i64 1
+ %tmp2475 = getelementptr inbounds float, float* %tmp2474, i64 1
+ %tmp2476 = getelementptr inbounds float, float* %tmp2475, i64 1
+ %tmp2477 = getelementptr inbounds float, float* %tmp2476, i64 1
+ %tmp2478 = getelementptr inbounds float, float* %tmp2477, i64 1
+ %tmp2479 = getelementptr inbounds float, float* %tmp2478, i64 1
+ %tmp2480 = getelementptr inbounds float, float* %tmp2479, i64 1
+ %tmp2481 = getelementptr inbounds float, float* %tmp2480, i64 1
+ %tmp2482 = getelementptr inbounds float, float* %tmp2481, i64 1
+ %tmp2483 = getelementptr inbounds float, float* %tmp2482, i64 1
+ %tmp2484 = getelementptr inbounds float, float* %tmp2483, i64 1
+ %tmp2485 = getelementptr inbounds float, float* %tmp2484, i64 1
+ %tmp2486 = getelementptr inbounds float, float* %tmp2485, i64 1
+ %tmp2487 = getelementptr inbounds float, float* %tmp2486, i64 1
+ %tmp2488 = getelementptr inbounds float, float* %tmp2487, i64 1
+ %tmp2489 = getelementptr inbounds float, float* %tmp2488, i64 1
+ %tmp2490 = getelementptr inbounds float, float* %tmp2489, i64 1
+ %tmp2491 = getelementptr inbounds float, float* %tmp2490, i64 1
+ %tmp2492 = getelementptr inbounds float, float* %tmp2491, i64 1
+ %tmp2493 = getelementptr inbounds float, float* %tmp2492, i64 1
+ %tmp2494 = getelementptr inbounds float, float* %tmp2493, i64 1
+ %tmp2495 = getelementptr inbounds float, float* %tmp2494, i64 1
+ %tmp2496 = getelementptr inbounds float, float* %tmp2495, i64 1
+ %tmp2497 = getelementptr inbounds float, float* %tmp2496, i64 1
+ %tmp2498 = getelementptr inbounds float, float* %tmp2497, i64 1
+ %tmp2499 = getelementptr inbounds float, float* %tmp2498, i64 1
+ %tmp2500 = getelementptr inbounds float, float* %tmp2499, i64 1
+ %tmp2501 = getelementptr inbounds float, float* %tmp2500, i64 1
+ %tmp2502 = getelementptr inbounds float, float* %tmp2501, i64 1
+ %tmp2503 = getelementptr inbounds float, float* %tmp2502, i64 1
+ %tmp2504 = getelementptr inbounds float, float* %tmp2503, i64 1
+ %tmp2505 = getelementptr inbounds float, float* %tmp2504, i64 1
+ %tmp2506 = getelementptr inbounds float, float* %tmp2505, i64 1
+ %tmp2507 = getelementptr inbounds float, float* %tmp2506, i64 1
+ %tmp2508 = getelementptr inbounds float, float* %tmp2507, i64 1
+ %tmp2509 = getelementptr inbounds float, float* %tmp2508, i64 1
+ %tmp2510 = getelementptr inbounds float, float* %tmp2509, i64 1
+ %tmp2511 = getelementptr inbounds float, float* %tmp2510, i64 1
+ %tmp2512 = getelementptr inbounds float, float* %tmp2511, i64 1
+ %tmp2513 = getelementptr inbounds float, float* %tmp2512, i64 1
+ %tmp2514 = getelementptr inbounds float, float* %tmp2513, i64 1
+ %tmp2515 = getelementptr inbounds float, float* %tmp2514, i64 1
+ %tmp2516 = getelementptr inbounds float, float* %tmp2515, i64 1
+ %tmp2517 = getelementptr inbounds float, float* %tmp2516, i64 1
+ %tmp2518 = getelementptr inbounds float, float* %tmp2517, i64 1
+ %tmp2519 = getelementptr inbounds float, float* %tmp2518, i64 1
+ %tmp2520 = getelementptr inbounds float, float* %tmp2519, i64 1
+ %tmp2521 = getelementptr inbounds float, float* %tmp2520, i64 1
+ %tmp2522 = getelementptr inbounds float, float* %tmp2521, i64 1
+ %tmp2523 = getelementptr inbounds float, float* %tmp2522, i64 1
+ %tmp2524 = getelementptr inbounds float, float* %tmp2523, i64 1
+ %tmp2525 = getelementptr inbounds float, float* %tmp2524, i64 1
+ %tmp2526 = getelementptr inbounds float, float* %tmp2525, i64 1
+ %tmp2527 = getelementptr inbounds float, float* %tmp2526, i64 1
+ %tmp2528 = getelementptr inbounds float, float* %tmp2527, i64 1
+ %tmp2529 = getelementptr inbounds float, float* %tmp2528, i64 1
+ %tmp2530 = getelementptr inbounds float, float* %tmp2529, i64 1
+ %tmp2531 = getelementptr inbounds float, float* %tmp2530, i64 1
+ %tmp2532 = getelementptr inbounds float, float* %tmp2531, i64 1
+ %tmp2533 = getelementptr inbounds float, float* %tmp2532, i64 1
+ %tmp2534 = getelementptr inbounds float, float* %tmp2533, i64 1
+ %tmp2535 = getelementptr inbounds float, float* %tmp2534, i64 1
+ %tmp2536 = getelementptr inbounds float, float* %tmp2535, i64 1
+ %tmp2537 = getelementptr inbounds float, float* %tmp2536, i64 1
+ %tmp2538 = getelementptr inbounds float, float* %tmp2537, i64 1
+ %tmp2539 = getelementptr inbounds float, float* %tmp2538, i64 1
+ %tmp2540 = getelementptr inbounds float, float* %tmp2539, i64 1
+ %tmp2541 = getelementptr inbounds float, float* %tmp2540, i64 1
+ %tmp2542 = getelementptr inbounds float, float* %tmp2541, i64 1
+ %tmp2543 = getelementptr inbounds float, float* %tmp2542, i64 1
+ %tmp2544 = getelementptr inbounds float, float* %tmp2543, i64 1
+ %tmp2545 = getelementptr inbounds float, float* %tmp2544, i64 1
+ %tmp2546 = getelementptr inbounds float, float* %tmp2545, i64 1
+ %tmp2547 = getelementptr inbounds float, float* %tmp2546, i64 1
+ %tmp2548 = getelementptr inbounds float, float* %tmp2547, i64 1
+ %tmp2549 = getelementptr inbounds float, float* %tmp2548, i64 1
+ %tmp2550 = getelementptr inbounds float, float* %tmp2549, i64 1
+ %tmp2551 = getelementptr inbounds float, float* %tmp2550, i64 1
+ %tmp2552 = getelementptr inbounds float, float* %tmp2551, i64 1
+ %tmp2553 = getelementptr inbounds float, float* %tmp2552, i64 1
+ %tmp2554 = getelementptr inbounds float, float* %tmp2553, i64 1
+ %tmp2555 = getelementptr inbounds float, float* %tmp2554, i64 1
+ %tmp2556 = getelementptr inbounds float, float* %tmp2555, i64 1
+ %tmp2557 = getelementptr inbounds float, float* %tmp2556, i64 1
+ %tmp2558 = getelementptr inbounds float, float* %tmp2557, i64 1
+ %tmp2559 = getelementptr inbounds float, float* %tmp2558, i64 1
+ %tmp2560 = getelementptr inbounds float, float* %tmp2559, i64 1
+ %tmp2561 = getelementptr inbounds float, float* %tmp2560, i64 1
+ %tmp2562 = getelementptr inbounds float, float* %tmp2561, i64 1
+ %tmp2563 = getelementptr inbounds float, float* %tmp2562, i64 1
+ %tmp2564 = getelementptr inbounds float, float* %tmp2563, i64 1
+ %tmp2565 = getelementptr inbounds float, float* %tmp2564, i64 1
+ %tmp2566 = getelementptr inbounds float, float* %tmp2565, i64 1
+ %tmp2567 = getelementptr inbounds float, float* %tmp2566, i64 1
+ %tmp2568 = getelementptr inbounds float, float* %tmp2567, i64 1
+ %tmp2569 = getelementptr inbounds float, float* %tmp2568, i64 1
+ %tmp2570 = getelementptr inbounds float, float* %tmp2569, i64 1
+ %tmp2571 = getelementptr inbounds float, float* %tmp2570, i64 1
+ %tmp2572 = getelementptr inbounds float, float* %tmp2571, i64 1
+ %tmp2573 = getelementptr inbounds float, float* %tmp2572, i64 1
+ %tmp2574 = getelementptr inbounds float, float* %tmp2573, i64 1
+ %tmp2575 = getelementptr inbounds float, float* %tmp2574, i64 1
+ %tmp2576 = getelementptr inbounds float, float* %tmp2575, i64 1
+ %tmp2577 = getelementptr inbounds float, float* %tmp2576, i64 1
+ %tmp2578 = getelementptr inbounds float, float* %tmp2577, i64 1
+ %tmp2579 = getelementptr inbounds float, float* %tmp2578, i64 1
+ %tmp2580 = getelementptr inbounds float, float* %tmp2579, i64 1
+ %tmp2581 = getelementptr inbounds float, float* %tmp2580, i64 1
+ %tmp2582 = getelementptr inbounds float, float* %tmp2581, i64 1
+ %tmp2583 = getelementptr inbounds float, float* %tmp2582, i64 1
+ %tmp2584 = getelementptr inbounds float, float* %tmp2583, i64 1
+ %tmp2585 = getelementptr inbounds float, float* %tmp2584, i64 1
+ %tmp2586 = getelementptr inbounds float, float* %tmp2585, i64 1
+ %tmp2587 = getelementptr inbounds float, float* %tmp2586, i64 1
+ %tmp2588 = getelementptr inbounds float, float* %tmp2587, i64 1
+ %tmp2589 = getelementptr inbounds float, float* %tmp2588, i64 1
+ %tmp2590 = getelementptr inbounds float, float* %tmp2589, i64 1
+ %tmp2591 = getelementptr inbounds float, float* %tmp2590, i64 1
+ %tmp2592 = getelementptr inbounds float, float* %tmp2591, i64 1
+ %tmp2593 = getelementptr inbounds float, float* %tmp2592, i64 1
+ %tmp2594 = getelementptr inbounds float, float* %tmp2593, i64 1
+ %tmp2595 = getelementptr inbounds float, float* %tmp2594, i64 1
+ %tmp2596 = getelementptr inbounds float, float* %tmp2595, i64 1
+ %tmp2597 = getelementptr inbounds float, float* %tmp2596, i64 1
+ %tmp2598 = getelementptr inbounds float, float* %tmp2597, i64 1
+ %tmp2599 = getelementptr inbounds float, float* %tmp2598, i64 1
+ %tmp2600 = getelementptr inbounds float, float* %tmp2599, i64 1
+ %tmp2601 = getelementptr inbounds float, float* %tmp2600, i64 1
+ %tmp2602 = getelementptr inbounds float, float* %tmp2601, i64 1
+ %tmp2603 = getelementptr inbounds float, float* %tmp2602, i64 1
+ %tmp2604 = getelementptr inbounds float, float* %tmp2603, i64 1
+ %tmp2605 = getelementptr inbounds float, float* %tmp2604, i64 1
+ %tmp2606 = getelementptr inbounds float, float* %tmp2605, i64 1
+ %tmp2607 = getelementptr inbounds float, float* %tmp2606, i64 1
+ %tmp2608 = getelementptr inbounds float, float* %tmp2607, i64 1
+ %tmp2609 = getelementptr inbounds float, float* %tmp2608, i64 1
+ %tmp2610 = getelementptr inbounds float, float* %tmp2609, i64 1
+ %tmp2611 = getelementptr inbounds float, float* %tmp2610, i64 1
+ %tmp2612 = getelementptr inbounds float, float* %tmp2611, i64 1
+ %tmp2613 = getelementptr inbounds float, float* %tmp2612, i64 1
+ %tmp2614 = getelementptr inbounds float, float* %tmp2613, i64 1
+ %tmp2615 = getelementptr inbounds float, float* %tmp2614, i64 1
+ %tmp2616 = getelementptr inbounds float, float* %tmp2615, i64 1
+ %tmp2617 = getelementptr inbounds float, float* %tmp2616, i64 1
+ %tmp2618 = getelementptr inbounds float, float* %tmp2617, i64 1
+ %tmp2619 = getelementptr inbounds float, float* %tmp2618, i64 1
+ %tmp2620 = getelementptr inbounds float, float* %tmp2619, i64 1
+ %tmp2621 = getelementptr inbounds float, float* %tmp2620, i64 1
+ %tmp2622 = getelementptr inbounds float, float* %tmp2621, i64 1
+ %tmp2623 = getelementptr inbounds float, float* %tmp2622, i64 1
+ %tmp2624 = getelementptr inbounds float, float* %tmp2623, i64 1
+ %tmp2625 = getelementptr inbounds float, float* %tmp2624, i64 1
+ %tmp2626 = getelementptr inbounds float, float* %tmp2625, i64 1
+ %tmp2627 = getelementptr inbounds float, float* %tmp2626, i64 1
+ %tmp2628 = getelementptr inbounds float, float* %tmp2627, i64 1
+ %tmp2629 = getelementptr inbounds float, float* %tmp2628, i64 1
+ %tmp2630 = getelementptr inbounds float, float* %tmp2629, i64 1
+ %tmp2631 = getelementptr inbounds float, float* %tmp2630, i64 1
+ %tmp2632 = getelementptr inbounds float, float* %tmp2631, i64 1
+ %tmp2633 = getelementptr inbounds float, float* %tmp2632, i64 1
+ %tmp2634 = getelementptr inbounds float, float* %tmp2633, i64 1
+ %tmp2635 = getelementptr inbounds float, float* %tmp2634, i64 1
+ %tmp2636 = getelementptr inbounds float, float* %tmp2635, i64 1
+ %tmp2637 = getelementptr inbounds float, float* %tmp2636, i64 1
+ %tmp2638 = getelementptr inbounds float, float* %tmp2637, i64 1
+ %tmp2639 = getelementptr inbounds float, float* %tmp2638, i64 1
+ %tmp2640 = getelementptr inbounds float, float* %tmp2639, i64 1
+ %tmp2641 = getelementptr inbounds float, float* %tmp2640, i64 1
+ %tmp2642 = getelementptr inbounds float, float* %tmp2641, i64 1
+ %tmp2643 = getelementptr inbounds float, float* %tmp2642, i64 1
+ %tmp2644 = getelementptr inbounds float, float* %tmp2643, i64 1
+ %tmp2645 = getelementptr inbounds float, float* %tmp2644, i64 1
+ %tmp2646 = getelementptr inbounds float, float* %tmp2645, i64 1
+ %tmp2647 = getelementptr inbounds float, float* %tmp2646, i64 1
+ %tmp2648 = getelementptr inbounds float, float* %tmp2647, i64 1
+ %tmp2649 = getelementptr inbounds float, float* %tmp2648, i64 1
+ %tmp2650 = getelementptr inbounds float, float* %tmp2649, i64 1
+ %tmp2651 = getelementptr inbounds float, float* %tmp2650, i64 1
+ %tmp2652 = getelementptr inbounds float, float* %tmp2651, i64 1
+ %tmp2653 = getelementptr inbounds float, float* %tmp2652, i64 1
+ %tmp2654 = getelementptr inbounds float, float* %tmp2653, i64 1
+ %tmp2655 = getelementptr inbounds float, float* %tmp2654, i64 1
+ %tmp2656 = getelementptr inbounds float, float* %tmp2655, i64 1
+ %tmp2657 = getelementptr inbounds float, float* %tmp2656, i64 1
+ %tmp2658 = getelementptr inbounds float, float* %tmp2657, i64 1
+ %tmp2659 = getelementptr inbounds float, float* %tmp2658, i64 1
+ %tmp2660 = getelementptr inbounds float, float* %tmp2659, i64 1
+ %tmp2661 = getelementptr inbounds float, float* %tmp2660, i64 1
+ %tmp2662 = getelementptr inbounds float, float* %tmp2661, i64 1
+ %tmp2663 = getelementptr inbounds float, float* %tmp2662, i64 1
+ %tmp2664 = getelementptr inbounds float, float* %tmp2663, i64 1
+ %tmp2665 = getelementptr inbounds float, float* %tmp2664, i64 1
+ %tmp2666 = getelementptr inbounds float, float* %tmp2665, i64 1
+ %tmp2667 = getelementptr inbounds float, float* %tmp2666, i64 1
+ %tmp2668 = getelementptr inbounds float, float* %tmp2667, i64 1
+ %tmp2669 = getelementptr inbounds float, float* %tmp2668, i64 1
+ %tmp2670 = getelementptr inbounds float, float* %tmp2669, i64 1
+ %tmp2671 = getelementptr inbounds float, float* %tmp2670, i64 1
+ %tmp2672 = getelementptr inbounds float, float* %tmp2671, i64 1
+ %tmp2673 = getelementptr inbounds float, float* %tmp2672, i64 1
+ %tmp2674 = getelementptr inbounds float, float* %tmp2673, i64 1
+ %tmp2675 = getelementptr inbounds float, float* %tmp2674, i64 1
+ %tmp2676 = getelementptr inbounds float, float* %tmp2675, i64 1
+ %tmp2677 = getelementptr inbounds float, float* %tmp2676, i64 1
+ %tmp2678 = getelementptr inbounds float, float* %tmp2677, i64 1
+ %tmp2679 = getelementptr inbounds float, float* %tmp2678, i64 1
+ %tmp2680 = getelementptr inbounds float, float* %tmp2679, i64 1
+ %tmp2681 = getelementptr inbounds float, float* %tmp2680, i64 1
+ %tmp2682 = getelementptr inbounds float, float* %tmp2681, i64 1
+ %tmp2683 = getelementptr inbounds float, float* %tmp2682, i64 1
+ %tmp2684 = getelementptr inbounds float, float* %tmp2683, i64 1
+ %tmp2685 = getelementptr inbounds float, float* %tmp2684, i64 1
+ %tmp2686 = getelementptr inbounds float, float* %tmp2685, i64 1
+ %tmp2687 = getelementptr inbounds float, float* %tmp2686, i64 1
+ %tmp2688 = getelementptr inbounds float, float* %tmp2687, i64 1
+ %tmp2689 = getelementptr inbounds float, float* %tmp2688, i64 1
+ %tmp2690 = getelementptr inbounds float, float* %tmp2689, i64 1
+ %tmp2691 = getelementptr inbounds float, float* %tmp2690, i64 1
+ %tmp2692 = getelementptr inbounds float, float* %tmp2691, i64 1
+ %tmp2693 = getelementptr inbounds float, float* %tmp2692, i64 1
+ %tmp2694 = getelementptr inbounds float, float* %tmp2693, i64 1
+ %tmp2695 = getelementptr inbounds float, float* %tmp2694, i64 1
+ %tmp2696 = getelementptr inbounds float, float* %tmp2695, i64 1
+ %tmp2697 = getelementptr inbounds float, float* %tmp2696, i64 1
+ %tmp2698 = getelementptr inbounds float, float* %tmp2697, i64 1
+ %tmp2699 = getelementptr inbounds float, float* %tmp2698, i64 1
+ %tmp2700 = getelementptr inbounds float, float* %tmp2699, i64 1
+ %tmp2701 = getelementptr inbounds float, float* %tmp2700, i64 1
+ %tmp2702 = getelementptr inbounds float, float* %tmp2701, i64 1
+ %tmp2703 = getelementptr inbounds float, float* %tmp2702, i64 1
+ %tmp2704 = getelementptr inbounds float, float* %tmp2703, i64 1
+ %tmp2705 = getelementptr inbounds float, float* %tmp2704, i64 1
+ %tmp2706 = getelementptr inbounds float, float* %tmp2705, i64 1
+ %tmp2707 = getelementptr inbounds float, float* %tmp2706, i64 1
+ %tmp2708 = getelementptr inbounds float, float* %tmp2707, i64 1
+ %tmp2709 = getelementptr inbounds float, float* %tmp2708, i64 1
+ %tmp2710 = getelementptr inbounds float, float* %tmp2709, i64 1
+ %tmp2711 = getelementptr inbounds float, float* %tmp2710, i64 1
+ %tmp2712 = getelementptr inbounds float, float* %tmp2711, i64 1
+ %tmp2713 = getelementptr inbounds float, float* %tmp2712, i64 1
+ %tmp2714 = getelementptr inbounds float, float* %tmp2713, i64 1
+ %tmp2715 = getelementptr inbounds float, float* %tmp2714, i64 1
+ %tmp2716 = getelementptr inbounds float, float* %tmp2715, i64 1
+ %tmp2717 = getelementptr inbounds float, float* %tmp2716, i64 1
+ %tmp2718 = getelementptr inbounds float, float* %tmp2717, i64 1
+ %tmp2719 = getelementptr inbounds float, float* %tmp2718, i64 1
+ %tmp2720 = getelementptr inbounds float, float* %tmp2719, i64 1
+ %tmp2721 = getelementptr inbounds float, float* %tmp2720, i64 1
+ %tmp2722 = getelementptr inbounds float, float* %tmp2721, i64 1
+ %tmp2723 = getelementptr inbounds float, float* %tmp2722, i64 1
+ %tmp2724 = getelementptr inbounds float, float* %tmp2723, i64 1
+ %tmp2725 = getelementptr inbounds float, float* %tmp2724, i64 1
+ %tmp2726 = getelementptr inbounds float, float* %tmp2725, i64 1
+ %tmp2727 = getelementptr inbounds float, float* %tmp2726, i64 1
+ %tmp2728 = getelementptr inbounds float, float* %tmp2727, i64 1
+ %tmp2729 = getelementptr inbounds float, float* %tmp2728, i64 1
+ %tmp2730 = getelementptr inbounds float, float* %tmp2729, i64 1
+ %tmp2731 = getelementptr inbounds float, float* %tmp2730, i64 1
+ %tmp2732 = getelementptr inbounds float, float* %tmp2731, i64 1
+ %tmp2733 = getelementptr inbounds float, float* %tmp2732, i64 1
+ %tmp2734 = getelementptr inbounds float, float* %tmp2733, i64 1
+ %tmp2735 = getelementptr inbounds float, float* %tmp2734, i64 1
+ %tmp2736 = getelementptr inbounds float, float* %tmp2735, i64 1
+ %tmp2737 = getelementptr inbounds float, float* %tmp2736, i64 1
+ %tmp2738 = getelementptr inbounds float, float* %tmp2737, i64 1
+ %tmp2739 = getelementptr inbounds float, float* %tmp2738, i64 1
+ %tmp2740 = getelementptr inbounds float, float* %tmp2739, i64 1
+ %tmp2741 = getelementptr inbounds float, float* %tmp2740, i64 1
+ %tmp2742 = getelementptr inbounds float, float* %tmp2741, i64 1
+ %tmp2743 = getelementptr inbounds float, float* %tmp2742, i64 1
+ %tmp2744 = getelementptr inbounds float, float* %tmp2743, i64 1
+ %tmp2745 = getelementptr inbounds float, float* %tmp2744, i64 1
+ %tmp2746 = getelementptr inbounds float, float* %tmp2745, i64 1
+ %tmp2747 = getelementptr inbounds float, float* %tmp2746, i64 1
+ %tmp2748 = getelementptr inbounds float, float* %tmp2747, i64 1
+ %tmp2749 = getelementptr inbounds float, float* %tmp2748, i64 1
+ %tmp2750 = getelementptr inbounds float, float* %tmp2749, i64 1
+ %tmp2751 = getelementptr inbounds float, float* %tmp2750, i64 1
+ %tmp2752 = getelementptr inbounds float, float* %tmp2751, i64 1
+ %tmp2753 = getelementptr inbounds float, float* %tmp2752, i64 1
+ %tmp2754 = getelementptr inbounds float, float* %tmp2753, i64 1
+ %tmp2755 = getelementptr inbounds float, float* %tmp2754, i64 1
+ %tmp2756 = getelementptr inbounds float, float* %tmp2755, i64 1
+ %tmp2757 = getelementptr inbounds float, float* %tmp2756, i64 1
+ %tmp2758 = getelementptr inbounds float, float* %tmp2757, i64 1
+ %tmp2759 = getelementptr inbounds float, float* %tmp2758, i64 1
+ %tmp2760 = getelementptr inbounds float, float* %tmp2759, i64 1
+ %tmp2761 = getelementptr inbounds float, float* %tmp2760, i64 1
+ %tmp2762 = getelementptr inbounds float, float* %tmp2761, i64 1
+ %tmp2763 = getelementptr inbounds float, float* %tmp2762, i64 1
+ %tmp2764 = getelementptr inbounds float, float* %tmp2763, i64 1
+ %tmp2765 = getelementptr inbounds float, float* %tmp2764, i64 1
+ %tmp2766 = getelementptr inbounds float, float* %tmp2765, i64 1
+ %tmp2767 = getelementptr inbounds float, float* %tmp2766, i64 1
+ %tmp2768 = getelementptr inbounds float, float* %tmp2767, i64 1
+ %tmp2769 = getelementptr inbounds float, float* %tmp2768, i64 1
+ %tmp2770 = getelementptr inbounds float, float* %tmp2769, i64 1
+ %tmp2771 = getelementptr inbounds float, float* %tmp2770, i64 1
+ %tmp2772 = getelementptr inbounds float, float* %tmp2771, i64 1
+ %tmp2773 = getelementptr inbounds float, float* %tmp2772, i64 1
+ %tmp2774 = getelementptr inbounds float, float* %tmp2773, i64 1
+ %tmp2775 = getelementptr inbounds float, float* %tmp2774, i64 1
+ %tmp2776 = getelementptr inbounds float, float* %tmp2775, i64 1
+ %tmp2777 = getelementptr inbounds float, float* %tmp2776, i64 1
+ %tmp2778 = getelementptr inbounds float, float* %tmp2777, i64 1
+ %tmp2779 = getelementptr inbounds float, float* %tmp2778, i64 1
+ %tmp2780 = getelementptr inbounds float, float* %tmp2779, i64 1
+ %tmp2781 = getelementptr inbounds float, float* %tmp2780, i64 1
+ %tmp2782 = getelementptr inbounds float, float* %tmp2781, i64 1
+ %tmp2783 = getelementptr inbounds float, float* %tmp2782, i64 1
+ %tmp2784 = getelementptr inbounds float, float* %tmp2783, i64 1
+ %tmp2785 = getelementptr inbounds float, float* %tmp2784, i64 1
+ %tmp2786 = getelementptr inbounds float, float* %tmp2785, i64 1
+ %tmp2787 = getelementptr inbounds float, float* %tmp2786, i64 1
+ %tmp2788 = getelementptr inbounds float, float* %tmp2787, i64 1
+ %tmp2789 = getelementptr inbounds float, float* %tmp2788, i64 1
+ %tmp2790 = getelementptr inbounds float, float* %tmp2789, i64 1
+ %tmp2791 = getelementptr inbounds float, float* %tmp2790, i64 1
+ %tmp2792 = getelementptr inbounds float, float* %tmp2791, i64 1
+ %tmp2793 = getelementptr inbounds float, float* %tmp2792, i64 1
+ %tmp2794 = getelementptr inbounds float, float* %tmp2793, i64 1
+ %tmp2795 = getelementptr inbounds float, float* %tmp2794, i64 1
+ %tmp2796 = getelementptr inbounds float, float* %tmp2795, i64 1
+ %tmp2797 = getelementptr inbounds float, float* %tmp2796, i64 1
+ %tmp2798 = getelementptr inbounds float, float* %tmp2797, i64 1
+ %tmp2799 = getelementptr inbounds float, float* %tmp2798, i64 1
+ %tmp2800 = getelementptr inbounds float, float* %tmp2799, i64 1
+ %tmp2801 = getelementptr inbounds float, float* %tmp2800, i64 1
+ %tmp2802 = getelementptr inbounds float, float* %tmp2801, i64 1
+ %tmp2803 = getelementptr inbounds float, float* %tmp2802, i64 1
+ %tmp2804 = getelementptr inbounds float, float* %tmp2803, i64 1
+ %tmp2805 = getelementptr inbounds float, float* %tmp2804, i64 1
+ %tmp2806 = getelementptr inbounds float, float* %tmp2805, i64 1
+ %tmp2807 = getelementptr inbounds float, float* %tmp2806, i64 1
+ %tmp2808 = getelementptr inbounds float, float* %tmp2807, i64 1
+ %tmp2809 = getelementptr inbounds float, float* %tmp2808, i64 1
+ %tmp2810 = getelementptr inbounds float, float* %tmp2809, i64 1
+ %tmp2811 = getelementptr inbounds float, float* %tmp2810, i64 1
+ %tmp2812 = getelementptr inbounds float, float* %tmp2811, i64 1
+ %tmp2813 = getelementptr inbounds float, float* %tmp2812, i64 1
+ %tmp2814 = getelementptr inbounds float, float* %tmp2813, i64 1
+ %tmp2815 = getelementptr inbounds float, float* %tmp2814, i64 1
+ %tmp2816 = getelementptr inbounds float, float* %tmp2815, i64 1
+ %tmp2817 = getelementptr inbounds float, float* %tmp2816, i64 1
+ %tmp2818 = getelementptr inbounds float, float* %tmp2817, i64 1
+ %tmp2819 = getelementptr inbounds float, float* %tmp2818, i64 1
+ %tmp2820 = getelementptr inbounds float, float* %tmp2819, i64 1
+ %tmp2821 = getelementptr inbounds float, float* %tmp2820, i64 1
+ %tmp2822 = getelementptr inbounds float, float* %tmp2821, i64 1
+ %tmp2823 = getelementptr inbounds float, float* %tmp2822, i64 1
+ %tmp2824 = getelementptr inbounds float, float* %tmp2823, i64 1
+ %tmp2825 = getelementptr inbounds float, float* %tmp2824, i64 1
+ %tmp2826 = getelementptr inbounds float, float* %tmp2825, i64 1
+ %tmp2827 = getelementptr inbounds float, float* %tmp2826, i64 1
+ %tmp2828 = getelementptr inbounds float, float* %tmp2827, i64 1
+ %tmp2829 = getelementptr inbounds float, float* %tmp2828, i64 1
+ %tmp2830 = getelementptr inbounds float, float* %tmp2829, i64 1
+ %tmp2831 = getelementptr inbounds float, float* %tmp2830, i64 1
+ %tmp2832 = getelementptr inbounds float, float* %tmp2831, i64 1
+ %tmp2833 = getelementptr inbounds float, float* %tmp2832, i64 1
+ %tmp2834 = getelementptr inbounds float, float* %tmp2833, i64 1
+ %tmp2835 = getelementptr inbounds float, float* %tmp2834, i64 1
+ %tmp2836 = getelementptr inbounds float, float* %tmp2835, i64 1
+ %tmp2837 = getelementptr inbounds float, float* %tmp2836, i64 1
+ %tmp2838 = getelementptr inbounds float, float* %tmp2837, i64 1
+ %tmp2839 = getelementptr inbounds float, float* %tmp2838, i64 1
+ %tmp2840 = getelementptr inbounds float, float* %tmp2839, i64 1
+ %tmp2841 = getelementptr inbounds float, float* %tmp2840, i64 1
+ %tmp2842 = getelementptr inbounds float, float* %tmp2841, i64 1
+ %tmp2843 = getelementptr inbounds float, float* %tmp2842, i64 1
+ %tmp2844 = getelementptr inbounds float, float* %tmp2843, i64 1
+ %tmp2845 = getelementptr inbounds float, float* %tmp2844, i64 1
+ %tmp2846 = getelementptr inbounds float, float* %tmp2845, i64 1
+ %tmp2847 = getelementptr inbounds float, float* %tmp2846, i64 1
+ %tmp2848 = getelementptr inbounds float, float* %tmp2847, i64 1
+ %tmp2849 = getelementptr inbounds float, float* %tmp2848, i64 1
+ %tmp2850 = getelementptr inbounds float, float* %tmp2849, i64 1
+ %tmp2851 = getelementptr inbounds float, float* %tmp2850, i64 1
+ %tmp2852 = getelementptr inbounds float, float* %tmp2851, i64 1
+ %tmp2853 = getelementptr inbounds float, float* %tmp2852, i64 1
+ %tmp2854 = getelementptr inbounds float, float* %tmp2853, i64 1
+ %tmp2855 = getelementptr inbounds float, float* %tmp2854, i64 1
+ %tmp2856 = getelementptr inbounds float, float* %tmp2855, i64 1
+ %tmp2857 = getelementptr inbounds float, float* %tmp2856, i64 1
+ %tmp2858 = getelementptr inbounds float, float* %tmp2857, i64 1
+ %tmp2859 = getelementptr inbounds float, float* %tmp2858, i64 1
+ %tmp2860 = getelementptr inbounds float, float* %tmp2859, i64 1
+ %tmp2861 = getelementptr inbounds float, float* %tmp2860, i64 1
+ %tmp2862 = getelementptr inbounds float, float* %tmp2861, i64 1
+ %tmp2863 = getelementptr inbounds float, float* %tmp2862, i64 1
+ %tmp2864 = getelementptr inbounds float, float* %tmp2863, i64 1
+ %tmp2865 = getelementptr inbounds float, float* %tmp2864, i64 1
+ %tmp2866 = getelementptr inbounds float, float* %tmp2865, i64 1
+ %tmp2867 = getelementptr inbounds float, float* %tmp2866, i64 1
+ %tmp2868 = getelementptr inbounds float, float* %tmp2867, i64 1
+ %tmp2869 = getelementptr inbounds float, float* %tmp2868, i64 1
+ %tmp2870 = getelementptr inbounds float, float* %tmp2869, i64 1
+ %tmp2871 = getelementptr inbounds float, float* %tmp2870, i64 1
+ %tmp2872 = getelementptr inbounds float, float* %tmp2871, i64 1
+ %tmp2873 = getelementptr inbounds float, float* %tmp2872, i64 1
+ %tmp2874 = getelementptr inbounds float, float* %tmp2873, i64 1
+ %tmp2875 = getelementptr inbounds float, float* %tmp2874, i64 1
+ %tmp2876 = getelementptr inbounds float, float* %tmp2875, i64 1
+ %tmp2877 = getelementptr inbounds float, float* %tmp2876, i64 1
+ %tmp2878 = getelementptr inbounds float, float* %tmp2877, i64 1
+ %tmp2879 = getelementptr inbounds float, float* %tmp2878, i64 1
+ %tmp2880 = getelementptr inbounds float, float* %tmp2879, i64 1
+ %tmp2881 = getelementptr inbounds float, float* %tmp2880, i64 1
+ %tmp2882 = getelementptr inbounds float, float* %tmp2881, i64 1
+ %tmp2883 = getelementptr inbounds float, float* %tmp2882, i64 1
+ %tmp2884 = getelementptr inbounds float, float* %tmp2883, i64 1
+ %tmp2885 = getelementptr inbounds float, float* %tmp2884, i64 1
+ %tmp2886 = getelementptr inbounds float, float* %tmp2885, i64 1
+ %tmp2887 = getelementptr inbounds float, float* %tmp2886, i64 1
+ %tmp2888 = getelementptr inbounds float, float* %tmp2887, i64 1
+ %tmp2889 = getelementptr inbounds float, float* %tmp2888, i64 1
+ %tmp2890 = getelementptr inbounds float, float* %tmp2889, i64 1
+ %tmp2891 = getelementptr inbounds float, float* %tmp2890, i64 1
+ %tmp2892 = getelementptr inbounds float, float* %tmp2891, i64 1
+ %tmp2893 = getelementptr inbounds float, float* %tmp2892, i64 1
+ %tmp2894 = getelementptr inbounds float, float* %tmp2893, i64 1
+ %tmp2895 = getelementptr inbounds float, float* %tmp2894, i64 1
+ %tmp2896 = getelementptr inbounds float, float* %tmp2895, i64 1
+ %tmp2897 = getelementptr inbounds float, float* %tmp2896, i64 1
+ %tmp2898 = getelementptr inbounds float, float* %tmp2897, i64 1
+ %tmp2899 = getelementptr inbounds float, float* %tmp2898, i64 1
+ %tmp2900 = getelementptr inbounds float, float* %tmp2899, i64 1
+ %tmp2901 = getelementptr inbounds float, float* %tmp2900, i64 1
+ %tmp2902 = getelementptr inbounds float, float* %tmp2901, i64 1
+ %tmp2903 = getelementptr inbounds float, float* %tmp2902, i64 1
+ %tmp2904 = getelementptr inbounds float, float* %tmp2903, i64 1
+ %tmp2905 = getelementptr inbounds float, float* %tmp2904, i64 1
+ %tmp2906 = getelementptr inbounds float, float* %tmp2905, i64 1
+ %tmp2907 = getelementptr inbounds float, float* %tmp2906, i64 1
+ %tmp2908 = getelementptr inbounds float, float* %tmp2907, i64 1
+ %tmp2909 = getelementptr inbounds float, float* %tmp2908, i64 1
+ %tmp2910 = getelementptr inbounds float, float* %tmp2909, i64 1
+ %tmp2911 = getelementptr inbounds float, float* %tmp2910, i64 1
+ %tmp2912 = getelementptr inbounds float, float* %tmp2911, i64 1
+ %tmp2913 = getelementptr inbounds float, float* %tmp2912, i64 1
+ %tmp2914 = getelementptr inbounds float, float* %tmp2913, i64 1
+ %tmp2915 = getelementptr inbounds float, float* %tmp2914, i64 1
+ %tmp2916 = getelementptr inbounds float, float* %tmp2915, i64 1
+ %tmp2917 = getelementptr inbounds float, float* %tmp2916, i64 1
+ %tmp2918 = getelementptr inbounds float, float* %tmp2917, i64 1
+ %tmp2919 = getelementptr inbounds float, float* %tmp2918, i64 1
+ %tmp2920 = getelementptr inbounds float, float* %tmp2919, i64 1
+ %tmp2921 = getelementptr inbounds float, float* %tmp2920, i64 1
+ %tmp2922 = getelementptr inbounds float, float* %tmp2921, i64 1
+ %tmp2923 = getelementptr inbounds float, float* %tmp2922, i64 1
+ %tmp2924 = getelementptr inbounds float, float* %tmp2923, i64 1
+ %tmp2925 = getelementptr inbounds float, float* %tmp2924, i64 1
+ %tmp2926 = getelementptr inbounds float, float* %tmp2925, i64 1
+ %tmp2927 = getelementptr inbounds float, float* %tmp2926, i64 1
+ %tmp2928 = getelementptr inbounds float, float* %tmp2927, i64 1
+ %tmp2929 = getelementptr inbounds float, float* %tmp2928, i64 1
+ %tmp2930 = getelementptr inbounds float, float* %tmp2929, i64 1
+ %tmp2931 = getelementptr inbounds float, float* %tmp2930, i64 1
+ %tmp2932 = getelementptr inbounds float, float* %tmp2931, i64 1
+ %tmp2933 = getelementptr inbounds float, float* %tmp2932, i64 1
+ %tmp2934 = getelementptr inbounds float, float* %tmp2933, i64 1
+ %tmp2935 = getelementptr inbounds float, float* %tmp2934, i64 1
+ %tmp2936 = getelementptr inbounds float, float* %tmp2935, i64 1
+ %tmp2937 = getelementptr inbounds float, float* %tmp2936, i64 1
+ %tmp2938 = getelementptr inbounds float, float* %tmp2937, i64 1
+ %tmp2939 = getelementptr inbounds float, float* %tmp2938, i64 1
+ %tmp2940 = getelementptr inbounds float, float* %tmp2939, i64 1
+ %tmp2941 = getelementptr inbounds float, float* %tmp2940, i64 1
+ %tmp2942 = getelementptr inbounds float, float* %tmp2941, i64 1
+ %tmp2943 = getelementptr inbounds float, float* %tmp2942, i64 1
+ %tmp2944 = getelementptr inbounds float, float* %tmp2943, i64 1
+ %tmp2945 = getelementptr inbounds float, float* %tmp2944, i64 1
+ %tmp2946 = getelementptr inbounds float, float* %tmp2945, i64 1
+ %tmp2947 = getelementptr inbounds float, float* %tmp2946, i64 1
+ %tmp2948 = getelementptr inbounds float, float* %tmp2947, i64 1
+ %tmp2949 = getelementptr inbounds float, float* %tmp2948, i64 1
+ %tmp2950 = getelementptr inbounds float, float* %tmp2949, i64 1
+ %tmp2951 = getelementptr inbounds float, float* %tmp2950, i64 1
+ %tmp2952 = getelementptr inbounds float, float* %tmp2951, i64 1
+ %tmp2953 = getelementptr inbounds float, float* %tmp2952, i64 1
+ %tmp2954 = getelementptr inbounds float, float* %tmp2953, i64 1
+ %tmp2955 = getelementptr inbounds float, float* %tmp2954, i64 1
+ %tmp2956 = getelementptr inbounds float, float* %tmp2955, i64 1
+ %tmp2957 = getelementptr inbounds float, float* %tmp2956, i64 1
+ %tmp2958 = getelementptr inbounds float, float* %tmp2957, i64 1
+ %tmp2959 = getelementptr inbounds float, float* %tmp2958, i64 1
+ %tmp2960 = getelementptr inbounds float, float* %tmp2959, i64 1
+ %tmp2961 = getelementptr inbounds float, float* %tmp2960, i64 1
+ %tmp2962 = getelementptr inbounds float, float* %tmp2961, i64 1
+ %tmp2963 = getelementptr inbounds float, float* %tmp2962, i64 1
+ %tmp2964 = getelementptr inbounds float, float* %tmp2963, i64 1
+ %tmp2965 = getelementptr inbounds float, float* %tmp2964, i64 1
+ %tmp2966 = getelementptr inbounds float, float* %tmp2965, i64 1
+ %tmp2967 = getelementptr inbounds float, float* %tmp2966, i64 1
+ %tmp2968 = getelementptr inbounds float, float* %tmp2967, i64 1
+ %tmp2969 = getelementptr inbounds float, float* %tmp2968, i64 1
+ %tmp2970 = getelementptr inbounds float, float* %tmp2969, i64 1
+ %tmp2971 = getelementptr inbounds float, float* %tmp2970, i64 1
+ %tmp2972 = getelementptr inbounds float, float* %tmp2971, i64 1
+ %tmp2973 = getelementptr inbounds float, float* %tmp2972, i64 1
+ %tmp2974 = getelementptr inbounds float, float* %tmp2973, i64 1
+ %tmp2975 = getelementptr inbounds float, float* %tmp2974, i64 1
+ %tmp2976 = getelementptr inbounds float, float* %tmp2975, i64 1
+ %tmp2977 = getelementptr inbounds float, float* %tmp2976, i64 1
+ %tmp2978 = getelementptr inbounds float, float* %tmp2977, i64 1
+ %tmp2979 = getelementptr inbounds float, float* %tmp2978, i64 1
+ %tmp2980 = getelementptr inbounds float, float* %tmp2979, i64 1
+ %tmp2981 = getelementptr inbounds float, float* %tmp2980, i64 1
+ %tmp2982 = getelementptr inbounds float, float* %tmp2981, i64 1
+ %tmp2983 = getelementptr inbounds float, float* %tmp2982, i64 1
+ %tmp2984 = getelementptr inbounds float, float* %tmp2983, i64 1
+ %tmp2985 = getelementptr inbounds float, float* %tmp2984, i64 1
+ %tmp2986 = getelementptr inbounds float, float* %tmp2985, i64 1
+ %tmp2987 = getelementptr inbounds float, float* %tmp2986, i64 1
+ %tmp2988 = getelementptr inbounds float, float* %tmp2987, i64 1
+ %tmp2989 = getelementptr inbounds float, float* %tmp2988, i64 1
+ %tmp2990 = getelementptr inbounds float, float* %tmp2989, i64 1
+ %tmp2991 = getelementptr inbounds float, float* %tmp2990, i64 1
+ %tmp2992 = getelementptr inbounds float, float* %tmp2991, i64 1
+ %tmp2993 = getelementptr inbounds float, float* %tmp2992, i64 1
+ %tmp2994 = getelementptr inbounds float, float* %tmp2993, i64 1
+ %tmp2995 = getelementptr inbounds float, float* %tmp2994, i64 1
+ %tmp2996 = getelementptr inbounds float, float* %tmp2995, i64 1
+ %tmp2997 = getelementptr inbounds float, float* %tmp2996, i64 1
+ %tmp2998 = getelementptr inbounds float, float* %tmp2997, i64 1
+ %tmp2999 = getelementptr inbounds float, float* %tmp2998, i64 1
+ %tmp3000 = getelementptr inbounds float, float* %tmp2999, i64 1
+ %tmp3001 = getelementptr inbounds float, float* %tmp3000, i64 1
+ %tmp3002 = getelementptr inbounds float, float* %tmp3001, i64 1
+ %tmp3003 = getelementptr inbounds float, float* %tmp3002, i64 1
+ %tmp3004 = getelementptr inbounds float, float* %tmp3003, i64 1
+ %tmp3005 = getelementptr inbounds float, float* %tmp3004, i64 1
+ %tmp3006 = getelementptr inbounds float, float* %tmp3005, i64 1
+ %tmp3007 = getelementptr inbounds float, float* %tmp3006, i64 1
+ %tmp3008 = getelementptr inbounds float, float* %tmp3007, i64 1
+ %tmp3009 = getelementptr inbounds float, float* %tmp3008, i64 1
+ %tmp3010 = getelementptr inbounds float, float* %tmp3009, i64 1
+ %tmp3011 = getelementptr inbounds float, float* %tmp3010, i64 1
+ %tmp3012 = getelementptr inbounds float, float* %tmp3011, i64 1
+ %tmp3013 = getelementptr inbounds float, float* %tmp3012, i64 1
+ %tmp3014 = getelementptr inbounds float, float* %tmp3013, i64 1
+ %tmp3015 = getelementptr inbounds float, float* %tmp3014, i64 1
+ %tmp3016 = getelementptr inbounds float, float* %tmp3015, i64 1
+ %tmp3017 = getelementptr inbounds float, float* %tmp3016, i64 1
+ %tmp3018 = getelementptr inbounds float, float* %tmp3017, i64 1
+ %tmp3019 = getelementptr inbounds float, float* %tmp3018, i64 1
+ %tmp3020 = getelementptr inbounds float, float* %tmp3019, i64 1
+ %tmp3021 = getelementptr inbounds float, float* %tmp3020, i64 1
+ %tmp3022 = getelementptr inbounds float, float* %tmp3021, i64 1
+ %tmp3023 = getelementptr inbounds float, float* %tmp3022, i64 1
+ %tmp3024 = getelementptr inbounds float, float* %tmp3023, i64 1
+ %tmp3025 = getelementptr inbounds float, float* %tmp3024, i64 1
+ %tmp3026 = getelementptr inbounds float, float* %tmp3025, i64 1
+ %tmp3027 = getelementptr inbounds float, float* %tmp3026, i64 1
+ %tmp3028 = getelementptr inbounds float, float* %tmp3027, i64 1
+ %tmp3029 = getelementptr inbounds float, float* %tmp3028, i64 1
+ %tmp3030 = getelementptr inbounds float, float* %tmp3029, i64 1
+ %tmp3031 = getelementptr inbounds float, float* %tmp3030, i64 1
+ %tmp3032 = getelementptr inbounds float, float* %tmp3031, i64 1
+ %tmp3033 = getelementptr inbounds float, float* %tmp3032, i64 1
+ %tmp3034 = getelementptr inbounds float, float* %tmp3033, i64 1
+ %tmp3035 = getelementptr inbounds float, float* %tmp3034, i64 1
+ %tmp3036 = getelementptr inbounds float, float* %tmp3035, i64 1
+ %tmp3037 = getelementptr inbounds float, float* %tmp3036, i64 1
+ %tmp3038 = getelementptr inbounds float, float* %tmp3037, i64 1
+ %tmp3039 = getelementptr inbounds float, float* %tmp3038, i64 1
+ %tmp3040 = getelementptr inbounds float, float* %tmp3039, i64 1
+ %tmp3041 = getelementptr inbounds float, float* %tmp3040, i64 1
+ %tmp3042 = getelementptr inbounds float, float* %tmp3041, i64 1
+ %tmp3043 = getelementptr inbounds float, float* %tmp3042, i64 1
+ %tmp3044 = getelementptr inbounds float, float* %tmp3043, i64 1
+ %tmp3045 = getelementptr inbounds float, float* %tmp3044, i64 1
+ %tmp3046 = getelementptr inbounds float, float* %tmp3045, i64 1
+ %tmp3047 = getelementptr inbounds float, float* %tmp3046, i64 1
+ %tmp3048 = getelementptr inbounds float, float* %tmp3047, i64 1
+ %tmp3049 = getelementptr inbounds float, float* %tmp3048, i64 1
+ %tmp3050 = getelementptr inbounds float, float* %tmp3049, i64 1
+ %tmp3051 = getelementptr inbounds float, float* %tmp3050, i64 1
+ %tmp3052 = getelementptr inbounds float, float* %tmp3051, i64 1
+ %tmp3053 = getelementptr inbounds float, float* %tmp3052, i64 1
+ %tmp3054 = getelementptr inbounds float, float* %tmp3053, i64 1
+ %tmp3055 = getelementptr inbounds float, float* %tmp3054, i64 1
+ %tmp3056 = getelementptr inbounds float, float* %tmp3055, i64 1
+ %tmp3057 = getelementptr inbounds float, float* %tmp3056, i64 1
+ %tmp3058 = getelementptr inbounds float, float* %tmp3057, i64 1
+ %tmp3059 = getelementptr inbounds float, float* %tmp3058, i64 1
+ %tmp3060 = getelementptr inbounds float, float* %tmp3059, i64 1
+ %tmp3061 = getelementptr inbounds float, float* %tmp3060, i64 1
+ %tmp3062 = getelementptr inbounds float, float* %tmp3061, i64 1
+ %tmp3063 = getelementptr inbounds float, float* %tmp3062, i64 1
+ %tmp3064 = getelementptr inbounds float, float* %tmp3063, i64 1
+ %tmp3065 = getelementptr inbounds float, float* %tmp3064, i64 1
+ %tmp3066 = getelementptr inbounds float, float* %tmp3065, i64 1
+ %tmp3067 = getelementptr inbounds float, float* %tmp3066, i64 1
+ %tmp3068 = getelementptr inbounds float, float* %tmp3067, i64 1
+ %tmp3069 = getelementptr inbounds float, float* %tmp3068, i64 1
+ %tmp3070 = getelementptr inbounds float, float* %tmp3069, i64 1
+ %tmp3071 = getelementptr inbounds float, float* %tmp3070, i64 1
+ %tmp3072 = getelementptr inbounds float, float* %tmp3071, i64 1
+ %tmp3073 = getelementptr inbounds float, float* %tmp3072, i64 1
+ %tmp3074 = getelementptr inbounds float, float* %tmp3073, i64 1
+ %tmp3075 = getelementptr inbounds float, float* %tmp3074, i64 1
+ %tmp3076 = getelementptr inbounds float, float* %tmp3075, i64 1
+ %tmp3077 = getelementptr inbounds float, float* %tmp3076, i64 1
+ %tmp3078 = getelementptr inbounds float, float* %tmp3077, i64 1
+ %tmp3079 = getelementptr inbounds float, float* %tmp3078, i64 1
+ %tmp3080 = getelementptr inbounds float, float* %tmp3079, i64 1
+ %tmp3081 = getelementptr inbounds float, float* %tmp3080, i64 1
+ %tmp3082 = getelementptr inbounds float, float* %tmp3081, i64 1
+ %tmp3083 = getelementptr inbounds float, float* %tmp3082, i64 1
+ %tmp3084 = getelementptr inbounds float, float* %tmp3083, i64 1
+ %tmp3085 = getelementptr inbounds float, float* %tmp3084, i64 1
+ %tmp3086 = getelementptr inbounds float, float* %tmp3085, i64 1
+ %tmp3087 = getelementptr inbounds float, float* %tmp3086, i64 1
+ %tmp3088 = getelementptr inbounds float, float* %tmp3087, i64 1
+ %tmp3089 = getelementptr inbounds float, float* %tmp3088, i64 1
+ %tmp3090 = getelementptr inbounds float, float* %tmp3089, i64 1
+ %tmp3091 = getelementptr inbounds float, float* %tmp3090, i64 1
+ %tmp3092 = getelementptr inbounds float, float* %tmp3091, i64 1
+ %tmp3093 = getelementptr inbounds float, float* %tmp3092, i64 1
+ %tmp3094 = getelementptr inbounds float, float* %tmp3093, i64 1
+ %tmp3095 = getelementptr inbounds float, float* %tmp3094, i64 1
+ %tmp3096 = getelementptr inbounds float, float* %tmp3095, i64 1
+ %tmp3097 = getelementptr inbounds float, float* %tmp3096, i64 1
+ %tmp3098 = getelementptr inbounds float, float* %tmp3097, i64 1
+ %tmp3099 = getelementptr inbounds float, float* %tmp3098, i64 1
+ %tmp3100 = getelementptr inbounds float, float* %tmp3099, i64 1
+ %tmp3101 = getelementptr inbounds float, float* %tmp3100, i64 1
+ %tmp3102 = getelementptr inbounds float, float* %tmp3101, i64 1
+ %tmp3103 = getelementptr inbounds float, float* %tmp3102, i64 1
+ %tmp3104 = getelementptr inbounds float, float* %tmp3103, i64 1
+ %tmp3105 = getelementptr inbounds float, float* %tmp3104, i64 1
+ %tmp3106 = getelementptr inbounds float, float* %tmp3105, i64 1
+ %tmp3107 = getelementptr inbounds float, float* %tmp3106, i64 1
+ %tmp3108 = getelementptr inbounds float, float* %tmp3107, i64 1
+ %tmp3109 = getelementptr inbounds float, float* %tmp3108, i64 1
+ %tmp3110 = getelementptr inbounds float, float* %tmp3109, i64 1
+ %tmp3111 = getelementptr inbounds float, float* %tmp3110, i64 1
+ %tmp3112 = getelementptr inbounds float, float* %tmp3111, i64 1
+ %tmp3113 = getelementptr inbounds float, float* %tmp3112, i64 1
+ %tmp3114 = getelementptr inbounds float, float* %tmp3113, i64 1
+ %tmp3115 = getelementptr inbounds float, float* %tmp3114, i64 1
+ %tmp3116 = getelementptr inbounds float, float* %tmp3115, i64 1
+ %tmp3117 = getelementptr inbounds float, float* %tmp3116, i64 1
+ %tmp3118 = getelementptr inbounds float, float* %tmp3117, i64 1
+ %tmp3119 = getelementptr inbounds float, float* %tmp3118, i64 1
+ %tmp3120 = getelementptr inbounds float, float* %tmp3119, i64 1
+ %tmp3121 = getelementptr inbounds float, float* %tmp3120, i64 1
+ %tmp3122 = getelementptr inbounds float, float* %tmp3121, i64 1
+ %tmp3123 = getelementptr inbounds float, float* %tmp3122, i64 1
+ %tmp3124 = getelementptr inbounds float, float* %tmp3123, i64 1
+ %tmp3125 = getelementptr inbounds float, float* %tmp3124, i64 1
+ %tmp3126 = getelementptr inbounds float, float* %tmp3125, i64 1
+ %tmp3127 = getelementptr inbounds float, float* %tmp3126, i64 1
+ %tmp3128 = getelementptr inbounds float, float* %tmp3127, i64 1
+ %tmp3129 = getelementptr inbounds float, float* %tmp3128, i64 1
+ %tmp3130 = getelementptr inbounds float, float* %tmp3129, i64 1
+ %tmp3131 = getelementptr inbounds float, float* %tmp3130, i64 1
+ %tmp3132 = getelementptr inbounds float, float* %tmp3131, i64 1
+ %tmp3133 = getelementptr inbounds float, float* %tmp3132, i64 1
+ %tmp3134 = getelementptr inbounds float, float* %tmp3133, i64 1
+ %tmp3135 = getelementptr inbounds float, float* %tmp3134, i64 1
+ %tmp3136 = getelementptr inbounds float, float* %tmp3135, i64 1
+ %tmp3137 = getelementptr inbounds float, float* %tmp3136, i64 1
+ %tmp3138 = getelementptr inbounds float, float* %tmp3137, i64 1
+ %tmp3139 = getelementptr inbounds float, float* %tmp3138, i64 1
+ %tmp3140 = getelementptr inbounds float, float* %tmp3139, i64 1
+ %tmp3141 = getelementptr inbounds float, float* %tmp3140, i64 1
+ %tmp3142 = getelementptr inbounds float, float* %tmp3141, i64 1
+ %tmp3143 = getelementptr inbounds float, float* %tmp3142, i64 1
+ %tmp3144 = getelementptr inbounds float, float* %tmp3143, i64 1
+ %tmp3145 = getelementptr inbounds float, float* %tmp3144, i64 1
+ %tmp3146 = getelementptr inbounds float, float* %tmp3145, i64 1
+ %tmp3147 = getelementptr inbounds float, float* %tmp3146, i64 1
+ %tmp3148 = getelementptr inbounds float, float* %tmp3147, i64 1
+ %tmp3149 = getelementptr inbounds float, float* %tmp3148, i64 1
+ %tmp3150 = getelementptr inbounds float, float* %tmp3149, i64 1
+ %tmp3151 = getelementptr inbounds float, float* %tmp3150, i64 1
+ %tmp3152 = getelementptr inbounds float, float* %tmp3151, i64 1
+ %tmp3153 = getelementptr inbounds float, float* %tmp3152, i64 1
+ %tmp3154 = getelementptr inbounds float, float* %tmp3153, i64 1
+ %tmp3155 = getelementptr inbounds float, float* %tmp3154, i64 1
+ %tmp3156 = getelementptr inbounds float, float* %tmp3155, i64 1
+ %tmp3157 = getelementptr inbounds float, float* %tmp3156, i64 1
+ %tmp3158 = getelementptr inbounds float, float* %tmp3157, i64 1
+ %tmp3159 = getelementptr inbounds float, float* %tmp3158, i64 1
+ %tmp3160 = getelementptr inbounds float, float* %tmp3159, i64 1
+ %tmp3161 = getelementptr inbounds float, float* %tmp3160, i64 1
+ %tmp3162 = getelementptr inbounds float, float* %tmp3161, i64 1
+ %tmp3163 = getelementptr inbounds float, float* %tmp3162, i64 1
+ %tmp3164 = getelementptr inbounds float, float* %tmp3163, i64 1
+ %tmp3165 = getelementptr inbounds float, float* %tmp3164, i64 1
+ %tmp3166 = getelementptr inbounds float, float* %tmp3165, i64 1
+ %tmp3167 = getelementptr inbounds float, float* %tmp3166, i64 1
+ %tmp3168 = getelementptr inbounds float, float* %tmp3167, i64 1
+ %tmp3169 = getelementptr inbounds float, float* %tmp3168, i64 1
+ %tmp3170 = getelementptr inbounds float, float* %tmp3169, i64 1
+ %tmp3171 = getelementptr inbounds float, float* %tmp3170, i64 1
+ %tmp3172 = getelementptr inbounds float, float* %tmp3171, i64 1
+ %tmp3173 = getelementptr inbounds float, float* %tmp3172, i64 1
+ %tmp3174 = getelementptr inbounds float, float* %tmp3173, i64 1
+ %tmp3175 = getelementptr inbounds float, float* %tmp3174, i64 1
+ %tmp3176 = getelementptr inbounds float, float* %tmp3175, i64 1
+ %tmp3177 = getelementptr inbounds float, float* %tmp3176, i64 1
+ %tmp3178 = getelementptr inbounds float, float* %tmp3177, i64 1
+ %tmp3179 = getelementptr inbounds float, float* %tmp3178, i64 1
+ %tmp3180 = getelementptr inbounds float, float* %tmp3179, i64 1
+ %tmp3181 = getelementptr inbounds float, float* %tmp3180, i64 1
+ %tmp3182 = getelementptr inbounds float, float* %tmp3181, i64 1
+ %tmp3183 = getelementptr inbounds float, float* %tmp3182, i64 1
+ %tmp3184 = getelementptr inbounds float, float* %tmp3183, i64 1
+ %tmp3185 = getelementptr inbounds float, float* %tmp3184, i64 1
+ %tmp3186 = getelementptr inbounds float, float* %tmp3185, i64 1
+ %tmp3187 = getelementptr inbounds float, float* %tmp3186, i64 1
+ %tmp3188 = getelementptr inbounds float, float* %tmp3187, i64 1
+ %tmp3189 = getelementptr inbounds float, float* %tmp3188, i64 1
+ %tmp3190 = getelementptr inbounds float, float* %tmp3189, i64 1
+ %tmp3191 = getelementptr inbounds float, float* %tmp3190, i64 1
+ %tmp3192 = getelementptr inbounds float, float* %tmp3191, i64 1
+ %tmp3193 = getelementptr inbounds float, float* %tmp3192, i64 1
+ %tmp3194 = getelementptr inbounds float, float* %tmp3193, i64 1
+ %tmp3195 = getelementptr inbounds float, float* %tmp3194, i64 1
+ %tmp3196 = getelementptr inbounds float, float* %tmp3195, i64 1
+ %tmp3197 = getelementptr inbounds float, float* %tmp3196, i64 1
+ %tmp3198 = getelementptr inbounds float, float* %tmp3197, i64 1
+ %tmp3199 = getelementptr inbounds float, float* %tmp3198, i64 1
+ %tmp3200 = getelementptr inbounds float, float* %tmp3199, i64 1
+ %tmp3201 = getelementptr inbounds float, float* %tmp3200, i64 1
+ %tmp3202 = getelementptr inbounds float, float* %tmp3201, i64 1
+ %tmp3203 = getelementptr inbounds float, float* %tmp3202, i64 1
+ %tmp3204 = getelementptr inbounds float, float* %tmp3203, i64 1
+ %tmp3205 = getelementptr inbounds float, float* %tmp3204, i64 1
+ %tmp3206 = getelementptr inbounds float, float* %tmp3205, i64 1
+ %tmp3207 = getelementptr inbounds float, float* %tmp3206, i64 1
+ %tmp3208 = getelementptr inbounds float, float* %tmp3207, i64 1
+ %tmp3209 = getelementptr inbounds float, float* %tmp3208, i64 1
+ %tmp3210 = getelementptr inbounds float, float* %tmp3209, i64 1
+ %tmp3211 = getelementptr inbounds float, float* %tmp3210, i64 1
+ %tmp3212 = getelementptr inbounds float, float* %tmp3211, i64 1
+ %tmp3213 = getelementptr inbounds float, float* %tmp3212, i64 1
+ %tmp3214 = getelementptr inbounds float, float* %tmp3213, i64 1
+ %tmp3215 = getelementptr inbounds float, float* %tmp3214, i64 1
+ %tmp3216 = getelementptr inbounds float, float* %tmp3215, i64 1
+ %tmp3217 = getelementptr inbounds float, float* %tmp3216, i64 1
+ %tmp3218 = getelementptr inbounds float, float* %tmp3217, i64 1
+ %tmp3219 = getelementptr inbounds float, float* %tmp3218, i64 1
+ %tmp3220 = getelementptr inbounds float, float* %tmp3219, i64 1
+ %tmp3221 = getelementptr inbounds float, float* %tmp3220, i64 1
+ %tmp3222 = getelementptr inbounds float, float* %tmp3221, i64 1
+ %tmp3223 = getelementptr inbounds float, float* %tmp3222, i64 1
+ %tmp3224 = getelementptr inbounds float, float* %tmp3223, i64 1
+ %tmp3225 = getelementptr inbounds float, float* %tmp3224, i64 1
+ %tmp3226 = getelementptr inbounds float, float* %tmp3225, i64 1
+ %tmp3227 = getelementptr inbounds float, float* %tmp3226, i64 1
+ %tmp3228 = getelementptr inbounds float, float* %tmp3227, i64 1
+ %tmp3229 = getelementptr inbounds float, float* %tmp3228, i64 1
+ %tmp3230 = getelementptr inbounds float, float* %tmp3229, i64 1
+ %tmp3231 = getelementptr inbounds float, float* %tmp3230, i64 1
+ %tmp3232 = getelementptr inbounds float, float* %tmp3231, i64 1
+ %tmp3233 = getelementptr inbounds float, float* %tmp3232, i64 1
+ %tmp3234 = getelementptr inbounds float, float* %tmp3233, i64 1
+ %tmp3235 = getelementptr inbounds float, float* %tmp3234, i64 1
+ %tmp3236 = getelementptr inbounds float, float* %tmp3235, i64 1
+ %tmp3237 = getelementptr inbounds float, float* %tmp3236, i64 1
+ %tmp3238 = getelementptr inbounds float, float* %tmp3237, i64 1
+ %tmp3239 = getelementptr inbounds float, float* %tmp3238, i64 1
+ %tmp3240 = getelementptr inbounds float, float* %tmp3239, i64 1
+ %tmp3241 = getelementptr inbounds float, float* %tmp3240, i64 1
+ %tmp3242 = getelementptr inbounds float, float* %tmp3241, i64 1
+ %tmp3243 = getelementptr inbounds float, float* %tmp3242, i64 1
+ %tmp3244 = getelementptr inbounds float, float* %tmp3243, i64 1
+ %tmp3245 = getelementptr inbounds float, float* %tmp3244, i64 1
+ %tmp3246 = getelementptr inbounds float, float* %tmp3245, i64 1
+ %tmp3247 = getelementptr inbounds float, float* %tmp3246, i64 1
+ %tmp3248 = getelementptr inbounds float, float* %tmp3247, i64 1
+ %tmp3249 = getelementptr inbounds float, float* %tmp3248, i64 1
+ %tmp3250 = getelementptr inbounds float, float* %tmp3249, i64 1
+ %tmp3251 = getelementptr inbounds float, float* %tmp3250, i64 1
+ %tmp3252 = getelementptr inbounds float, float* %tmp3251, i64 1
+ %tmp3253 = getelementptr inbounds float, float* %tmp3252, i64 1
+ %tmp3254 = getelementptr inbounds float, float* %tmp3253, i64 1
+ %tmp3255 = getelementptr inbounds float, float* %tmp3254, i64 1
+ %tmp3256 = getelementptr inbounds float, float* %tmp3255, i64 1
+ %tmp3257 = getelementptr inbounds float, float* %tmp3256, i64 1
+ %tmp3258 = getelementptr inbounds float, float* %tmp3257, i64 1
+ %tmp3259 = getelementptr inbounds float, float* %tmp3258, i64 1
+ %tmp3260 = getelementptr inbounds float, float* %tmp3259, i64 1
+ %tmp3261 = getelementptr inbounds float, float* %tmp3260, i64 1
+ %tmp3262 = getelementptr inbounds float, float* %tmp3261, i64 1
+ %tmp3263 = getelementptr inbounds float, float* %tmp3262, i64 1
+ %tmp3264 = getelementptr inbounds float, float* %tmp3263, i64 1
+ %tmp3265 = getelementptr inbounds float, float* %tmp3264, i64 1
+ %tmp3266 = getelementptr inbounds float, float* %tmp3265, i64 1
+ %tmp3267 = getelementptr inbounds float, float* %tmp3266, i64 1
+ %tmp3268 = getelementptr inbounds float, float* %tmp3267, i64 1
+ %tmp3269 = getelementptr inbounds float, float* %tmp3268, i64 1
+ %tmp3270 = getelementptr inbounds float, float* %tmp3269, i64 1
+ %tmp3271 = getelementptr inbounds float, float* %tmp3270, i64 1
+ %tmp3272 = getelementptr inbounds float, float* %tmp3271, i64 1
+ %tmp3273 = getelementptr inbounds float, float* %tmp3272, i64 1
+ %tmp3274 = getelementptr inbounds float, float* %tmp3273, i64 1
+ %tmp3275 = getelementptr inbounds float, float* %tmp3274, i64 1
+ %tmp3276 = getelementptr inbounds float, float* %tmp3275, i64 1
+ %tmp3277 = getelementptr inbounds float, float* %tmp3276, i64 1
+ %tmp3278 = getelementptr inbounds float, float* %tmp3277, i64 1
+ %tmp3279 = getelementptr inbounds float, float* %tmp3278, i64 1
+ %tmp3280 = getelementptr inbounds float, float* %tmp3279, i64 1
+ %tmp3281 = getelementptr inbounds float, float* %tmp3280, i64 1
+ %tmp3282 = getelementptr inbounds float, float* %tmp3281, i64 1
+ %tmp3283 = getelementptr inbounds float, float* %tmp3282, i64 1
+ %tmp3284 = getelementptr inbounds float, float* %tmp3283, i64 1
+ %tmp3285 = getelementptr inbounds float, float* %tmp3284, i64 1
+ %tmp3286 = getelementptr inbounds float, float* %tmp3285, i64 1
+ %tmp3287 = getelementptr inbounds float, float* %tmp3286, i64 1
+ %tmp3288 = getelementptr inbounds float, float* %tmp3287, i64 1
+ %tmp3289 = getelementptr inbounds float, float* %tmp3288, i64 1
+ %tmp3290 = getelementptr inbounds float, float* %tmp3289, i64 1
+ %tmp3291 = getelementptr inbounds float, float* %tmp3290, i64 1
+ %tmp3292 = getelementptr inbounds float, float* %tmp3291, i64 1
+ %tmp3293 = getelementptr inbounds float, float* %tmp3292, i64 1
+ %tmp3294 = getelementptr inbounds float, float* %tmp3293, i64 1
+ %tmp3295 = getelementptr inbounds float, float* %tmp3294, i64 1
+ %tmp3296 = getelementptr inbounds float, float* %tmp3295, i64 1
+ %tmp3297 = getelementptr inbounds float, float* %tmp3296, i64 1
+ %tmp3298 = getelementptr inbounds float, float* %tmp3297, i64 1
+ %tmp3299 = getelementptr inbounds float, float* %tmp3298, i64 1
+ %tmp3300 = getelementptr inbounds float, float* %tmp3299, i64 1
+ %tmp3301 = getelementptr inbounds float, float* %tmp3300, i64 1
+ %tmp3302 = getelementptr inbounds float, float* %tmp3301, i64 1
+ %tmp3303 = getelementptr inbounds float, float* %tmp3302, i64 1
+ %tmp3304 = getelementptr inbounds float, float* %tmp3303, i64 1
+ %tmp3305 = getelementptr inbounds float, float* %tmp3304, i64 1
+ %tmp3306 = getelementptr inbounds float, float* %tmp3305, i64 1
+ %tmp3307 = getelementptr inbounds float, float* %tmp3306, i64 1
+ %tmp3308 = getelementptr inbounds float, float* %tmp3307, i64 1
+ %tmp3309 = getelementptr inbounds float, float* %tmp3308, i64 1
+ %tmp3310 = getelementptr inbounds float, float* %tmp3309, i64 1
+ %tmp3311 = getelementptr inbounds float, float* %tmp3310, i64 1
+ %tmp3312 = getelementptr inbounds float, float* %tmp3311, i64 1
+ %tmp3313 = getelementptr inbounds float, float* %tmp3312, i64 1
+ %tmp3314 = getelementptr inbounds float, float* %tmp3313, i64 1
+ %tmp3315 = getelementptr inbounds float, float* %tmp3314, i64 1
+ %tmp3316 = getelementptr inbounds float, float* %tmp3315, i64 1
+ %tmp3317 = getelementptr inbounds float, float* %tmp3316, i64 1
+ %tmp3318 = getelementptr inbounds float, float* %tmp3317, i64 1
+ %tmp3319 = getelementptr inbounds float, float* %tmp3318, i64 1
+ %tmp3320 = getelementptr inbounds float, float* %tmp3319, i64 1
+ %tmp3321 = getelementptr inbounds float, float* %tmp3320, i64 1
+ %tmp3322 = getelementptr inbounds float, float* %tmp3321, i64 1
+ %tmp3323 = getelementptr inbounds float, float* %tmp3322, i64 1
+ %tmp3324 = getelementptr inbounds float, float* %tmp3323, i64 1
+ %tmp3325 = getelementptr inbounds float, float* %tmp3324, i64 1
+ %tmp3326 = getelementptr inbounds float, float* %tmp3325, i64 1
+ %tmp3327 = getelementptr inbounds float, float* %tmp3326, i64 1
+ %tmp3328 = getelementptr inbounds float, float* %tmp3327, i64 1
+ %tmp3329 = getelementptr inbounds float, float* %tmp3328, i64 1
+ %tmp3330 = getelementptr inbounds float, float* %tmp3329, i64 1
+ %tmp3331 = getelementptr inbounds float, float* %tmp3330, i64 1
+ %tmp3332 = getelementptr inbounds float, float* %tmp3331, i64 1
+ %tmp3333 = getelementptr inbounds float, float* %tmp3332, i64 1
+ %tmp3334 = getelementptr inbounds float, float* %tmp3333, i64 1
+ %tmp3335 = getelementptr inbounds float, float* %tmp3334, i64 1
+ %tmp3336 = getelementptr inbounds float, float* %tmp3335, i64 1
+ %tmp3337 = getelementptr inbounds float, float* %tmp3336, i64 1
+ %tmp3338 = getelementptr inbounds float, float* %tmp3337, i64 1
+ %tmp3339 = getelementptr inbounds float, float* %tmp3338, i64 1
+ %tmp3340 = getelementptr inbounds float, float* %tmp3339, i64 1
+ %tmp3341 = getelementptr inbounds float, float* %tmp3340, i64 1
+ %tmp3342 = getelementptr inbounds float, float* %tmp3341, i64 1
+ %tmp3343 = getelementptr inbounds float, float* %tmp3342, i64 1
+ %tmp3344 = getelementptr inbounds float, float* %tmp3343, i64 1
+ %tmp3345 = getelementptr inbounds float, float* %tmp3344, i64 1
+ %tmp3346 = getelementptr inbounds float, float* %tmp3345, i64 1
+ %tmp3347 = getelementptr inbounds float, float* %tmp3346, i64 1
+ %tmp3348 = getelementptr inbounds float, float* %tmp3347, i64 1
+ %tmp3349 = getelementptr inbounds float, float* %tmp3348, i64 1
+ %tmp3350 = getelementptr inbounds float, float* %tmp3349, i64 1
+ %tmp3351 = getelementptr inbounds float, float* %tmp3350, i64 1
+ %tmp3352 = getelementptr inbounds float, float* %tmp3351, i64 1
+ %tmp3353 = getelementptr inbounds float, float* %tmp3352, i64 1
+ %tmp3354 = getelementptr inbounds float, float* %tmp3353, i64 1
+ %tmp3355 = getelementptr inbounds float, float* %tmp3354, i64 1
+ %tmp3356 = getelementptr inbounds float, float* %tmp3355, i64 1
+ %tmp3357 = getelementptr inbounds float, float* %tmp3356, i64 1
+ %tmp3358 = getelementptr inbounds float, float* %tmp3357, i64 1
+ %tmp3359 = getelementptr inbounds float, float* %tmp3358, i64 1
+ %tmp3360 = getelementptr inbounds float, float* %tmp3359, i64 1
+ %tmp3361 = getelementptr inbounds float, float* %tmp3360, i64 1
+ %tmp3362 = getelementptr inbounds float, float* %tmp3361, i64 1
+ %tmp3363 = getelementptr inbounds float, float* %tmp3362, i64 1
+ %tmp3364 = getelementptr inbounds float, float* %tmp3363, i64 1
+ %tmp3365 = getelementptr inbounds float, float* %tmp3364, i64 1
+ %tmp3366 = getelementptr inbounds float, float* %tmp3365, i64 1
+ %tmp3367 = getelementptr inbounds float, float* %tmp3366, i64 1
+ %tmp3368 = getelementptr inbounds float, float* %tmp3367, i64 1
+ %tmp3369 = getelementptr inbounds float, float* %tmp3368, i64 1
+ %tmp3370 = getelementptr inbounds float, float* %tmp3369, i64 1
+ %tmp3371 = getelementptr inbounds float, float* %tmp3370, i64 1
+ %tmp3372 = getelementptr inbounds float, float* %tmp3371, i64 1
+ %tmp3373 = getelementptr inbounds float, float* %tmp3372, i64 1
+ %tmp3374 = getelementptr inbounds float, float* %tmp3373, i64 1
+ %tmp3375 = getelementptr inbounds float, float* %tmp3374, i64 1
+ %tmp3376 = getelementptr inbounds float, float* %tmp3375, i64 1
+ %tmp3377 = getelementptr inbounds float, float* %tmp3376, i64 1
+ %tmp3378 = getelementptr inbounds float, float* %tmp3377, i64 1
+ %tmp3379 = getelementptr inbounds float, float* %tmp3378, i64 1
+ %tmp3380 = getelementptr inbounds float, float* %tmp3379, i64 1
+ %tmp3381 = getelementptr inbounds float, float* %tmp3380, i64 1
+ %tmp3382 = getelementptr inbounds float, float* %tmp3381, i64 1
+ %tmp3383 = getelementptr inbounds float, float* %tmp3382, i64 1
+ %tmp3384 = getelementptr inbounds float, float* %tmp3383, i64 1
+ %tmp3385 = getelementptr inbounds float, float* %tmp3384, i64 1
+ %tmp3386 = getelementptr inbounds float, float* %tmp3385, i64 1
+ %tmp3387 = getelementptr inbounds float, float* %tmp3386, i64 1
+ %tmp3388 = getelementptr inbounds float, float* %tmp3387, i64 1
+ %tmp3389 = getelementptr inbounds float, float* %tmp3388, i64 1
+ %tmp3390 = getelementptr inbounds float, float* %tmp3389, i64 1
+ %tmp3391 = getelementptr inbounds float, float* %tmp3390, i64 1
+ %tmp3392 = getelementptr inbounds float, float* %tmp3391, i64 1
+ %tmp3393 = getelementptr inbounds float, float* %tmp3392, i64 1
+ %tmp3394 = getelementptr inbounds float, float* %tmp3393, i64 1
+ %tmp3395 = getelementptr inbounds float, float* %tmp3394, i64 1
+ %tmp3396 = getelementptr inbounds float, float* %tmp3395, i64 1
+ %tmp3397 = getelementptr inbounds float, float* %tmp3396, i64 1
+ %tmp3398 = getelementptr inbounds float, float* %tmp3397, i64 1
+ %tmp3399 = getelementptr inbounds float, float* %tmp3398, i64 1
+ %tmp3400 = getelementptr inbounds float, float* %tmp3399, i64 1
+ %tmp3401 = getelementptr inbounds float, float* %tmp3400, i64 1
+ %tmp3402 = getelementptr inbounds float, float* %tmp3401, i64 1
+ %tmp3403 = getelementptr inbounds float, float* %tmp3402, i64 1
+ %tmp3404 = getelementptr inbounds float, float* %tmp3403, i64 1
+ %tmp3405 = getelementptr inbounds float, float* %tmp3404, i64 1
+ %tmp3406 = getelementptr inbounds float, float* %tmp3405, i64 1
+ %tmp3407 = getelementptr inbounds float, float* %tmp3406, i64 1
+ %tmp3408 = getelementptr inbounds float, float* %tmp3407, i64 1
+ %tmp3409 = getelementptr inbounds float, float* %tmp3408, i64 1
+ %tmp3410 = getelementptr inbounds float, float* %tmp3409, i64 1
+ %tmp3411 = getelementptr inbounds float, float* %tmp3410, i64 1
+ %tmp3412 = getelementptr inbounds float, float* %tmp3411, i64 1
+ %tmp3413 = getelementptr inbounds float, float* %tmp3412, i64 1
+ %tmp3414 = getelementptr inbounds float, float* %tmp3413, i64 1
+ %tmp3415 = getelementptr inbounds float, float* %tmp3414, i64 1
+ %tmp3416 = getelementptr inbounds float, float* %tmp3415, i64 1
+ %tmp3417 = getelementptr inbounds float, float* %tmp3416, i64 1
+ %tmp3418 = getelementptr inbounds float, float* %tmp3417, i64 1
+ %tmp3419 = getelementptr inbounds float, float* %tmp3418, i64 1
+ %tmp3420 = getelementptr inbounds float, float* %tmp3419, i64 1
+ %tmp3421 = getelementptr inbounds float, float* %tmp3420, i64 1
+ %tmp3422 = getelementptr inbounds float, float* %tmp3421, i64 1
+ %tmp3423 = getelementptr inbounds float, float* %tmp3422, i64 1
+ %tmp3424 = getelementptr inbounds float, float* %tmp3423, i64 1
+ %tmp3425 = getelementptr inbounds float, float* %tmp3424, i64 1
+ %tmp3426 = getelementptr inbounds float, float* %tmp3425, i64 1
+ %tmp3427 = getelementptr inbounds float, float* %tmp3426, i64 1
+ %tmp3428 = getelementptr inbounds float, float* %tmp3427, i64 1
+ %tmp3429 = getelementptr inbounds float, float* %tmp3428, i64 1
+ %tmp3430 = getelementptr inbounds float, float* %tmp3429, i64 1
+ %tmp3431 = getelementptr inbounds float, float* %tmp3430, i64 1
+ %tmp3432 = getelementptr inbounds float, float* %tmp3431, i64 1
+ %tmp3433 = getelementptr inbounds float, float* %tmp3432, i64 1
+ %tmp3434 = getelementptr inbounds float, float* %tmp3433, i64 1
+ %tmp3435 = getelementptr inbounds float, float* %tmp3434, i64 1
+ %tmp3436 = getelementptr inbounds float, float* %tmp3435, i64 1
+ %tmp3437 = getelementptr inbounds float, float* %tmp3436, i64 1
+ %tmp3438 = getelementptr inbounds float, float* %tmp3437, i64 1
+ %tmp3439 = getelementptr inbounds float, float* %tmp3438, i64 1
+ %tmp3440 = getelementptr inbounds float, float* %tmp3439, i64 1
+ %tmp3441 = getelementptr inbounds float, float* %tmp3440, i64 1
+ %tmp3442 = getelementptr inbounds float, float* %tmp3441, i64 1
+ %tmp3443 = getelementptr inbounds float, float* %tmp3442, i64 1
+ %tmp3444 = getelementptr inbounds float, float* %tmp3443, i64 1
+ %tmp3445 = getelementptr inbounds float, float* %tmp3444, i64 1
+ %tmp3446 = getelementptr inbounds float, float* %tmp3445, i64 1
+ %tmp3447 = getelementptr inbounds float, float* %tmp3446, i64 1
+ %tmp3448 = getelementptr inbounds float, float* %tmp3447, i64 1
+ %tmp3449 = getelementptr inbounds float, float* %tmp3448, i64 1
+ %tmp3450 = getelementptr inbounds float, float* %tmp3449, i64 1
+ %tmp3451 = getelementptr inbounds float, float* %tmp3450, i64 1
+ %tmp3452 = getelementptr inbounds float, float* %tmp3451, i64 1
+ %tmp3453 = getelementptr inbounds float, float* %tmp3452, i64 1
+ %tmp3454 = getelementptr inbounds float, float* %tmp3453, i64 1
+ %tmp3455 = getelementptr inbounds float, float* %tmp3454, i64 1
+ %tmp3456 = getelementptr inbounds float, float* %tmp3455, i64 1
+ %tmp3457 = getelementptr inbounds float, float* %tmp3456, i64 1
+ %tmp3458 = getelementptr inbounds float, float* %tmp3457, i64 1
+ %tmp3459 = getelementptr inbounds float, float* %tmp3458, i64 1
+ %tmp3460 = getelementptr inbounds float, float* %tmp3459, i64 1
+ %tmp3461 = getelementptr inbounds float, float* %tmp3460, i64 1
+ %tmp3462 = getelementptr inbounds float, float* %tmp3461, i64 1
+ %tmp3463 = getelementptr inbounds float, float* %tmp3462, i64 1
+ %tmp3464 = getelementptr inbounds float, float* %tmp3463, i64 1
+ %tmp3465 = getelementptr inbounds float, float* %tmp3464, i64 1
+ %tmp3466 = getelementptr inbounds float, float* %tmp3465, i64 1
+ %tmp3467 = getelementptr inbounds float, float* %tmp3466, i64 1
+ %tmp3468 = getelementptr inbounds float, float* %tmp3467, i64 1
+ %tmp3469 = getelementptr inbounds float, float* %tmp3468, i64 1
+ %tmp3470 = getelementptr inbounds float, float* %tmp3469, i64 1
+ %tmp3471 = getelementptr inbounds float, float* %tmp3470, i64 1
+ %tmp3472 = getelementptr inbounds float, float* %tmp3471, i64 1
+ %tmp3473 = getelementptr inbounds float, float* %tmp3472, i64 1
+ %tmp3474 = getelementptr inbounds float, float* %tmp3473, i64 1
+ %tmp3475 = getelementptr inbounds float, float* %tmp3474, i64 1
+ %tmp3476 = getelementptr inbounds float, float* %tmp3475, i64 1
+ %tmp3477 = getelementptr inbounds float, float* %tmp3476, i64 1
+ %tmp3478 = getelementptr inbounds float, float* %tmp3477, i64 1
+ %tmp3479 = getelementptr inbounds float, float* %tmp3478, i64 1
+ %tmp3480 = getelementptr inbounds float, float* %tmp3479, i64 1
+ %tmp3481 = getelementptr inbounds float, float* %tmp3480, i64 1
+ %tmp3482 = getelementptr inbounds float, float* %tmp3481, i64 1
+ %tmp3483 = getelementptr inbounds float, float* %tmp3482, i64 1
+ %tmp3484 = getelementptr inbounds float, float* %tmp3483, i64 1
+ %tmp3485 = getelementptr inbounds float, float* %tmp3484, i64 1
+ %tmp3486 = getelementptr inbounds float, float* %tmp3485, i64 1
+ %tmp3487 = getelementptr inbounds float, float* %tmp3486, i64 1
+ %tmp3488 = getelementptr inbounds float, float* %tmp3487, i64 1
+ %tmp3489 = getelementptr inbounds float, float* %tmp3488, i64 1
+ %tmp3490 = getelementptr inbounds float, float* %tmp3489, i64 1
+ %tmp3491 = getelementptr inbounds float, float* %tmp3490, i64 1
+ %tmp3492 = getelementptr inbounds float, float* %tmp3491, i64 1
+ %tmp3493 = getelementptr inbounds float, float* %tmp3492, i64 1
+ %tmp3494 = getelementptr inbounds float, float* %tmp3493, i64 1
+ %tmp3495 = getelementptr inbounds float, float* %tmp3494, i64 1
+ %tmp3496 = getelementptr inbounds float, float* %tmp3495, i64 1
+ %tmp3497 = getelementptr inbounds float, float* %tmp3496, i64 1
+ %tmp3498 = getelementptr inbounds float, float* %tmp3497, i64 1
+ %tmp3499 = getelementptr inbounds float, float* %tmp3498, i64 1
+ %tmp3500 = getelementptr inbounds float, float* %tmp3499, i64 1
+ %tmp3501 = getelementptr inbounds float, float* %tmp3500, i64 1
+ %tmp3502 = getelementptr inbounds float, float* %tmp3501, i64 1
+ %tmp3503 = getelementptr inbounds float, float* %tmp3502, i64 1
+ %tmp3504 = getelementptr inbounds float, float* %tmp3503, i64 1
+ %tmp3505 = getelementptr inbounds float, float* %tmp3504, i64 1
+ %tmp3506 = getelementptr inbounds float, float* %tmp3505, i64 1
+ %tmp3507 = getelementptr inbounds float, float* %tmp3506, i64 1
+ %tmp3508 = getelementptr inbounds float, float* %tmp3507, i64 1
+ %tmp3509 = getelementptr inbounds float, float* %tmp3508, i64 1
+ %tmp3510 = getelementptr inbounds float, float* %tmp3509, i64 1
+ %tmp3511 = getelementptr inbounds float, float* %tmp3510, i64 1
+ %tmp3512 = getelementptr inbounds float, float* %tmp3511, i64 1
+ %tmp3513 = getelementptr inbounds float, float* %tmp3512, i64 1
+ %tmp3514 = getelementptr inbounds float, float* %tmp3513, i64 1
+ %tmp3515 = getelementptr inbounds float, float* %tmp3514, i64 1
+ %tmp3516 = getelementptr inbounds float, float* %tmp3515, i64 1
+ %tmp3517 = getelementptr inbounds float, float* %tmp3516, i64 1
+ %tmp3518 = getelementptr inbounds float, float* %tmp3517, i64 1
+ %tmp3519 = getelementptr inbounds float, float* %tmp3518, i64 1
+ %tmp3520 = getelementptr inbounds float, float* %tmp3519, i64 1
+ %tmp3521 = getelementptr inbounds float, float* %tmp3520, i64 1
+ %tmp3522 = getelementptr inbounds float, float* %tmp3521, i64 1
+ %tmp3523 = getelementptr inbounds float, float* %tmp3522, i64 1
+ %tmp3524 = getelementptr inbounds float, float* %tmp3523, i64 1
+ %tmp3525 = getelementptr inbounds float, float* %tmp3524, i64 1
+ %tmp3526 = getelementptr inbounds float, float* %tmp3525, i64 1
+ %tmp3527 = getelementptr inbounds float, float* %tmp3526, i64 1
+ %tmp3528 = getelementptr inbounds float, float* %tmp3527, i64 1
+ %tmp3529 = getelementptr inbounds float, float* %tmp3528, i64 1
+ %tmp3530 = getelementptr inbounds float, float* %tmp3529, i64 1
+ %tmp3531 = getelementptr inbounds float, float* %tmp3530, i64 1
+ %tmp3532 = getelementptr inbounds float, float* %tmp3531, i64 1
+ %tmp3533 = getelementptr inbounds float, float* %tmp3532, i64 1
+ %tmp3534 = getelementptr inbounds float, float* %tmp3533, i64 1
+ %tmp3535 = getelementptr inbounds float, float* %tmp3534, i64 1
+ %tmp3536 = getelementptr inbounds float, float* %tmp3535, i64 1
+ %tmp3537 = getelementptr inbounds float, float* %tmp3536, i64 1
+ %tmp3538 = getelementptr inbounds float, float* %tmp3537, i64 1
+ %tmp3539 = getelementptr inbounds float, float* %tmp3538, i64 1
+ %tmp3540 = getelementptr inbounds float, float* %tmp3539, i64 1
+ %tmp3541 = getelementptr inbounds float, float* %tmp3540, i64 1
+ %tmp3542 = getelementptr inbounds float, float* %tmp3541, i64 1
+ %tmp3543 = getelementptr inbounds float, float* %tmp3542, i64 1
+ %tmp3544 = getelementptr inbounds float, float* %tmp3543, i64 1
+ %tmp3545 = getelementptr inbounds float, float* %tmp3544, i64 1
+ %tmp3546 = getelementptr inbounds float, float* %tmp3545, i64 1
+ %tmp3547 = getelementptr inbounds float, float* %tmp3546, i64 1
+ %tmp3548 = getelementptr inbounds float, float* %tmp3547, i64 1
+ %tmp3549 = getelementptr inbounds float, float* %tmp3548, i64 1
+ %tmp3550 = getelementptr inbounds float, float* %tmp3549, i64 1
+ %tmp3551 = getelementptr inbounds float, float* %tmp3550, i64 1
+ %tmp3552 = getelementptr inbounds float, float* %tmp3551, i64 1
+ %tmp3553 = getelementptr inbounds float, float* %tmp3552, i64 1
+ %tmp3554 = getelementptr inbounds float, float* %tmp3553, i64 1
+ %tmp3555 = getelementptr inbounds float, float* %tmp3554, i64 1
+ %tmp3556 = getelementptr inbounds float, float* %tmp3555, i64 1
+ %tmp3557 = getelementptr inbounds float, float* %tmp3556, i64 1
+ %tmp3558 = getelementptr inbounds float, float* %tmp3557, i64 1
+ %tmp3559 = getelementptr inbounds float, float* %tmp3558, i64 1
+ %tmp3560 = getelementptr inbounds float, float* %tmp3559, i64 1
+ %tmp3561 = getelementptr inbounds float, float* %tmp3560, i64 1
+ %tmp3562 = getelementptr inbounds float, float* %tmp3561, i64 1
+ %tmp3563 = getelementptr inbounds float, float* %tmp3562, i64 1
+ %tmp3564 = getelementptr inbounds float, float* %tmp3563, i64 1
+ %tmp3565 = getelementptr inbounds float, float* %tmp3564, i64 1
+ %tmp3566 = getelementptr inbounds float, float* %tmp3565, i64 1
+ %tmp3567 = getelementptr inbounds float, float* %tmp3566, i64 1
+ %tmp3568 = getelementptr inbounds float, float* %tmp3567, i64 1
+ %tmp3569 = getelementptr inbounds float, float* %tmp3568, i64 1
+ %tmp3570 = getelementptr inbounds float, float* %tmp3569, i64 1
+ %tmp3571 = getelementptr inbounds float, float* %tmp3570, i64 1
+ %tmp3572 = getelementptr inbounds float, float* %tmp3571, i64 1
+ %tmp3573 = getelementptr inbounds float, float* %tmp3572, i64 1
+ %tmp3574 = getelementptr inbounds float, float* %tmp3573, i64 1
+ %tmp3575 = getelementptr inbounds float, float* %tmp3574, i64 1
+ %tmp3576 = getelementptr inbounds float, float* %tmp3575, i64 1
+ %tmp3577 = getelementptr inbounds float, float* %tmp3576, i64 1
+ %tmp3578 = getelementptr inbounds float, float* %tmp3577, i64 1
+ %tmp3579 = getelementptr inbounds float, float* %tmp3578, i64 1
+ %tmp3580 = getelementptr inbounds float, float* %tmp3579, i64 1
+ %tmp3581 = getelementptr inbounds float, float* %tmp3580, i64 1
+ %tmp3582 = getelementptr inbounds float, float* %tmp3581, i64 1
+ %tmp3583 = getelementptr inbounds float, float* %tmp3582, i64 1
+ %tmp3584 = getelementptr inbounds float, float* %tmp3583, i64 1
+ %tmp3585 = getelementptr inbounds float, float* %tmp3584, i64 1
+ %tmp3586 = getelementptr inbounds float, float* %tmp3585, i64 1
+ %tmp3587 = getelementptr inbounds float, float* %tmp3586, i64 1
+ %tmp3588 = getelementptr inbounds float, float* %tmp3587, i64 1
+ %tmp3589 = getelementptr inbounds float, float* %tmp3588, i64 1
+ %tmp3590 = getelementptr inbounds float, float* %tmp3589, i64 1
+ %tmp3591 = getelementptr inbounds float, float* %tmp3590, i64 1
+ %tmp3592 = getelementptr inbounds float, float* %tmp3591, i64 1
+ %tmp3593 = getelementptr inbounds float, float* %tmp3592, i64 1
+ %tmp3594 = getelementptr inbounds float, float* %tmp3593, i64 1
+ %tmp3595 = getelementptr inbounds float, float* %tmp3594, i64 1
+ %tmp3596 = getelementptr inbounds float, float* %tmp3595, i64 1
+ %tmp3597 = getelementptr inbounds float, float* %tmp3596, i64 1
+ %tmp3598 = getelementptr inbounds float, float* %tmp3597, i64 1
+ %tmp3599 = getelementptr inbounds float, float* %tmp3598, i64 1
+ %tmp3600 = getelementptr inbounds float, float* %tmp3599, i64 1
+ %tmp3601 = getelementptr inbounds float, float* %tmp3600, i64 1
+ %tmp3602 = getelementptr inbounds float, float* %tmp3601, i64 1
+ %tmp3603 = getelementptr inbounds float, float* %tmp3602, i64 1
+ %tmp3604 = getelementptr inbounds float, float* %tmp3603, i64 1
+ %tmp3605 = getelementptr inbounds float, float* %tmp3604, i64 1
+ %tmp3606 = getelementptr inbounds float, float* %tmp3605, i64 1
+ %tmp3607 = getelementptr inbounds float, float* %tmp3606, i64 1
+ %tmp3608 = getelementptr inbounds float, float* %tmp3607, i64 1
+ %tmp3609 = getelementptr inbounds float, float* %tmp3608, i64 1
+ %tmp3610 = getelementptr inbounds float, float* %tmp3609, i64 1
+ %tmp3611 = getelementptr inbounds float, float* %tmp3610, i64 1
+ %tmp3612 = getelementptr inbounds float, float* %tmp3611, i64 1
+ %tmp3613 = getelementptr inbounds float, float* %tmp3612, i64 1
+ %tmp3614 = getelementptr inbounds float, float* %tmp3613, i64 1
+ %tmp3615 = getelementptr inbounds float, float* %tmp3614, i64 1
+ %tmp3616 = getelementptr inbounds float, float* %tmp3615, i64 1
+ %tmp3617 = getelementptr inbounds float, float* %tmp3616, i64 1
+ %tmp3618 = getelementptr inbounds float, float* %tmp3617, i64 1
+ %tmp3619 = getelementptr inbounds float, float* %tmp3618, i64 1
+ %tmp3620 = getelementptr inbounds float, float* %tmp3619, i64 1
+ %tmp3621 = getelementptr inbounds float, float* %tmp3620, i64 1
+ %tmp3622 = getelementptr inbounds float, float* %tmp3621, i64 1
+ %tmp3623 = getelementptr inbounds float, float* %tmp3622, i64 1
+ %tmp3624 = getelementptr inbounds float, float* %tmp3623, i64 1
+ %tmp3625 = getelementptr inbounds float, float* %tmp3624, i64 1
+ %tmp3626 = getelementptr inbounds float, float* %tmp3625, i64 1
+ %tmp3627 = getelementptr inbounds float, float* %tmp3626, i64 1
+ %tmp3628 = getelementptr inbounds float, float* %tmp3627, i64 1
+ %tmp3629 = getelementptr inbounds float, float* %tmp3628, i64 1
+ %tmp3630 = getelementptr inbounds float, float* %tmp3629, i64 1
+ %tmp3631 = getelementptr inbounds float, float* %tmp3630, i64 1
+ %tmp3632 = getelementptr inbounds float, float* %tmp3631, i64 1
+ %tmp3633 = getelementptr inbounds float, float* %tmp3632, i64 1
+ %tmp3634 = getelementptr inbounds float, float* %tmp3633, i64 1
+ %tmp3635 = getelementptr inbounds float, float* %tmp3634, i64 1
+ %tmp3636 = getelementptr inbounds float, float* %tmp3635, i64 1
+ %tmp3637 = getelementptr inbounds float, float* %tmp3636, i64 1
+ %tmp3638 = getelementptr inbounds float, float* %tmp3637, i64 1
+ %tmp3639 = getelementptr inbounds float, float* %tmp3638, i64 1
+ %tmp3640 = getelementptr inbounds float, float* %tmp3639, i64 1
+ %tmp3641 = getelementptr inbounds float, float* %tmp3640, i64 1
+ %tmp3642 = getelementptr inbounds float, float* %tmp3641, i64 1
+ %tmp3643 = getelementptr inbounds float, float* %tmp3642, i64 1
+ %tmp3644 = getelementptr inbounds float, float* %tmp3643, i64 1
+ %tmp3645 = getelementptr inbounds float, float* %tmp3644, i64 1
+ %tmp3646 = getelementptr inbounds float, float* %tmp3645, i64 1
+ %tmp3647 = getelementptr inbounds float, float* %tmp3646, i64 1
+ %tmp3648 = getelementptr inbounds float, float* %tmp3647, i64 1
+ %tmp3649 = getelementptr inbounds float, float* %tmp3648, i64 1
+ %tmp3650 = getelementptr inbounds float, float* %tmp3649, i64 1
+ %tmp3651 = getelementptr inbounds float, float* %tmp3650, i64 1
+ %tmp3652 = getelementptr inbounds float, float* %tmp3651, i64 1
+ %tmp3653 = getelementptr inbounds float, float* %tmp3652, i64 1
+ %tmp3654 = getelementptr inbounds float, float* %tmp3653, i64 1
+ %tmp3655 = getelementptr inbounds float, float* %tmp3654, i64 1
+ %tmp3656 = getelementptr inbounds float, float* %tmp3655, i64 1
+ %tmp3657 = getelementptr inbounds float, float* %tmp3656, i64 1
+ %tmp3658 = getelementptr inbounds float, float* %tmp3657, i64 1
+ %tmp3659 = getelementptr inbounds float, float* %tmp3658, i64 1
+ %tmp3660 = getelementptr inbounds float, float* %tmp3659, i64 1
+ %tmp3661 = getelementptr inbounds float, float* %tmp3660, i64 1
+ %tmp3662 = getelementptr inbounds float, float* %tmp3661, i64 1
+ %tmp3663 = getelementptr inbounds float, float* %tmp3662, i64 1
+ %tmp3664 = getelementptr inbounds float, float* %tmp3663, i64 1
+ %tmp3665 = getelementptr inbounds float, float* %tmp3664, i64 1
+ %tmp3666 = getelementptr inbounds float, float* %tmp3665, i64 1
+ %tmp3667 = getelementptr inbounds float, float* %tmp3666, i64 1
+ %tmp3668 = getelementptr inbounds float, float* %tmp3667, i64 1
+ %tmp3669 = getelementptr inbounds float, float* %tmp3668, i64 1
+ %tmp3670 = getelementptr inbounds float, float* %tmp3669, i64 1
+ %tmp3671 = getelementptr inbounds float, float* %tmp3670, i64 1
+ %tmp3672 = getelementptr inbounds float, float* %tmp3671, i64 1
+ %tmp3673 = getelementptr inbounds float, float* %tmp3672, i64 1
+ %tmp3674 = getelementptr inbounds float, float* %tmp3673, i64 1
+ %tmp3675 = getelementptr inbounds float, float* %tmp3674, i64 1
+ %tmp3676 = getelementptr inbounds float, float* %tmp3675, i64 1
+ %tmp3677 = getelementptr inbounds float, float* %tmp3676, i64 1
+ %tmp3678 = getelementptr inbounds float, float* %tmp3677, i64 1
+ %tmp3679 = getelementptr inbounds float, float* %tmp3678, i64 1
+ %tmp3680 = getelementptr inbounds float, float* %tmp3679, i64 1
+ %tmp3681 = getelementptr inbounds float, float* %tmp3680, i64 1
+ %tmp3682 = getelementptr inbounds float, float* %tmp3681, i64 1
+ %tmp3683 = getelementptr inbounds float, float* %tmp3682, i64 1
+ %tmp3684 = getelementptr inbounds float, float* %tmp3683, i64 1
+ %tmp3685 = getelementptr inbounds float, float* %tmp3684, i64 1
+ %tmp3686 = getelementptr inbounds float, float* %tmp3685, i64 1
+ %tmp3687 = getelementptr inbounds float, float* %tmp3686, i64 1
+ %tmp3688 = getelementptr inbounds float, float* %tmp3687, i64 1
+ %tmp3689 = getelementptr inbounds float, float* %tmp3688, i64 1
+ %tmp3690 = getelementptr inbounds float, float* %tmp3689, i64 1
+ %tmp3691 = getelementptr inbounds float, float* %tmp3690, i64 1
+ %tmp3692 = getelementptr inbounds float, float* %tmp3691, i64 1
+ %tmp3693 = getelementptr inbounds float, float* %tmp3692, i64 1
+ %tmp3694 = getelementptr inbounds float, float* %tmp3693, i64 1
+ %tmp3695 = getelementptr inbounds float, float* %tmp3694, i64 1
+ %tmp3696 = getelementptr inbounds float, float* %tmp3695, i64 1
+ %tmp3697 = getelementptr inbounds float, float* %tmp3696, i64 1
+ %tmp3698 = getelementptr inbounds float, float* %tmp3697, i64 1
+ %tmp3699 = getelementptr inbounds float, float* %tmp3698, i64 1
+ %tmp3700 = getelementptr inbounds float, float* %tmp3699, i64 1
+ %tmp3701 = getelementptr inbounds float, float* %tmp3700, i64 1
+ %tmp3702 = getelementptr inbounds float, float* %tmp3701, i64 1
+ %tmp3703 = getelementptr inbounds float, float* %tmp3702, i64 1
+ %tmp3704 = getelementptr inbounds float, float* %tmp3703, i64 1
+ %tmp3705 = getelementptr inbounds float, float* %tmp3704, i64 1
+ %tmp3706 = getelementptr inbounds float, float* %tmp3705, i64 1
+ %tmp3707 = getelementptr inbounds float, float* %tmp3706, i64 1
+ %tmp3708 = getelementptr inbounds float, float* %tmp3707, i64 1
+ %tmp3709 = getelementptr inbounds float, float* %tmp3708, i64 1
+ %tmp3710 = getelementptr inbounds float, float* %tmp3709, i64 1
+ %tmp3711 = getelementptr inbounds float, float* %tmp3710, i64 1
+ %tmp3712 = getelementptr inbounds float, float* %tmp3711, i64 1
+ %tmp3713 = getelementptr inbounds float, float* %tmp3712, i64 1
+ %tmp3714 = getelementptr inbounds float, float* %tmp3713, i64 1
+ %tmp3715 = getelementptr inbounds float, float* %tmp3714, i64 1
+ %tmp3716 = getelementptr inbounds float, float* %tmp3715, i64 1
+ %tmp3717 = getelementptr inbounds float, float* %tmp3716, i64 1
+ %tmp3718 = getelementptr inbounds float, float* %tmp3717, i64 1
+ %tmp3719 = getelementptr inbounds float, float* %tmp3718, i64 1
+ %tmp3720 = getelementptr inbounds float, float* %tmp3719, i64 1
+ %tmp3721 = getelementptr inbounds float, float* %tmp3720, i64 1
+ %tmp3722 = getelementptr inbounds float, float* %tmp3721, i64 1
+ %tmp3723 = getelementptr inbounds float, float* %tmp3722, i64 1
+ %tmp3724 = getelementptr inbounds float, float* %tmp3723, i64 1
+ %tmp3725 = getelementptr inbounds float, float* %tmp3724, i64 1
+ %tmp3726 = getelementptr inbounds float, float* %tmp3725, i64 1
+ %tmp3727 = getelementptr inbounds float, float* %tmp3726, i64 1
+ %tmp3728 = getelementptr inbounds float, float* %tmp3727, i64 1
+ %tmp3729 = getelementptr inbounds float, float* %tmp3728, i64 1
+ %tmp3730 = getelementptr inbounds float, float* %tmp3729, i64 1
+ %tmp3731 = getelementptr inbounds float, float* %tmp3730, i64 1
+ %tmp3732 = getelementptr inbounds float, float* %tmp3731, i64 1
+ %tmp3733 = getelementptr inbounds float, float* %tmp3732, i64 1
+ %tmp3734 = getelementptr inbounds float, float* %tmp3733, i64 1
+ %tmp3735 = getelementptr inbounds float, float* %tmp3734, i64 1
+ %tmp3736 = getelementptr inbounds float, float* %tmp3735, i64 1
+ %tmp3737 = getelementptr inbounds float, float* %tmp3736, i64 1
+ %tmp3738 = getelementptr inbounds float, float* %tmp3737, i64 1
+ %tmp3739 = getelementptr inbounds float, float* %tmp3738, i64 1
+ %tmp3740 = getelementptr inbounds float, float* %tmp3739, i64 1
+ %tmp3741 = getelementptr inbounds float, float* %tmp3740, i64 1
+ %tmp3742 = getelementptr inbounds float, float* %tmp3741, i64 1
+ %tmp3743 = getelementptr inbounds float, float* %tmp3742, i64 1
+ %tmp3744 = getelementptr inbounds float, float* %tmp3743, i64 1
+ %tmp3745 = getelementptr inbounds float, float* %tmp3744, i64 1
+ %tmp3746 = getelementptr inbounds float, float* %tmp3745, i64 1
+ %tmp3747 = getelementptr inbounds float, float* %tmp3746, i64 1
+ %tmp3748 = getelementptr inbounds float, float* %tmp3747, i64 1
+ %tmp3749 = getelementptr inbounds float, float* %tmp3748, i64 1
+ %tmp3750 = getelementptr inbounds float, float* %tmp3749, i64 1
+ %tmp3751 = getelementptr inbounds float, float* %tmp3750, i64 1
+ %tmp3752 = getelementptr inbounds float, float* %tmp3751, i64 1
+ %tmp3753 = getelementptr inbounds float, float* %tmp3752, i64 1
+ %tmp3754 = getelementptr inbounds float, float* %tmp3753, i64 1
+ %tmp3755 = getelementptr inbounds float, float* %tmp3754, i64 1
+ %tmp3756 = getelementptr inbounds float, float* %tmp3755, i64 1
+ %tmp3757 = getelementptr inbounds float, float* %tmp3756, i64 1
+ %tmp3758 = getelementptr inbounds float, float* %tmp3757, i64 1
+ %tmp3759 = getelementptr inbounds float, float* %tmp3758, i64 1
+ %tmp3760 = getelementptr inbounds float, float* %tmp3759, i64 1
+ %tmp3761 = getelementptr inbounds float, float* %tmp3760, i64 1
+ %tmp3762 = getelementptr inbounds float, float* %tmp3761, i64 1
+ %tmp3763 = getelementptr inbounds float, float* %tmp3762, i64 1
+ %tmp3764 = getelementptr inbounds float, float* %tmp3763, i64 1
+ %tmp3765 = getelementptr inbounds float, float* %tmp3764, i64 1
+ %tmp3766 = getelementptr inbounds float, float* %tmp3765, i64 1
+ %tmp3767 = getelementptr inbounds float, float* %tmp3766, i64 1
+ %tmp3768 = getelementptr inbounds float, float* %tmp3767, i64 1
+ %tmp3769 = getelementptr inbounds float, float* %tmp3768, i64 1
+ %tmp3770 = getelementptr inbounds float, float* %tmp3769, i64 1
+ %tmp3771 = getelementptr inbounds float, float* %tmp3770, i64 1
+ %tmp3772 = getelementptr inbounds float, float* %tmp3771, i64 1
+ %tmp3773 = getelementptr inbounds float, float* %tmp3772, i64 1
+ %tmp3774 = getelementptr inbounds float, float* %tmp3773, i64 1
+ %tmp3775 = getelementptr inbounds float, float* %tmp3774, i64 1
+ %tmp3776 = getelementptr inbounds float, float* %tmp3775, i64 1
+ %tmp3777 = getelementptr inbounds float, float* %tmp3776, i64 1
+ %tmp3778 = getelementptr inbounds float, float* %tmp3777, i64 1
+ %tmp3779 = getelementptr inbounds float, float* %tmp3778, i64 1
+ %tmp3780 = getelementptr inbounds float, float* %tmp3779, i64 1
+ %tmp3781 = getelementptr inbounds float, float* %tmp3780, i64 1
+ %tmp3782 = getelementptr inbounds float, float* %tmp3781, i64 1
+ %tmp3783 = getelementptr inbounds float, float* %tmp3782, i64 1
+ %tmp3784 = getelementptr inbounds float, float* %tmp3783, i64 1
+ %tmp3785 = getelementptr inbounds float, float* %tmp3784, i64 1
+ %tmp3786 = getelementptr inbounds float, float* %tmp3785, i64 1
+ %tmp3787 = getelementptr inbounds float, float* %tmp3786, i64 1
+ %tmp3788 = getelementptr inbounds float, float* %tmp3787, i64 1
+ %tmp3789 = getelementptr inbounds float, float* %tmp3788, i64 1
+ %tmp3790 = getelementptr inbounds float, float* %tmp3789, i64 1
+ %tmp3791 = getelementptr inbounds float, float* %tmp3790, i64 1
+ %tmp3792 = getelementptr inbounds float, float* %tmp3791, i64 1
+ %tmp3793 = getelementptr inbounds float, float* %tmp3792, i64 1
+ %tmp3794 = getelementptr inbounds float, float* %tmp3793, i64 1
+ %tmp3795 = getelementptr inbounds float, float* %tmp3794, i64 1
+ %tmp3796 = getelementptr inbounds float, float* %tmp3795, i64 1
+ %tmp3797 = getelementptr inbounds float, float* %tmp3796, i64 1
+ %tmp3798 = getelementptr inbounds float, float* %tmp3797, i64 1
+ %tmp3799 = getelementptr inbounds float, float* %tmp3798, i64 1
+ %tmp3800 = getelementptr inbounds float, float* %tmp3799, i64 1
+ %tmp3801 = getelementptr inbounds float, float* %tmp3800, i64 1
+ %tmp3802 = getelementptr inbounds float, float* %tmp3801, i64 1
+ %tmp3803 = getelementptr inbounds float, float* %tmp3802, i64 1
+ %tmp3804 = getelementptr inbounds float, float* %tmp3803, i64 1
+ %tmp3805 = getelementptr inbounds float, float* %tmp3804, i64 1
+ %tmp3806 = getelementptr inbounds float, float* %tmp3805, i64 1
+ %tmp3807 = getelementptr inbounds float, float* %tmp3806, i64 1
+ %tmp3808 = getelementptr inbounds float, float* %tmp3807, i64 1
+ %tmp3809 = getelementptr inbounds float, float* %tmp3808, i64 1
+ %tmp3810 = getelementptr inbounds float, float* %tmp3809, i64 1
+ %tmp3811 = getelementptr inbounds float, float* %tmp3810, i64 1
+ %tmp3812 = getelementptr inbounds float, float* %tmp3811, i64 1
+ %tmp3813 = getelementptr inbounds float, float* %tmp3812, i64 1
+ %tmp3814 = getelementptr inbounds float, float* %tmp3813, i64 1
+ %tmp3815 = getelementptr inbounds float, float* %tmp3814, i64 1
+ %tmp3816 = getelementptr inbounds float, float* %tmp3815, i64 1
+ %tmp3817 = getelementptr inbounds float, float* %tmp3816, i64 1
+ %tmp3818 = getelementptr inbounds float, float* %tmp3817, i64 1
+ %tmp3819 = getelementptr inbounds float, float* %tmp3818, i64 1
+ %tmp3820 = getelementptr inbounds float, float* %tmp3819, i64 1
+ %tmp3821 = getelementptr inbounds float, float* %tmp3820, i64 1
+ %tmp3822 = getelementptr inbounds float, float* %tmp3821, i64 1
+ %tmp3823 = getelementptr inbounds float, float* %tmp3822, i64 1
+ %tmp3824 = getelementptr inbounds float, float* %tmp3823, i64 1
+ %tmp3825 = getelementptr inbounds float, float* %tmp3824, i64 1
+ %tmp3826 = getelementptr inbounds float, float* %tmp3825, i64 1
+ %tmp3827 = getelementptr inbounds float, float* %tmp3826, i64 1
+ %tmp3828 = getelementptr inbounds float, float* %tmp3827, i64 1
+ %tmp3829 = getelementptr inbounds float, float* %tmp3828, i64 1
+ %tmp3830 = getelementptr inbounds float, float* %tmp3829, i64 1
+ %tmp3831 = getelementptr inbounds float, float* %tmp3830, i64 1
+ %tmp3832 = getelementptr inbounds float, float* %tmp3831, i64 1
+ %tmp3833 = getelementptr inbounds float, float* %tmp3832, i64 1
+ %tmp3834 = getelementptr inbounds float, float* %tmp3833, i64 1
+ %tmp3835 = getelementptr inbounds float, float* %tmp3834, i64 1
+ %tmp3836 = getelementptr inbounds float, float* %tmp3835, i64 1
+ %tmp3837 = getelementptr inbounds float, float* %tmp3836, i64 1
+ %tmp3838 = getelementptr inbounds float, float* %tmp3837, i64 1
+ %tmp3839 = getelementptr inbounds float, float* %tmp3838, i64 1
+ %tmp3840 = getelementptr inbounds float, float* %tmp3839, i64 1
+ %tmp3841 = getelementptr inbounds float, float* %tmp3840, i64 1
+ %tmp3842 = getelementptr inbounds float, float* %tmp3841, i64 1
+ %tmp3843 = getelementptr inbounds float, float* %tmp3842, i64 1
+ %tmp3844 = getelementptr inbounds float, float* %tmp3843, i64 1
+ %tmp3845 = getelementptr inbounds float, float* %tmp3844, i64 1
+ %tmp3846 = getelementptr inbounds float, float* %tmp3845, i64 1
+ %tmp3847 = getelementptr inbounds float, float* %tmp3846, i64 1
+ %tmp3848 = getelementptr inbounds float, float* %tmp3847, i64 1
+ %tmp3849 = getelementptr inbounds float, float* %tmp3848, i64 1
+ %tmp3850 = getelementptr inbounds float, float* %tmp3849, i64 1
+ %tmp3851 = getelementptr inbounds float, float* %tmp3850, i64 1
+ %tmp3852 = getelementptr inbounds float, float* %tmp3851, i64 1
+ %tmp3853 = getelementptr inbounds float, float* %tmp3852, i64 1
+ %tmp3854 = getelementptr inbounds float, float* %tmp3853, i64 1
+ %tmp3855 = getelementptr inbounds float, float* %tmp3854, i64 1
+ %tmp3856 = getelementptr inbounds float, float* %tmp3855, i64 1
+ %tmp3857 = getelementptr inbounds float, float* %tmp3856, i64 1
+ %tmp3858 = getelementptr inbounds float, float* %tmp3857, i64 1
+ %tmp3859 = getelementptr inbounds float, float* %tmp3858, i64 1
+ %tmp3860 = getelementptr inbounds float, float* %tmp3859, i64 1
+ %tmp3861 = getelementptr inbounds float, float* %tmp3860, i64 1
+ %tmp3862 = getelementptr inbounds float, float* %tmp3861, i64 1
+ %tmp3863 = getelementptr inbounds float, float* %tmp3862, i64 1
+ %tmp3864 = getelementptr inbounds float, float* %tmp3863, i64 1
+ %tmp3865 = getelementptr inbounds float, float* %tmp3864, i64 1
+ %tmp3866 = getelementptr inbounds float, float* %tmp3865, i64 1
+ %tmp3867 = getelementptr inbounds float, float* %tmp3866, i64 1
+ %tmp3868 = getelementptr inbounds float, float* %tmp3867, i64 1
+ %tmp3869 = getelementptr inbounds float, float* %tmp3868, i64 1
+ %tmp3870 = getelementptr inbounds float, float* %tmp3869, i64 1
+ %tmp3871 = getelementptr inbounds float, float* %tmp3870, i64 1
+ %tmp3872 = getelementptr inbounds float, float* %tmp3871, i64 1
+ %tmp3873 = getelementptr inbounds float, float* %tmp3872, i64 1
+ %tmp3874 = getelementptr inbounds float, float* %tmp3873, i64 1
+ %tmp3875 = getelementptr inbounds float, float* %tmp3874, i64 1
+ %tmp3876 = getelementptr inbounds float, float* %tmp3875, i64 1
+ %tmp3877 = getelementptr inbounds float, float* %tmp3876, i64 1
+ %tmp3878 = getelementptr inbounds float, float* %tmp3877, i64 1
+ %tmp3879 = getelementptr inbounds float, float* %tmp3878, i64 1
+ %tmp3880 = getelementptr inbounds float, float* %tmp3879, i64 1
+ %tmp3881 = getelementptr inbounds float, float* %tmp3880, i64 1
+ %tmp3882 = getelementptr inbounds float, float* %tmp3881, i64 1
+ %tmp3883 = getelementptr inbounds float, float* %tmp3882, i64 1
+ %tmp3884 = getelementptr inbounds float, float* %tmp3883, i64 1
+ %tmp3885 = getelementptr inbounds float, float* %tmp3884, i64 1
+ %tmp3886 = getelementptr inbounds float, float* %tmp3885, i64 1
+ %tmp3887 = getelementptr inbounds float, float* %tmp3886, i64 1
+ %tmp3888 = getelementptr inbounds float, float* %tmp3887, i64 1
+ %tmp3889 = getelementptr inbounds float, float* %tmp3888, i64 1
+ %tmp3890 = getelementptr inbounds float, float* %tmp3889, i64 1
+ %tmp3891 = getelementptr inbounds float, float* %tmp3890, i64 1
+ %tmp3892 = getelementptr inbounds float, float* %tmp3891, i64 1
+ %tmp3893 = getelementptr inbounds float, float* %tmp3892, i64 1
+ %tmp3894 = getelementptr inbounds float, float* %tmp3893, i64 1
+ %tmp3895 = getelementptr inbounds float, float* %tmp3894, i64 1
+ %tmp3896 = getelementptr inbounds float, float* %tmp3895, i64 1
+ %tmp3897 = getelementptr inbounds float, float* %tmp3896, i64 1
+ %tmp3898 = getelementptr inbounds float, float* %tmp3897, i64 1
+ %tmp3899 = getelementptr inbounds float, float* %tmp3898, i64 1
+ %tmp3900 = getelementptr inbounds float, float* %tmp3899, i64 1
+ %tmp3901 = getelementptr inbounds float, float* %tmp3900, i64 1
+ %tmp3902 = getelementptr inbounds float, float* %tmp3901, i64 1
+ %tmp3903 = getelementptr inbounds float, float* %tmp3902, i64 1
+ %tmp3904 = getelementptr inbounds float, float* %tmp3903, i64 1
+ %tmp3905 = getelementptr inbounds float, float* %tmp3904, i64 1
+ %tmp3906 = getelementptr inbounds float, float* %tmp3905, i64 1
+ %tmp3907 = getelementptr inbounds float, float* %tmp3906, i64 1
+ %tmp3908 = getelementptr inbounds float, float* %tmp3907, i64 1
+ %tmp3909 = getelementptr inbounds float, float* %tmp3908, i64 1
+ %tmp3910 = getelementptr inbounds float, float* %tmp3909, i64 1
+ %tmp3911 = getelementptr inbounds float, float* %tmp3910, i64 1
+ %tmp3912 = getelementptr inbounds float, float* %tmp3911, i64 1
+ %tmp3913 = getelementptr inbounds float, float* %tmp3912, i64 1
+ %tmp3914 = getelementptr inbounds float, float* %tmp3913, i64 1
+ %tmp3915 = getelementptr inbounds float, float* %tmp3914, i64 1
+ %tmp3916 = getelementptr inbounds float, float* %tmp3915, i64 1
+ %tmp3917 = getelementptr inbounds float, float* %tmp3916, i64 1
+ %tmp3918 = getelementptr inbounds float, float* %tmp3917, i64 1
+ %tmp3919 = getelementptr inbounds float, float* %tmp3918, i64 1
+ %tmp3920 = getelementptr inbounds float, float* %tmp3919, i64 1
+ %tmp3921 = getelementptr inbounds float, float* %tmp3920, i64 1
+ %tmp3922 = getelementptr inbounds float, float* %tmp3921, i64 1
+ %tmp3923 = getelementptr inbounds float, float* %tmp3922, i64 1
+ %tmp3924 = getelementptr inbounds float, float* %tmp3923, i64 1
+ %tmp3925 = getelementptr inbounds float, float* %tmp3924, i64 1
+ %tmp3926 = getelementptr inbounds float, float* %tmp3925, i64 1
+ %tmp3927 = getelementptr inbounds float, float* %tmp3926, i64 1
+ %tmp3928 = getelementptr inbounds float, float* %tmp3927, i64 1
+ %tmp3929 = getelementptr inbounds float, float* %tmp3928, i64 1
+ %tmp3930 = getelementptr inbounds float, float* %tmp3929, i64 1
+ %tmp3931 = getelementptr inbounds float, float* %tmp3930, i64 1
+ %tmp3932 = getelementptr inbounds float, float* %tmp3931, i64 1
+ %tmp3933 = getelementptr inbounds float, float* %tmp3932, i64 1
+ %tmp3934 = getelementptr inbounds float, float* %tmp3933, i64 1
+ %tmp3935 = getelementptr inbounds float, float* %tmp3934, i64 1
+ %tmp3936 = getelementptr inbounds float, float* %tmp3935, i64 1
+ %tmp3937 = getelementptr inbounds float, float* %tmp3936, i64 1
+ %tmp3938 = getelementptr inbounds float, float* %tmp3937, i64 1
+ %tmp3939 = getelementptr inbounds float, float* %tmp3938, i64 1
+ %tmp3940 = getelementptr inbounds float, float* %tmp3939, i64 1
+ %tmp3941 = getelementptr inbounds float, float* %tmp3940, i64 1
+ %tmp3942 = getelementptr inbounds float, float* %tmp3941, i64 1
+ %tmp3943 = getelementptr inbounds float, float* %tmp3942, i64 1
+ %tmp3944 = getelementptr inbounds float, float* %tmp3943, i64 1
+ %tmp3945 = getelementptr inbounds float, float* %tmp3944, i64 1
+ %tmp3946 = getelementptr inbounds float, float* %tmp3945, i64 1
+ %tmp3947 = getelementptr inbounds float, float* %tmp3946, i64 1
+ %tmp3948 = getelementptr inbounds float, float* %tmp3947, i64 1
+ %tmp3949 = getelementptr inbounds float, float* %tmp3948, i64 1
+ %tmp3950 = getelementptr inbounds float, float* %tmp3949, i64 1
+ %tmp3951 = getelementptr inbounds float, float* %tmp3950, i64 1
+ %tmp3952 = getelementptr inbounds float, float* %tmp3951, i64 1
+ %tmp3953 = getelementptr inbounds float, float* %tmp3952, i64 1
+ %tmp3954 = getelementptr inbounds float, float* %tmp3953, i64 1
+ %tmp3955 = getelementptr inbounds float, float* %tmp3954, i64 1
+ %tmp3956 = getelementptr inbounds float, float* %tmp3955, i64 1
+ %tmp3957 = getelementptr inbounds float, float* %tmp3956, i64 1
+ %tmp3958 = getelementptr inbounds float, float* %tmp3957, i64 1
+ %tmp3959 = getelementptr inbounds float, float* %tmp3958, i64 1
+ %tmp3960 = getelementptr inbounds float, float* %tmp3959, i64 1
+ %tmp3961 = getelementptr inbounds float, float* %tmp3960, i64 1
+ %tmp3962 = getelementptr inbounds float, float* %tmp3961, i64 1
+ %tmp3963 = getelementptr inbounds float, float* %tmp3962, i64 1
+ %tmp3964 = getelementptr inbounds float, float* %tmp3963, i64 1
+ %tmp3965 = getelementptr inbounds float, float* %tmp3964, i64 1
+ %tmp3966 = getelementptr inbounds float, float* %tmp3965, i64 1
+ %tmp3967 = getelementptr inbounds float, float* %tmp3966, i64 1
+ %tmp3968 = getelementptr inbounds float, float* %tmp3967, i64 1
+ %tmp3969 = getelementptr inbounds float, float* %tmp3968, i64 1
+ %tmp3970 = getelementptr inbounds float, float* %tmp3969, i64 1
+ %tmp3971 = getelementptr inbounds float, float* %tmp3970, i64 1
+ %tmp3972 = getelementptr inbounds float, float* %tmp3971, i64 1
+ %tmp3973 = getelementptr inbounds float, float* %tmp3972, i64 1
+ %tmp3974 = getelementptr inbounds float, float* %tmp3973, i64 1
+ %tmp3975 = getelementptr inbounds float, float* %tmp3974, i64 1
+ %tmp3976 = getelementptr inbounds float, float* %tmp3975, i64 1
+ %tmp3977 = getelementptr inbounds float, float* %tmp3976, i64 1
+ %tmp3978 = getelementptr inbounds float, float* %tmp3977, i64 1
+ %tmp3979 = getelementptr inbounds float, float* %tmp3978, i64 1
+ %tmp3980 = getelementptr inbounds float, float* %tmp3979, i64 1
+ %tmp3981 = getelementptr inbounds float, float* %tmp3980, i64 1
+ %tmp3982 = getelementptr inbounds float, float* %tmp3981, i64 1
+ %tmp3983 = getelementptr inbounds float, float* %tmp3982, i64 1
+ %tmp3984 = getelementptr inbounds float, float* %tmp3983, i64 1
+ %tmp3985 = getelementptr inbounds float, float* %tmp3984, i64 1
+ %tmp3986 = getelementptr inbounds float, float* %tmp3985, i64 1
+ %tmp3987 = getelementptr inbounds float, float* %tmp3986, i64 1
+ %tmp3988 = getelementptr inbounds float, float* %tmp3987, i64 1
+ %tmp3989 = getelementptr inbounds float, float* %tmp3988, i64 1
+ %tmp3990 = getelementptr inbounds float, float* %tmp3989, i64 1
+ %tmp3991 = getelementptr inbounds float, float* %tmp3990, i64 1
+ %tmp3992 = getelementptr inbounds float, float* %tmp3991, i64 1
+ %tmp3993 = getelementptr inbounds float, float* %tmp3992, i64 1
+ %tmp3994 = getelementptr inbounds float, float* %tmp3993, i64 1
+ %tmp3995 = getelementptr inbounds float, float* %tmp3994, i64 1
+ %tmp3996 = getelementptr inbounds float, float* %tmp3995, i64 1
+ %tmp3997 = getelementptr inbounds float, float* %tmp3996, i64 1
+ %tmp3998 = getelementptr inbounds float, float* %tmp3997, i64 1
+ %tmp3999 = getelementptr inbounds float, float* %tmp3998, i64 1
+ %tmp4000 = getelementptr inbounds float, float* %tmp3999, i64 1
+ %tmp4001 = getelementptr inbounds float, float* %tmp4000, i64 1
+ %tmp4002 = getelementptr inbounds float, float* %tmp4001, i64 1
+ %tmp4003 = getelementptr inbounds float, float* %tmp4002, i64 1
+ %tmp4004 = getelementptr inbounds float, float* %tmp4003, i64 1
+ %tmp4005 = getelementptr inbounds float, float* %tmp4004, i64 1
+ %tmp4006 = getelementptr inbounds float, float* %tmp4005, i64 1
+ %tmp4007 = getelementptr inbounds float, float* %tmp4006, i64 1
+ %tmp4008 = getelementptr inbounds float, float* %tmp4007, i64 1
+ %tmp4009 = getelementptr inbounds float, float* %tmp4008, i64 1
+ %tmp4010 = getelementptr inbounds float, float* %tmp4009, i64 1
+ %tmp4011 = getelementptr inbounds float, float* %tmp4010, i64 1
+ %tmp4012 = getelementptr inbounds float, float* %tmp4011, i64 1
+ %tmp4013 = getelementptr inbounds float, float* %tmp4012, i64 1
+ %tmp4014 = getelementptr inbounds float, float* %tmp4013, i64 1
+ %tmp4015 = getelementptr inbounds float, float* %tmp4014, i64 1
+ %tmp4016 = getelementptr inbounds float, float* %tmp4015, i64 1
+ %tmp4017 = getelementptr inbounds float, float* %tmp4016, i64 1
+ %tmp4018 = getelementptr inbounds float, float* %tmp4017, i64 1
+ %tmp4019 = getelementptr inbounds float, float* %tmp4018, i64 1
+ %tmp4020 = getelementptr inbounds float, float* %tmp4019, i64 1
+ %tmp4021 = getelementptr inbounds float, float* %tmp4020, i64 1
+ %tmp4022 = getelementptr inbounds float, float* %tmp4021, i64 1
+ %tmp4023 = getelementptr inbounds float, float* %tmp4022, i64 1
+ %tmp4024 = getelementptr inbounds float, float* %tmp4023, i64 1
+ %tmp4025 = getelementptr inbounds float, float* %tmp4024, i64 1
+ %tmp4026 = getelementptr inbounds float, float* %tmp4025, i64 1
+ %tmp4027 = getelementptr inbounds float, float* %tmp4026, i64 1
+ %tmp4028 = getelementptr inbounds float, float* %tmp4027, i64 1
+ %tmp4029 = getelementptr inbounds float, float* %tmp4028, i64 1
+ %tmp4030 = getelementptr inbounds float, float* %tmp4029, i64 1
+ %tmp4031 = getelementptr inbounds float, float* %tmp4030, i64 1
+ %tmp4032 = getelementptr inbounds float, float* %tmp4031, i64 1
+ %tmp4033 = getelementptr inbounds float, float* %tmp4032, i64 1
+ %tmp4034 = getelementptr inbounds float, float* %tmp4033, i64 1
+ %tmp4035 = getelementptr inbounds float, float* %tmp4034, i64 1
+ %tmp4036 = getelementptr inbounds float, float* %tmp4035, i64 1
+ %tmp4037 = getelementptr inbounds float, float* %tmp4036, i64 1
+ %tmp4038 = getelementptr inbounds float, float* %tmp4037, i64 1
+ %tmp4039 = getelementptr inbounds float, float* %tmp4038, i64 1
+ %tmp4040 = getelementptr inbounds float, float* %tmp4039, i64 1
+ %tmp4041 = getelementptr inbounds float, float* %tmp4040, i64 1
+ %tmp4042 = getelementptr inbounds float, float* %tmp4041, i64 1
+ %tmp4043 = getelementptr inbounds float, float* %tmp4042, i64 1
+ %tmp4044 = getelementptr inbounds float, float* %tmp4043, i64 1
+ %tmp4045 = getelementptr inbounds float, float* %tmp4044, i64 1
+ %tmp4046 = getelementptr inbounds float, float* %tmp4045, i64 1
+ %tmp4047 = getelementptr inbounds float, float* %tmp4046, i64 1
+ %tmp4048 = getelementptr inbounds float, float* %tmp4047, i64 1
+ %tmp4049 = getelementptr inbounds float, float* %tmp4048, i64 1
+ %tmp4050 = getelementptr inbounds float, float* %tmp4049, i64 1
+ %tmp4051 = getelementptr inbounds float, float* %tmp4050, i64 1
+ %tmp4052 = getelementptr inbounds float, float* %tmp4051, i64 1
+ %tmp4053 = getelementptr inbounds float, float* %tmp4052, i64 1
+ %tmp4054 = getelementptr inbounds float, float* %tmp4053, i64 1
+ %tmp4055 = getelementptr inbounds float, float* %tmp4054, i64 1
+ %tmp4056 = getelementptr inbounds float, float* %tmp4055, i64 1
+ %tmp4057 = getelementptr inbounds float, float* %tmp4056, i64 1
+ %tmp4058 = getelementptr inbounds float, float* %tmp4057, i64 1
+ %tmp4059 = getelementptr inbounds float, float* %tmp4058, i64 1
+ %tmp4060 = getelementptr inbounds float, float* %tmp4059, i64 1
+ %tmp4061 = getelementptr inbounds float, float* %tmp4060, i64 1
+ %tmp4062 = getelementptr inbounds float, float* %tmp4061, i64 1
+ %tmp4063 = getelementptr inbounds float, float* %tmp4062, i64 1
+ %tmp4064 = getelementptr inbounds float, float* %tmp4063, i64 1
+ %tmp4065 = getelementptr inbounds float, float* %tmp4064, i64 1
+ %tmp4066 = getelementptr inbounds float, float* %tmp4065, i64 1
+ %tmp4067 = getelementptr inbounds float, float* %tmp4066, i64 1
+ %tmp4068 = getelementptr inbounds float, float* %tmp4067, i64 1
+ %tmp4069 = getelementptr inbounds float, float* %tmp4068, i64 1
+ %tmp4070 = getelementptr inbounds float, float* %tmp4069, i64 1
+ %tmp4071 = getelementptr inbounds float, float* %tmp4070, i64 1
+ %tmp4072 = getelementptr inbounds float, float* %tmp4071, i64 1
+ %tmp4073 = getelementptr inbounds float, float* %tmp4072, i64 1
+ %tmp4074 = getelementptr inbounds float, float* %tmp4073, i64 1
+ %tmp4075 = getelementptr inbounds float, float* %tmp4074, i64 1
+ %tmp4076 = getelementptr inbounds float, float* %tmp4075, i64 1
+ %tmp4077 = getelementptr inbounds float, float* %tmp4076, i64 1
+ %tmp4078 = getelementptr inbounds float, float* %tmp4077, i64 1
+ %tmp4079 = getelementptr inbounds float, float* %tmp4078, i64 1
+ %tmp4080 = getelementptr inbounds float, float* %tmp4079, i64 1
+ %tmp4081 = getelementptr inbounds float, float* %tmp4080, i64 1
+ %tmp4082 = getelementptr inbounds float, float* %tmp4081, i64 1
+ %tmp4083 = getelementptr inbounds float, float* %tmp4082, i64 1
+ %tmp4084 = getelementptr inbounds float, float* %tmp4083, i64 1
+ %tmp4085 = getelementptr inbounds float, float* %tmp4084, i64 1
+ %tmp4086 = getelementptr inbounds float, float* %tmp4085, i64 1
+ %tmp4087 = getelementptr inbounds float, float* %tmp4086, i64 1
+ %tmp4088 = getelementptr inbounds float, float* %tmp4087, i64 1
+ %tmp4089 = getelementptr inbounds float, float* %tmp4088, i64 1
+ %tmp4090 = getelementptr inbounds float, float* %tmp4089, i64 1
+ %tmp4091 = getelementptr inbounds float, float* %tmp4090, i64 1
+ %tmp4092 = getelementptr inbounds float, float* %tmp4091, i64 1
+ %tmp4093 = getelementptr inbounds float, float* %tmp4092, i64 1
+ %tmp4094 = getelementptr inbounds float, float* %tmp4093, i64 1
+ %tmp4095 = getelementptr inbounds float, float* %tmp4094, i64 1
+ %tmp4096 = getelementptr inbounds float, float* %tmp4095, i64 1
+ %tmp4097 = getelementptr inbounds float, float* %tmp4096, i64 1
+ %tmp4098 = getelementptr inbounds float, float* %tmp4097, i64 1
+ %tmp4099 = getelementptr inbounds float, float* %tmp4098, i64 1
+ %tmp4100 = getelementptr inbounds float, float* %tmp4099, i64 1
+ %tmp4101 = getelementptr inbounds float, float* %tmp4100, i64 1
+ %tmp4102 = getelementptr inbounds float, float* %tmp4101, i64 1
+ %tmp4103 = getelementptr inbounds float, float* %tmp4102, i64 1
+ %tmp4104 = getelementptr inbounds float, float* %tmp4103, i64 1
+ %tmp4105 = getelementptr inbounds float, float* %tmp4104, i64 1
+ %tmp4106 = getelementptr inbounds float, float* %tmp4105, i64 1
+ %tmp4107 = getelementptr inbounds float, float* %tmp4106, i64 1
+ %tmp4108 = getelementptr inbounds float, float* %tmp4107, i64 1
+ %tmp4109 = getelementptr inbounds float, float* %tmp4108, i64 1
+ %tmp4110 = getelementptr inbounds float, float* %tmp4109, i64 1
+ %tmp4111 = getelementptr inbounds float, float* %tmp4110, i64 1
+ %tmp4112 = getelementptr inbounds float, float* %tmp4111, i64 1
+ %tmp4113 = getelementptr inbounds float, float* %tmp4112, i64 1
+ %tmp4114 = getelementptr inbounds float, float* %tmp4113, i64 1
+ %tmp4115 = getelementptr inbounds float, float* %tmp4114, i64 1
+ %tmp4116 = getelementptr inbounds float, float* %tmp4115, i64 1
+ %tmp4117 = getelementptr inbounds float, float* %tmp4116, i64 1
+ %tmp4118 = getelementptr inbounds float, float* %tmp4117, i64 1
+ %tmp4119 = getelementptr inbounds float, float* %tmp4118, i64 1
+ %tmp4120 = getelementptr inbounds float, float* %tmp4119, i64 1
+ %tmp4121 = getelementptr inbounds float, float* %tmp4120, i64 1
+ %tmp4122 = getelementptr inbounds float, float* %tmp4121, i64 1
+ %tmp4123 = getelementptr inbounds float, float* %tmp4122, i64 1
+ %tmp4124 = getelementptr inbounds float, float* %tmp4123, i64 1
+ %tmp4125 = getelementptr inbounds float, float* %tmp4124, i64 1
+ %tmp4126 = getelementptr inbounds float, float* %tmp4125, i64 1
+ %tmp4127 = getelementptr inbounds float, float* %tmp4126, i64 1
+ %tmp4128 = getelementptr inbounds float, float* %tmp4127, i64 1
+ %tmp4129 = getelementptr inbounds float, float* %tmp4128, i64 1
+ %tmp4130 = getelementptr inbounds float, float* %tmp4129, i64 1
+ %tmp4131 = getelementptr inbounds float, float* %tmp4130, i64 1
+ %tmp4132 = getelementptr inbounds float, float* %tmp4131, i64 1
+ %tmp4133 = getelementptr inbounds float, float* %tmp4132, i64 1
+ %tmp4134 = getelementptr inbounds float, float* %tmp4133, i64 1
+ %tmp4135 = getelementptr inbounds float, float* %tmp4134, i64 1
+ %tmp4136 = getelementptr inbounds float, float* %tmp4135, i64 1
+ %tmp4137 = getelementptr inbounds float, float* %tmp4136, i64 1
+ %tmp4138 = getelementptr inbounds float, float* %tmp4137, i64 1
+ %tmp4139 = getelementptr inbounds float, float* %tmp4138, i64 1
+ %tmp4140 = getelementptr inbounds float, float* %tmp4139, i64 1
+ %tmp4141 = getelementptr inbounds float, float* %tmp4140, i64 1
+ %tmp4142 = getelementptr inbounds float, float* %tmp4141, i64 1
+ %tmp4143 = getelementptr inbounds float, float* %tmp4142, i64 1
+ %tmp4144 = getelementptr inbounds float, float* %tmp4143, i64 1
+ %tmp4145 = getelementptr inbounds float, float* %tmp4144, i64 1
+ %tmp4146 = getelementptr inbounds float, float* %tmp4145, i64 1
+ %tmp4147 = getelementptr inbounds float, float* %tmp4146, i64 1
+ %tmp4148 = getelementptr inbounds float, float* %tmp4147, i64 1
+ %tmp4149 = getelementptr inbounds float, float* %tmp4148, i64 1
+ %tmp4150 = getelementptr inbounds float, float* %tmp4149, i64 1
+ %tmp4151 = getelementptr inbounds float, float* %tmp4150, i64 1
+ %tmp4152 = getelementptr inbounds float, float* %tmp4151, i64 1
+ %tmp4153 = getelementptr inbounds float, float* %tmp4152, i64 1
+ %tmp4154 = getelementptr inbounds float, float* %tmp4153, i64 1
+ %tmp4155 = getelementptr inbounds float, float* %tmp4154, i64 1
+ %tmp4156 = getelementptr inbounds float, float* %tmp4155, i64 1
+ %tmp4157 = getelementptr inbounds float, float* %tmp4156, i64 1
+ %tmp4158 = getelementptr inbounds float, float* %tmp4157, i64 1
+ %tmp4159 = getelementptr inbounds float, float* %tmp4158, i64 1
+ %tmp4160 = getelementptr inbounds float, float* %tmp4159, i64 1
+ %tmp4161 = getelementptr inbounds float, float* %tmp4160, i64 1
+ %tmp4162 = getelementptr inbounds float, float* %tmp4161, i64 1
+ %tmp4163 = getelementptr inbounds float, float* %tmp4162, i64 1
+ %tmp4164 = getelementptr inbounds float, float* %tmp4163, i64 1
+ %tmp4165 = getelementptr inbounds float, float* %tmp4164, i64 1
+ %tmp4166 = getelementptr inbounds float, float* %tmp4165, i64 1
+ %tmp4167 = getelementptr inbounds float, float* %tmp4166, i64 1
+ %tmp4168 = getelementptr inbounds float, float* %tmp4167, i64 1
+ %tmp4169 = getelementptr inbounds float, float* %tmp4168, i64 1
+ %tmp4170 = getelementptr inbounds float, float* %tmp4169, i64 1
+ %tmp4171 = getelementptr inbounds float, float* %tmp4170, i64 1
+ %tmp4172 = getelementptr inbounds float, float* %tmp4171, i64 1
+ %tmp4173 = getelementptr inbounds float, float* %tmp4172, i64 1
+ %tmp4174 = getelementptr inbounds float, float* %tmp4173, i64 1
+ %tmp4175 = getelementptr inbounds float, float* %tmp4174, i64 1
+ %tmp4176 = getelementptr inbounds float, float* %tmp4175, i64 1
+ %tmp4177 = getelementptr inbounds float, float* %tmp4176, i64 1
+ %tmp4178 = getelementptr inbounds float, float* %tmp4177, i64 1
+ %tmp4179 = getelementptr inbounds float, float* %tmp4178, i64 1
+ %tmp4180 = getelementptr inbounds float, float* %tmp4179, i64 1
+ %tmp4181 = getelementptr inbounds float, float* %tmp4180, i64 1
+ %tmp4182 = getelementptr inbounds float, float* %tmp4181, i64 1
+ %tmp4183 = getelementptr inbounds float, float* %tmp4182, i64 1
+ %tmp4184 = getelementptr inbounds float, float* %tmp4183, i64 1
+ %tmp4185 = getelementptr inbounds float, float* %tmp4184, i64 1
+ %tmp4186 = getelementptr inbounds float, float* %tmp4185, i64 1
+ %tmp4187 = getelementptr inbounds float, float* %tmp4186, i64 1
+ %tmp4188 = getelementptr inbounds float, float* %tmp4187, i64 1
+ %tmp4189 = getelementptr inbounds float, float* %tmp4188, i64 1
+ %tmp4190 = getelementptr inbounds float, float* %tmp4189, i64 1
+ %tmp4191 = getelementptr inbounds float, float* %tmp4190, i64 1
+ %tmp4192 = getelementptr inbounds float, float* %tmp4191, i64 1
+ %tmp4193 = getelementptr inbounds float, float* %tmp4192, i64 1
+ %tmp4194 = getelementptr inbounds float, float* %tmp4193, i64 1
+ %tmp4195 = getelementptr inbounds float, float* %tmp4194, i64 1
+ %tmp4196 = getelementptr inbounds float, float* %tmp4195, i64 1
+ %tmp4197 = getelementptr inbounds float, float* %tmp4196, i64 1
+ %tmp4198 = getelementptr inbounds float, float* %tmp4197, i64 1
+ %tmp4199 = getelementptr inbounds float, float* %tmp4198, i64 1
+ %tmp4200 = getelementptr inbounds float, float* %tmp4199, i64 1
+ %tmp4201 = getelementptr inbounds float, float* %tmp4200, i64 1
+ %tmp4202 = getelementptr inbounds float, float* %tmp4201, i64 1
+ %tmp4203 = getelementptr inbounds float, float* %tmp4202, i64 1
+ %tmp4204 = getelementptr inbounds float, float* %tmp4203, i64 1
+ %tmp4205 = getelementptr inbounds float, float* %tmp4204, i64 1
+ %tmp4206 = getelementptr inbounds float, float* %tmp4205, i64 1
+ %tmp4207 = getelementptr inbounds float, float* %tmp4206, i64 1
+ %tmp4208 = getelementptr inbounds float, float* %tmp4207, i64 1
+ %tmp4209 = getelementptr inbounds float, float* %tmp4208, i64 1
+ %tmp4210 = getelementptr inbounds float, float* %tmp4209, i64 1
+ %tmp4211 = getelementptr inbounds float, float* %tmp4210, i64 1
+ %tmp4212 = getelementptr inbounds float, float* %tmp4211, i64 1
+ %tmp4213 = getelementptr inbounds float, float* %tmp4212, i64 1
+ %tmp4214 = getelementptr inbounds float, float* %tmp4213, i64 1
+ %tmp4215 = getelementptr inbounds float, float* %tmp4214, i64 1
+ %tmp4216 = getelementptr inbounds float, float* %tmp4215, i64 1
+ %tmp4217 = getelementptr inbounds float, float* %tmp4216, i64 1
+ %tmp4218 = getelementptr inbounds float, float* %tmp4217, i64 1
+ %tmp4219 = getelementptr inbounds float, float* %tmp4218, i64 1
+ %tmp4220 = getelementptr inbounds float, float* %tmp4219, i64 1
+ %tmp4221 = getelementptr inbounds float, float* %tmp4220, i64 1
+ %tmp4222 = getelementptr inbounds float, float* %tmp4221, i64 1
+ %tmp4223 = getelementptr inbounds float, float* %tmp4222, i64 1
+ %tmp4224 = getelementptr inbounds float, float* %tmp4223, i64 1
+ %tmp4225 = getelementptr inbounds float, float* %tmp4224, i64 1
+ %tmp4226 = getelementptr inbounds float, float* %tmp4225, i64 1
+ %tmp4227 = getelementptr inbounds float, float* %tmp4226, i64 1
+ %tmp4228 = getelementptr inbounds float, float* %tmp4227, i64 1
+ %tmp4229 = getelementptr inbounds float, float* %tmp4228, i64 1
+ %tmp4230 = getelementptr inbounds float, float* %tmp4229, i64 1
+ %tmp4231 = getelementptr inbounds float, float* %tmp4230, i64 1
+ %tmp4232 = getelementptr inbounds float, float* %tmp4231, i64 1
+ %tmp4233 = getelementptr inbounds float, float* %tmp4232, i64 1
+ %tmp4234 = getelementptr inbounds float, float* %tmp4233, i64 1
+ %tmp4235 = getelementptr inbounds float, float* %tmp4234, i64 1
+ %tmp4236 = getelementptr inbounds float, float* %tmp4235, i64 1
+ %tmp4237 = getelementptr inbounds float, float* %tmp4236, i64 1
+ %tmp4238 = getelementptr inbounds float, float* %tmp4237, i64 1
+ %tmp4239 = getelementptr inbounds float, float* %tmp4238, i64 1
+ %tmp4240 = getelementptr inbounds float, float* %tmp4239, i64 1
+ %tmp4241 = getelementptr inbounds float, float* %tmp4240, i64 1
+ %tmp4242 = getelementptr inbounds float, float* %tmp4241, i64 1
+ %tmp4243 = getelementptr inbounds float, float* %tmp4242, i64 1
+ %tmp4244 = getelementptr inbounds float, float* %tmp4243, i64 1
+ %tmp4245 = getelementptr inbounds float, float* %tmp4244, i64 1
+ %tmp4246 = getelementptr inbounds float, float* %tmp4245, i64 1
+ %tmp4247 = getelementptr inbounds float, float* %tmp4246, i64 1
+ %tmp4248 = getelementptr inbounds float, float* %tmp4247, i64 1
+ %tmp4249 = getelementptr inbounds float, float* %tmp4248, i64 1
+ %tmp4250 = getelementptr inbounds float, float* %tmp4249, i64 1
+ %tmp4251 = getelementptr inbounds float, float* %tmp4250, i64 1
+ %tmp4252 = getelementptr inbounds float, float* %tmp4251, i64 1
+ %tmp4253 = getelementptr inbounds float, float* %tmp4252, i64 1
+ %tmp4254 = getelementptr inbounds float, float* %tmp4253, i64 1
+ %tmp4255 = getelementptr inbounds float, float* %tmp4254, i64 1
+ %tmp4256 = getelementptr inbounds float, float* %tmp4255, i64 1
+ %tmp4257 = getelementptr inbounds float, float* %tmp4256, i64 1
+ %tmp4258 = getelementptr inbounds float, float* %tmp4257, i64 1
+ %tmp4259 = getelementptr inbounds float, float* %tmp4258, i64 1
+ %tmp4260 = getelementptr inbounds float, float* %tmp4259, i64 1
+ %tmp4261 = getelementptr inbounds float, float* %tmp4260, i64 1
+ %tmp4262 = getelementptr inbounds float, float* %tmp4261, i64 1
+ %tmp4263 = getelementptr inbounds float, float* %tmp4262, i64 1
+ %tmp4264 = getelementptr inbounds float, float* %tmp4263, i64 1
+ %tmp4265 = getelementptr inbounds float, float* %tmp4264, i64 1
+ %tmp4266 = getelementptr inbounds float, float* %tmp4265, i64 1
+ %tmp4267 = getelementptr inbounds float, float* %tmp4266, i64 1
+ %tmp4268 = getelementptr inbounds float, float* %tmp4267, i64 1
+ %tmp4269 = getelementptr inbounds float, float* %tmp4268, i64 1
+ %tmp4270 = getelementptr inbounds float, float* %tmp4269, i64 1
+ %tmp4271 = getelementptr inbounds float, float* %tmp4270, i64 1
+ %tmp4272 = getelementptr inbounds float, float* %tmp4271, i64 1
+ %tmp4273 = getelementptr inbounds float, float* %tmp4272, i64 1
+ %tmp4274 = getelementptr inbounds float, float* %tmp4273, i64 1
+ %tmp4275 = getelementptr inbounds float, float* %tmp4274, i64 1
+ %tmp4276 = getelementptr inbounds float, float* %tmp4275, i64 1
+ %tmp4277 = getelementptr inbounds float, float* %tmp4276, i64 1
+ %tmp4278 = getelementptr inbounds float, float* %tmp4277, i64 1
+ %tmp4279 = getelementptr inbounds float, float* %tmp4278, i64 1
+ %tmp4280 = getelementptr inbounds float, float* %tmp4279, i64 1
+ %tmp4281 = getelementptr inbounds float, float* %tmp4280, i64 1
+ %tmp4282 = getelementptr inbounds float, float* %tmp4281, i64 1
+ %tmp4283 = getelementptr inbounds float, float* %tmp4282, i64 1
+ %tmp4284 = getelementptr inbounds float, float* %tmp4283, i64 1
+ %tmp4285 = getelementptr inbounds float, float* %tmp4284, i64 1
+ %tmp4286 = getelementptr inbounds float, float* %tmp4285, i64 1
+ %tmp4287 = getelementptr inbounds float, float* %tmp4286, i64 1
+ %tmp4288 = getelementptr inbounds float, float* %tmp4287, i64 1
+ %tmp4289 = getelementptr inbounds float, float* %tmp4288, i64 1
+ %tmp4290 = getelementptr inbounds float, float* %tmp4289, i64 1
+ %tmp4291 = getelementptr inbounds float, float* %tmp4290, i64 1
+ %tmp4292 = getelementptr inbounds float, float* %tmp4291, i64 1
+ %tmp4293 = getelementptr inbounds float, float* %tmp4292, i64 1
+ %tmp4294 = getelementptr inbounds float, float* %tmp4293, i64 1
+ %tmp4295 = getelementptr inbounds float, float* %tmp4294, i64 1
+ %tmp4296 = getelementptr inbounds float, float* %tmp4295, i64 1
+ %tmp4297 = getelementptr inbounds float, float* %tmp4296, i64 1
+ %tmp4298 = getelementptr inbounds float, float* %tmp4297, i64 1
+ %tmp4299 = getelementptr inbounds float, float* %tmp4298, i64 1
+ %tmp4300 = getelementptr inbounds float, float* %tmp4299, i64 1
+ %tmp4301 = getelementptr inbounds float, float* %tmp4300, i64 1
+ %tmp4302 = getelementptr inbounds float, float* %tmp4301, i64 1
+ %tmp4303 = getelementptr inbounds float, float* %tmp4302, i64 1
+ %tmp4304 = getelementptr inbounds float, float* %tmp4303, i64 1
+ %tmp4305 = getelementptr inbounds float, float* %tmp4304, i64 1
+ %tmp4306 = getelementptr inbounds float, float* %tmp4305, i64 1
+ %tmp4307 = getelementptr inbounds float, float* %tmp4306, i64 1
+ %tmp4308 = getelementptr inbounds float, float* %tmp4307, i64 1
+ %tmp4309 = getelementptr inbounds float, float* %tmp4308, i64 1
+ %tmp4310 = getelementptr inbounds float, float* %tmp4309, i64 1
+ %tmp4311 = getelementptr inbounds float, float* %tmp4310, i64 1
+ %tmp4312 = getelementptr inbounds float, float* %tmp4311, i64 1
+ %tmp4313 = getelementptr inbounds float, float* %tmp4312, i64 1
+ %tmp4314 = getelementptr inbounds float, float* %tmp4313, i64 1
+ %tmp4315 = getelementptr inbounds float, float* %tmp4314, i64 1
+ %tmp4316 = getelementptr inbounds float, float* %tmp4315, i64 1
+ %tmp4317 = getelementptr inbounds float, float* %tmp4316, i64 1
+ %tmp4318 = getelementptr inbounds float, float* %tmp4317, i64 1
+ %tmp4319 = getelementptr inbounds float, float* %tmp4318, i64 1
+ %tmp4320 = getelementptr inbounds float, float* %tmp4319, i64 1
+ %tmp4321 = getelementptr inbounds float, float* %tmp4320, i64 1
+ %tmp4322 = getelementptr inbounds float, float* %tmp4321, i64 1
+ %tmp4323 = getelementptr inbounds float, float* %tmp4322, i64 1
+ %tmp4324 = getelementptr inbounds float, float* %tmp4323, i64 1
+ %tmp4325 = getelementptr inbounds float, float* %tmp4324, i64 1
+ %tmp4326 = getelementptr inbounds float, float* %tmp4325, i64 1
+ %tmp4327 = getelementptr inbounds float, float* %tmp4326, i64 1
+ %tmp4328 = getelementptr inbounds float, float* %tmp4327, i64 1
+ %tmp4329 = getelementptr inbounds float, float* %tmp4328, i64 1
+ %tmp4330 = getelementptr inbounds float, float* %tmp4329, i64 1
+ %tmp4331 = getelementptr inbounds float, float* %tmp4330, i64 1
+ %tmp4332 = getelementptr inbounds float, float* %tmp4331, i64 1
+ %tmp4333 = getelementptr inbounds float, float* %tmp4332, i64 1
+ %tmp4334 = getelementptr inbounds float, float* %tmp4333, i64 1
+ %tmp4335 = getelementptr inbounds float, float* %tmp4334, i64 1
+ %tmp4336 = getelementptr inbounds float, float* %tmp4335, i64 1
+ %tmp4337 = getelementptr inbounds float, float* %tmp4336, i64 1
+ %tmp4338 = getelementptr inbounds float, float* %tmp4337, i64 1
+ %tmp4339 = getelementptr inbounds float, float* %tmp4338, i64 1
+ %tmp4340 = getelementptr inbounds float, float* %tmp4339, i64 1
+ %tmp4341 = getelementptr inbounds float, float* %tmp4340, i64 1
+ %tmp4342 = getelementptr inbounds float, float* %tmp4341, i64 1
+ %tmp4343 = getelementptr inbounds float, float* %tmp4342, i64 1
+ %tmp4344 = getelementptr inbounds float, float* %tmp4343, i64 1
+ %tmp4345 = getelementptr inbounds float, float* %tmp4344, i64 1
+ %tmp4346 = getelementptr inbounds float, float* %tmp4345, i64 1
+ %tmp4347 = getelementptr inbounds float, float* %tmp4346, i64 1
+ %tmp4348 = getelementptr inbounds float, float* %tmp4347, i64 1
+ %tmp4349 = getelementptr inbounds float, float* %tmp4348, i64 1
+ %tmp4350 = getelementptr inbounds float, float* %tmp4349, i64 1
+ %tmp4351 = getelementptr inbounds float, float* %tmp4350, i64 1
+ %tmp4352 = getelementptr inbounds float, float* %tmp4351, i64 1
+ %tmp4353 = getelementptr inbounds float, float* %tmp4352, i64 1
+ %tmp4354 = getelementptr inbounds float, float* %tmp4353, i64 1
+ %tmp4355 = getelementptr inbounds float, float* %tmp4354, i64 1
+ %tmp4356 = getelementptr inbounds float, float* %tmp4355, i64 1
+ %tmp4357 = getelementptr inbounds float, float* %tmp4356, i64 1
+ %tmp4358 = getelementptr inbounds float, float* %tmp4357, i64 1
+ %tmp4359 = getelementptr inbounds float, float* %tmp4358, i64 1
+ %tmp4360 = getelementptr inbounds float, float* %tmp4359, i64 1
+ %tmp4361 = getelementptr inbounds float, float* %tmp4360, i64 1
+ %tmp4362 = getelementptr inbounds float, float* %tmp4361, i64 1
+ %tmp4363 = getelementptr inbounds float, float* %tmp4362, i64 1
+ %tmp4364 = getelementptr inbounds float, float* %tmp4363, i64 1
+ %tmp4365 = getelementptr inbounds float, float* %tmp4364, i64 1
+ %tmp4366 = getelementptr inbounds float, float* %tmp4365, i64 1
+ %tmp4367 = getelementptr inbounds float, float* %tmp4366, i64 1
+ %tmp4368 = getelementptr inbounds float, float* %tmp4367, i64 1
+ %tmp4369 = getelementptr inbounds float, float* %tmp4368, i64 1
+ %tmp4370 = getelementptr inbounds float, float* %tmp4369, i64 1
+ %tmp4371 = getelementptr inbounds float, float* %tmp4370, i64 1
+ %tmp4372 = getelementptr inbounds float, float* %tmp4371, i64 1
+ %tmp4373 = getelementptr inbounds float, float* %tmp4372, i64 1
+ %tmp4374 = getelementptr inbounds float, float* %tmp4373, i64 1
+ %tmp4375 = getelementptr inbounds float, float* %tmp4374, i64 1
+ %tmp4376 = getelementptr inbounds float, float* %tmp4375, i64 1
+ %tmp4377 = getelementptr inbounds float, float* %tmp4376, i64 1
+ %tmp4378 = getelementptr inbounds float, float* %tmp4377, i64 1
+ %tmp4379 = getelementptr inbounds float, float* %tmp4378, i64 1
+ %tmp4380 = getelementptr inbounds float, float* %tmp4379, i64 1
+ %tmp4381 = getelementptr inbounds float, float* %tmp4380, i64 1
+ %tmp4382 = getelementptr inbounds float, float* %tmp4381, i64 1
+ %tmp4383 = getelementptr inbounds float, float* %tmp4382, i64 1
+ %tmp4384 = getelementptr inbounds float, float* %tmp4383, i64 1
+ %tmp4385 = getelementptr inbounds float, float* %tmp4384, i64 1
+ %tmp4386 = getelementptr inbounds float, float* %tmp4385, i64 1
+ %tmp4387 = getelementptr inbounds float, float* %tmp4386, i64 1
+ %tmp4388 = getelementptr inbounds float, float* %tmp4387, i64 1
+ %tmp4389 = getelementptr inbounds float, float* %tmp4388, i64 1
+ %tmp4390 = getelementptr inbounds float, float* %tmp4389, i64 1
+ %tmp4391 = getelementptr inbounds float, float* %tmp4390, i64 1
+ %tmp4392 = getelementptr inbounds float, float* %tmp4391, i64 1
+ %tmp4393 = getelementptr inbounds float, float* %tmp4392, i64 1
+ %tmp4394 = getelementptr inbounds float, float* %tmp4393, i64 1
+ %tmp4395 = getelementptr inbounds float, float* %tmp4394, i64 1
+ %tmp4396 = getelementptr inbounds float, float* %tmp4395, i64 1
+ %tmp4397 = getelementptr inbounds float, float* %tmp4396, i64 1
+ %tmp4398 = getelementptr inbounds float, float* %tmp4397, i64 1
+ %tmp4399 = getelementptr inbounds float, float* %tmp4398, i64 1
+ %tmp4400 = getelementptr inbounds float, float* %tmp4399, i64 1
+ %tmp4401 = getelementptr inbounds float, float* %tmp4400, i64 1
+ %tmp4402 = getelementptr inbounds float, float* %tmp4401, i64 1
+ %tmp4403 = getelementptr inbounds float, float* %tmp4402, i64 1
+ %tmp4404 = getelementptr inbounds float, float* %tmp4403, i64 1
+ %tmp4405 = getelementptr inbounds float, float* %tmp4404, i64 1
+ %tmp4406 = getelementptr inbounds float, float* %tmp4405, i64 1
+ %tmp4407 = getelementptr inbounds float, float* %tmp4406, i64 1
+ %tmp4408 = getelementptr inbounds float, float* %tmp4407, i64 1
+ %tmp4409 = getelementptr inbounds float, float* %tmp4408, i64 1
+ %tmp4410 = getelementptr inbounds float, float* %tmp4409, i64 1
+ %tmp4411 = getelementptr inbounds float, float* %tmp4410, i64 1
+ %tmp4412 = getelementptr inbounds float, float* %tmp4411, i64 1
+ %tmp4413 = getelementptr inbounds float, float* %tmp4412, i64 1
+ %tmp4414 = getelementptr inbounds float, float* %tmp4413, i64 1
+ %tmp4415 = getelementptr inbounds float, float* %tmp4414, i64 1
+ %tmp4416 = getelementptr inbounds float, float* %tmp4415, i64 1
+ %tmp4417 = getelementptr inbounds float, float* %tmp4416, i64 1
+ %tmp4418 = getelementptr inbounds float, float* %tmp4417, i64 1
+ %tmp4419 = getelementptr inbounds float, float* %tmp4418, i64 1
+ %tmp4420 = getelementptr inbounds float, float* %tmp4419, i64 1
+ %tmp4421 = getelementptr inbounds float, float* %tmp4420, i64 1
+ %tmp4422 = getelementptr inbounds float, float* %tmp4421, i64 1
+ %tmp4423 = getelementptr inbounds float, float* %tmp4422, i64 1
+ %tmp4424 = getelementptr inbounds float, float* %tmp4423, i64 1
+ %tmp4425 = getelementptr inbounds float, float* %tmp4424, i64 1
+ %tmp4426 = getelementptr inbounds float, float* %tmp4425, i64 1
+ %tmp4427 = getelementptr inbounds float, float* %tmp4426, i64 1
+ %tmp4428 = getelementptr inbounds float, float* %tmp4427, i64 1
+ %tmp4429 = getelementptr inbounds float, float* %tmp4428, i64 1
+ %tmp4430 = getelementptr inbounds float, float* %tmp4429, i64 1
+ %tmp4431 = getelementptr inbounds float, float* %tmp4430, i64 1
+ %tmp4432 = getelementptr inbounds float, float* %tmp4431, i64 1
+ %tmp4433 = getelementptr inbounds float, float* %tmp4432, i64 1
+ %tmp4434 = getelementptr inbounds float, float* %tmp4433, i64 1
+ %tmp4435 = getelementptr inbounds float, float* %tmp4434, i64 1
+ %tmp4436 = getelementptr inbounds float, float* %tmp4435, i64 1
+ %tmp4437 = getelementptr inbounds float, float* %tmp4436, i64 1
+ %tmp4438 = getelementptr inbounds float, float* %tmp4437, i64 1
+ %tmp4439 = getelementptr inbounds float, float* %tmp4438, i64 1
+ %tmp4440 = getelementptr inbounds float, float* %tmp4439, i64 1
+ %tmp4441 = getelementptr inbounds float, float* %tmp4440, i64 1
+ %tmp4442 = getelementptr inbounds float, float* %tmp4441, i64 1
+ %tmp4443 = getelementptr inbounds float, float* %tmp4442, i64 1
+ %tmp4444 = getelementptr inbounds float, float* %tmp4443, i64 1
+ %tmp4445 = getelementptr inbounds float, float* %tmp4444, i64 1
+ %tmp4446 = getelementptr inbounds float, float* %tmp4445, i64 1
+ %tmp4447 = getelementptr inbounds float, float* %tmp4446, i64 1
+ %tmp4448 = getelementptr inbounds float, float* %tmp4447, i64 1
+ %tmp4449 = getelementptr inbounds float, float* %tmp4448, i64 1
+ %tmp4450 = getelementptr inbounds float, float* %tmp4449, i64 1
+ %tmp4451 = getelementptr inbounds float, float* %tmp4450, i64 1
+ %tmp4452 = getelementptr inbounds float, float* %tmp4451, i64 1
+ %tmp4453 = getelementptr inbounds float, float* %tmp4452, i64 1
+ %tmp4454 = getelementptr inbounds float, float* %tmp4453, i64 1
+ %tmp4455 = getelementptr inbounds float, float* %tmp4454, i64 1
+ %tmp4456 = getelementptr inbounds float, float* %tmp4455, i64 1
+ %tmp4457 = getelementptr inbounds float, float* %tmp4456, i64 1
+ %tmp4458 = getelementptr inbounds float, float* %tmp4457, i64 1
+ %tmp4459 = getelementptr inbounds float, float* %tmp4458, i64 1
+ %tmp4460 = getelementptr inbounds float, float* %tmp4459, i64 1
+ %tmp4461 = getelementptr inbounds float, float* %tmp4460, i64 1
+ %tmp4462 = getelementptr inbounds float, float* %tmp4461, i64 1
+ %tmp4463 = getelementptr inbounds float, float* %tmp4462, i64 1
+ %tmp4464 = getelementptr inbounds float, float* %tmp4463, i64 1
+ %tmp4465 = getelementptr inbounds float, float* %tmp4464, i64 1
+ %tmp4466 = getelementptr inbounds float, float* %tmp4465, i64 1
+ %tmp4467 = getelementptr inbounds float, float* %tmp4466, i64 1
+ %tmp4468 = getelementptr inbounds float, float* %tmp4467, i64 1
+ %tmp4469 = getelementptr inbounds float, float* %tmp4468, i64 1
+ %tmp4470 = getelementptr inbounds float, float* %tmp4469, i64 1
+ %tmp4471 = getelementptr inbounds float, float* %tmp4470, i64 1
+ %tmp4472 = getelementptr inbounds float, float* %tmp4471, i64 1
+ %tmp4473 = getelementptr inbounds float, float* %tmp4472, i64 1
+ %tmp4474 = getelementptr inbounds float, float* %tmp4473, i64 1
+ %tmp4475 = getelementptr inbounds float, float* %tmp4474, i64 1
+ %tmp4476 = getelementptr inbounds float, float* %tmp4475, i64 1
+ %tmp4477 = getelementptr inbounds float, float* %tmp4476, i64 1
+ %tmp4478 = getelementptr inbounds float, float* %tmp4477, i64 1
+ %tmp4479 = getelementptr inbounds float, float* %tmp4478, i64 1
+ %tmp4480 = getelementptr inbounds float, float* %tmp4479, i64 1
+ %tmp4481 = getelementptr inbounds float, float* %tmp4480, i64 1
+ %tmp4482 = getelementptr inbounds float, float* %tmp4481, i64 1
+ %tmp4483 = getelementptr inbounds float, float* %tmp4482, i64 1
+ %tmp4484 = getelementptr inbounds float, float* %tmp4483, i64 1
+ %tmp4485 = getelementptr inbounds float, float* %tmp4484, i64 1
+ %tmp4486 = getelementptr inbounds float, float* %tmp4485, i64 1
+ %tmp4487 = getelementptr inbounds float, float* %tmp4486, i64 1
+ %tmp4488 = getelementptr inbounds float, float* %tmp4487, i64 1
+ %tmp4489 = getelementptr inbounds float, float* %tmp4488, i64 1
+ %tmp4490 = getelementptr inbounds float, float* %tmp4489, i64 1
+ %tmp4491 = getelementptr inbounds float, float* %tmp4490, i64 1
+ %tmp4492 = getelementptr inbounds float, float* %tmp4491, i64 1
+ %tmp4493 = getelementptr inbounds float, float* %tmp4492, i64 1
+ %tmp4494 = getelementptr inbounds float, float* %tmp4493, i64 1
+ %tmp4495 = getelementptr inbounds float, float* %tmp4494, i64 1
+ %tmp4496 = getelementptr inbounds float, float* %tmp4495, i64 1
+ %tmp4497 = getelementptr inbounds float, float* %tmp4496, i64 1
+ %tmp4498 = getelementptr inbounds float, float* %tmp4497, i64 1
+ %tmp4499 = getelementptr inbounds float, float* %tmp4498, i64 1
+ %tmp4500 = getelementptr inbounds float, float* %tmp4499, i64 1
+ %tmp4501 = getelementptr inbounds float, float* %tmp4500, i64 1
+ %tmp4502 = getelementptr inbounds float, float* %tmp4501, i64 1
+ %tmp4503 = getelementptr inbounds float, float* %tmp4502, i64 1
+ %tmp4504 = getelementptr inbounds float, float* %tmp4503, i64 1
+ %tmp4505 = getelementptr inbounds float, float* %tmp4504, i64 1
+ %tmp4506 = getelementptr inbounds float, float* %tmp4505, i64 1
+ %tmp4507 = getelementptr inbounds float, float* %tmp4506, i64 1
+ %tmp4508 = getelementptr inbounds float, float* %tmp4507, i64 1
+ %tmp4509 = getelementptr inbounds float, float* %tmp4508, i64 1
+ %tmp4510 = getelementptr inbounds float, float* %tmp4509, i64 1
+ %tmp4511 = getelementptr inbounds float, float* %tmp4510, i64 1
+ %tmp4512 = getelementptr inbounds float, float* %tmp4511, i64 1
+ %tmp4513 = getelementptr inbounds float, float* %tmp4512, i64 1
+ %tmp4514 = getelementptr inbounds float, float* %tmp4513, i64 1
+ %tmp4515 = getelementptr inbounds float, float* %tmp4514, i64 1
+ %tmp4516 = getelementptr inbounds float, float* %tmp4515, i64 1
+ %tmp4517 = getelementptr inbounds float, float* %tmp4516, i64 1
+ %tmp4518 = getelementptr inbounds float, float* %tmp4517, i64 1
+ %tmp4519 = getelementptr inbounds float, float* %tmp4518, i64 1
+ %tmp4520 = getelementptr inbounds float, float* %tmp4519, i64 1
+ %tmp4521 = getelementptr inbounds float, float* %tmp4520, i64 1
+ %tmp4522 = getelementptr inbounds float, float* %tmp4521, i64 1
+ %tmp4523 = getelementptr inbounds float, float* %tmp4522, i64 1
+ %tmp4524 = getelementptr inbounds float, float* %tmp4523, i64 1
+ %tmp4525 = getelementptr inbounds float, float* %tmp4524, i64 1
+ %tmp4526 = getelementptr inbounds float, float* %tmp4525, i64 1
+ %tmp4527 = getelementptr inbounds float, float* %tmp4526, i64 1
+ %tmp4528 = getelementptr inbounds float, float* %tmp4527, i64 1
+ %tmp4529 = getelementptr inbounds float, float* %tmp4528, i64 1
+ %tmp4530 = getelementptr inbounds float, float* %tmp4529, i64 1
+ %tmp4531 = getelementptr inbounds float, float* %tmp4530, i64 1
+ %tmp4532 = getelementptr inbounds float, float* %tmp4531, i64 1
+ %tmp4533 = getelementptr inbounds float, float* %tmp4532, i64 1
+ %tmp4534 = getelementptr inbounds float, float* %tmp4533, i64 1
+ %tmp4535 = getelementptr inbounds float, float* %tmp4534, i64 1
+ %tmp4536 = getelementptr inbounds float, float* %tmp4535, i64 1
+ %tmp4537 = getelementptr inbounds float, float* %tmp4536, i64 1
+ %tmp4538 = getelementptr inbounds float, float* %tmp4537, i64 1
+ %tmp4539 = getelementptr inbounds float, float* %tmp4538, i64 1
+ %tmp4540 = getelementptr inbounds float, float* %tmp4539, i64 1
+ %tmp4541 = getelementptr inbounds float, float* %tmp4540, i64 1
+ %tmp4542 = getelementptr inbounds float, float* %tmp4541, i64 1
+ %tmp4543 = getelementptr inbounds float, float* %tmp4542, i64 1
+ %tmp4544 = getelementptr inbounds float, float* %tmp4543, i64 1
+ %tmp4545 = getelementptr inbounds float, float* %tmp4544, i64 1
+ %tmp4546 = getelementptr inbounds float, float* %tmp4545, i64 1
+ %tmp4547 = getelementptr inbounds float, float* %tmp4546, i64 1
+ %tmp4548 = getelementptr inbounds float, float* %tmp4547, i64 1
+ %tmp4549 = getelementptr inbounds float, float* %tmp4548, i64 1
+ %tmp4550 = getelementptr inbounds float, float* %tmp4549, i64 1
+ %tmp4551 = getelementptr inbounds float, float* %tmp4550, i64 1
+ %tmp4552 = getelementptr inbounds float, float* %tmp4551, i64 1
+ %tmp4553 = getelementptr inbounds float, float* %tmp4552, i64 1
+ %tmp4554 = getelementptr inbounds float, float* %tmp4553, i64 1
+ %tmp4555 = getelementptr inbounds float, float* %tmp4554, i64 1
+ %tmp4556 = getelementptr inbounds float, float* %tmp4555, i64 1
+ %tmp4557 = getelementptr inbounds float, float* %tmp4556, i64 1
+ %tmp4558 = getelementptr inbounds float, float* %tmp4557, i64 1
+ %tmp4559 = getelementptr inbounds float, float* %tmp4558, i64 1
+ %tmp4560 = getelementptr inbounds float, float* %tmp4559, i64 1
+ %tmp4561 = getelementptr inbounds float, float* %tmp4560, i64 1
+ %tmp4562 = getelementptr inbounds float, float* %tmp4561, i64 1
+ %tmp4563 = getelementptr inbounds float, float* %tmp4562, i64 1
+ %tmp4564 = getelementptr inbounds float, float* %tmp4563, i64 1
+ %tmp4565 = getelementptr inbounds float, float* %tmp4564, i64 1
+ %tmp4566 = getelementptr inbounds float, float* %tmp4565, i64 1
+ %tmp4567 = getelementptr inbounds float, float* %tmp4566, i64 1
+ %tmp4568 = getelementptr inbounds float, float* %tmp4567, i64 1
+ %tmp4569 = getelementptr inbounds float, float* %tmp4568, i64 1
+ %tmp4570 = getelementptr inbounds float, float* %tmp4569, i64 1
+ %tmp4571 = getelementptr inbounds float, float* %tmp4570, i64 1
+ %tmp4572 = getelementptr inbounds float, float* %tmp4571, i64 1
+ %tmp4573 = getelementptr inbounds float, float* %tmp4572, i64 1
+ %tmp4574 = getelementptr inbounds float, float* %tmp4573, i64 1
+ %tmp4575 = getelementptr inbounds float, float* %tmp4574, i64 1
+ %tmp4576 = getelementptr inbounds float, float* %tmp4575, i64 1
+ %tmp4577 = getelementptr inbounds float, float* %tmp4576, i64 1
+ %tmp4578 = getelementptr inbounds float, float* %tmp4577, i64 1
+ %tmp4579 = getelementptr inbounds float, float* %tmp4578, i64 1
+ %tmp4580 = getelementptr inbounds float, float* %tmp4579, i64 1
+ %tmp4581 = getelementptr inbounds float, float* %tmp4580, i64 1
+ %tmp4582 = getelementptr inbounds float, float* %tmp4581, i64 1
+ %tmp4583 = getelementptr inbounds float, float* %tmp4582, i64 1
+ %tmp4584 = getelementptr inbounds float, float* %tmp4583, i64 1
+ %tmp4585 = getelementptr inbounds float, float* %tmp4584, i64 1
+ %tmp4586 = getelementptr inbounds float, float* %tmp4585, i64 1
+ %tmp4587 = getelementptr inbounds float, float* %tmp4586, i64 1
+ %tmp4588 = getelementptr inbounds float, float* %tmp4587, i64 1
+ %tmp4589 = getelementptr inbounds float, float* %tmp4588, i64 1
+ %tmp4590 = getelementptr inbounds float, float* %tmp4589, i64 1
+ %tmp4591 = getelementptr inbounds float, float* %tmp4590, i64 1
+ %tmp4592 = getelementptr inbounds float, float* %tmp4591, i64 1
+ %tmp4593 = getelementptr inbounds float, float* %tmp4592, i64 1
+ %tmp4594 = getelementptr inbounds float, float* %tmp4593, i64 1
+ %tmp4595 = getelementptr inbounds float, float* %tmp4594, i64 1
+ %tmp4596 = getelementptr inbounds float, float* %tmp4595, i64 1
+ %tmp4597 = getelementptr inbounds float, float* %tmp4596, i64 1
+ %tmp4598 = getelementptr inbounds float, float* %tmp4597, i64 1
+ %tmp4599 = getelementptr inbounds float, float* %tmp4598, i64 1
+ %tmp4600 = getelementptr inbounds float, float* %tmp4599, i64 1
+ %tmp4601 = getelementptr inbounds float, float* %tmp4600, i64 1
+ %tmp4602 = getelementptr inbounds float, float* %tmp4601, i64 1
+ %tmp4603 = getelementptr inbounds float, float* %tmp4602, i64 1
+ %tmp4604 = getelementptr inbounds float, float* %tmp4603, i64 1
+ %tmp4605 = getelementptr inbounds float, float* %tmp4604, i64 1
+ %tmp4606 = getelementptr inbounds float, float* %tmp4605, i64 1
+ %tmp4607 = getelementptr inbounds float, float* %tmp4606, i64 1
+ %tmp4608 = getelementptr inbounds float, float* %tmp4607, i64 1
+ %tmp4609 = getelementptr inbounds float, float* %tmp4608, i64 1
+ %tmp4610 = getelementptr inbounds float, float* %tmp4609, i64 1
+ %tmp4611 = getelementptr inbounds float, float* %tmp4610, i64 1
+ %tmp4612 = getelementptr inbounds float, float* %tmp4611, i64 1
+ %tmp4613 = getelementptr inbounds float, float* %tmp4612, i64 1
+ %tmp4614 = getelementptr inbounds float, float* %tmp4613, i64 1
+ %tmp4615 = getelementptr inbounds float, float* %tmp4614, i64 1
+ %tmp4616 = getelementptr inbounds float, float* %tmp4615, i64 1
+ %tmp4617 = getelementptr inbounds float, float* %tmp4616, i64 1
+ %tmp4618 = getelementptr inbounds float, float* %tmp4617, i64 1
+ %tmp4619 = getelementptr inbounds float, float* %tmp4618, i64 1
+ %tmp4620 = getelementptr inbounds float, float* %tmp4619, i64 1
+ %tmp4621 = getelementptr inbounds float, float* %tmp4620, i64 1
+ %tmp4622 = getelementptr inbounds float, float* %tmp4621, i64 1
+ %tmp4623 = getelementptr inbounds float, float* %tmp4622, i64 1
+ %tmp4624 = getelementptr inbounds float, float* %tmp4623, i64 1
+ %tmp4625 = getelementptr inbounds float, float* %tmp4624, i64 1
+ %tmp4626 = getelementptr inbounds float, float* %tmp4625, i64 1
+ %tmp4627 = getelementptr inbounds float, float* %tmp4626, i64 1
+ %tmp4628 = getelementptr inbounds float, float* %tmp4627, i64 1
+ %tmp4629 = getelementptr inbounds float, float* %tmp4628, i64 1
+ %tmp4630 = getelementptr inbounds float, float* %tmp4629, i64 1
+ %tmp4631 = getelementptr inbounds float, float* %tmp4630, i64 1
+ %tmp4632 = getelementptr inbounds float, float* %tmp4631, i64 1
+ %tmp4633 = getelementptr inbounds float, float* %tmp4632, i64 1
+ %tmp4634 = getelementptr inbounds float, float* %tmp4633, i64 1
+ %tmp4635 = getelementptr inbounds float, float* %tmp4634, i64 1
+ %tmp4636 = getelementptr inbounds float, float* %tmp4635, i64 1
+ %tmp4637 = getelementptr inbounds float, float* %tmp4636, i64 1
+ %tmp4638 = getelementptr inbounds float, float* %tmp4637, i64 1
+ %tmp4639 = getelementptr inbounds float, float* %tmp4638, i64 1
+ %tmp4640 = getelementptr inbounds float, float* %tmp4639, i64 1
+ %tmp4641 = getelementptr inbounds float, float* %tmp4640, i64 1
+ %tmp4642 = getelementptr inbounds float, float* %tmp4641, i64 1
+ %tmp4643 = getelementptr inbounds float, float* %tmp4642, i64 1
+ %tmp4644 = getelementptr inbounds float, float* %tmp4643, i64 1
+ %tmp4645 = getelementptr inbounds float, float* %tmp4644, i64 1
+ %tmp4646 = getelementptr inbounds float, float* %tmp4645, i64 1
+ %tmp4647 = getelementptr inbounds float, float* %tmp4646, i64 1
+ %tmp4648 = getelementptr inbounds float, float* %tmp4647, i64 1
+ %tmp4649 = getelementptr inbounds float, float* %tmp4648, i64 1
+ %tmp4650 = getelementptr inbounds float, float* %tmp4649, i64 1
+ %tmp4651 = getelementptr inbounds float, float* %tmp4650, i64 1
+ %tmp4652 = getelementptr inbounds float, float* %tmp4651, i64 1
+ %tmp4653 = getelementptr inbounds float, float* %tmp4652, i64 1
+ %tmp4654 = getelementptr inbounds float, float* %tmp4653, i64 1
+ %tmp4655 = getelementptr inbounds float, float* %tmp4654, i64 1
+ %tmp4656 = getelementptr inbounds float, float* %tmp4655, i64 1
+ %tmp4657 = getelementptr inbounds float, float* %tmp4656, i64 1
+ %tmp4658 = getelementptr inbounds float, float* %tmp4657, i64 1
+ %tmp4659 = getelementptr inbounds float, float* %tmp4658, i64 1
+ %tmp4660 = getelementptr inbounds float, float* %tmp4659, i64 1
+ %tmp4661 = getelementptr inbounds float, float* %tmp4660, i64 1
+ %tmp4662 = getelementptr inbounds float, float* %tmp4661, i64 1
+ %tmp4663 = getelementptr inbounds float, float* %tmp4662, i64 1
+ %tmp4664 = getelementptr inbounds float, float* %tmp4663, i64 1
+ %tmp4665 = getelementptr inbounds float, float* %tmp4664, i64 1
+ %tmp4666 = getelementptr inbounds float, float* %tmp4665, i64 1
+ %tmp4667 = getelementptr inbounds float, float* %tmp4666, i64 1
+ %tmp4668 = getelementptr inbounds float, float* %tmp4667, i64 1
+ %tmp4669 = getelementptr inbounds float, float* %tmp4668, i64 1
+ %tmp4670 = getelementptr inbounds float, float* %tmp4669, i64 1
+ %tmp4671 = getelementptr inbounds float, float* %tmp4670, i64 1
+ %tmp4672 = getelementptr inbounds float, float* %tmp4671, i64 1
+ %tmp4673 = getelementptr inbounds float, float* %tmp4672, i64 1
+ %tmp4674 = getelementptr inbounds float, float* %tmp4673, i64 1
+ %tmp4675 = getelementptr inbounds float, float* %tmp4674, i64 1
+ %tmp4676 = getelementptr inbounds float, float* %tmp4675, i64 1
+ %tmp4677 = getelementptr inbounds float, float* %tmp4676, i64 1
+ %tmp4678 = getelementptr inbounds float, float* %tmp4677, i64 1
+ %tmp4679 = getelementptr inbounds float, float* %tmp4678, i64 1
+ %tmp4680 = getelementptr inbounds float, float* %tmp4679, i64 1
+ %tmp4681 = getelementptr inbounds float, float* %tmp4680, i64 1
+ %tmp4682 = getelementptr inbounds float, float* %tmp4681, i64 1
+ %tmp4683 = getelementptr inbounds float, float* %tmp4682, i64 1
+ %tmp4684 = getelementptr inbounds float, float* %tmp4683, i64 1
+ %tmp4685 = getelementptr inbounds float, float* %tmp4684, i64 1
+ %tmp4686 = getelementptr inbounds float, float* %tmp4685, i64 1
+ %tmp4687 = getelementptr inbounds float, float* %tmp4686, i64 1
+ %tmp4688 = getelementptr inbounds float, float* %tmp4687, i64 1
+ %tmp4689 = getelementptr inbounds float, float* %tmp4688, i64 1
+ %tmp4690 = getelementptr inbounds float, float* %tmp4689, i64 1
+ %tmp4691 = getelementptr inbounds float, float* %tmp4690, i64 1
+ %tmp4692 = getelementptr inbounds float, float* %tmp4691, i64 1
+ %tmp4693 = getelementptr inbounds float, float* %tmp4692, i64 1
+ %tmp4694 = getelementptr inbounds float, float* %tmp4693, i64 1
+ %tmp4695 = getelementptr inbounds float, float* %tmp4694, i64 1
+ %tmp4696 = getelementptr inbounds float, float* %tmp4695, i64 1
+ %tmp4697 = getelementptr inbounds float, float* %tmp4696, i64 1
+ %tmp4698 = getelementptr inbounds float, float* %tmp4697, i64 1
+ %tmp4699 = getelementptr inbounds float, float* %tmp4698, i64 1
+ %tmp4700 = getelementptr inbounds float, float* %tmp4699, i64 1
+ %tmp4701 = getelementptr inbounds float, float* %tmp4700, i64 1
+ %tmp4702 = getelementptr inbounds float, float* %tmp4701, i64 1
+ %tmp4703 = getelementptr inbounds float, float* %tmp4702, i64 1
+ %tmp4704 = getelementptr inbounds float, float* %tmp4703, i64 1
+ %tmp4705 = getelementptr inbounds float, float* %tmp4704, i64 1
+ %tmp4706 = getelementptr inbounds float, float* %tmp4705, i64 1
+ %tmp4707 = getelementptr inbounds float, float* %tmp4706, i64 1
+ %tmp4708 = getelementptr inbounds float, float* %tmp4707, i64 1
+ %tmp4709 = getelementptr inbounds float, float* %tmp4708, i64 1
+ %tmp4710 = getelementptr inbounds float, float* %tmp4709, i64 1
+ %tmp4711 = getelementptr inbounds float, float* %tmp4710, i64 1
+ %tmp4712 = getelementptr inbounds float, float* %tmp4711, i64 1
+ %tmp4713 = getelementptr inbounds float, float* %tmp4712, i64 1
+ %tmp4714 = getelementptr inbounds float, float* %tmp4713, i64 1
+ %tmp4715 = getelementptr inbounds float, float* %tmp4714, i64 1
+ %tmp4716 = getelementptr inbounds float, float* %tmp4715, i64 1
+ %tmp4717 = getelementptr inbounds float, float* %tmp4716, i64 1
+ %tmp4718 = getelementptr inbounds float, float* %tmp4717, i64 1
+ %tmp4719 = getelementptr inbounds float, float* %tmp4718, i64 1
+ %tmp4720 = getelementptr inbounds float, float* %tmp4719, i64 1
+ %tmp4721 = getelementptr inbounds float, float* %tmp4720, i64 1
+ %tmp4722 = getelementptr inbounds float, float* %tmp4721, i64 1
+ %tmp4723 = getelementptr inbounds float, float* %tmp4722, i64 1
+ %tmp4724 = getelementptr inbounds float, float* %tmp4723, i64 1
+ %tmp4725 = getelementptr inbounds float, float* %tmp4724, i64 1
+ %tmp4726 = getelementptr inbounds float, float* %tmp4725, i64 1
+ %tmp4727 = getelementptr inbounds float, float* %tmp4726, i64 1
+ %tmp4728 = getelementptr inbounds float, float* %tmp4727, i64 1
+ %tmp4729 = getelementptr inbounds float, float* %tmp4728, i64 1
+ %tmp4730 = getelementptr inbounds float, float* %tmp4729, i64 1
+ %tmp4731 = getelementptr inbounds float, float* %tmp4730, i64 1
+ %tmp4732 = getelementptr inbounds float, float* %tmp4731, i64 1
+ %tmp4733 = getelementptr inbounds float, float* %tmp4732, i64 1
+ %tmp4734 = getelementptr inbounds float, float* %tmp4733, i64 1
+ %tmp4735 = getelementptr inbounds float, float* %tmp4734, i64 1
+ %tmp4736 = getelementptr inbounds float, float* %tmp4735, i64 1
+ %tmp4737 = getelementptr inbounds float, float* %tmp4736, i64 1
+ %tmp4738 = getelementptr inbounds float, float* %tmp4737, i64 1
+ %tmp4739 = getelementptr inbounds float, float* %tmp4738, i64 1
+ %tmp4740 = getelementptr inbounds float, float* %tmp4739, i64 1
+ %tmp4741 = getelementptr inbounds float, float* %tmp4740, i64 1
+ %tmp4742 = getelementptr inbounds float, float* %tmp4741, i64 1
+ %tmp4743 = getelementptr inbounds float, float* %tmp4742, i64 1
+ %tmp4744 = getelementptr inbounds float, float* %tmp4743, i64 1
+ %tmp4745 = getelementptr inbounds float, float* %tmp4744, i64 1
+ %tmp4746 = getelementptr inbounds float, float* %tmp4745, i64 1
+ %tmp4747 = getelementptr inbounds float, float* %tmp4746, i64 1
+ %tmp4748 = getelementptr inbounds float, float* %tmp4747, i64 1
+ %tmp4749 = getelementptr inbounds float, float* %tmp4748, i64 1
+ %tmp4750 = getelementptr inbounds float, float* %tmp4749, i64 1
+ %tmp4751 = getelementptr inbounds float, float* %tmp4750, i64 1
+ %tmp4752 = getelementptr inbounds float, float* %tmp4751, i64 1
+ %tmp4753 = getelementptr inbounds float, float* %tmp4752, i64 1
+ %tmp4754 = getelementptr inbounds float, float* %tmp4753, i64 1
+ %tmp4755 = getelementptr inbounds float, float* %tmp4754, i64 1
+ %tmp4756 = getelementptr inbounds float, float* %tmp4755, i64 1
+ %tmp4757 = getelementptr inbounds float, float* %tmp4756, i64 1
+ %tmp4758 = getelementptr inbounds float, float* %tmp4757, i64 1
+ %tmp4759 = getelementptr inbounds float, float* %tmp4758, i64 1
+ %tmp4760 = getelementptr inbounds float, float* %tmp4759, i64 1
+ %tmp4761 = getelementptr inbounds float, float* %tmp4760, i64 1
+ %tmp4762 = getelementptr inbounds float, float* %tmp4761, i64 1
+ %tmp4763 = getelementptr inbounds float, float* %tmp4762, i64 1
+ %tmp4764 = getelementptr inbounds float, float* %tmp4763, i64 1
+ %tmp4765 = getelementptr inbounds float, float* %tmp4764, i64 1
+ %tmp4766 = getelementptr inbounds float, float* %tmp4765, i64 1
+ %tmp4767 = getelementptr inbounds float, float* %tmp4766, i64 1
+ %tmp4768 = getelementptr inbounds float, float* %tmp4767, i64 1
+ %tmp4769 = getelementptr inbounds float, float* %tmp4768, i64 1
+ %tmp4770 = getelementptr inbounds float, float* %tmp4769, i64 1
+ %tmp4771 = getelementptr inbounds float, float* %tmp4770, i64 1
+ %tmp4772 = getelementptr inbounds float, float* %tmp4771, i64 1
+ %tmp4773 = getelementptr inbounds float, float* %tmp4772, i64 1
+ %tmp4774 = getelementptr inbounds float, float* %tmp4773, i64 1
+ %tmp4775 = getelementptr inbounds float, float* %tmp4774, i64 1
+ %tmp4776 = getelementptr inbounds float, float* %tmp4775, i64 1
+ %tmp4777 = getelementptr inbounds float, float* %tmp4776, i64 1
+ %tmp4778 = getelementptr inbounds float, float* %tmp4777, i64 1
+ %tmp4779 = getelementptr inbounds float, float* %tmp4778, i64 1
+ %tmp4780 = getelementptr inbounds float, float* %tmp4779, i64 1
+ %tmp4781 = getelementptr inbounds float, float* %tmp4780, i64 1
+ %tmp4782 = getelementptr inbounds float, float* %tmp4781, i64 1
+ %tmp4783 = getelementptr inbounds float, float* %tmp4782, i64 1
+ %tmp4784 = getelementptr inbounds float, float* %tmp4783, i64 1
+ %tmp4785 = getelementptr inbounds float, float* %tmp4784, i64 1
+ %tmp4786 = getelementptr inbounds float, float* %tmp4785, i64 1
+ %tmp4787 = getelementptr inbounds float, float* %tmp4786, i64 1
+ %tmp4788 = getelementptr inbounds float, float* %tmp4787, i64 1
+ %tmp4789 = getelementptr inbounds float, float* %tmp4788, i64 1
+ %tmp4790 = getelementptr inbounds float, float* %tmp4789, i64 1
+ %tmp4791 = getelementptr inbounds float, float* %tmp4790, i64 1
+ %tmp4792 = getelementptr inbounds float, float* %tmp4791, i64 1
+ %tmp4793 = getelementptr inbounds float, float* %tmp4792, i64 1
+ %tmp4794 = getelementptr inbounds float, float* %tmp4793, i64 1
+ %tmp4795 = getelementptr inbounds float, float* %tmp4794, i64 1
+ %tmp4796 = getelementptr inbounds float, float* %tmp4795, i64 1
+ %tmp4797 = getelementptr inbounds float, float* %tmp4796, i64 1
+ %tmp4798 = getelementptr inbounds float, float* %tmp4797, i64 1
+ %tmp4799 = getelementptr inbounds float, float* %tmp4798, i64 1
+ %tmp4800 = getelementptr inbounds float, float* %tmp4799, i64 1
+ %tmp4801 = getelementptr inbounds float, float* %tmp4800, i64 1
+ %tmp4802 = getelementptr inbounds float, float* %tmp4801, i64 1
+ %tmp4803 = getelementptr inbounds float, float* %tmp4802, i64 1
+ %tmp4804 = getelementptr inbounds float, float* %tmp4803, i64 1
+ %tmp4805 = getelementptr inbounds float, float* %tmp4804, i64 1
+ %tmp4806 = getelementptr inbounds float, float* %tmp4805, i64 1
+ %tmp4807 = getelementptr inbounds float, float* %tmp4806, i64 1
+ %tmp4808 = getelementptr inbounds float, float* %tmp4807, i64 1
+ %tmp4809 = getelementptr inbounds float, float* %tmp4808, i64 1
+ %tmp4810 = getelementptr inbounds float, float* %tmp4809, i64 1
+ %tmp4811 = getelementptr inbounds float, float* %tmp4810, i64 1
+ %tmp4812 = getelementptr inbounds float, float* %tmp4811, i64 1
+ %tmp4813 = getelementptr inbounds float, float* %tmp4812, i64 1
+ %tmp4814 = getelementptr inbounds float, float* %tmp4813, i64 1
+ %tmp4815 = getelementptr inbounds float, float* %tmp4814, i64 1
+ %tmp4816 = getelementptr inbounds float, float* %tmp4815, i64 1
+ %tmp4817 = getelementptr inbounds float, float* %tmp4816, i64 1
+ %tmp4818 = getelementptr inbounds float, float* %tmp4817, i64 1
+ %tmp4819 = getelementptr inbounds float, float* %tmp4818, i64 1
+ %tmp4820 = getelementptr inbounds float, float* %tmp4819, i64 1
+ %tmp4821 = getelementptr inbounds float, float* %tmp4820, i64 1
+ %tmp4822 = getelementptr inbounds float, float* %tmp4821, i64 1
+ %tmp4823 = getelementptr inbounds float, float* %tmp4822, i64 1
+ %tmp4824 = getelementptr inbounds float, float* %tmp4823, i64 1
+ %tmp4825 = getelementptr inbounds float, float* %tmp4824, i64 1
+ %tmp4826 = getelementptr inbounds float, float* %tmp4825, i64 1
+ %tmp4827 = getelementptr inbounds float, float* %tmp4826, i64 1
+ %tmp4828 = getelementptr inbounds float, float* %tmp4827, i64 1
+ %tmp4829 = getelementptr inbounds float, float* %tmp4828, i64 1
+ %tmp4830 = getelementptr inbounds float, float* %tmp4829, i64 1
+ %tmp4831 = getelementptr inbounds float, float* %tmp4830, i64 1
+ %tmp4832 = getelementptr inbounds float, float* %tmp4831, i64 1
+ %tmp4833 = getelementptr inbounds float, float* %tmp4832, i64 1
+ %tmp4834 = getelementptr inbounds float, float* %tmp4833, i64 1
+ %tmp4835 = getelementptr inbounds float, float* %tmp4834, i64 1
+ %tmp4836 = getelementptr inbounds float, float* %tmp4835, i64 1
+ %tmp4837 = getelementptr inbounds float, float* %tmp4836, i64 1
+ %tmp4838 = getelementptr inbounds float, float* %tmp4837, i64 1
+ %tmp4839 = getelementptr inbounds float, float* %tmp4838, i64 1
+ %tmp4840 = getelementptr inbounds float, float* %tmp4839, i64 1
+ %tmp4841 = getelementptr inbounds float, float* %tmp4840, i64 1
+ %tmp4842 = getelementptr inbounds float, float* %tmp4841, i64 1
+ %tmp4843 = getelementptr inbounds float, float* %tmp4842, i64 1
+ %tmp4844 = getelementptr inbounds float, float* %tmp4843, i64 1
+ %tmp4845 = getelementptr inbounds float, float* %tmp4844, i64 1
+ %tmp4846 = getelementptr inbounds float, float* %tmp4845, i64 1
+ %tmp4847 = getelementptr inbounds float, float* %tmp4846, i64 1
+ %tmp4848 = getelementptr inbounds float, float* %tmp4847, i64 1
+ %tmp4849 = getelementptr inbounds float, float* %tmp4848, i64 1
+ %tmp4850 = getelementptr inbounds float, float* %tmp4849, i64 1
+ %tmp4851 = getelementptr inbounds float, float* %tmp4850, i64 1
+ %tmp4852 = getelementptr inbounds float, float* %tmp4851, i64 1
+ %tmp4853 = getelementptr inbounds float, float* %tmp4852, i64 1
+ %tmp4854 = getelementptr inbounds float, float* %tmp4853, i64 1
+ %tmp4855 = getelementptr inbounds float, float* %tmp4854, i64 1
+ %tmp4856 = getelementptr inbounds float, float* %tmp4855, i64 1
+ %tmp4857 = getelementptr inbounds float, float* %tmp4856, i64 1
+ %tmp4858 = getelementptr inbounds float, float* %tmp4857, i64 1
+ %tmp4859 = getelementptr inbounds float, float* %tmp4858, i64 1
+ %tmp4860 = getelementptr inbounds float, float* %tmp4859, i64 1
+ %tmp4861 = getelementptr inbounds float, float* %tmp4860, i64 1
+ %tmp4862 = getelementptr inbounds float, float* %tmp4861, i64 1
+ %tmp4863 = getelementptr inbounds float, float* %tmp4862, i64 1
+ %tmp4864 = getelementptr inbounds float, float* %tmp4863, i64 1
+ %tmp4865 = getelementptr inbounds float, float* %tmp4864, i64 1
+ %tmp4866 = getelementptr inbounds float, float* %tmp4865, i64 1
+ %tmp4867 = getelementptr inbounds float, float* %tmp4866, i64 1
+ %tmp4868 = getelementptr inbounds float, float* %tmp4867, i64 1
+ %tmp4869 = getelementptr inbounds float, float* %tmp4868, i64 1
+ %tmp4870 = getelementptr inbounds float, float* %tmp4869, i64 1
+ %tmp4871 = getelementptr inbounds float, float* %tmp4870, i64 1
+ %tmp4872 = getelementptr inbounds float, float* %tmp4871, i64 1
+ %tmp4873 = getelementptr inbounds float, float* %tmp4872, i64 1
+ %tmp4874 = getelementptr inbounds float, float* %tmp4873, i64 1
+ %tmp4875 = getelementptr inbounds float, float* %tmp4874, i64 1
+ %tmp4876 = getelementptr inbounds float, float* %tmp4875, i64 1
+ %tmp4877 = getelementptr inbounds float, float* %tmp4876, i64 1
+ %tmp4878 = getelementptr inbounds float, float* %tmp4877, i64 1
+ %tmp4879 = getelementptr inbounds float, float* %tmp4878, i64 1
+ %tmp4880 = getelementptr inbounds float, float* %tmp4879, i64 1
+ %tmp4881 = getelementptr inbounds float, float* %tmp4880, i64 1
+ %tmp4882 = getelementptr inbounds float, float* %tmp4881, i64 1
+ %tmp4883 = getelementptr inbounds float, float* %tmp4882, i64 1
+ %tmp4884 = getelementptr inbounds float, float* %tmp4883, i64 1
+ %tmp4885 = getelementptr inbounds float, float* %tmp4884, i64 1
+ %tmp4886 = getelementptr inbounds float, float* %tmp4885, i64 1
+ %tmp4887 = getelementptr inbounds float, float* %tmp4886, i64 1
+ %tmp4888 = getelementptr inbounds float, float* %tmp4887, i64 1
+ %tmp4889 = getelementptr inbounds float, float* %tmp4888, i64 1
+ %tmp4890 = getelementptr inbounds float, float* %tmp4889, i64 1
+ %tmp4891 = getelementptr inbounds float, float* %tmp4890, i64 1
+ %tmp4892 = getelementptr inbounds float, float* %tmp4891, i64 1
+ %tmp4893 = getelementptr inbounds float, float* %tmp4892, i64 1
+ %tmp4894 = getelementptr inbounds float, float* %tmp4893, i64 1
+ %tmp4895 = getelementptr inbounds float, float* %tmp4894, i64 1
+ %tmp4896 = getelementptr inbounds float, float* %tmp4895, i64 1
+ %tmp4897 = getelementptr inbounds float, float* %tmp4896, i64 1
+ %tmp4898 = getelementptr inbounds float, float* %tmp4897, i64 1
+ %tmp4899 = getelementptr inbounds float, float* %tmp4898, i64 1
+ %tmp4900 = getelementptr inbounds float, float* %tmp4899, i64 1
+ %tmp4901 = getelementptr inbounds float, float* %tmp4900, i64 1
+ %tmp4902 = getelementptr inbounds float, float* %tmp4901, i64 1
+ %tmp4903 = getelementptr inbounds float, float* %tmp4902, i64 1
+ %tmp4904 = getelementptr inbounds float, float* %tmp4903, i64 1
+ %tmp4905 = getelementptr inbounds float, float* %tmp4904, i64 1
+ %tmp4906 = getelementptr inbounds float, float* %tmp4905, i64 1
+ %tmp4907 = getelementptr inbounds float, float* %tmp4906, i64 1
+ %tmp4908 = getelementptr inbounds float, float* %tmp4907, i64 1
+ %tmp4909 = getelementptr inbounds float, float* %tmp4908, i64 1
+ %tmp4910 = getelementptr inbounds float, float* %tmp4909, i64 1
+ %tmp4911 = getelementptr inbounds float, float* %tmp4910, i64 1
+ %tmp4912 = getelementptr inbounds float, float* %tmp4911, i64 1
+ %tmp4913 = getelementptr inbounds float, float* %tmp4912, i64 1
+ %tmp4914 = getelementptr inbounds float, float* %tmp4913, i64 1
+ %tmp4915 = getelementptr inbounds float, float* %tmp4914, i64 1
+ %tmp4916 = getelementptr inbounds float, float* %tmp4915, i64 1
+ %tmp4917 = getelementptr inbounds float, float* %tmp4916, i64 1
+ %tmp4918 = getelementptr inbounds float, float* %tmp4917, i64 1
+ %tmp4919 = getelementptr inbounds float, float* %tmp4918, i64 1
+ %tmp4920 = getelementptr inbounds float, float* %tmp4919, i64 1
+ %tmp4921 = getelementptr inbounds float, float* %tmp4920, i64 1
+ %tmp4922 = getelementptr inbounds float, float* %tmp4921, i64 1
+ %tmp4923 = getelementptr inbounds float, float* %tmp4922, i64 1
+ %tmp4924 = getelementptr inbounds float, float* %tmp4923, i64 1
+ %tmp4925 = getelementptr inbounds float, float* %tmp4924, i64 1
+ %tmp4926 = getelementptr inbounds float, float* %tmp4925, i64 1
+ %tmp4927 = getelementptr inbounds float, float* %tmp4926, i64 1
+ %tmp4928 = getelementptr inbounds float, float* %tmp4927, i64 1
+ %tmp4929 = getelementptr inbounds float, float* %tmp4928, i64 1
+ %tmp4930 = getelementptr inbounds float, float* %tmp4929, i64 1
+ %tmp4931 = getelementptr inbounds float, float* %tmp4930, i64 1
+ %tmp4932 = getelementptr inbounds float, float* %tmp4931, i64 1
+ %tmp4933 = getelementptr inbounds float, float* %tmp4932, i64 1
+ %tmp4934 = getelementptr inbounds float, float* %tmp4933, i64 1
+ %tmp4935 = getelementptr inbounds float, float* %tmp4934, i64 1
+ %tmp4936 = getelementptr inbounds float, float* %tmp4935, i64 1
+ %tmp4937 = getelementptr inbounds float, float* %tmp4936, i64 1
+ %tmp4938 = getelementptr inbounds float, float* %tmp4937, i64 1
+ %tmp4939 = getelementptr inbounds float, float* %tmp4938, i64 1
+ %tmp4940 = getelementptr inbounds float, float* %tmp4939, i64 1
+ %tmp4941 = getelementptr inbounds float, float* %tmp4940, i64 1
+ %tmp4942 = getelementptr inbounds float, float* %tmp4941, i64 1
+ %tmp4943 = getelementptr inbounds float, float* %tmp4942, i64 1
+ %tmp4944 = getelementptr inbounds float, float* %tmp4943, i64 1
+ %tmp4945 = getelementptr inbounds float, float* %tmp4944, i64 1
+ %tmp4946 = getelementptr inbounds float, float* %tmp4945, i64 1
+ %tmp4947 = getelementptr inbounds float, float* %tmp4946, i64 1
+ %tmp4948 = getelementptr inbounds float, float* %tmp4947, i64 1
+ %tmp4949 = getelementptr inbounds float, float* %tmp4948, i64 1
+ %tmp4950 = getelementptr inbounds float, float* %tmp4949, i64 1
+ %tmp4951 = getelementptr inbounds float, float* %tmp4950, i64 1
+ %tmp4952 = getelementptr inbounds float, float* %tmp4951, i64 1
+ %tmp4953 = getelementptr inbounds float, float* %tmp4952, i64 1
+ %tmp4954 = getelementptr inbounds float, float* %tmp4953, i64 1
+ %tmp4955 = getelementptr inbounds float, float* %tmp4954, i64 1
+ %tmp4956 = getelementptr inbounds float, float* %tmp4955, i64 1
+ %tmp4957 = getelementptr inbounds float, float* %tmp4956, i64 1
+ %tmp4958 = getelementptr inbounds float, float* %tmp4957, i64 1
+ %tmp4959 = getelementptr inbounds float, float* %tmp4958, i64 1
+ %tmp4960 = getelementptr inbounds float, float* %tmp4959, i64 1
+ %tmp4961 = getelementptr inbounds float, float* %tmp4960, i64 1
+ %tmp4962 = getelementptr inbounds float, float* %tmp4961, i64 1
+ %tmp4963 = getelementptr inbounds float, float* %tmp4962, i64 1
+ %tmp4964 = getelementptr inbounds float, float* %tmp4963, i64 1
+ %tmp4965 = getelementptr inbounds float, float* %tmp4964, i64 1
+ %tmp4966 = getelementptr inbounds float, float* %tmp4965, i64 1
+ %tmp4967 = getelementptr inbounds float, float* %tmp4966, i64 1
+ %tmp4968 = getelementptr inbounds float, float* %tmp4967, i64 1
+ %tmp4969 = getelementptr inbounds float, float* %tmp4968, i64 1
+ %tmp4970 = getelementptr inbounds float, float* %tmp4969, i64 1
+ %tmp4971 = getelementptr inbounds float, float* %tmp4970, i64 1
+ %tmp4972 = getelementptr inbounds float, float* %tmp4971, i64 1
+ %tmp4973 = getelementptr inbounds float, float* %tmp4972, i64 1
+ %tmp4974 = getelementptr inbounds float, float* %tmp4973, i64 1
+ %tmp4975 = getelementptr inbounds float, float* %tmp4974, i64 1
+ %tmp4976 = getelementptr inbounds float, float* %tmp4975, i64 1
+ %tmp4977 = getelementptr inbounds float, float* %tmp4976, i64 1
+ %tmp4978 = getelementptr inbounds float, float* %tmp4977, i64 1
+ %tmp4979 = getelementptr inbounds float, float* %tmp4978, i64 1
+ %tmp4980 = getelementptr inbounds float, float* %tmp4979, i64 1
+ %tmp4981 = getelementptr inbounds float, float* %tmp4980, i64 1
+ %tmp4982 = getelementptr inbounds float, float* %tmp4981, i64 1
+ %tmp4983 = getelementptr inbounds float, float* %tmp4982, i64 1
+ %tmp4984 = getelementptr inbounds float, float* %tmp4983, i64 1
+ %tmp4985 = getelementptr inbounds float, float* %tmp4984, i64 1
+ %tmp4986 = getelementptr inbounds float, float* %tmp4985, i64 1
+ %tmp4987 = getelementptr inbounds float, float* %tmp4986, i64 1
+ %tmp4988 = getelementptr inbounds float, float* %tmp4987, i64 1
+ %tmp4989 = getelementptr inbounds float, float* %tmp4988, i64 1
+ %tmp4990 = getelementptr inbounds float, float* %tmp4989, i64 1
+ %tmp4991 = getelementptr inbounds float, float* %tmp4990, i64 1
+ %tmp4992 = getelementptr inbounds float, float* %tmp4991, i64 1
+ %tmp4993 = getelementptr inbounds float, float* %tmp4992, i64 1
+ %tmp4994 = getelementptr inbounds float, float* %tmp4993, i64 1
+ %tmp4995 = getelementptr inbounds float, float* %tmp4994, i64 1
+ %tmp4996 = getelementptr inbounds float, float* %tmp4995, i64 1
+ %tmp4997 = getelementptr inbounds float, float* %tmp4996, i64 1
+ %tmp4998 = getelementptr inbounds float, float* %tmp4997, i64 1
+ %tmp4999 = getelementptr inbounds float, float* %tmp4998, i64 1
+ %tmp5000 = getelementptr inbounds float, float* %tmp4999, i64 1
+ %tmp5001 = getelementptr inbounds float, float* %tmp5000, i64 1
+ %tmp5002 = getelementptr inbounds float, float* %tmp5001, i64 1
+ %tmp5003 = getelementptr inbounds float, float* %tmp5002, i64 1
+ %tmp5004 = getelementptr inbounds float, float* %tmp5003, i64 1
+ %tmp5005 = getelementptr inbounds float, float* %tmp5004, i64 1
+ %tmp5006 = getelementptr inbounds float, float* %tmp5005, i64 1
+ %tmp5007 = getelementptr inbounds float, float* %tmp5006, i64 1
+ %tmp5008 = getelementptr inbounds float, float* %tmp5007, i64 1
+ %tmp5009 = getelementptr inbounds float, float* %tmp5008, i64 1
+ %tmp5010 = getelementptr inbounds float, float* %tmp5009, i64 1
+ %tmp5011 = getelementptr inbounds float, float* %tmp5010, i64 1
+ %tmp5012 = getelementptr inbounds float, float* %tmp5011, i64 1
+ %tmp5013 = getelementptr inbounds float, float* %tmp5012, i64 1
+ %tmp5014 = getelementptr inbounds float, float* %tmp5013, i64 1
+ %tmp5015 = getelementptr inbounds float, float* %tmp5014, i64 1
+ %tmp5016 = getelementptr inbounds float, float* %tmp5015, i64 1
+ %tmp5017 = getelementptr inbounds float, float* %tmp5016, i64 1
+ %tmp5018 = getelementptr inbounds float, float* %tmp5017, i64 1
+ %tmp5019 = getelementptr inbounds float, float* %tmp5018, i64 1
+ %tmp5020 = getelementptr inbounds float, float* %tmp5019, i64 1
+ %tmp5021 = getelementptr inbounds float, float* %tmp5020, i64 1
+ %tmp5022 = getelementptr inbounds float, float* %tmp5021, i64 1
+ %tmp5023 = getelementptr inbounds float, float* %tmp5022, i64 1
+ %tmp5024 = getelementptr inbounds float, float* %tmp5023, i64 1
+ %tmp5025 = getelementptr inbounds float, float* %tmp5024, i64 1
+ %tmp5026 = getelementptr inbounds float, float* %tmp5025, i64 1
+ %tmp5027 = getelementptr inbounds float, float* %tmp5026, i64 1
+ %tmp5028 = getelementptr inbounds float, float* %tmp5027, i64 1
+ %tmp5029 = getelementptr inbounds float, float* %tmp5028, i64 1
+ %tmp5030 = getelementptr inbounds float, float* %tmp5029, i64 1
+ %tmp5031 = getelementptr inbounds float, float* %tmp5030, i64 1
+ %tmp5032 = getelementptr inbounds float, float* %tmp5031, i64 1
+ %tmp5033 = getelementptr inbounds float, float* %tmp5032, i64 1
+ %tmp5034 = getelementptr inbounds float, float* %tmp5033, i64 1
+ %tmp5035 = getelementptr inbounds float, float* %tmp5034, i64 1
+ %tmp5036 = getelementptr inbounds float, float* %tmp5035, i64 1
+ %tmp5037 = getelementptr inbounds float, float* %tmp5036, i64 1
+ %tmp5038 = getelementptr inbounds float, float* %tmp5037, i64 1
+ %tmp5039 = getelementptr inbounds float, float* %tmp5038, i64 1
+ %tmp5040 = getelementptr inbounds float, float* %tmp5039, i64 1
+ %tmp5041 = getelementptr inbounds float, float* %tmp5040, i64 1
+ %tmp5042 = getelementptr inbounds float, float* %tmp5041, i64 1
+ %tmp5043 = getelementptr inbounds float, float* %tmp5042, i64 1
+ %tmp5044 = getelementptr inbounds float, float* %tmp5043, i64 1
+ %tmp5045 = getelementptr inbounds float, float* %tmp5044, i64 1
+ %tmp5046 = getelementptr inbounds float, float* %tmp5045, i64 1
+ %tmp5047 = getelementptr inbounds float, float* %tmp5046, i64 1
+ %tmp5048 = getelementptr inbounds float, float* %tmp5047, i64 1
+ %tmp5049 = getelementptr inbounds float, float* %tmp5048, i64 1
+ %tmp5050 = getelementptr inbounds float, float* %tmp5049, i64 1
+ %tmp5051 = getelementptr inbounds float, float* %tmp5050, i64 1
+ %tmp5052 = getelementptr inbounds float, float* %tmp5051, i64 1
+ %tmp5053 = getelementptr inbounds float, float* %tmp5052, i64 1
+ %tmp5054 = getelementptr inbounds float, float* %tmp5053, i64 1
+ %tmp5055 = getelementptr inbounds float, float* %tmp5054, i64 1
+ %tmp5056 = getelementptr inbounds float, float* %tmp5055, i64 1
+ %tmp5057 = getelementptr inbounds float, float* %tmp5056, i64 1
+ %tmp5058 = getelementptr inbounds float, float* %tmp5057, i64 1
+ %tmp5059 = getelementptr inbounds float, float* %tmp5058, i64 1
+ %tmp5060 = getelementptr inbounds float, float* %tmp5059, i64 1
+ %tmp5061 = getelementptr inbounds float, float* %tmp5060, i64 1
+ %tmp5062 = getelementptr inbounds float, float* %tmp5061, i64 1
+ %tmp5063 = getelementptr inbounds float, float* %tmp5062, i64 1
+ %tmp5064 = getelementptr inbounds float, float* %tmp5063, i64 1
+ %tmp5065 = getelementptr inbounds float, float* %tmp5064, i64 1
+ %tmp5066 = getelementptr inbounds float, float* %tmp5065, i64 1
+ %tmp5067 = getelementptr inbounds float, float* %tmp5066, i64 1
+ %tmp5068 = getelementptr inbounds float, float* %tmp5067, i64 1
+ %tmp5069 = getelementptr inbounds float, float* %tmp5068, i64 1
+ %tmp5070 = getelementptr inbounds float, float* %tmp5069, i64 1
+ %tmp5071 = getelementptr inbounds float, float* %tmp5070, i64 1
+ %tmp5072 = getelementptr inbounds float, float* %tmp5071, i64 1
+ %tmp5073 = getelementptr inbounds float, float* %tmp5072, i64 1
+ %tmp5074 = getelementptr inbounds float, float* %tmp5073, i64 1
+ %tmp5075 = getelementptr inbounds float, float* %tmp5074, i64 1
+ %tmp5076 = getelementptr inbounds float, float* %tmp5075, i64 1
+ %tmp5077 = getelementptr inbounds float, float* %tmp5076, i64 1
+ %tmp5078 = getelementptr inbounds float, float* %tmp5077, i64 1
+ %tmp5079 = getelementptr inbounds float, float* %tmp5078, i64 1
+ %tmp5080 = getelementptr inbounds float, float* %tmp5079, i64 1
+ %tmp5081 = getelementptr inbounds float, float* %tmp5080, i64 1
+ %tmp5082 = getelementptr inbounds float, float* %tmp5081, i64 1
+ %tmp5083 = getelementptr inbounds float, float* %tmp5082, i64 1
+ %tmp5084 = getelementptr inbounds float, float* %tmp5083, i64 1
+ %tmp5085 = getelementptr inbounds float, float* %tmp5084, i64 1
+ %tmp5086 = getelementptr inbounds float, float* %tmp5085, i64 1
+ %tmp5087 = getelementptr inbounds float, float* %tmp5086, i64 1
+ %tmp5088 = getelementptr inbounds float, float* %tmp5087, i64 1
+ %tmp5089 = getelementptr inbounds float, float* %tmp5088, i64 1
+ %tmp5090 = getelementptr inbounds float, float* %tmp5089, i64 1
+ %tmp5091 = getelementptr inbounds float, float* %tmp5090, i64 1
+ %tmp5092 = getelementptr inbounds float, float* %tmp5091, i64 1
+ %tmp5093 = getelementptr inbounds float, float* %tmp5092, i64 1
+ %tmp5094 = getelementptr inbounds float, float* %tmp5093, i64 1
+ %tmp5095 = getelementptr inbounds float, float* %tmp5094, i64 1
+ %tmp5096 = getelementptr inbounds float, float* %tmp5095, i64 1
+ %tmp5097 = getelementptr inbounds float, float* %tmp5096, i64 1
+ %tmp5098 = getelementptr inbounds float, float* %tmp5097, i64 1
+ %tmp5099 = getelementptr inbounds float, float* %tmp5098, i64 1
+ %tmp5100 = getelementptr inbounds float, float* %tmp5099, i64 1
+ %tmp5101 = getelementptr inbounds float, float* %tmp5100, i64 1
+ %tmp5102 = getelementptr inbounds float, float* %tmp5101, i64 1
+ %tmp5103 = getelementptr inbounds float, float* %tmp5102, i64 1
+ %tmp5104 = getelementptr inbounds float, float* %tmp5103, i64 1
+ %tmp5105 = getelementptr inbounds float, float* %tmp5104, i64 1
+ %tmp5106 = getelementptr inbounds float, float* %tmp5105, i64 1
+ %tmp5107 = getelementptr inbounds float, float* %tmp5106, i64 1
+ %tmp5108 = getelementptr inbounds float, float* %tmp5107, i64 1
+ %tmp5109 = getelementptr inbounds float, float* %tmp5108, i64 1
+ %tmp5110 = getelementptr inbounds float, float* %tmp5109, i64 1
+ %tmp5111 = getelementptr inbounds float, float* %tmp5110, i64 1
+ %tmp5112 = getelementptr inbounds float, float* %tmp5111, i64 1
+ %tmp5113 = getelementptr inbounds float, float* %tmp5112, i64 1
+ %tmp5114 = getelementptr inbounds float, float* %tmp5113, i64 1
+ %tmp5115 = getelementptr inbounds float, float* %tmp5114, i64 1
+ %tmp5116 = getelementptr inbounds float, float* %tmp5115, i64 1
+ %tmp5117 = getelementptr inbounds float, float* %tmp5116, i64 1
+ %tmp5118 = getelementptr inbounds float, float* %tmp5117, i64 1
+ %tmp5119 = getelementptr inbounds float, float* %tmp5118, i64 1
+ %tmp5120 = getelementptr inbounds float, float* %tmp5119, i64 1
+ %tmp5121 = getelementptr inbounds float, float* %tmp5120, i64 1
+ %tmp5122 = getelementptr inbounds float, float* %tmp5121, i64 1
+ %tmp5123 = getelementptr inbounds float, float* %tmp5122, i64 1
+ %tmp5124 = getelementptr inbounds float, float* %tmp5123, i64 1
+ %tmp5125 = getelementptr inbounds float, float* %tmp5124, i64 1
+ %tmp5126 = getelementptr inbounds float, float* %tmp5125, i64 1
+ %tmp5127 = getelementptr inbounds float, float* %tmp5126, i64 1
+ %tmp5128 = getelementptr inbounds float, float* %tmp5127, i64 1
+ %tmp5129 = getelementptr inbounds float, float* %tmp5128, i64 1
+ %tmp5130 = getelementptr inbounds float, float* %tmp5129, i64 1
+ %tmp5131 = getelementptr inbounds float, float* %tmp5130, i64 1
+ %tmp5132 = getelementptr inbounds float, float* %tmp5131, i64 1
+ %tmp5133 = getelementptr inbounds float, float* %tmp5132, i64 1
+ %tmp5134 = getelementptr inbounds float, float* %tmp5133, i64 1
+ %tmp5135 = getelementptr inbounds float, float* %tmp5134, i64 1
+ %tmp5136 = getelementptr inbounds float, float* %tmp5135, i64 1
+ %tmp5137 = getelementptr inbounds float, float* %tmp5136, i64 1
+ %tmp5138 = getelementptr inbounds float, float* %tmp5137, i64 1
+ %tmp5139 = getelementptr inbounds float, float* %tmp5138, i64 1
+ %tmp5140 = getelementptr inbounds float, float* %tmp5139, i64 1
+ %tmp5141 = getelementptr inbounds float, float* %tmp5140, i64 1
+ %tmp5142 = getelementptr inbounds float, float* %tmp5141, i64 1
+ %tmp5143 = getelementptr inbounds float, float* %tmp5142, i64 1
+ %tmp5144 = getelementptr inbounds float, float* %tmp5143, i64 1
+ %tmp5145 = getelementptr inbounds float, float* %tmp5144, i64 1
+ %tmp5146 = getelementptr inbounds float, float* %tmp5145, i64 1
+ %tmp5147 = getelementptr inbounds float, float* %tmp5146, i64 1
+ %tmp5148 = getelementptr inbounds float, float* %tmp5147, i64 1
+ %tmp5149 = getelementptr inbounds float, float* %tmp5148, i64 1
+ %tmp5150 = getelementptr inbounds float, float* %tmp5149, i64 1
+ %tmp5151 = getelementptr inbounds float, float* %tmp5150, i64 1
+ %tmp5152 = getelementptr inbounds float, float* %tmp5151, i64 1
+ %tmp5153 = getelementptr inbounds float, float* %tmp5152, i64 1
+ %tmp5154 = getelementptr inbounds float, float* %tmp5153, i64 1
+ %tmp5155 = getelementptr inbounds float, float* %tmp5154, i64 1
+ %tmp5156 = getelementptr inbounds float, float* %tmp5155, i64 1
+ %tmp5157 = getelementptr inbounds float, float* %tmp5156, i64 1
+ %tmp5158 = getelementptr inbounds float, float* %tmp5157, i64 1
+ %tmp5159 = getelementptr inbounds float, float* %tmp5158, i64 1
+ %tmp5160 = getelementptr inbounds float, float* %tmp5159, i64 1
+ %tmp5161 = getelementptr inbounds float, float* %tmp5160, i64 1
+ %tmp5162 = getelementptr inbounds float, float* %tmp5161, i64 1
+ %tmp5163 = getelementptr inbounds float, float* %tmp5162, i64 1
+ %tmp5164 = getelementptr inbounds float, float* %tmp5163, i64 1
+ %tmp5165 = getelementptr inbounds float, float* %tmp5164, i64 1
+ %tmp5166 = getelementptr inbounds float, float* %tmp5165, i64 1
+ %tmp5167 = getelementptr inbounds float, float* %tmp5166, i64 1
+ %tmp5168 = getelementptr inbounds float, float* %tmp5167, i64 1
+ %tmp5169 = getelementptr inbounds float, float* %tmp5168, i64 1
+ %tmp5170 = getelementptr inbounds float, float* %tmp5169, i64 1
+ %tmp5171 = getelementptr inbounds float, float* %tmp5170, i64 1
+ %tmp5172 = getelementptr inbounds float, float* %tmp5171, i64 1
+ %tmp5173 = getelementptr inbounds float, float* %tmp5172, i64 1
+ %tmp5174 = getelementptr inbounds float, float* %tmp5173, i64 1
+ %tmp5175 = getelementptr inbounds float, float* %tmp5174, i64 1
+ %tmp5176 = getelementptr inbounds float, float* %tmp5175, i64 1
+ %tmp5177 = getelementptr inbounds float, float* %tmp5176, i64 1
+ %tmp5178 = getelementptr inbounds float, float* %tmp5177, i64 1
+ %tmp5179 = getelementptr inbounds float, float* %tmp5178, i64 1
+ %tmp5180 = getelementptr inbounds float, float* %tmp5179, i64 1
+ %tmp5181 = getelementptr inbounds float, float* %tmp5180, i64 1
+ %tmp5182 = getelementptr inbounds float, float* %tmp5181, i64 1
+ %tmp5183 = getelementptr inbounds float, float* %tmp5182, i64 1
+ %tmp5184 = getelementptr inbounds float, float* %tmp5183, i64 1
+ %tmp5185 = getelementptr inbounds float, float* %tmp5184, i64 1
+ %tmp5186 = getelementptr inbounds float, float* %tmp5185, i64 1
+ %tmp5187 = getelementptr inbounds float, float* %tmp5186, i64 1
+ %tmp5188 = getelementptr inbounds float, float* %tmp5187, i64 1
+ %tmp5189 = getelementptr inbounds float, float* %tmp5188, i64 1
+ %tmp5190 = getelementptr inbounds float, float* %tmp5189, i64 1
+ %tmp5191 = getelementptr inbounds float, float* %tmp5190, i64 1
+ %tmp5192 = getelementptr inbounds float, float* %tmp5191, i64 1
+ %tmp5193 = getelementptr inbounds float, float* %tmp5192, i64 1
+ %tmp5194 = getelementptr inbounds float, float* %tmp5193, i64 1
+ %tmp5195 = getelementptr inbounds float, float* %tmp5194, i64 1
+ %tmp5196 = getelementptr inbounds float, float* %tmp5195, i64 1
+ %tmp5197 = getelementptr inbounds float, float* %tmp5196, i64 1
+ %tmp5198 = getelementptr inbounds float, float* %tmp5197, i64 1
+ %tmp5199 = getelementptr inbounds float, float* %tmp5198, i64 1
+ %tmp5200 = getelementptr inbounds float, float* %tmp5199, i64 1
+ %tmp5201 = getelementptr inbounds float, float* %tmp5200, i64 1
+ %tmp5202 = getelementptr inbounds float, float* %tmp5201, i64 1
+ %tmp5203 = getelementptr inbounds float, float* %tmp5202, i64 1
+ %tmp5204 = getelementptr inbounds float, float* %tmp5203, i64 1
+ %tmp5205 = getelementptr inbounds float, float* %tmp5204, i64 1
+ %tmp5206 = getelementptr inbounds float, float* %tmp5205, i64 1
+ %tmp5207 = getelementptr inbounds float, float* %tmp5206, i64 1
+ %tmp5208 = getelementptr inbounds float, float* %tmp5207, i64 1
+ %tmp5209 = getelementptr inbounds float, float* %tmp5208, i64 1
+ %tmp5210 = getelementptr inbounds float, float* %tmp5209, i64 1
+ %tmp5211 = getelementptr inbounds float, float* %tmp5210, i64 1
+ %tmp5212 = getelementptr inbounds float, float* %tmp5211, i64 1
+ %tmp5213 = getelementptr inbounds float, float* %tmp5212, i64 1
+ %tmp5214 = getelementptr inbounds float, float* %tmp5213, i64 1
+ %tmp5215 = getelementptr inbounds float, float* %tmp5214, i64 1
+ %tmp5216 = getelementptr inbounds float, float* %tmp5215, i64 1
+ %tmp5217 = getelementptr inbounds float, float* %tmp5216, i64 1
+ %tmp5218 = getelementptr inbounds float, float* %tmp5217, i64 1
+ %tmp5219 = getelementptr inbounds float, float* %tmp5218, i64 1
+ %tmp5220 = getelementptr inbounds float, float* %tmp5219, i64 1
+ %tmp5221 = getelementptr inbounds float, float* %tmp5220, i64 1
+ %tmp5222 = getelementptr inbounds float, float* %tmp5221, i64 1
+ %tmp5223 = getelementptr inbounds float, float* %tmp5222, i64 1
+ %tmp5224 = getelementptr inbounds float, float* %tmp5223, i64 1
+ %tmp5225 = getelementptr inbounds float, float* %tmp5224, i64 1
+ %tmp5226 = getelementptr inbounds float, float* %tmp5225, i64 1
+ %tmp5227 = getelementptr inbounds float, float* %tmp5226, i64 1
+ %tmp5228 = getelementptr inbounds float, float* %tmp5227, i64 1
+ %tmp5229 = getelementptr inbounds float, float* %tmp5228, i64 1
+ %tmp5230 = getelementptr inbounds float, float* %tmp5229, i64 1
+ %tmp5231 = getelementptr inbounds float, float* %tmp5230, i64 1
+ %tmp5232 = getelementptr inbounds float, float* %tmp5231, i64 1
+ %tmp5233 = getelementptr inbounds float, float* %tmp5232, i64 1
+ %tmp5234 = getelementptr inbounds float, float* %tmp5233, i64 1
+ %tmp5235 = getelementptr inbounds float, float* %tmp5234, i64 1
+ %tmp5236 = getelementptr inbounds float, float* %tmp5235, i64 1
+ %tmp5237 = getelementptr inbounds float, float* %tmp5236, i64 1
+ %tmp5238 = getelementptr inbounds float, float* %tmp5237, i64 1
+ %tmp5239 = getelementptr inbounds float, float* %tmp5238, i64 1
+ %tmp5240 = getelementptr inbounds float, float* %tmp5239, i64 1
+ %tmp5241 = getelementptr inbounds float, float* %tmp5240, i64 1
+ %tmp5242 = getelementptr inbounds float, float* %tmp5241, i64 1
+ %tmp5243 = getelementptr inbounds float, float* %tmp5242, i64 1
+ %tmp5244 = getelementptr inbounds float, float* %tmp5243, i64 1
+ %tmp5245 = getelementptr inbounds float, float* %tmp5244, i64 1
+ %tmp5246 = getelementptr inbounds float, float* %tmp5245, i64 1
+ %tmp5247 = getelementptr inbounds float, float* %tmp5246, i64 1
+ %tmp5248 = getelementptr inbounds float, float* %tmp5247, i64 1
+ %tmp5249 = getelementptr inbounds float, float* %tmp5248, i64 1
+ %tmp5250 = getelementptr inbounds float, float* %tmp5249, i64 1
+ %tmp5251 = getelementptr inbounds float, float* %tmp5250, i64 1
+ %tmp5252 = getelementptr inbounds float, float* %tmp5251, i64 1
+ %tmp5253 = getelementptr inbounds float, float* %tmp5252, i64 1
+ %tmp5254 = getelementptr inbounds float, float* %tmp5253, i64 1
+ %tmp5255 = getelementptr inbounds float, float* %tmp5254, i64 1
+ %tmp5256 = getelementptr inbounds float, float* %tmp5255, i64 1
+ %tmp5257 = getelementptr inbounds float, float* %tmp5256, i64 1
+ %tmp5258 = getelementptr inbounds float, float* %tmp5257, i64 1
+ %tmp5259 = getelementptr inbounds float, float* %tmp5258, i64 1
+ %tmp5260 = getelementptr inbounds float, float* %tmp5259, i64 1
+ %tmp5261 = getelementptr inbounds float, float* %tmp5260, i64 1
+ %tmp5262 = getelementptr inbounds float, float* %tmp5261, i64 1
+ %tmp5263 = getelementptr inbounds float, float* %tmp5262, i64 1
+ %tmp5264 = getelementptr inbounds float, float* %tmp5263, i64 1
+ %tmp5265 = getelementptr inbounds float, float* %tmp5264, i64 1
+ %tmp5266 = getelementptr inbounds float, float* %tmp5265, i64 1
+ %tmp5267 = getelementptr inbounds float, float* %tmp5266, i64 1
+ %tmp5268 = getelementptr inbounds float, float* %tmp5267, i64 1
+ %tmp5269 = getelementptr inbounds float, float* %tmp5268, i64 1
+ %tmp5270 = getelementptr inbounds float, float* %tmp5269, i64 1
+ %tmp5271 = getelementptr inbounds float, float* %tmp5270, i64 1
+ %tmp5272 = getelementptr inbounds float, float* %tmp5271, i64 1
+ %tmp5273 = getelementptr inbounds float, float* %tmp5272, i64 1
+ %tmp5274 = getelementptr inbounds float, float* %tmp5273, i64 1
+ %tmp5275 = getelementptr inbounds float, float* %tmp5274, i64 1
+ %tmp5276 = getelementptr inbounds float, float* %tmp5275, i64 1
+ %tmp5277 = getelementptr inbounds float, float* %tmp5276, i64 1
+ %tmp5278 = getelementptr inbounds float, float* %tmp5277, i64 1
+ %tmp5279 = getelementptr inbounds float, float* %tmp5278, i64 1
+ %tmp5280 = getelementptr inbounds float, float* %tmp5279, i64 1
+ %tmp5281 = getelementptr inbounds float, float* %tmp5280, i64 1
+ %tmp5282 = getelementptr inbounds float, float* %tmp5281, i64 1
+ %tmp5283 = getelementptr inbounds float, float* %tmp5282, i64 1
+ %tmp5284 = getelementptr inbounds float, float* %tmp5283, i64 1
+ %tmp5285 = getelementptr inbounds float, float* %tmp5284, i64 1
+ %tmp5286 = getelementptr inbounds float, float* %tmp5285, i64 1
+ %tmp5287 = getelementptr inbounds float, float* %tmp5286, i64 1
+ %tmp5288 = getelementptr inbounds float, float* %tmp5287, i64 1
+ %tmp5289 = getelementptr inbounds float, float* %tmp5288, i64 1
+ %tmp5290 = getelementptr inbounds float, float* %tmp5289, i64 1
+ %tmp5291 = getelementptr inbounds float, float* %tmp5290, i64 1
+ %tmp5292 = getelementptr inbounds float, float* %tmp5291, i64 1
+ %tmp5293 = getelementptr inbounds float, float* %tmp5292, i64 1
+ %tmp5294 = getelementptr inbounds float, float* %tmp5293, i64 1
+ %tmp5295 = getelementptr inbounds float, float* %tmp5294, i64 1
+ %tmp5296 = getelementptr inbounds float, float* %tmp5295, i64 1
+ %tmp5297 = getelementptr inbounds float, float* %tmp5296, i64 1
+ %tmp5298 = getelementptr inbounds float, float* %tmp5297, i64 1
+ %tmp5299 = getelementptr inbounds float, float* %tmp5298, i64 1
+ %tmp5300 = getelementptr inbounds float, float* %tmp5299, i64 1
+ %tmp5301 = getelementptr inbounds float, float* %tmp5300, i64 1
+ %tmp5302 = getelementptr inbounds float, float* %tmp5301, i64 1
+ %tmp5303 = getelementptr inbounds float, float* %tmp5302, i64 1
+ %tmp5304 = getelementptr inbounds float, float* %tmp5303, i64 1
+ %tmp5305 = getelementptr inbounds float, float* %tmp5304, i64 1
+ %tmp5306 = getelementptr inbounds float, float* %tmp5305, i64 1
+ %tmp5307 = getelementptr inbounds float, float* %tmp5306, i64 1
+ %tmp5308 = getelementptr inbounds float, float* %tmp5307, i64 1
+ %tmp5309 = getelementptr inbounds float, float* %tmp5308, i64 1
+ %tmp5310 = getelementptr inbounds float, float* %tmp5309, i64 1
+ %tmp5311 = getelementptr inbounds float, float* %tmp5310, i64 1
+ %tmp5312 = getelementptr inbounds float, float* %tmp5311, i64 1
+ %tmp5313 = getelementptr inbounds float, float* %tmp5312, i64 1
+ %tmp5314 = getelementptr inbounds float, float* %tmp5313, i64 1
+ %tmp5315 = getelementptr inbounds float, float* %tmp5314, i64 1
+ %tmp5316 = getelementptr inbounds float, float* %tmp5315, i64 1
+ %tmp5317 = getelementptr inbounds float, float* %tmp5316, i64 1
+ %tmp5318 = getelementptr inbounds float, float* %tmp5317, i64 1
+ %tmp5319 = getelementptr inbounds float, float* %tmp5318, i64 1
+ %tmp5320 = getelementptr inbounds float, float* %tmp5319, i64 1
+ %tmp5321 = getelementptr inbounds float, float* %tmp5320, i64 1
+ %tmp5322 = getelementptr inbounds float, float* %tmp5321, i64 1
+ %tmp5323 = getelementptr inbounds float, float* %tmp5322, i64 1
+ %tmp5324 = getelementptr inbounds float, float* %tmp5323, i64 1
+ %tmp5325 = getelementptr inbounds float, float* %tmp5324, i64 1
+ %tmp5326 = getelementptr inbounds float, float* %tmp5325, i64 1
+ %tmp5327 = getelementptr inbounds float, float* %tmp5326, i64 1
+ %tmp5328 = getelementptr inbounds float, float* %tmp5327, i64 1
+ %tmp5329 = getelementptr inbounds float, float* %tmp5328, i64 1
+ %tmp5330 = getelementptr inbounds float, float* %tmp5329, i64 1
+ %tmp5331 = getelementptr inbounds float, float* %tmp5330, i64 1
+ %tmp5332 = getelementptr inbounds float, float* %tmp5331, i64 1
+ %tmp5333 = getelementptr inbounds float, float* %tmp5332, i64 1
+ %tmp5334 = getelementptr inbounds float, float* %tmp5333, i64 1
+ %tmp5335 = getelementptr inbounds float, float* %tmp5334, i64 1
+ %tmp5336 = getelementptr inbounds float, float* %tmp5335, i64 1
+ %tmp5337 = getelementptr inbounds float, float* %tmp5336, i64 1
+ %tmp5338 = getelementptr inbounds float, float* %tmp5337, i64 1
+ %tmp5339 = getelementptr inbounds float, float* %tmp5338, i64 1
+ %tmp5340 = getelementptr inbounds float, float* %tmp5339, i64 1
+ %tmp5341 = getelementptr inbounds float, float* %tmp5340, i64 1
+ %tmp5342 = getelementptr inbounds float, float* %tmp5341, i64 1
+ %tmp5343 = getelementptr inbounds float, float* %tmp5342, i64 1
+ %tmp5344 = getelementptr inbounds float, float* %tmp5343, i64 1
+ %tmp5345 = getelementptr inbounds float, float* %tmp5344, i64 1
+ %tmp5346 = getelementptr inbounds float, float* %tmp5345, i64 1
+ %tmp5347 = getelementptr inbounds float, float* %tmp5346, i64 1
+ %tmp5348 = getelementptr inbounds float, float* %tmp5347, i64 1
+ %tmp5349 = getelementptr inbounds float, float* %tmp5348, i64 1
+ %tmp5350 = getelementptr inbounds float, float* %tmp5349, i64 1
+ %tmp5351 = getelementptr inbounds float, float* %tmp5350, i64 1
+ %tmp5352 = getelementptr inbounds float, float* %tmp5351, i64 1
+ %tmp5353 = getelementptr inbounds float, float* %tmp5352, i64 1
+ %tmp5354 = getelementptr inbounds float, float* %tmp5353, i64 1
+ %tmp5355 = getelementptr inbounds float, float* %tmp5354, i64 1
+ %tmp5356 = getelementptr inbounds float, float* %tmp5355, i64 1
+ %tmp5357 = getelementptr inbounds float, float* %tmp5356, i64 1
+ %tmp5358 = getelementptr inbounds float, float* %tmp5357, i64 1
+ %tmp5359 = getelementptr inbounds float, float* %tmp5358, i64 1
+ %tmp5360 = getelementptr inbounds float, float* %tmp5359, i64 1
+ %tmp5361 = getelementptr inbounds float, float* %tmp5360, i64 1
+ %tmp5362 = getelementptr inbounds float, float* %tmp5361, i64 1
+ %tmp5363 = getelementptr inbounds float, float* %tmp5362, i64 1
+ %tmp5364 = getelementptr inbounds float, float* %tmp5363, i64 1
+ %tmp5365 = getelementptr inbounds float, float* %tmp5364, i64 1
+ %tmp5366 = getelementptr inbounds float, float* %tmp5365, i64 1
+ %tmp5367 = getelementptr inbounds float, float* %tmp5366, i64 1
+ %tmp5368 = getelementptr inbounds float, float* %tmp5367, i64 1
+ %tmp5369 = getelementptr inbounds float, float* %tmp5368, i64 1
+ %tmp5370 = getelementptr inbounds float, float* %tmp5369, i64 1
+ %tmp5371 = getelementptr inbounds float, float* %tmp5370, i64 1
+ %tmp5372 = getelementptr inbounds float, float* %tmp5371, i64 1
+ %tmp5373 = getelementptr inbounds float, float* %tmp5372, i64 1
+ %tmp5374 = getelementptr inbounds float, float* %tmp5373, i64 1
+ %tmp5375 = getelementptr inbounds float, float* %tmp5374, i64 1
+ %tmp5376 = getelementptr inbounds float, float* %tmp5375, i64 1
+ %tmp5377 = getelementptr inbounds float, float* %tmp5376, i64 1
+ %tmp5378 = getelementptr inbounds float, float* %tmp5377, i64 1
+ %tmp5379 = getelementptr inbounds float, float* %tmp5378, i64 1
+ %tmp5380 = getelementptr inbounds float, float* %tmp5379, i64 1
+ %tmp5381 = getelementptr inbounds float, float* %tmp5380, i64 1
+ %tmp5382 = getelementptr inbounds float, float* %tmp5381, i64 1
+ %tmp5383 = getelementptr inbounds float, float* %tmp5382, i64 1
+ %tmp5384 = getelementptr inbounds float, float* %tmp5383, i64 1
+ %tmp5385 = getelementptr inbounds float, float* %tmp5384, i64 1
+ %tmp5386 = getelementptr inbounds float, float* %tmp5385, i64 1
+ %tmp5387 = getelementptr inbounds float, float* %tmp5386, i64 1
+ %tmp5388 = getelementptr inbounds float, float* %tmp5387, i64 1
+ %tmp5389 = getelementptr inbounds float, float* %tmp5388, i64 1
+ %tmp5390 = getelementptr inbounds float, float* %tmp5389, i64 1
+ %tmp5391 = getelementptr inbounds float, float* %tmp5390, i64 1
+ %tmp5392 = getelementptr inbounds float, float* %tmp5391, i64 1
+ %tmp5393 = getelementptr inbounds float, float* %tmp5392, i64 1
+ %tmp5394 = getelementptr inbounds float, float* %tmp5393, i64 1
+ %tmp5395 = getelementptr inbounds float, float* %tmp5394, i64 1
+ %tmp5396 = getelementptr inbounds float, float* %tmp5395, i64 1
+ %tmp5397 = getelementptr inbounds float, float* %tmp5396, i64 1
+ %tmp5398 = getelementptr inbounds float, float* %tmp5397, i64 1
+ %tmp5399 = getelementptr inbounds float, float* %tmp5398, i64 1
+ %tmp5400 = getelementptr inbounds float, float* %tmp5399, i64 1
+ %tmp5401 = getelementptr inbounds float, float* %tmp5400, i64 1
+ %tmp5402 = getelementptr inbounds float, float* %tmp5401, i64 1
+ %tmp5403 = getelementptr inbounds float, float* %tmp5402, i64 1
+ %tmp5404 = getelementptr inbounds float, float* %tmp5403, i64 1
+ %tmp5405 = getelementptr inbounds float, float* %tmp5404, i64 1
+ %tmp5406 = getelementptr inbounds float, float* %tmp5405, i64 1
+ %tmp5407 = getelementptr inbounds float, float* %tmp5406, i64 1
+ %tmp5408 = getelementptr inbounds float, float* %tmp5407, i64 1
+ %tmp5409 = getelementptr inbounds float, float* %tmp5408, i64 1
+ %tmp5410 = getelementptr inbounds float, float* %tmp5409, i64 1
+ %tmp5411 = getelementptr inbounds float, float* %tmp5410, i64 1
+ %tmp5412 = getelementptr inbounds float, float* %tmp5411, i64 1
+ %tmp5413 = getelementptr inbounds float, float* %tmp5412, i64 1
+ %tmp5414 = getelementptr inbounds float, float* %tmp5413, i64 1
+ %tmp5415 = getelementptr inbounds float, float* %tmp5414, i64 1
+ %tmp5416 = getelementptr inbounds float, float* %tmp5415, i64 1
+ %tmp5417 = getelementptr inbounds float, float* %tmp5416, i64 1
+ %tmp5418 = getelementptr inbounds float, float* %tmp5417, i64 1
+ %tmp5419 = getelementptr inbounds float, float* %tmp5418, i64 1
+ %tmp5420 = getelementptr inbounds float, float* %tmp5419, i64 1
+ %tmp5421 = getelementptr inbounds float, float* %tmp5420, i64 1
+ %tmp5422 = getelementptr inbounds float, float* %tmp5421, i64 1
+ %tmp5423 = getelementptr inbounds float, float* %tmp5422, i64 1
+ %tmp5424 = getelementptr inbounds float, float* %tmp5423, i64 1
+ %tmp5425 = getelementptr inbounds float, float* %tmp5424, i64 1
+ %tmp5426 = getelementptr inbounds float, float* %tmp5425, i64 1
+ %tmp5427 = getelementptr inbounds float, float* %tmp5426, i64 1
+ %tmp5428 = getelementptr inbounds float, float* %tmp5427, i64 1
+ %tmp5429 = getelementptr inbounds float, float* %tmp5428, i64 1
+ %tmp5430 = getelementptr inbounds float, float* %tmp5429, i64 1
+ %tmp5431 = getelementptr inbounds float, float* %tmp5430, i64 1
+ %tmp5432 = getelementptr inbounds float, float* %tmp5431, i64 1
+ %tmp5433 = getelementptr inbounds float, float* %tmp5432, i64 1
+ %tmp5434 = getelementptr inbounds float, float* %tmp5433, i64 1
+ %tmp5435 = getelementptr inbounds float, float* %tmp5434, i64 1
+ %tmp5436 = getelementptr inbounds float, float* %tmp5435, i64 1
+ %tmp5437 = getelementptr inbounds float, float* %tmp5436, i64 1
+ %tmp5438 = getelementptr inbounds float, float* %tmp5437, i64 1
+ %tmp5439 = getelementptr inbounds float, float* %tmp5438, i64 1
+ %tmp5440 = getelementptr inbounds float, float* %tmp5439, i64 1
+ %tmp5441 = getelementptr inbounds float, float* %tmp5440, i64 1
+ %tmp5442 = getelementptr inbounds float, float* %tmp5441, i64 1
+ %tmp5443 = getelementptr inbounds float, float* %tmp5442, i64 1
+ %tmp5444 = getelementptr inbounds float, float* %tmp5443, i64 1
+ %tmp5445 = getelementptr inbounds float, float* %tmp5444, i64 1
+ %tmp5446 = getelementptr inbounds float, float* %tmp5445, i64 1
+ %tmp5447 = getelementptr inbounds float, float* %tmp5446, i64 1
+ %tmp5448 = getelementptr inbounds float, float* %tmp5447, i64 1
+ %tmp5449 = getelementptr inbounds float, float* %tmp5448, i64 1
+ %tmp5450 = getelementptr inbounds float, float* %tmp5449, i64 1
+ %tmp5451 = getelementptr inbounds float, float* %tmp5450, i64 1
+ %tmp5452 = getelementptr inbounds float, float* %tmp5451, i64 1
+ %tmp5453 = getelementptr inbounds float, float* %tmp5452, i64 1
+ %tmp5454 = getelementptr inbounds float, float* %tmp5453, i64 1
+ %tmp5455 = getelementptr inbounds float, float* %tmp5454, i64 1
+ %tmp5456 = getelementptr inbounds float, float* %tmp5455, i64 1
+ %tmp5457 = getelementptr inbounds float, float* %tmp5456, i64 1
+ %tmp5458 = getelementptr inbounds float, float* %tmp5457, i64 1
+ %tmp5459 = getelementptr inbounds float, float* %tmp5458, i64 1
+ %tmp5460 = getelementptr inbounds float, float* %tmp5459, i64 1
+ %tmp5461 = getelementptr inbounds float, float* %tmp5460, i64 1
+ %tmp5462 = getelementptr inbounds float, float* %tmp5461, i64 1
+ %tmp5463 = getelementptr inbounds float, float* %tmp5462, i64 1
+ %tmp5464 = getelementptr inbounds float, float* %tmp5463, i64 1
+ %tmp5465 = getelementptr inbounds float, float* %tmp5464, i64 1
+ %tmp5466 = getelementptr inbounds float, float* %tmp5465, i64 1
+ %tmp5467 = getelementptr inbounds float, float* %tmp5466, i64 1
+ %tmp5468 = getelementptr inbounds float, float* %tmp5467, i64 1
+ %tmp5469 = getelementptr inbounds float, float* %tmp5468, i64 1
+ %tmp5470 = getelementptr inbounds float, float* %tmp5469, i64 1
+ %tmp5471 = getelementptr inbounds float, float* %tmp5470, i64 1
+ %tmp5472 = getelementptr inbounds float, float* %tmp5471, i64 1
+ %tmp5473 = getelementptr inbounds float, float* %tmp5472, i64 1
+ %tmp5474 = getelementptr inbounds float, float* %tmp5473, i64 1
+ %tmp5475 = getelementptr inbounds float, float* %tmp5474, i64 1
+ %tmp5476 = getelementptr inbounds float, float* %tmp5475, i64 1
+ %tmp5477 = getelementptr inbounds float, float* %tmp5476, i64 1
+ %tmp5478 = getelementptr inbounds float, float* %tmp5477, i64 1
+ %tmp5479 = getelementptr inbounds float, float* %tmp5478, i64 1
+ %tmp5480 = getelementptr inbounds float, float* %tmp5479, i64 1
+ %tmp5481 = getelementptr inbounds float, float* %tmp5480, i64 1
+ %tmp5482 = getelementptr inbounds float, float* %tmp5481, i64 1
+ %tmp5483 = getelementptr inbounds float, float* %tmp5482, i64 1
+ %tmp5484 = getelementptr inbounds float, float* %tmp5483, i64 1
+ %tmp5485 = getelementptr inbounds float, float* %tmp5484, i64 1
+ %tmp5486 = getelementptr inbounds float, float* %tmp5485, i64 1
+ %tmp5487 = getelementptr inbounds float, float* %tmp5486, i64 1
+ %tmp5488 = getelementptr inbounds float, float* %tmp5487, i64 1
+ %tmp5489 = getelementptr inbounds float, float* %tmp5488, i64 1
+ %tmp5490 = getelementptr inbounds float, float* %tmp5489, i64 1
+ %tmp5491 = getelementptr inbounds float, float* %tmp5490, i64 1
+ %tmp5492 = getelementptr inbounds float, float* %tmp5491, i64 1
+ %tmp5493 = getelementptr inbounds float, float* %tmp5492, i64 1
+ %tmp5494 = getelementptr inbounds float, float* %tmp5493, i64 1
+ %tmp5495 = getelementptr inbounds float, float* %tmp5494, i64 1
+ %tmp5496 = getelementptr inbounds float, float* %tmp5495, i64 1
+ %tmp5497 = getelementptr inbounds float, float* %tmp5496, i64 1
+ %tmp5498 = getelementptr inbounds float, float* %tmp5497, i64 1
+ %tmp5499 = getelementptr inbounds float, float* %tmp5498, i64 1
+ %tmp5500 = getelementptr inbounds float, float* %tmp5499, i64 1
+ %tmp5501 = getelementptr inbounds float, float* %tmp5500, i64 1
+ %tmp5502 = getelementptr inbounds float, float* %tmp5501, i64 1
+ %tmp5503 = getelementptr inbounds float, float* %tmp5502, i64 1
+ %tmp5504 = getelementptr inbounds float, float* %tmp5503, i64 1
+ %tmp5505 = getelementptr inbounds float, float* %tmp5504, i64 1
+ %tmp5506 = getelementptr inbounds float, float* %tmp5505, i64 1
+ %tmp5507 = getelementptr inbounds float, float* %tmp5506, i64 1
+ %tmp5508 = getelementptr inbounds float, float* %tmp5507, i64 1
+ %tmp5509 = getelementptr inbounds float, float* %tmp5508, i64 1
+ %tmp5510 = getelementptr inbounds float, float* %tmp5509, i64 1
+ %tmp5511 = getelementptr inbounds float, float* %tmp5510, i64 1
+ %tmp5512 = getelementptr inbounds float, float* %tmp5511, i64 1
+ %tmp5513 = getelementptr inbounds float, float* %tmp5512, i64 1
+ %tmp5514 = getelementptr inbounds float, float* %tmp5513, i64 1
+ %tmp5515 = getelementptr inbounds float, float* %tmp5514, i64 1
+ %tmp5516 = getelementptr inbounds float, float* %tmp5515, i64 1
+ %tmp5517 = getelementptr inbounds float, float* %tmp5516, i64 1
+ %tmp5518 = getelementptr inbounds float, float* %tmp5517, i64 1
+ %tmp5519 = getelementptr inbounds float, float* %tmp5518, i64 1
+ %tmp5520 = getelementptr inbounds float, float* %tmp5519, i64 1
+ %tmp5521 = getelementptr inbounds float, float* %tmp5520, i64 1
+ %tmp5522 = getelementptr inbounds float, float* %tmp5521, i64 1
+ %tmp5523 = getelementptr inbounds float, float* %tmp5522, i64 1
+ %tmp5524 = getelementptr inbounds float, float* %tmp5523, i64 1
+ %tmp5525 = getelementptr inbounds float, float* %tmp5524, i64 1
+ %tmp5526 = getelementptr inbounds float, float* %tmp5525, i64 1
+ %tmp5527 = getelementptr inbounds float, float* %tmp5526, i64 1
+ %tmp5528 = getelementptr inbounds float, float* %tmp5527, i64 1
+ %tmp5529 = getelementptr inbounds float, float* %tmp5528, i64 1
+ %tmp5530 = getelementptr inbounds float, float* %tmp5529, i64 1
+ %tmp5531 = getelementptr inbounds float, float* %tmp5530, i64 1
+ %tmp5532 = getelementptr inbounds float, float* %tmp5531, i64 1
+ %tmp5533 = getelementptr inbounds float, float* %tmp5532, i64 1
+ %tmp5534 = getelementptr inbounds float, float* %tmp5533, i64 1
+ %tmp5535 = getelementptr inbounds float, float* %tmp5534, i64 1
+ %tmp5536 = getelementptr inbounds float, float* %tmp5535, i64 1
+ %tmp5537 = getelementptr inbounds float, float* %tmp5536, i64 1
+ %tmp5538 = getelementptr inbounds float, float* %tmp5537, i64 1
+ %tmp5539 = getelementptr inbounds float, float* %tmp5538, i64 1
+ %tmp5540 = getelementptr inbounds float, float* %tmp5539, i64 1
+ %tmp5541 = getelementptr inbounds float, float* %tmp5540, i64 1
+ %tmp5542 = getelementptr inbounds float, float* %tmp5541, i64 1
+ %tmp5543 = getelementptr inbounds float, float* %tmp5542, i64 1
+ %tmp5544 = getelementptr inbounds float, float* %tmp5543, i64 1
+ %tmp5545 = getelementptr inbounds float, float* %tmp5544, i64 1
+ %tmp5546 = getelementptr inbounds float, float* %tmp5545, i64 1
+ %tmp5547 = getelementptr inbounds float, float* %tmp5546, i64 1
+ %tmp5548 = getelementptr inbounds float, float* %tmp5547, i64 1
+ %tmp5549 = getelementptr inbounds float, float* %tmp5548, i64 1
+ %tmp5550 = getelementptr inbounds float, float* %tmp5549, i64 1
+ %tmp5551 = getelementptr inbounds float, float* %tmp5550, i64 1
+ %tmp5552 = getelementptr inbounds float, float* %tmp5551, i64 1
+ %tmp5553 = getelementptr inbounds float, float* %tmp5552, i64 1
+ %tmp5554 = getelementptr inbounds float, float* %tmp5553, i64 1
+ %tmp5555 = getelementptr inbounds float, float* %tmp5554, i64 1
+ %tmp5556 = getelementptr inbounds float, float* %tmp5555, i64 1
+ %tmp5557 = getelementptr inbounds float, float* %tmp5556, i64 1
+ %tmp5558 = getelementptr inbounds float, float* %tmp5557, i64 1
+ %tmp5559 = getelementptr inbounds float, float* %tmp5558, i64 1
+ %tmp5560 = getelementptr inbounds float, float* %tmp5559, i64 1
+ %tmp5561 = getelementptr inbounds float, float* %tmp5560, i64 1
+ %tmp5562 = getelementptr inbounds float, float* %tmp5561, i64 1
+ %tmp5563 = getelementptr inbounds float, float* %tmp5562, i64 1
+ %tmp5564 = getelementptr inbounds float, float* %tmp5563, i64 1
+ %tmp5565 = getelementptr inbounds float, float* %tmp5564, i64 1
+ %tmp5566 = getelementptr inbounds float, float* %tmp5565, i64 1
+ %tmp5567 = getelementptr inbounds float, float* %tmp5566, i64 1
+ %tmp5568 = getelementptr inbounds float, float* %tmp5567, i64 1
+ %tmp5569 = getelementptr inbounds float, float* %tmp5568, i64 1
+ %tmp5570 = getelementptr inbounds float, float* %tmp5569, i64 1
+ %tmp5571 = getelementptr inbounds float, float* %tmp5570, i64 1
+ %tmp5572 = getelementptr inbounds float, float* %tmp5571, i64 1
+ %tmp5573 = getelementptr inbounds float, float* %tmp5572, i64 1
+ %tmp5574 = getelementptr inbounds float, float* %tmp5573, i64 1
+ %tmp5575 = getelementptr inbounds float, float* %tmp5574, i64 1
+ %tmp5576 = getelementptr inbounds float, float* %tmp5575, i64 1
+ %tmp5577 = getelementptr inbounds float, float* %tmp5576, i64 1
+ %tmp5578 = getelementptr inbounds float, float* %tmp5577, i64 1
+ %tmp5579 = getelementptr inbounds float, float* %tmp5578, i64 1
+ %tmp5580 = getelementptr inbounds float, float* %tmp5579, i64 1
+ %tmp5581 = getelementptr inbounds float, float* %tmp5580, i64 1
+ %tmp5582 = getelementptr inbounds float, float* %tmp5581, i64 1
+ %tmp5583 = getelementptr inbounds float, float* %tmp5582, i64 1
+ %tmp5584 = getelementptr inbounds float, float* %tmp5583, i64 1
+ %tmp5585 = getelementptr inbounds float, float* %tmp5584, i64 1
+ %tmp5586 = getelementptr inbounds float, float* %tmp5585, i64 1
+ %tmp5587 = getelementptr inbounds float, float* %tmp5586, i64 1
+ %tmp5588 = getelementptr inbounds float, float* %tmp5587, i64 1
+ %tmp5589 = getelementptr inbounds float, float* %tmp5588, i64 1
+ %tmp5590 = getelementptr inbounds float, float* %tmp5589, i64 1
+ %tmp5591 = getelementptr inbounds float, float* %tmp5590, i64 1
+ %tmp5592 = getelementptr inbounds float, float* %tmp5591, i64 1
+ %tmp5593 = getelementptr inbounds float, float* %tmp5592, i64 1
+ %tmp5594 = getelementptr inbounds float, float* %tmp5593, i64 1
+ %tmp5595 = getelementptr inbounds float, float* %tmp5594, i64 1
+ %tmp5596 = getelementptr inbounds float, float* %tmp5595, i64 1
+ %tmp5597 = getelementptr inbounds float, float* %tmp5596, i64 1
+ %tmp5598 = getelementptr inbounds float, float* %tmp5597, i64 1
+ %tmp5599 = getelementptr inbounds float, float* %tmp5598, i64 1
+ %tmp5600 = getelementptr inbounds float, float* %tmp5599, i64 1
+ %tmp5601 = getelementptr inbounds float, float* %tmp5600, i64 1
+ %tmp5602 = getelementptr inbounds float, float* %tmp5601, i64 1
+ %tmp5603 = getelementptr inbounds float, float* %tmp5602, i64 1
+ %tmp5604 = getelementptr inbounds float, float* %tmp5603, i64 1
+ %tmp5605 = getelementptr inbounds float, float* %tmp5604, i64 1
+ %tmp5606 = getelementptr inbounds float, float* %tmp5605, i64 1
+ %tmp5607 = getelementptr inbounds float, float* %tmp5606, i64 1
+ %tmp5608 = getelementptr inbounds float, float* %tmp5607, i64 1
+ %tmp5609 = getelementptr inbounds float, float* %tmp5608, i64 1
+ %tmp5610 = getelementptr inbounds float, float* %tmp5609, i64 1
+ %tmp5611 = getelementptr inbounds float, float* %tmp5610, i64 1
+ %tmp5612 = getelementptr inbounds float, float* %tmp5611, i64 1
+ %tmp5613 = getelementptr inbounds float, float* %tmp5612, i64 1
+ %tmp5614 = getelementptr inbounds float, float* %tmp5613, i64 1
+ %tmp5615 = getelementptr inbounds float, float* %tmp5614, i64 1
+ %tmp5616 = getelementptr inbounds float, float* %tmp5615, i64 1
+ %tmp5617 = getelementptr inbounds float, float* %tmp5616, i64 1
+ %tmp5618 = getelementptr inbounds float, float* %tmp5617, i64 1
+ %tmp5619 = getelementptr inbounds float, float* %tmp5618, i64 1
+ %tmp5620 = getelementptr inbounds float, float* %tmp5619, i64 1
+ %tmp5621 = getelementptr inbounds float, float* %tmp5620, i64 1
+ %tmp5622 = getelementptr inbounds float, float* %tmp5621, i64 1
+ %tmp5623 = getelementptr inbounds float, float* %tmp5622, i64 1
+ %tmp5624 = getelementptr inbounds float, float* %tmp5623, i64 1
+ %tmp5625 = getelementptr inbounds float, float* %tmp5624, i64 1
+ %tmp5626 = getelementptr inbounds float, float* %tmp5625, i64 1
+ %tmp5627 = getelementptr inbounds float, float* %tmp5626, i64 1
+ %tmp5628 = getelementptr inbounds float, float* %tmp5627, i64 1
+ %tmp5629 = getelementptr inbounds float, float* %tmp5628, i64 1
+ %tmp5630 = getelementptr inbounds float, float* %tmp5629, i64 1
+ %tmp5631 = getelementptr inbounds float, float* %tmp5630, i64 1
+ %tmp5632 = getelementptr inbounds float, float* %tmp5631, i64 1
+ %tmp5633 = getelementptr inbounds float, float* %tmp5632, i64 1
+ %tmp5634 = getelementptr inbounds float, float* %tmp5633, i64 1
+ %tmp5635 = getelementptr inbounds float, float* %tmp5634, i64 1
+ %tmp5636 = getelementptr inbounds float, float* %tmp5635, i64 1
+ %tmp5637 = getelementptr inbounds float, float* %tmp5636, i64 1
+ %tmp5638 = getelementptr inbounds float, float* %tmp5637, i64 1
+ %tmp5639 = getelementptr inbounds float, float* %tmp5638, i64 1
+ %tmp5640 = getelementptr inbounds float, float* %tmp5639, i64 1
+ %tmp5641 = getelementptr inbounds float, float* %tmp5640, i64 1
+ %tmp5642 = getelementptr inbounds float, float* %tmp5641, i64 1
+ %tmp5643 = getelementptr inbounds float, float* %tmp5642, i64 1
+ %tmp5644 = getelementptr inbounds float, float* %tmp5643, i64 1
+ %tmp5645 = getelementptr inbounds float, float* %tmp5644, i64 1
+ %tmp5646 = getelementptr inbounds float, float* %tmp5645, i64 1
+ %tmp5647 = getelementptr inbounds float, float* %tmp5646, i64 1
+ %tmp5648 = getelementptr inbounds float, float* %tmp5647, i64 1
+ %tmp5649 = getelementptr inbounds float, float* %tmp5648, i64 1
+ %tmp5650 = getelementptr inbounds float, float* %tmp5649, i64 1
+ %tmp5651 = getelementptr inbounds float, float* %tmp5650, i64 1
+ %tmp5652 = getelementptr inbounds float, float* %tmp5651, i64 1
+ %tmp5653 = getelementptr inbounds float, float* %tmp5652, i64 1
+ %tmp5654 = getelementptr inbounds float, float* %tmp5653, i64 1
+ %tmp5655 = getelementptr inbounds float, float* %tmp5654, i64 1
+ %tmp5656 = getelementptr inbounds float, float* %tmp5655, i64 1
+ %tmp5657 = getelementptr inbounds float, float* %tmp5656, i64 1
+ %tmp5658 = getelementptr inbounds float, float* %tmp5657, i64 1
+ %tmp5659 = getelementptr inbounds float, float* %tmp5658, i64 1
+ %tmp5660 = getelementptr inbounds float, float* %tmp5659, i64 1
+ %tmp5661 = getelementptr inbounds float, float* %tmp5660, i64 1
+ %tmp5662 = getelementptr inbounds float, float* %tmp5661, i64 1
+ %tmp5663 = getelementptr inbounds float, float* %tmp5662, i64 1
+ %tmp5664 = getelementptr inbounds float, float* %tmp5663, i64 1
+ %tmp5665 = getelementptr inbounds float, float* %tmp5664, i64 1
+ %tmp5666 = getelementptr inbounds float, float* %tmp5665, i64 1
+ %tmp5667 = getelementptr inbounds float, float* %tmp5666, i64 1
+ %tmp5668 = getelementptr inbounds float, float* %tmp5667, i64 1
+ %tmp5669 = getelementptr inbounds float, float* %tmp5668, i64 1
+ %tmp5670 = getelementptr inbounds float, float* %tmp5669, i64 1
+ %tmp5671 = getelementptr inbounds float, float* %tmp5670, i64 1
+ %tmp5672 = getelementptr inbounds float, float* %tmp5671, i64 1
+ %tmp5673 = getelementptr inbounds float, float* %tmp5672, i64 1
+ %tmp5674 = getelementptr inbounds float, float* %tmp5673, i64 1
+ %tmp5675 = getelementptr inbounds float, float* %tmp5674, i64 1
+ %tmp5676 = getelementptr inbounds float, float* %tmp5675, i64 1
+ %tmp5677 = getelementptr inbounds float, float* %tmp5676, i64 1
+ %tmp5678 = getelementptr inbounds float, float* %tmp5677, i64 1
+ %tmp5679 = getelementptr inbounds float, float* %tmp5678, i64 1
+ %tmp5680 = getelementptr inbounds float, float* %tmp5679, i64 1
+ %tmp5681 = getelementptr inbounds float, float* %tmp5680, i64 1
+ %tmp5682 = getelementptr inbounds float, float* %tmp5681, i64 1
+ %tmp5683 = getelementptr inbounds float, float* %tmp5682, i64 1
+ %tmp5684 = getelementptr inbounds float, float* %tmp5683, i64 1
+ %tmp5685 = getelementptr inbounds float, float* %tmp5684, i64 1
+ %tmp5686 = getelementptr inbounds float, float* %tmp5685, i64 1
+ %tmp5687 = getelementptr inbounds float, float* %tmp5686, i64 1
+ %tmp5688 = getelementptr inbounds float, float* %tmp5687, i64 1
+ %tmp5689 = getelementptr inbounds float, float* %tmp5688, i64 1
+ %tmp5690 = getelementptr inbounds float, float* %tmp5689, i64 1
+ %tmp5691 = getelementptr inbounds float, float* %tmp5690, i64 1
+ %tmp5692 = getelementptr inbounds float, float* %tmp5691, i64 1
+ %tmp5693 = getelementptr inbounds float, float* %tmp5692, i64 1
+ %tmp5694 = getelementptr inbounds float, float* %tmp5693, i64 1
+ %tmp5695 = getelementptr inbounds float, float* %tmp5694, i64 1
+ %tmp5696 = getelementptr inbounds float, float* %tmp5695, i64 1
+ %tmp5697 = getelementptr inbounds float, float* %tmp5696, i64 1
+ %tmp5698 = getelementptr inbounds float, float* %tmp5697, i64 1
+ %tmp5699 = getelementptr inbounds float, float* %tmp5698, i64 1
+ %tmp5700 = getelementptr inbounds float, float* %tmp5699, i64 1
+ %tmp5701 = getelementptr inbounds float, float* %tmp5700, i64 1
+ %tmp5702 = getelementptr inbounds float, float* %tmp5701, i64 1
+ %tmp5703 = getelementptr inbounds float, float* %tmp5702, i64 1
+ %tmp5704 = getelementptr inbounds float, float* %tmp5703, i64 1
+ %tmp5705 = getelementptr inbounds float, float* %tmp5704, i64 1
+ %tmp5706 = getelementptr inbounds float, float* %tmp5705, i64 1
+ %tmp5707 = getelementptr inbounds float, float* %tmp5706, i64 1
+ %tmp5708 = getelementptr inbounds float, float* %tmp5707, i64 1
+ %tmp5709 = getelementptr inbounds float, float* %tmp5708, i64 1
+ %tmp5710 = getelementptr inbounds float, float* %tmp5709, i64 1
+ %tmp5711 = getelementptr inbounds float, float* %tmp5710, i64 1
+ %tmp5712 = getelementptr inbounds float, float* %tmp5711, i64 1
+ %tmp5713 = getelementptr inbounds float, float* %tmp5712, i64 1
+ %tmp5714 = getelementptr inbounds float, float* %tmp5713, i64 1
+ %tmp5715 = getelementptr inbounds float, float* %tmp5714, i64 1
+ %tmp5716 = getelementptr inbounds float, float* %tmp5715, i64 1
+ %tmp5717 = getelementptr inbounds float, float* %tmp5716, i64 1
+ %tmp5718 = getelementptr inbounds float, float* %tmp5717, i64 1
+ %tmp5719 = getelementptr inbounds float, float* %tmp5718, i64 1
+ %tmp5720 = getelementptr inbounds float, float* %tmp5719, i64 1
+ %tmp5721 = getelementptr inbounds float, float* %tmp5720, i64 1
+ %tmp5722 = getelementptr inbounds float, float* %tmp5721, i64 1
+ %tmp5723 = getelementptr inbounds float, float* %tmp5722, i64 1
+ %tmp5724 = getelementptr inbounds float, float* %tmp5723, i64 1
+ %tmp5725 = getelementptr inbounds float, float* %tmp5724, i64 1
+ %tmp5726 = getelementptr inbounds float, float* %tmp5725, i64 1
+ %tmp5727 = getelementptr inbounds float, float* %tmp5726, i64 1
+ %tmp5728 = getelementptr inbounds float, float* %tmp5727, i64 1
+ %tmp5729 = getelementptr inbounds float, float* %tmp5728, i64 1
+ %tmp5730 = getelementptr inbounds float, float* %tmp5729, i64 1
+ %tmp5731 = getelementptr inbounds float, float* %tmp5730, i64 1
+ %tmp5732 = getelementptr inbounds float, float* %tmp5731, i64 1
+ %tmp5733 = getelementptr inbounds float, float* %tmp5732, i64 1
+ %tmp5734 = getelementptr inbounds float, float* %tmp5733, i64 1
+ %tmp5735 = getelementptr inbounds float, float* %tmp5734, i64 1
+ %tmp5736 = getelementptr inbounds float, float* %tmp5735, i64 1
+ %tmp5737 = getelementptr inbounds float, float* %tmp5736, i64 1
+ %tmp5738 = getelementptr inbounds float, float* %tmp5737, i64 1
+ %tmp5739 = getelementptr inbounds float, float* %tmp5738, i64 1
+ %tmp5740 = getelementptr inbounds float, float* %tmp5739, i64 1
+ %tmp5741 = getelementptr inbounds float, float* %tmp5740, i64 1
+ %tmp5742 = getelementptr inbounds float, float* %tmp5741, i64 1
+ %tmp5743 = getelementptr inbounds float, float* %tmp5742, i64 1
+ %tmp5744 = getelementptr inbounds float, float* %tmp5743, i64 1
+ %tmp5745 = getelementptr inbounds float, float* %tmp5744, i64 1
+ %tmp5746 = getelementptr inbounds float, float* %tmp5745, i64 1
+ %tmp5747 = getelementptr inbounds float, float* %tmp5746, i64 1
+ %tmp5748 = getelementptr inbounds float, float* %tmp5747, i64 1
+ %tmp5749 = getelementptr inbounds float, float* %tmp5748, i64 1
+ %tmp5750 = getelementptr inbounds float, float* %tmp5749, i64 1
+ %tmp5751 = getelementptr inbounds float, float* %tmp5750, i64 1
+ %tmp5752 = getelementptr inbounds float, float* %tmp5751, i64 1
+ %tmp5753 = getelementptr inbounds float, float* %tmp5752, i64 1
+ %tmp5754 = getelementptr inbounds float, float* %tmp5753, i64 1
+ %tmp5755 = getelementptr inbounds float, float* %tmp5754, i64 1
+ %tmp5756 = getelementptr inbounds float, float* %tmp5755, i64 1
+ %tmp5757 = getelementptr inbounds float, float* %tmp5756, i64 1
+ %tmp5758 = getelementptr inbounds float, float* %tmp5757, i64 1
+ %tmp5759 = getelementptr inbounds float, float* %tmp5758, i64 1
+ %tmp5760 = getelementptr inbounds float, float* %tmp5759, i64 1
+ %tmp5761 = getelementptr inbounds float, float* %tmp5760, i64 1
+ %tmp5762 = getelementptr inbounds float, float* %tmp5761, i64 1
+ %tmp5763 = getelementptr inbounds float, float* %tmp5762, i64 1
+ %tmp5764 = getelementptr inbounds float, float* %tmp5763, i64 1
+ %tmp5765 = getelementptr inbounds float, float* %tmp5764, i64 1
+ %tmp5766 = getelementptr inbounds float, float* %tmp5765, i64 1
+ %tmp5767 = getelementptr inbounds float, float* %tmp5766, i64 1
+ %tmp5768 = getelementptr inbounds float, float* %tmp5767, i64 1
+ %tmp5769 = getelementptr inbounds float, float* %tmp5768, i64 1
+ %tmp5770 = getelementptr inbounds float, float* %tmp5769, i64 1
+ %tmp5771 = getelementptr inbounds float, float* %tmp5770, i64 1
+ %tmp5772 = getelementptr inbounds float, float* %tmp5771, i64 1
+ %tmp5773 = getelementptr inbounds float, float* %tmp5772, i64 1
+ %tmp5774 = getelementptr inbounds float, float* %tmp5773, i64 1
+ %tmp5775 = getelementptr inbounds float, float* %tmp5774, i64 1
+ %tmp5776 = getelementptr inbounds float, float* %tmp5775, i64 1
+ %tmp5777 = getelementptr inbounds float, float* %tmp5776, i64 1
+ %tmp5778 = getelementptr inbounds float, float* %tmp5777, i64 1
+ %tmp5779 = getelementptr inbounds float, float* %tmp5778, i64 1
+ %tmp5780 = getelementptr inbounds float, float* %tmp5779, i64 1
+ %tmp5781 = getelementptr inbounds float, float* %tmp5780, i64 1
+ %tmp5782 = getelementptr inbounds float, float* %tmp5781, i64 1
+ %tmp5783 = getelementptr inbounds float, float* %tmp5782, i64 1
+ %tmp5784 = getelementptr inbounds float, float* %tmp5783, i64 1
+ %tmp5785 = getelementptr inbounds float, float* %tmp5784, i64 1
+ %tmp5786 = getelementptr inbounds float, float* %tmp5785, i64 1
+ %tmp5787 = getelementptr inbounds float, float* %tmp5786, i64 1
+ %tmp5788 = getelementptr inbounds float, float* %tmp5787, i64 1
+ %tmp5789 = getelementptr inbounds float, float* %tmp5788, i64 1
+ %tmp5790 = getelementptr inbounds float, float* %tmp5789, i64 1
+ %tmp5791 = getelementptr inbounds float, float* %tmp5790, i64 1
+ %tmp5792 = getelementptr inbounds float, float* %tmp5791, i64 1
+ %tmp5793 = getelementptr inbounds float, float* %tmp5792, i64 1
+ %tmp5794 = getelementptr inbounds float, float* %tmp5793, i64 1
+ %tmp5795 = getelementptr inbounds float, float* %tmp5794, i64 1
+ %tmp5796 = getelementptr inbounds float, float* %tmp5795, i64 1
+ %tmp5797 = getelementptr inbounds float, float* %tmp5796, i64 1
+ %tmp5798 = getelementptr inbounds float, float* %tmp5797, i64 1
+ %tmp5799 = getelementptr inbounds float, float* %tmp5798, i64 1
+ %tmp5800 = getelementptr inbounds float, float* %tmp5799, i64 1
+ %tmp5801 = getelementptr inbounds float, float* %tmp5800, i64 1
+ %tmp5802 = getelementptr inbounds float, float* %tmp5801, i64 1
+ %tmp5803 = getelementptr inbounds float, float* %tmp5802, i64 1
+ %tmp5804 = getelementptr inbounds float, float* %tmp5803, i64 1
+ %tmp5805 = getelementptr inbounds float, float* %tmp5804, i64 1
+ %tmp5806 = getelementptr inbounds float, float* %tmp5805, i64 1
+ %tmp5807 = getelementptr inbounds float, float* %tmp5806, i64 1
+ %tmp5808 = getelementptr inbounds float, float* %tmp5807, i64 1
+ %tmp5809 = getelementptr inbounds float, float* %tmp5808, i64 1
+ %tmp5810 = getelementptr inbounds float, float* %tmp5809, i64 1
+ %tmp5811 = getelementptr inbounds float, float* %tmp5810, i64 1
+ %tmp5812 = getelementptr inbounds float, float* %tmp5811, i64 1
+ %tmp5813 = getelementptr inbounds float, float* %tmp5812, i64 1
+ %tmp5814 = getelementptr inbounds float, float* %tmp5813, i64 1
+ %tmp5815 = getelementptr inbounds float, float* %tmp5814, i64 1
+ %tmp5816 = getelementptr inbounds float, float* %tmp5815, i64 1
+ %tmp5817 = getelementptr inbounds float, float* %tmp5816, i64 1
+ %tmp5818 = getelementptr inbounds float, float* %tmp5817, i64 1
+ %tmp5819 = getelementptr inbounds float, float* %tmp5818, i64 1
+ %tmp5820 = getelementptr inbounds float, float* %tmp5819, i64 1
+ %tmp5821 = getelementptr inbounds float, float* %tmp5820, i64 1
+ %tmp5822 = getelementptr inbounds float, float* %tmp5821, i64 1
+ %tmp5823 = getelementptr inbounds float, float* %tmp5822, i64 1
+ %tmp5824 = getelementptr inbounds float, float* %tmp5823, i64 1
+ %tmp5825 = getelementptr inbounds float, float* %tmp5824, i64 1
+ %tmp5826 = getelementptr inbounds float, float* %tmp5825, i64 1
+ %tmp5827 = getelementptr inbounds float, float* %tmp5826, i64 1
+ %tmp5828 = getelementptr inbounds float, float* %tmp5827, i64 1
+ %tmp5829 = getelementptr inbounds float, float* %tmp5828, i64 1
+ %tmp5830 = getelementptr inbounds float, float* %tmp5829, i64 1
+ %tmp5831 = getelementptr inbounds float, float* %tmp5830, i64 1
+ %tmp5832 = getelementptr inbounds float, float* %tmp5831, i64 1
+ %tmp5833 = getelementptr inbounds float, float* %tmp5832, i64 1
+ %tmp5834 = getelementptr inbounds float, float* %tmp5833, i64 1
+ %tmp5835 = getelementptr inbounds float, float* %tmp5834, i64 1
+ %tmp5836 = getelementptr inbounds float, float* %tmp5835, i64 1
+ %tmp5837 = getelementptr inbounds float, float* %tmp5836, i64 1
+ %tmp5838 = getelementptr inbounds float, float* %tmp5837, i64 1
+ %tmp5839 = getelementptr inbounds float, float* %tmp5838, i64 1
+ %tmp5840 = getelementptr inbounds float, float* %tmp5839, i64 1
+ %tmp5841 = getelementptr inbounds float, float* %tmp5840, i64 1
+ %tmp5842 = getelementptr inbounds float, float* %tmp5841, i64 1
+ %tmp5843 = getelementptr inbounds float, float* %tmp5842, i64 1
+ %tmp5844 = getelementptr inbounds float, float* %tmp5843, i64 1
+ %tmp5845 = getelementptr inbounds float, float* %tmp5844, i64 1
+ %tmp5846 = getelementptr inbounds float, float* %tmp5845, i64 1
+ %tmp5847 = getelementptr inbounds float, float* %tmp5846, i64 1
+ %tmp5848 = getelementptr inbounds float, float* %tmp5847, i64 1
+ %tmp5849 = getelementptr inbounds float, float* %tmp5848, i64 1
+ %tmp5850 = getelementptr inbounds float, float* %tmp5849, i64 1
+ %tmp5851 = getelementptr inbounds float, float* %tmp5850, i64 1
+ %tmp5852 = getelementptr inbounds float, float* %tmp5851, i64 1
+ %tmp5853 = getelementptr inbounds float, float* %tmp5852, i64 1
+ %tmp5854 = getelementptr inbounds float, float* %tmp5853, i64 1
+ %tmp5855 = getelementptr inbounds float, float* %tmp5854, i64 1
+ %tmp5856 = getelementptr inbounds float, float* %tmp5855, i64 1
+ %tmp5857 = getelementptr inbounds float, float* %tmp5856, i64 1
+ %tmp5858 = getelementptr inbounds float, float* %tmp5857, i64 1
+ %tmp5859 = getelementptr inbounds float, float* %tmp5858, i64 1
+ %tmp5860 = getelementptr inbounds float, float* %tmp5859, i64 1
+ %tmp5861 = getelementptr inbounds float, float* %tmp5860, i64 1
+ %tmp5862 = getelementptr inbounds float, float* %tmp5861, i64 1
+ %tmp5863 = getelementptr inbounds float, float* %tmp5862, i64 1
+ %tmp5864 = getelementptr inbounds float, float* %tmp5863, i64 1
+ %tmp5865 = getelementptr inbounds float, float* %tmp5864, i64 1
+ %tmp5866 = getelementptr inbounds float, float* %tmp5865, i64 1
+ %tmp5867 = getelementptr inbounds float, float* %tmp5866, i64 1
+ %tmp5868 = getelementptr inbounds float, float* %tmp5867, i64 1
+ %tmp5869 = getelementptr inbounds float, float* %tmp5868, i64 1
+ %tmp5870 = getelementptr inbounds float, float* %tmp5869, i64 1
+ %tmp5871 = getelementptr inbounds float, float* %tmp5870, i64 1
+ %tmp5872 = getelementptr inbounds float, float* %tmp5871, i64 1
+ %tmp5873 = getelementptr inbounds float, float* %tmp5872, i64 1
+ %tmp5874 = getelementptr inbounds float, float* %tmp5873, i64 1
+ %tmp5875 = getelementptr inbounds float, float* %tmp5874, i64 1
+ %tmp5876 = getelementptr inbounds float, float* %tmp5875, i64 1
+ %tmp5877 = getelementptr inbounds float, float* %tmp5876, i64 1
+ %tmp5878 = getelementptr inbounds float, float* %tmp5877, i64 1
+ %tmp5879 = getelementptr inbounds float, float* %tmp5878, i64 1
+ %tmp5880 = getelementptr inbounds float, float* %tmp5879, i64 1
+ %tmp5881 = getelementptr inbounds float, float* %tmp5880, i64 1
+ %tmp5882 = getelementptr inbounds float, float* %tmp5881, i64 1
+ %tmp5883 = getelementptr inbounds float, float* %tmp5882, i64 1
+ %tmp5884 = getelementptr inbounds float, float* %tmp5883, i64 1
+ %tmp5885 = getelementptr inbounds float, float* %tmp5884, i64 1
+ %tmp5886 = getelementptr inbounds float, float* %tmp5885, i64 1
+ %tmp5887 = getelementptr inbounds float, float* %tmp5886, i64 1
+ %tmp5888 = getelementptr inbounds float, float* %tmp5887, i64 1
+ %tmp5889 = getelementptr inbounds float, float* %tmp5888, i64 1
+ %tmp5890 = getelementptr inbounds float, float* %tmp5889, i64 1
+ %tmp5891 = getelementptr inbounds float, float* %tmp5890, i64 1
+ %tmp5892 = getelementptr inbounds float, float* %tmp5891, i64 1
+ %tmp5893 = getelementptr inbounds float, float* %tmp5892, i64 1
+ %tmp5894 = getelementptr inbounds float, float* %tmp5893, i64 1
+ %tmp5895 = getelementptr inbounds float, float* %tmp5894, i64 1
+ %tmp5896 = getelementptr inbounds float, float* %tmp5895, i64 1
+ %tmp5897 = getelementptr inbounds float, float* %tmp5896, i64 1
+ %tmp5898 = getelementptr inbounds float, float* %tmp5897, i64 1
+ %tmp5899 = getelementptr inbounds float, float* %tmp5898, i64 1
+ %tmp5900 = getelementptr inbounds float, float* %tmp5899, i64 1
+ %tmp5901 = getelementptr inbounds float, float* %tmp5900, i64 1
+ %tmp5902 = getelementptr inbounds float, float* %tmp5901, i64 1
+ %tmp5903 = getelementptr inbounds float, float* %tmp5902, i64 1
+ %tmp5904 = getelementptr inbounds float, float* %tmp5903, i64 1
+ %tmp5905 = getelementptr inbounds float, float* %tmp5904, i64 1
+ %tmp5906 = getelementptr inbounds float, float* %tmp5905, i64 1
+ %tmp5907 = getelementptr inbounds float, float* %tmp5906, i64 1
+ %tmp5908 = getelementptr inbounds float, float* %tmp5907, i64 1
+ %tmp5909 = getelementptr inbounds float, float* %tmp5908, i64 1
+ %tmp5910 = getelementptr inbounds float, float* %tmp5909, i64 1
+ %tmp5911 = getelementptr inbounds float, float* %tmp5910, i64 1
+ %tmp5912 = getelementptr inbounds float, float* %tmp5911, i64 1
+ %tmp5913 = getelementptr inbounds float, float* %tmp5912, i64 1
+ %tmp5914 = getelementptr inbounds float, float* %tmp5913, i64 1
+ %tmp5915 = getelementptr inbounds float, float* %tmp5914, i64 1
+ %tmp5916 = getelementptr inbounds float, float* %tmp5915, i64 1
+ %tmp5917 = getelementptr inbounds float, float* %tmp5916, i64 1
+ %tmp5918 = getelementptr inbounds float, float* %tmp5917, i64 1
+ %tmp5919 = getelementptr inbounds float, float* %tmp5918, i64 1
+ %tmp5920 = getelementptr inbounds float, float* %tmp5919, i64 1
+ %tmp5921 = getelementptr inbounds float, float* %tmp5920, i64 1
+ %tmp5922 = getelementptr inbounds float, float* %tmp5921, i64 1
+ %tmp5923 = getelementptr inbounds float, float* %tmp5922, i64 1
+ %tmp5924 = getelementptr inbounds float, float* %tmp5923, i64 1
+ %tmp5925 = getelementptr inbounds float, float* %tmp5924, i64 1
+ %tmp5926 = getelementptr inbounds float, float* %tmp5925, i64 1
+ %tmp5927 = getelementptr inbounds float, float* %tmp5926, i64 1
+ %tmp5928 = getelementptr inbounds float, float* %tmp5927, i64 1
+ %tmp5929 = getelementptr inbounds float, float* %tmp5928, i64 1
+ %tmp5930 = getelementptr inbounds float, float* %tmp5929, i64 1
+ %tmp5931 = getelementptr inbounds float, float* %tmp5930, i64 1
+ %tmp5932 = getelementptr inbounds float, float* %tmp5931, i64 1
+ %tmp5933 = getelementptr inbounds float, float* %tmp5932, i64 1
+ %tmp5934 = getelementptr inbounds float, float* %tmp5933, i64 1
+ %tmp5935 = getelementptr inbounds float, float* %tmp5934, i64 1
+ %tmp5936 = getelementptr inbounds float, float* %tmp5935, i64 1
+ %tmp5937 = getelementptr inbounds float, float* %tmp5936, i64 1
+ %tmp5938 = getelementptr inbounds float, float* %tmp5937, i64 1
+ %tmp5939 = getelementptr inbounds float, float* %tmp5938, i64 1
+ %tmp5940 = getelementptr inbounds float, float* %tmp5939, i64 1
+ %tmp5941 = getelementptr inbounds float, float* %tmp5940, i64 1
+ %tmp5942 = getelementptr inbounds float, float* %tmp5941, i64 1
+ %tmp5943 = getelementptr inbounds float, float* %tmp5942, i64 1
+ %tmp5944 = getelementptr inbounds float, float* %tmp5943, i64 1
+ %tmp5945 = getelementptr inbounds float, float* %tmp5944, i64 1
+ %tmp5946 = getelementptr inbounds float, float* %tmp5945, i64 1
+ %tmp5947 = getelementptr inbounds float, float* %tmp5946, i64 1
+ %tmp5948 = getelementptr inbounds float, float* %tmp5947, i64 1
+ %tmp5949 = getelementptr inbounds float, float* %tmp5948, i64 1
+ %tmp5950 = getelementptr inbounds float, float* %tmp5949, i64 1
+ %tmp5951 = getelementptr inbounds float, float* %tmp5950, i64 1
+ %tmp5952 = getelementptr inbounds float, float* %tmp5951, i64 1
+ %tmp5953 = getelementptr inbounds float, float* %tmp5952, i64 1
+ %tmp5954 = getelementptr inbounds float, float* %tmp5953, i64 1
+ %tmp5955 = getelementptr inbounds float, float* %tmp5954, i64 1
+ %tmp5956 = getelementptr inbounds float, float* %tmp5955, i64 1
+ %tmp5957 = getelementptr inbounds float, float* %tmp5956, i64 1
+ %tmp5958 = getelementptr inbounds float, float* %tmp5957, i64 1
+ %tmp5959 = getelementptr inbounds float, float* %tmp5958, i64 1
+ %tmp5960 = getelementptr inbounds float, float* %tmp5959, i64 1
+ %tmp5961 = getelementptr inbounds float, float* %tmp5960, i64 1
+ %tmp5962 = getelementptr inbounds float, float* %tmp5961, i64 1
+ %tmp5963 = getelementptr inbounds float, float* %tmp5962, i64 1
+ %tmp5964 = getelementptr inbounds float, float* %tmp5963, i64 1
+ %tmp5965 = getelementptr inbounds float, float* %tmp5964, i64 1
+ %tmp5966 = getelementptr inbounds float, float* %tmp5965, i64 1
+ %tmp5967 = getelementptr inbounds float, float* %tmp5966, i64 1
+ %tmp5968 = getelementptr inbounds float, float* %tmp5967, i64 1
+ %tmp5969 = getelementptr inbounds float, float* %tmp5968, i64 1
+ %tmp5970 = getelementptr inbounds float, float* %tmp5969, i64 1
+ %tmp5971 = getelementptr inbounds float, float* %tmp5970, i64 1
+ %tmp5972 = getelementptr inbounds float, float* %tmp5971, i64 1
+ %tmp5973 = getelementptr inbounds float, float* %tmp5972, i64 1
+ %tmp5974 = getelementptr inbounds float, float* %tmp5973, i64 1
+ %tmp5975 = getelementptr inbounds float, float* %tmp5974, i64 1
+ %tmp5976 = getelementptr inbounds float, float* %tmp5975, i64 1
+ %tmp5977 = getelementptr inbounds float, float* %tmp5976, i64 1
+ %tmp5978 = getelementptr inbounds float, float* %tmp5977, i64 1
+ %tmp5979 = getelementptr inbounds float, float* %tmp5978, i64 1
+ %tmp5980 = getelementptr inbounds float, float* %tmp5979, i64 1
+ %tmp5981 = getelementptr inbounds float, float* %tmp5980, i64 1
+ %tmp5982 = getelementptr inbounds float, float* %tmp5981, i64 1
+ %tmp5983 = getelementptr inbounds float, float* %tmp5982, i64 1
+ %tmp5984 = getelementptr inbounds float, float* %tmp5983, i64 1
+ %tmp5985 = getelementptr inbounds float, float* %tmp5984, i64 1
+ %tmp5986 = getelementptr inbounds float, float* %tmp5985, i64 1
+ %tmp5987 = getelementptr inbounds float, float* %tmp5986, i64 1
+ %tmp5988 = getelementptr inbounds float, float* %tmp5987, i64 1
+ %tmp5989 = getelementptr inbounds float, float* %tmp5988, i64 1
+ %tmp5990 = getelementptr inbounds float, float* %tmp5989, i64 1
+ %tmp5991 = getelementptr inbounds float, float* %tmp5990, i64 1
+ %tmp5992 = getelementptr inbounds float, float* %tmp5991, i64 1
+ %tmp5993 = getelementptr inbounds float, float* %tmp5992, i64 1
+ %tmp5994 = getelementptr inbounds float, float* %tmp5993, i64 1
+ %tmp5995 = getelementptr inbounds float, float* %tmp5994, i64 1
+ %tmp5996 = getelementptr inbounds float, float* %tmp5995, i64 1
+ %tmp5997 = getelementptr inbounds float, float* %tmp5996, i64 1
+ %tmp5998 = getelementptr inbounds float, float* %tmp5997, i64 1
+ %tmp5999 = getelementptr inbounds float, float* %tmp5998, i64 1
+ %tmp6000 = getelementptr inbounds float, float* %tmp5999, i64 1
+ %tmp6001 = getelementptr inbounds float, float* %tmp6000, i64 1
+ %tmp6002 = getelementptr inbounds float, float* %tmp6001, i64 1
+ %tmp6003 = getelementptr inbounds float, float* %tmp6002, i64 1
+ %tmp6004 = getelementptr inbounds float, float* %tmp6003, i64 1
+ %tmp6005 = getelementptr inbounds float, float* %tmp6004, i64 1
+ %tmp6006 = getelementptr inbounds float, float* %tmp6005, i64 1
+ %tmp6007 = getelementptr inbounds float, float* %tmp6006, i64 1
+ %tmp6008 = getelementptr inbounds float, float* %tmp6007, i64 1
+ %tmp6009 = getelementptr inbounds float, float* %tmp6008, i64 1
+ %tmp6010 = getelementptr inbounds float, float* %tmp6009, i64 1
+ %tmp6011 = getelementptr inbounds float, float* %tmp6010, i64 1
+ %tmp6012 = getelementptr inbounds float, float* %tmp6011, i64 1
+ %tmp6013 = getelementptr inbounds float, float* %tmp6012, i64 1
+ %tmp6014 = getelementptr inbounds float, float* %tmp6013, i64 1
+ %tmp6015 = getelementptr inbounds float, float* %tmp6014, i64 1
+ %tmp6016 = getelementptr inbounds float, float* %tmp6015, i64 1
+ %tmp6017 = getelementptr inbounds float, float* %tmp6016, i64 1
+ %tmp6018 = getelementptr inbounds float, float* %tmp6017, i64 1
+ %tmp6019 = getelementptr inbounds float, float* %tmp6018, i64 1
+ %tmp6020 = getelementptr inbounds float, float* %tmp6019, i64 1
+ %tmp6021 = getelementptr inbounds float, float* %tmp6020, i64 1
+ %tmp6022 = getelementptr inbounds float, float* %tmp6021, i64 1
+ %tmp6023 = getelementptr inbounds float, float* %tmp6022, i64 1
+ %tmp6024 = getelementptr inbounds float, float* %tmp6023, i64 1
+ %tmp6025 = getelementptr inbounds float, float* %tmp6024, i64 1
+ %tmp6026 = getelementptr inbounds float, float* %tmp6025, i64 1
+ %tmp6027 = getelementptr inbounds float, float* %tmp6026, i64 1
+ %tmp6028 = getelementptr inbounds float, float* %tmp6027, i64 1
+ %tmp6029 = getelementptr inbounds float, float* %tmp6028, i64 1
+ %tmp6030 = getelementptr inbounds float, float* %tmp6029, i64 1
+ %tmp6031 = getelementptr inbounds float, float* %tmp6030, i64 1
+ %tmp6032 = getelementptr inbounds float, float* %tmp6031, i64 1
+ %tmp6033 = getelementptr inbounds float, float* %tmp6032, i64 1
+ %tmp6034 = getelementptr inbounds float, float* %tmp6033, i64 1
+ %tmp6035 = getelementptr inbounds float, float* %tmp6034, i64 1
+ %tmp6036 = getelementptr inbounds float, float* %tmp6035, i64 1
+ %tmp6037 = getelementptr inbounds float, float* %tmp6036, i64 1
+ %tmp6038 = getelementptr inbounds float, float* %tmp6037, i64 1
+ %tmp6039 = getelementptr inbounds float, float* %tmp6038, i64 1
+ %tmp6040 = getelementptr inbounds float, float* %tmp6039, i64 1
+ %tmp6041 = getelementptr inbounds float, float* %tmp6040, i64 1
+ %tmp6042 = getelementptr inbounds float, float* %tmp6041, i64 1
+ %tmp6043 = getelementptr inbounds float, float* %tmp6042, i64 1
+ %tmp6044 = getelementptr inbounds float, float* %tmp6043, i64 1
+ %tmp6045 = getelementptr inbounds float, float* %tmp6044, i64 1
+ %tmp6046 = getelementptr inbounds float, float* %tmp6045, i64 1
+ %tmp6047 = getelementptr inbounds float, float* %tmp6046, i64 1
+ %tmp6048 = getelementptr inbounds float, float* %tmp6047, i64 1
+ %tmp6049 = getelementptr inbounds float, float* %tmp6048, i64 1
+ %tmp6050 = getelementptr inbounds float, float* %tmp6049, i64 1
+ %tmp6051 = getelementptr inbounds float, float* %tmp6050, i64 1
+ %tmp6052 = getelementptr inbounds float, float* %tmp6051, i64 1
+ %tmp6053 = getelementptr inbounds float, float* %tmp6052, i64 1
+ %tmp6054 = getelementptr inbounds float, float* %tmp6053, i64 1
+ %tmp6055 = getelementptr inbounds float, float* %tmp6054, i64 1
+ %tmp6056 = getelementptr inbounds float, float* %tmp6055, i64 1
+ %tmp6057 = getelementptr inbounds float, float* %tmp6056, i64 1
+ %tmp6058 = getelementptr inbounds float, float* %tmp6057, i64 1
+ %tmp6059 = getelementptr inbounds float, float* %tmp6058, i64 1
+ %tmp6060 = getelementptr inbounds float, float* %tmp6059, i64 1
+ %tmp6061 = getelementptr inbounds float, float* %tmp6060, i64 1
+ %tmp6062 = getelementptr inbounds float, float* %tmp6061, i64 1
+ %tmp6063 = getelementptr inbounds float, float* %tmp6062, i64 1
+ %tmp6064 = getelementptr inbounds float, float* %tmp6063, i64 1
+ %tmp6065 = getelementptr inbounds float, float* %tmp6064, i64 1
+ %tmp6066 = getelementptr inbounds float, float* %tmp6065, i64 1
+ %tmp6067 = getelementptr inbounds float, float* %tmp6066, i64 1
+ %tmp6068 = getelementptr inbounds float, float* %tmp6067, i64 1
+ %tmp6069 = getelementptr inbounds float, float* %tmp6068, i64 1
+ %tmp6070 = getelementptr inbounds float, float* %tmp6069, i64 1
+ %tmp6071 = getelementptr inbounds float, float* %tmp6070, i64 1
+ %tmp6072 = getelementptr inbounds float, float* %tmp6071, i64 1
+ %tmp6073 = getelementptr inbounds float, float* %tmp6072, i64 1
+ %tmp6074 = getelementptr inbounds float, float* %tmp6073, i64 1
+ %tmp6075 = getelementptr inbounds float, float* %tmp6074, i64 1
+ %tmp6076 = getelementptr inbounds float, float* %tmp6075, i64 1
+ %tmp6077 = getelementptr inbounds float, float* %tmp6076, i64 1
+ %tmp6078 = getelementptr inbounds float, float* %tmp6077, i64 1
+ %tmp6079 = getelementptr inbounds float, float* %tmp6078, i64 1
+ %tmp6080 = getelementptr inbounds float, float* %tmp6079, i64 1
+ %tmp6081 = getelementptr inbounds float, float* %tmp6080, i64 1
+ %tmp6082 = getelementptr inbounds float, float* %tmp6081, i64 1
+ %tmp6083 = getelementptr inbounds float, float* %tmp6082, i64 1
+ %tmp6084 = getelementptr inbounds float, float* %tmp6083, i64 1
+ %tmp6085 = getelementptr inbounds float, float* %tmp6084, i64 1
+ %tmp6086 = getelementptr inbounds float, float* %tmp6085, i64 1
+ %tmp6087 = getelementptr inbounds float, float* %tmp6086, i64 1
+ %tmp6088 = getelementptr inbounds float, float* %tmp6087, i64 1
+ %tmp6089 = getelementptr inbounds float, float* %tmp6088, i64 1
+ %tmp6090 = getelementptr inbounds float, float* %tmp6089, i64 1
+ %tmp6091 = getelementptr inbounds float, float* %tmp6090, i64 1
+ %tmp6092 = getelementptr inbounds float, float* %tmp6091, i64 1
+ %tmp6093 = getelementptr inbounds float, float* %tmp6092, i64 1
+ %tmp6094 = getelementptr inbounds float, float* %tmp6093, i64 1
+ %tmp6095 = getelementptr inbounds float, float* %tmp6094, i64 1
+ %tmp6096 = getelementptr inbounds float, float* %tmp6095, i64 1
+ %tmp6097 = getelementptr inbounds float, float* %tmp6096, i64 1
+ %tmp6098 = getelementptr inbounds float, float* %tmp6097, i64 1
+ %tmp6099 = getelementptr inbounds float, float* %tmp6098, i64 1
+ %tmp6100 = getelementptr inbounds float, float* %tmp6099, i64 1
+ %tmp6101 = getelementptr inbounds float, float* %tmp6100, i64 1
+ %tmp6102 = getelementptr inbounds float, float* %tmp6101, i64 1
+ %tmp6103 = getelementptr inbounds float, float* %tmp6102, i64 1
+ %tmp6104 = getelementptr inbounds float, float* %tmp6103, i64 1
+ %tmp6105 = getelementptr inbounds float, float* %tmp6104, i64 1
+ %tmp6106 = getelementptr inbounds float, float* %tmp6105, i64 1
+ %tmp6107 = getelementptr inbounds float, float* %tmp6106, i64 1
+ %tmp6108 = getelementptr inbounds float, float* %tmp6107, i64 1
+ %tmp6109 = getelementptr inbounds float, float* %tmp6108, i64 1
+ %tmp6110 = getelementptr inbounds float, float* %tmp6109, i64 1
+ %tmp6111 = getelementptr inbounds float, float* %tmp6110, i64 1
+ %tmp6112 = getelementptr inbounds float, float* %tmp6111, i64 1
+ %tmp6113 = getelementptr inbounds float, float* %tmp6112, i64 1
+ %tmp6114 = getelementptr inbounds float, float* %tmp6113, i64 1
+ %tmp6115 = getelementptr inbounds float, float* %tmp6114, i64 1
+ %tmp6116 = getelementptr inbounds float, float* %tmp6115, i64 1
+ %tmp6117 = getelementptr inbounds float, float* %tmp6116, i64 1
+ %tmp6118 = getelementptr inbounds float, float* %tmp6117, i64 1
+ %tmp6119 = getelementptr inbounds float, float* %tmp6118, i64 1
+ %tmp6120 = getelementptr inbounds float, float* %tmp6119, i64 1
+ %tmp6121 = getelementptr inbounds float, float* %tmp6120, i64 1
+ %tmp6122 = getelementptr inbounds float, float* %tmp6121, i64 1
+ %tmp6123 = getelementptr inbounds float, float* %tmp6122, i64 1
+ %tmp6124 = getelementptr inbounds float, float* %tmp6123, i64 1
+ %tmp6125 = getelementptr inbounds float, float* %tmp6124, i64 1
+ %tmp6126 = getelementptr inbounds float, float* %tmp6125, i64 1
+ %tmp6127 = getelementptr inbounds float, float* %tmp6126, i64 1
+ %tmp6128 = getelementptr inbounds float, float* %tmp6127, i64 1
+ %tmp6129 = getelementptr inbounds float, float* %tmp6128, i64 1
+ %tmp6130 = getelementptr inbounds float, float* %tmp6129, i64 1
+ %tmp6131 = getelementptr inbounds float, float* %tmp6130, i64 1
+ %tmp6132 = getelementptr inbounds float, float* %tmp6131, i64 1
+ %tmp6133 = getelementptr inbounds float, float* %tmp6132, i64 1
+ %tmp6134 = getelementptr inbounds float, float* %tmp6133, i64 1
+ %tmp6135 = getelementptr inbounds float, float* %tmp6134, i64 1
+ %tmp6136 = getelementptr inbounds float, float* %tmp6135, i64 1
+ %tmp6137 = getelementptr inbounds float, float* %tmp6136, i64 1
+ %tmp6138 = getelementptr inbounds float, float* %tmp6137, i64 1
+ %tmp6139 = getelementptr inbounds float, float* %tmp6138, i64 1
+ %tmp6140 = getelementptr inbounds float, float* %tmp6139, i64 1
+ %tmp6141 = getelementptr inbounds float, float* %tmp6140, i64 1
+ %tmp6142 = getelementptr inbounds float, float* %tmp6141, i64 1
+ %tmp6143 = getelementptr inbounds float, float* %tmp6142, i64 1
+ %tmp6144 = getelementptr inbounds float, float* %tmp6143, i64 1
+ %tmp6145 = getelementptr inbounds float, float* %tmp6144, i64 1
+ %tmp6146 = getelementptr inbounds float, float* %tmp6145, i64 1
+ %tmp6147 = getelementptr inbounds float, float* %tmp6146, i64 1
+ %tmp6148 = getelementptr inbounds float, float* %tmp6147, i64 1
+ %tmp6149 = getelementptr inbounds float, float* %tmp6148, i64 1
+ %tmp6150 = getelementptr inbounds float, float* %tmp6149, i64 1
+ %tmp6151 = getelementptr inbounds float, float* %tmp6150, i64 1
+ %tmp6152 = getelementptr inbounds float, float* %tmp6151, i64 1
+ %tmp6153 = getelementptr inbounds float, float* %tmp6152, i64 1
+ %tmp6154 = getelementptr inbounds float, float* %tmp6153, i64 1
+ %tmp6155 = getelementptr inbounds float, float* %tmp6154, i64 1
+ %tmp6156 = getelementptr inbounds float, float* %tmp6155, i64 1
+ %tmp6157 = getelementptr inbounds float, float* %tmp6156, i64 1
+ %tmp6158 = getelementptr inbounds float, float* %tmp6157, i64 1
+ %tmp6159 = getelementptr inbounds float, float* %tmp6158, i64 1
+ %tmp6160 = getelementptr inbounds float, float* %tmp6159, i64 1
+ %tmp6161 = getelementptr inbounds float, float* %tmp6160, i64 1
+ %tmp6162 = getelementptr inbounds float, float* %tmp6161, i64 1
+ %tmp6163 = getelementptr inbounds float, float* %tmp6162, i64 1
+ %tmp6164 = getelementptr inbounds float, float* %tmp6163, i64 1
+ %tmp6165 = getelementptr inbounds float, float* %tmp6164, i64 1
+ %tmp6166 = getelementptr inbounds float, float* %tmp6165, i64 1
+ %tmp6167 = getelementptr inbounds float, float* %tmp6166, i64 1
+ %tmp6168 = getelementptr inbounds float, float* %tmp6167, i64 1
+ %tmp6169 = getelementptr inbounds float, float* %tmp6168, i64 1
+ %tmp6170 = getelementptr inbounds float, float* %tmp6169, i64 1
+ %tmp6171 = getelementptr inbounds float, float* %tmp6170, i64 1
+ %tmp6172 = getelementptr inbounds float, float* %tmp6171, i64 1
+ %tmp6173 = getelementptr inbounds float, float* %tmp6172, i64 1
+ %tmp6174 = getelementptr inbounds float, float* %tmp6173, i64 1
+ %tmp6175 = getelementptr inbounds float, float* %tmp6174, i64 1
+ %tmp6176 = getelementptr inbounds float, float* %tmp6175, i64 1
+ %tmp6177 = getelementptr inbounds float, float* %tmp6176, i64 1
+ %tmp6178 = getelementptr inbounds float, float* %tmp6177, i64 1
+ %tmp6179 = getelementptr inbounds float, float* %tmp6178, i64 1
+ %tmp6180 = getelementptr inbounds float, float* %tmp6179, i64 1
+ %tmp6181 = getelementptr inbounds float, float* %tmp6180, i64 1
+ %tmp6182 = getelementptr inbounds float, float* %tmp6181, i64 1
+ %tmp6183 = getelementptr inbounds float, float* %tmp6182, i64 1
+ %tmp6184 = getelementptr inbounds float, float* %tmp6183, i64 1
+ %tmp6185 = getelementptr inbounds float, float* %tmp6184, i64 1
+ %tmp6186 = getelementptr inbounds float, float* %tmp6185, i64 1
+ %tmp6187 = getelementptr inbounds float, float* %tmp6186, i64 1
+ %tmp6188 = getelementptr inbounds float, float* %tmp6187, i64 1
+ %tmp6189 = getelementptr inbounds float, float* %tmp6188, i64 1
+ %tmp6190 = getelementptr inbounds float, float* %tmp6189, i64 1
+ %tmp6191 = getelementptr inbounds float, float* %tmp6190, i64 1
+ %tmp6192 = getelementptr inbounds float, float* %tmp6191, i64 1
+ %tmp6193 = getelementptr inbounds float, float* %tmp6192, i64 1
+ %tmp6194 = getelementptr inbounds float, float* %tmp6193, i64 1
+ %tmp6195 = getelementptr inbounds float, float* %tmp6194, i64 1
+ %tmp6196 = getelementptr inbounds float, float* %tmp6195, i64 1
+ %tmp6197 = getelementptr inbounds float, float* %tmp6196, i64 1
+ %tmp6198 = getelementptr inbounds float, float* %tmp6197, i64 1
+ %tmp6199 = getelementptr inbounds float, float* %tmp6198, i64 1
+ %tmp6200 = getelementptr inbounds float, float* %tmp6199, i64 1
+ %tmp6201 = getelementptr inbounds float, float* %tmp6200, i64 1
+ %tmp6202 = getelementptr inbounds float, float* %tmp6201, i64 1
+ %tmp6203 = getelementptr inbounds float, float* %tmp6202, i64 1
+ %tmp6204 = getelementptr inbounds float, float* %tmp6203, i64 1
+ %tmp6205 = getelementptr inbounds float, float* %tmp6204, i64 1
+ %tmp6206 = getelementptr inbounds float, float* %tmp6205, i64 1
+ %tmp6207 = getelementptr inbounds float, float* %tmp6206, i64 1
+ %tmp6208 = getelementptr inbounds float, float* %tmp6207, i64 1
+ %tmp6209 = getelementptr inbounds float, float* %tmp6208, i64 1
+ %tmp6210 = getelementptr inbounds float, float* %tmp6209, i64 1
+ %tmp6211 = getelementptr inbounds float, float* %tmp6210, i64 1
+ %tmp6212 = getelementptr inbounds float, float* %tmp6211, i64 1
+ %tmp6213 = getelementptr inbounds float, float* %tmp6212, i64 1
+ %tmp6214 = getelementptr inbounds float, float* %tmp6213, i64 1
+ %tmp6215 = getelementptr inbounds float, float* %tmp6214, i64 1
+ %tmp6216 = getelementptr inbounds float, float* %tmp6215, i64 1
+ %tmp6217 = getelementptr inbounds float, float* %tmp6216, i64 1
+ %tmp6218 = getelementptr inbounds float, float* %tmp6217, i64 1
+ %tmp6219 = getelementptr inbounds float, float* %tmp6218, i64 1
+ %tmp6220 = getelementptr inbounds float, float* %tmp6219, i64 1
+ %tmp6221 = getelementptr inbounds float, float* %tmp6220, i64 1
+ %tmp6222 = getelementptr inbounds float, float* %tmp6221, i64 1
+ %tmp6223 = getelementptr inbounds float, float* %tmp6222, i64 1
+ %tmp6224 = getelementptr inbounds float, float* %tmp6223, i64 1
+ %tmp6225 = getelementptr inbounds float, float* %tmp6224, i64 1
+ %tmp6226 = getelementptr inbounds float, float* %tmp6225, i64 1
+ %tmp6227 = getelementptr inbounds float, float* %tmp6226, i64 1
+ %tmp6228 = getelementptr inbounds float, float* %tmp6227, i64 1
+ %tmp6229 = getelementptr inbounds float, float* %tmp6228, i64 1
+ %tmp6230 = getelementptr inbounds float, float* %tmp6229, i64 1
+ %tmp6231 = getelementptr inbounds float, float* %tmp6230, i64 1
+ %tmp6232 = getelementptr inbounds float, float* %tmp6231, i64 1
+ %tmp6233 = getelementptr inbounds float, float* %tmp6232, i64 1
+ %tmp6234 = getelementptr inbounds float, float* %tmp6233, i64 1
+ %tmp6235 = getelementptr inbounds float, float* %tmp6234, i64 1
+ %tmp6236 = getelementptr inbounds float, float* %tmp6235, i64 1
+ %tmp6237 = getelementptr inbounds float, float* %tmp6236, i64 1
+ %tmp6238 = getelementptr inbounds float, float* %tmp6237, i64 1
+ %tmp6239 = getelementptr inbounds float, float* %tmp6238, i64 1
+ %tmp6240 = getelementptr inbounds float, float* %tmp6239, i64 1
+ %tmp6241 = getelementptr inbounds float, float* %tmp6240, i64 1
+ %tmp6242 = getelementptr inbounds float, float* %tmp6241, i64 1
+ %tmp6243 = getelementptr inbounds float, float* %tmp6242, i64 1
+ %tmp6244 = getelementptr inbounds float, float* %tmp6243, i64 1
+ %tmp6245 = getelementptr inbounds float, float* %tmp6244, i64 1
+ %tmp6246 = getelementptr inbounds float, float* %tmp6245, i64 1
+ %tmp6247 = getelementptr inbounds float, float* %tmp6246, i64 1
+ %tmp6248 = getelementptr inbounds float, float* %tmp6247, i64 1
+ %tmp6249 = getelementptr inbounds float, float* %tmp6248, i64 1
+ %tmp6250 = getelementptr inbounds float, float* %tmp6249, i64 1
+ %tmp6251 = getelementptr inbounds float, float* %tmp6250, i64 1
+ %tmp6252 = getelementptr inbounds float, float* %tmp6251, i64 1
+ %tmp6253 = getelementptr inbounds float, float* %tmp6252, i64 1
+ %tmp6254 = getelementptr inbounds float, float* %tmp6253, i64 1
+ %tmp6255 = getelementptr inbounds float, float* %tmp6254, i64 1
+ %tmp6256 = getelementptr inbounds float, float* %tmp6255, i64 1
+ %tmp6257 = getelementptr inbounds float, float* %tmp6256, i64 1
+ %tmp6258 = getelementptr inbounds float, float* %tmp6257, i64 1
+ %tmp6259 = getelementptr inbounds float, float* %tmp6258, i64 1
+ %tmp6260 = getelementptr inbounds float, float* %tmp6259, i64 1
+ %tmp6261 = getelementptr inbounds float, float* %tmp6260, i64 1
+ %tmp6262 = getelementptr inbounds float, float* %tmp6261, i64 1
+ %tmp6263 = getelementptr inbounds float, float* %tmp6262, i64 1
+ %tmp6264 = getelementptr inbounds float, float* %tmp6263, i64 1
+ %tmp6265 = getelementptr inbounds float, float* %tmp6264, i64 1
+ %tmp6266 = getelementptr inbounds float, float* %tmp6265, i64 1
+ %tmp6267 = getelementptr inbounds float, float* %tmp6266, i64 1
+ %tmp6268 = getelementptr inbounds float, float* %tmp6267, i64 1
+ %tmp6269 = getelementptr inbounds float, float* %tmp6268, i64 1
+ %tmp6270 = getelementptr inbounds float, float* %tmp6269, i64 1
+ %tmp6271 = getelementptr inbounds float, float* %tmp6270, i64 1
+ %tmp6272 = getelementptr inbounds float, float* %tmp6271, i64 1
+ %tmp6273 = getelementptr inbounds float, float* %tmp6272, i64 1
+ %tmp6274 = getelementptr inbounds float, float* %tmp6273, i64 1
+ %tmp6275 = getelementptr inbounds float, float* %tmp6274, i64 1
+ %tmp6276 = getelementptr inbounds float, float* %tmp6275, i64 1
+ %tmp6277 = getelementptr inbounds float, float* %tmp6276, i64 1
+ %tmp6278 = getelementptr inbounds float, float* %tmp6277, i64 1
+ %tmp6279 = getelementptr inbounds float, float* %tmp6278, i64 1
+ %tmp6280 = getelementptr inbounds float, float* %tmp6279, i64 1
+ %tmp6281 = getelementptr inbounds float, float* %tmp6280, i64 1
+ %tmp6282 = getelementptr inbounds float, float* %tmp6281, i64 1
+ %tmp6283 = getelementptr inbounds float, float* %tmp6282, i64 1
+ %tmp6284 = getelementptr inbounds float, float* %tmp6283, i64 1
+ %tmp6285 = getelementptr inbounds float, float* %tmp6284, i64 1
+ %tmp6286 = getelementptr inbounds float, float* %tmp6285, i64 1
+ %tmp6287 = getelementptr inbounds float, float* %tmp6286, i64 1
+ %tmp6288 = getelementptr inbounds float, float* %tmp6287, i64 1
+ %tmp6289 = getelementptr inbounds float, float* %tmp6288, i64 1
+ %tmp6290 = getelementptr inbounds float, float* %tmp6289, i64 1
+ %tmp6291 = getelementptr inbounds float, float* %tmp6290, i64 1
+ %tmp6292 = getelementptr inbounds float, float* %tmp6291, i64 1
+ %tmp6293 = getelementptr inbounds float, float* %tmp6292, i64 1
+ %tmp6294 = getelementptr inbounds float, float* %tmp6293, i64 1
+ %tmp6295 = getelementptr inbounds float, float* %tmp6294, i64 1
+ %tmp6296 = getelementptr inbounds float, float* %tmp6295, i64 1
+ %tmp6297 = getelementptr inbounds float, float* %tmp6296, i64 1
+ %tmp6298 = getelementptr inbounds float, float* %tmp6297, i64 1
+ %tmp6299 = getelementptr inbounds float, float* %tmp6298, i64 1
+ %tmp6300 = getelementptr inbounds float, float* %tmp6299, i64 1
+ %tmp6301 = getelementptr inbounds float, float* %tmp6300, i64 1
+ %tmp6302 = getelementptr inbounds float, float* %tmp6301, i64 1
+ %tmp6303 = getelementptr inbounds float, float* %tmp6302, i64 1
+ %tmp6304 = getelementptr inbounds float, float* %tmp6303, i64 1
+ %tmp6305 = getelementptr inbounds float, float* %tmp6304, i64 1
+ %tmp6306 = getelementptr inbounds float, float* %tmp6305, i64 1
+ %tmp6307 = getelementptr inbounds float, float* %tmp6306, i64 1
+ %tmp6308 = getelementptr inbounds float, float* %tmp6307, i64 1
+ %tmp6309 = getelementptr inbounds float, float* %tmp6308, i64 1
+ %tmp6310 = getelementptr inbounds float, float* %tmp6309, i64 1
+ %tmp6311 = getelementptr inbounds float, float* %tmp6310, i64 1
+ %tmp6312 = getelementptr inbounds float, float* %tmp6311, i64 1
+ %tmp6313 = getelementptr inbounds float, float* %tmp6312, i64 1
+ %tmp6314 = getelementptr inbounds float, float* %tmp6313, i64 1
+ %tmp6315 = getelementptr inbounds float, float* %tmp6314, i64 1
+ %tmp6316 = getelementptr inbounds float, float* %tmp6315, i64 1
+ %tmp6317 = getelementptr inbounds float, float* %tmp6316, i64 1
+ %tmp6318 = getelementptr inbounds float, float* %tmp6317, i64 1
+ %tmp6319 = getelementptr inbounds float, float* %tmp6318, i64 1
+ %tmp6320 = getelementptr inbounds float, float* %tmp6319, i64 1
+ %tmp6321 = getelementptr inbounds float, float* %tmp6320, i64 1
+ %tmp6322 = getelementptr inbounds float, float* %tmp6321, i64 1
+ %tmp6323 = getelementptr inbounds float, float* %tmp6322, i64 1
+ %tmp6324 = getelementptr inbounds float, float* %tmp6323, i64 1
+ %tmp6325 = getelementptr inbounds float, float* %tmp6324, i64 1
+ %tmp6326 = getelementptr inbounds float, float* %tmp6325, i64 1
+ %tmp6327 = getelementptr inbounds float, float* %tmp6326, i64 1
+ %tmp6328 = getelementptr inbounds float, float* %tmp6327, i64 1
+ %tmp6329 = getelementptr inbounds float, float* %tmp6328, i64 1
+ %tmp6330 = getelementptr inbounds float, float* %tmp6329, i64 1
+ %tmp6331 = getelementptr inbounds float, float* %tmp6330, i64 1
+ %tmp6332 = getelementptr inbounds float, float* %tmp6331, i64 1
+ %tmp6333 = getelementptr inbounds float, float* %tmp6332, i64 1
+ %tmp6334 = getelementptr inbounds float, float* %tmp6333, i64 1
+ %tmp6335 = getelementptr inbounds float, float* %tmp6334, i64 1
+ %tmp6336 = getelementptr inbounds float, float* %tmp6335, i64 1
+ %tmp6337 = getelementptr inbounds float, float* %tmp6336, i64 1
+ %tmp6338 = getelementptr inbounds float, float* %tmp6337, i64 1
+ %tmp6339 = getelementptr inbounds float, float* %tmp6338, i64 1
+ %tmp6340 = getelementptr inbounds float, float* %tmp6339, i64 1
+ %tmp6341 = getelementptr inbounds float, float* %tmp6340, i64 1
+ %tmp6342 = getelementptr inbounds float, float* %tmp6341, i64 1
+ %tmp6343 = getelementptr inbounds float, float* %tmp6342, i64 1
+ %tmp6344 = getelementptr inbounds float, float* %tmp6343, i64 1
+ %tmp6345 = getelementptr inbounds float, float* %tmp6344, i64 1
+ %tmp6346 = getelementptr inbounds float, float* %tmp6345, i64 1
+ %tmp6347 = getelementptr inbounds float, float* %tmp6346, i64 1
+ %tmp6348 = getelementptr inbounds float, float* %tmp6347, i64 1
+ %tmp6349 = getelementptr inbounds float, float* %tmp6348, i64 1
+ %tmp6350 = getelementptr inbounds float, float* %tmp6349, i64 1
+ %tmp6351 = getelementptr inbounds float, float* %tmp6350, i64 1
+ %tmp6352 = getelementptr inbounds float, float* %tmp6351, i64 1
+ %tmp6353 = getelementptr inbounds float, float* %tmp6352, i64 1
+ %tmp6354 = getelementptr inbounds float, float* %tmp6353, i64 1
+ %tmp6355 = getelementptr inbounds float, float* %tmp6354, i64 1
+ %tmp6356 = getelementptr inbounds float, float* %tmp6355, i64 1
+ %tmp6357 = getelementptr inbounds float, float* %tmp6356, i64 1
+ %tmp6358 = getelementptr inbounds float, float* %tmp6357, i64 1
+ %tmp6359 = getelementptr inbounds float, float* %tmp6358, i64 1
+ %tmp6360 = getelementptr inbounds float, float* %tmp6359, i64 1
+ %tmp6361 = getelementptr inbounds float, float* %tmp6360, i64 1
+ %tmp6362 = getelementptr inbounds float, float* %tmp6361, i64 1
+ %tmp6363 = getelementptr inbounds float, float* %tmp6362, i64 1
+ %tmp6364 = getelementptr inbounds float, float* %tmp6363, i64 1
+ %tmp6365 = getelementptr inbounds float, float* %tmp6364, i64 1
+ %tmp6366 = getelementptr inbounds float, float* %tmp6365, i64 1
+ %tmp6367 = getelementptr inbounds float, float* %tmp6366, i64 1
+ %tmp6368 = getelementptr inbounds float, float* %tmp6367, i64 1
+ %tmp6369 = getelementptr inbounds float, float* %tmp6368, i64 1
+ %tmp6370 = getelementptr inbounds float, float* %tmp6369, i64 1
+ %tmp6371 = getelementptr inbounds float, float* %tmp6370, i64 1
+ %tmp6372 = getelementptr inbounds float, float* %tmp6371, i64 1
+ %tmp6373 = getelementptr inbounds float, float* %tmp6372, i64 1
+ %tmp6374 = getelementptr inbounds float, float* %tmp6373, i64 1
+ %tmp6375 = getelementptr inbounds float, float* %tmp6374, i64 1
+ %tmp6376 = getelementptr inbounds float, float* %tmp6375, i64 1
+ %tmp6377 = getelementptr inbounds float, float* %tmp6376, i64 1
+ %tmp6378 = getelementptr inbounds float, float* %tmp6377, i64 1
+ %tmp6379 = getelementptr inbounds float, float* %tmp6378, i64 1
+ %tmp6380 = getelementptr inbounds float, float* %tmp6379, i64 1
+ %tmp6381 = getelementptr inbounds float, float* %tmp6380, i64 1
+ %tmp6382 = getelementptr inbounds float, float* %tmp6381, i64 1
+ %tmp6383 = getelementptr inbounds float, float* %tmp6382, i64 1
+ %tmp6384 = getelementptr inbounds float, float* %tmp6383, i64 1
+ %tmp6385 = getelementptr inbounds float, float* %tmp6384, i64 1
+ %tmp6386 = getelementptr inbounds float, float* %tmp6385, i64 1
+ %tmp6387 = getelementptr inbounds float, float* %tmp6386, i64 1
+ %tmp6388 = getelementptr inbounds float, float* %tmp6387, i64 1
+ %tmp6389 = getelementptr inbounds float, float* %tmp6388, i64 1
+ %tmp6390 = getelementptr inbounds float, float* %tmp6389, i64 1
+ %tmp6391 = getelementptr inbounds float, float* %tmp6390, i64 1
+ %tmp6392 = getelementptr inbounds float, float* %tmp6391, i64 1
+ %tmp6393 = getelementptr inbounds float, float* %tmp6392, i64 1
+ %tmp6394 = getelementptr inbounds float, float* %tmp6393, i64 1
+ %tmp6395 = getelementptr inbounds float, float* %tmp6394, i64 1
+ %tmp6396 = getelementptr inbounds float, float* %tmp6395, i64 1
+ %tmp6397 = getelementptr inbounds float, float* %tmp6396, i64 1
+ %tmp6398 = getelementptr inbounds float, float* %tmp6397, i64 1
+ %tmp6399 = getelementptr inbounds float, float* %tmp6398, i64 1
+ %tmp6400 = getelementptr inbounds float, float* %tmp6399, i64 1
+ %tmp6401 = getelementptr inbounds float, float* %tmp6400, i64 1
+ %tmp6402 = getelementptr inbounds float, float* %tmp6401, i64 1
+ %tmp6403 = getelementptr inbounds float, float* %tmp6402, i64 1
+ %tmp6404 = getelementptr inbounds float, float* %tmp6403, i64 1
+ %tmp6405 = getelementptr inbounds float, float* %tmp6404, i64 1
+ %tmp6406 = getelementptr inbounds float, float* %tmp6405, i64 1
+ %tmp6407 = getelementptr inbounds float, float* %tmp6406, i64 1
+ %tmp6408 = getelementptr inbounds float, float* %tmp6407, i64 1
+ %tmp6409 = getelementptr inbounds float, float* %tmp6408, i64 1
+ %tmp6410 = getelementptr inbounds float, float* %tmp6409, i64 1
+ %tmp6411 = getelementptr inbounds float, float* %tmp6410, i64 1
+ %tmp6412 = getelementptr inbounds float, float* %tmp6411, i64 1
+ %tmp6413 = getelementptr inbounds float, float* %tmp6412, i64 1
+ %tmp6414 = getelementptr inbounds float, float* %tmp6413, i64 1
+ %tmp6415 = getelementptr inbounds float, float* %tmp6414, i64 1
+ %tmp6416 = getelementptr inbounds float, float* %tmp6415, i64 1
+ %tmp6417 = getelementptr inbounds float, float* %tmp6416, i64 1
+ %tmp6418 = getelementptr inbounds float, float* %tmp6417, i64 1
+ %tmp6419 = getelementptr inbounds float, float* %tmp6418, i64 1
+ %tmp6420 = getelementptr inbounds float, float* %tmp6419, i64 1
+ %tmp6421 = getelementptr inbounds float, float* %tmp6420, i64 1
+ %tmp6422 = getelementptr inbounds float, float* %tmp6421, i64 1
+ %tmp6423 = getelementptr inbounds float, float* %tmp6422, i64 1
+ %tmp6424 = getelementptr inbounds float, float* %tmp6423, i64 1
+ %tmp6425 = getelementptr inbounds float, float* %tmp6424, i64 1
+ %tmp6426 = getelementptr inbounds float, float* %tmp6425, i64 1
+ %tmp6427 = getelementptr inbounds float, float* %tmp6426, i64 1
+ %tmp6428 = getelementptr inbounds float, float* %tmp6427, i64 1
+ %tmp6429 = getelementptr inbounds float, float* %tmp6428, i64 1
+ %tmp6430 = getelementptr inbounds float, float* %tmp6429, i64 1
+ %tmp6431 = getelementptr inbounds float, float* %tmp6430, i64 1
+ %tmp6432 = getelementptr inbounds float, float* %tmp6431, i64 1
+ %tmp6433 = getelementptr inbounds float, float* %tmp6432, i64 1
+ %tmp6434 = getelementptr inbounds float, float* %tmp6433, i64 1
+ %tmp6435 = getelementptr inbounds float, float* %tmp6434, i64 1
+ %tmp6436 = getelementptr inbounds float, float* %tmp6435, i64 1
+ %tmp6437 = getelementptr inbounds float, float* %tmp6436, i64 1
+ %tmp6438 = getelementptr inbounds float, float* %tmp6437, i64 1
+ %tmp6439 = getelementptr inbounds float, float* %tmp6438, i64 1
+ %tmp6440 = getelementptr inbounds float, float* %tmp6439, i64 1
+ %tmp6441 = getelementptr inbounds float, float* %tmp6440, i64 1
+ %tmp6442 = getelementptr inbounds float, float* %tmp6441, i64 1
+ %tmp6443 = getelementptr inbounds float, float* %tmp6442, i64 1
+ %tmp6444 = getelementptr inbounds float, float* %tmp6443, i64 1
+ %tmp6445 = getelementptr inbounds float, float* %tmp6444, i64 1
+ %tmp6446 = getelementptr inbounds float, float* %tmp6445, i64 1
+ %tmp6447 = getelementptr inbounds float, float* %tmp6446, i64 1
+ %tmp6448 = getelementptr inbounds float, float* %tmp6447, i64 1
+ %tmp6449 = getelementptr inbounds float, float* %tmp6448, i64 1
+ %tmp6450 = getelementptr inbounds float, float* %tmp6449, i64 1
+ %tmp6451 = getelementptr inbounds float, float* %tmp6450, i64 1
+ %tmp6452 = getelementptr inbounds float, float* %tmp6451, i64 1
+ %tmp6453 = getelementptr inbounds float, float* %tmp6452, i64 1
+ %tmp6454 = getelementptr inbounds float, float* %tmp6453, i64 1
+ %tmp6455 = getelementptr inbounds float, float* %tmp6454, i64 1
+ %tmp6456 = getelementptr inbounds float, float* %tmp6455, i64 1
+ %tmp6457 = getelementptr inbounds float, float* %tmp6456, i64 1
+ %tmp6458 = getelementptr inbounds float, float* %tmp6457, i64 1
+ %tmp6459 = getelementptr inbounds float, float* %tmp6458, i64 1
+ %tmp6460 = getelementptr inbounds float, float* %tmp6459, i64 1
+ %tmp6461 = getelementptr inbounds float, float* %tmp6460, i64 1
+ %tmp6462 = getelementptr inbounds float, float* %tmp6461, i64 1
+ %tmp6463 = getelementptr inbounds float, float* %tmp6462, i64 1
+ %tmp6464 = getelementptr inbounds float, float* %tmp6463, i64 1
+ %tmp6465 = getelementptr inbounds float, float* %tmp6464, i64 1
+ %tmp6466 = getelementptr inbounds float, float* %tmp6465, i64 1
+ %tmp6467 = getelementptr inbounds float, float* %tmp6466, i64 1
+ %tmp6468 = getelementptr inbounds float, float* %tmp6467, i64 1
+ %tmp6469 = getelementptr inbounds float, float* %tmp6468, i64 1
+ %tmp6470 = getelementptr inbounds float, float* %tmp6469, i64 1
+ %tmp6471 = getelementptr inbounds float, float* %tmp6470, i64 1
+ %tmp6472 = getelementptr inbounds float, float* %tmp6471, i64 1
+ %tmp6473 = getelementptr inbounds float, float* %tmp6472, i64 1
+ %tmp6474 = getelementptr inbounds float, float* %tmp6473, i64 1
+ %tmp6475 = getelementptr inbounds float, float* %tmp6474, i64 1
+ %tmp6476 = getelementptr inbounds float, float* %tmp6475, i64 1
+ %tmp6477 = getelementptr inbounds float, float* %tmp6476, i64 1
+ %tmp6478 = getelementptr inbounds float, float* %tmp6477, i64 1
+ %tmp6479 = getelementptr inbounds float, float* %tmp6478, i64 1
+ %tmp6480 = getelementptr inbounds float, float* %tmp6479, i64 1
+ %tmp6481 = getelementptr inbounds float, float* %tmp6480, i64 1
+ %tmp6482 = getelementptr inbounds float, float* %tmp6481, i64 1
+ %tmp6483 = getelementptr inbounds float, float* %tmp6482, i64 1
+ %tmp6484 = getelementptr inbounds float, float* %tmp6483, i64 1
+ %tmp6485 = getelementptr inbounds float, float* %tmp6484, i64 1
+ %tmp6486 = getelementptr inbounds float, float* %tmp6485, i64 1
+ %tmp6487 = getelementptr inbounds float, float* %tmp6486, i64 1
+ %tmp6488 = getelementptr inbounds float, float* %tmp6487, i64 1
+ %tmp6489 = getelementptr inbounds float, float* %tmp6488, i64 1
+ %tmp6490 = getelementptr inbounds float, float* %tmp6489, i64 1
+ %tmp6491 = getelementptr inbounds float, float* %tmp6490, i64 1
+ %tmp6492 = getelementptr inbounds float, float* %tmp6491, i64 1
+ %tmp6493 = getelementptr inbounds float, float* %tmp6492, i64 1
+ %tmp6494 = getelementptr inbounds float, float* %tmp6493, i64 1
+ %tmp6495 = getelementptr inbounds float, float* %tmp6494, i64 1
+ %tmp6496 = getelementptr inbounds float, float* %tmp6495, i64 1
+ %tmp6497 = getelementptr inbounds float, float* %tmp6496, i64 1
+ %tmp6498 = getelementptr inbounds float, float* %tmp6497, i64 1
+ %tmp6499 = getelementptr inbounds float, float* %tmp6498, i64 1
+ %tmp6500 = getelementptr inbounds float, float* %tmp6499, i64 1
+ %tmp6501 = getelementptr inbounds float, float* %tmp6500, i64 1
+ %tmp6502 = getelementptr inbounds float, float* %tmp6501, i64 1
+ %tmp6503 = getelementptr inbounds float, float* %tmp6502, i64 1
+ %tmp6504 = getelementptr inbounds float, float* %tmp6503, i64 1
+ %tmp6505 = getelementptr inbounds float, float* %tmp6504, i64 1
+ %tmp6506 = getelementptr inbounds float, float* %tmp6505, i64 1
+ %tmp6507 = getelementptr inbounds float, float* %tmp6506, i64 1
+ %tmp6508 = getelementptr inbounds float, float* %tmp6507, i64 1
+ %tmp6509 = getelementptr inbounds float, float* %tmp6508, i64 1
+ %tmp6510 = getelementptr inbounds float, float* %tmp6509, i64 1
+ %tmp6511 = getelementptr inbounds float, float* %tmp6510, i64 1
+ %tmp6512 = getelementptr inbounds float, float* %tmp6511, i64 1
+ %tmp6513 = getelementptr inbounds float, float* %tmp6512, i64 1
+ %tmp6514 = getelementptr inbounds float, float* %tmp6513, i64 1
+ %tmp6515 = getelementptr inbounds float, float* %tmp6514, i64 1
+ %tmp6516 = getelementptr inbounds float, float* %tmp6515, i64 1
+ %tmp6517 = getelementptr inbounds float, float* %tmp6516, i64 1
+ %tmp6518 = getelementptr inbounds float, float* %tmp6517, i64 1
+ %tmp6519 = getelementptr inbounds float, float* %tmp6518, i64 1
+ %tmp6520 = getelementptr inbounds float, float* %tmp6519, i64 1
+ %tmp6521 = getelementptr inbounds float, float* %tmp6520, i64 1
+ %tmp6522 = getelementptr inbounds float, float* %tmp6521, i64 1
+ %tmp6523 = getelementptr inbounds float, float* %tmp6522, i64 1
+ %tmp6524 = getelementptr inbounds float, float* %tmp6523, i64 1
+ %tmp6525 = getelementptr inbounds float, float* %tmp6524, i64 1
+ %tmp6526 = getelementptr inbounds float, float* %tmp6525, i64 1
+ %tmp6527 = getelementptr inbounds float, float* %tmp6526, i64 1
+ %tmp6528 = getelementptr inbounds float, float* %tmp6527, i64 1
+ %tmp6529 = getelementptr inbounds float, float* %tmp6528, i64 1
+ %tmp6530 = getelementptr inbounds float, float* %tmp6529, i64 1
+ %tmp6531 = getelementptr inbounds float, float* %tmp6530, i64 1
+ %tmp6532 = getelementptr inbounds float, float* %tmp6531, i64 1
+ %tmp6533 = getelementptr inbounds float, float* %tmp6532, i64 1
+ %tmp6534 = getelementptr inbounds float, float* %tmp6533, i64 1
+ %tmp6535 = getelementptr inbounds float, float* %tmp6534, i64 1
+ %tmp6536 = getelementptr inbounds float, float* %tmp6535, i64 1
+ %tmp6537 = getelementptr inbounds float, float* %tmp6536, i64 1
+ %tmp6538 = getelementptr inbounds float, float* %tmp6537, i64 1
+ %tmp6539 = getelementptr inbounds float, float* %tmp6538, i64 1
+ %tmp6540 = getelementptr inbounds float, float* %tmp6539, i64 1
+ %tmp6541 = getelementptr inbounds float, float* %tmp6540, i64 1
+ %tmp6542 = getelementptr inbounds float, float* %tmp6541, i64 1
+ %tmp6543 = getelementptr inbounds float, float* %tmp6542, i64 1
+ %tmp6544 = getelementptr inbounds float, float* %tmp6543, i64 1
+ %tmp6545 = getelementptr inbounds float, float* %tmp6544, i64 1
+ %tmp6546 = getelementptr inbounds float, float* %tmp6545, i64 1
+ %tmp6547 = getelementptr inbounds float, float* %tmp6546, i64 1
+ %tmp6548 = getelementptr inbounds float, float* %tmp6547, i64 1
+ %tmp6549 = getelementptr inbounds float, float* %tmp6548, i64 1
+ %tmp6550 = getelementptr inbounds float, float* %tmp6549, i64 1
+ %tmp6551 = getelementptr inbounds float, float* %tmp6550, i64 1
+ %tmp6552 = getelementptr inbounds float, float* %tmp6551, i64 1
+ %tmp6553 = getelementptr inbounds float, float* %tmp6552, i64 1
+ %tmp6554 = getelementptr inbounds float, float* %tmp6553, i64 1
+ %tmp6555 = getelementptr inbounds float, float* %tmp6554, i64 1
+ %tmp6556 = getelementptr inbounds float, float* %tmp6555, i64 1
+ %tmp6557 = getelementptr inbounds float, float* %tmp6556, i64 1
+ %tmp6558 = getelementptr inbounds float, float* %tmp6557, i64 1
+ %tmp6559 = getelementptr inbounds float, float* %tmp6558, i64 1
+ %tmp6560 = getelementptr inbounds float, float* %tmp6559, i64 1
+ %tmp6561 = getelementptr inbounds float, float* %tmp6560, i64 1
+ %tmp6562 = getelementptr inbounds float, float* %tmp6561, i64 1
+ %tmp6563 = getelementptr inbounds float, float* %tmp6562, i64 1
+ %tmp6564 = getelementptr inbounds float, float* %tmp6563, i64 1
+ %tmp6565 = getelementptr inbounds float, float* %tmp6564, i64 1
+ %tmp6566 = getelementptr inbounds float, float* %tmp6565, i64 1
+ %tmp6567 = getelementptr inbounds float, float* %tmp6566, i64 1
+ %tmp6568 = getelementptr inbounds float, float* %tmp6567, i64 1
+ %tmp6569 = getelementptr inbounds float, float* %tmp6568, i64 1
+ %tmp6570 = getelementptr inbounds float, float* %tmp6569, i64 1
+ %tmp6571 = getelementptr inbounds float, float* %tmp6570, i64 1
+ %tmp6572 = getelementptr inbounds float, float* %tmp6571, i64 1
+ %tmp6573 = getelementptr inbounds float, float* %tmp6572, i64 1
+ %tmp6574 = getelementptr inbounds float, float* %tmp6573, i64 1
+ %tmp6575 = getelementptr inbounds float, float* %tmp6574, i64 1
+ %tmp6576 = getelementptr inbounds float, float* %tmp6575, i64 1
+ %tmp6577 = getelementptr inbounds float, float* %tmp6576, i64 1
+ %tmp6578 = getelementptr inbounds float, float* %tmp6577, i64 1
+ %tmp6579 = getelementptr inbounds float, float* %tmp6578, i64 1
+ %tmp6580 = getelementptr inbounds float, float* %tmp6579, i64 1
+ %tmp6581 = getelementptr inbounds float, float* %tmp6580, i64 1
+ %tmp6582 = getelementptr inbounds float, float* %tmp6581, i64 1
+ %tmp6583 = getelementptr inbounds float, float* %tmp6582, i64 1
+ %tmp6584 = getelementptr inbounds float, float* %tmp6583, i64 1
+ %tmp6585 = getelementptr inbounds float, float* %tmp6584, i64 1
+ %tmp6586 = getelementptr inbounds float, float* %tmp6585, i64 1
+ %tmp6587 = getelementptr inbounds float, float* %tmp6586, i64 1
+ %tmp6588 = getelementptr inbounds float, float* %tmp6587, i64 1
+ %tmp6589 = getelementptr inbounds float, float* %tmp6588, i64 1
+ %tmp6590 = getelementptr inbounds float, float* %tmp6589, i64 1
+ %tmp6591 = getelementptr inbounds float, float* %tmp6590, i64 1
+ %tmp6592 = getelementptr inbounds float, float* %tmp6591, i64 1
+ %tmp6593 = getelementptr inbounds float, float* %tmp6592, i64 1
+ %tmp6594 = getelementptr inbounds float, float* %tmp6593, i64 1
+ %tmp6595 = getelementptr inbounds float, float* %tmp6594, i64 1
+ %tmp6596 = getelementptr inbounds float, float* %tmp6595, i64 1
+ %tmp6597 = getelementptr inbounds float, float* %tmp6596, i64 1
+ %tmp6598 = getelementptr inbounds float, float* %tmp6597, i64 1
+ %tmp6599 = getelementptr inbounds float, float* %tmp6598, i64 1
+ %tmp6600 = getelementptr inbounds float, float* %tmp6599, i64 1
+ %tmp6601 = getelementptr inbounds float, float* %tmp6600, i64 1
+ %tmp6602 = getelementptr inbounds float, float* %tmp6601, i64 1
+ %tmp6603 = getelementptr inbounds float, float* %tmp6602, i64 1
+ %tmp6604 = getelementptr inbounds float, float* %tmp6603, i64 1
+ %tmp6605 = getelementptr inbounds float, float* %tmp6604, i64 1
+ %tmp6606 = getelementptr inbounds float, float* %tmp6605, i64 1
+ %tmp6607 = getelementptr inbounds float, float* %tmp6606, i64 1
+ %tmp6608 = getelementptr inbounds float, float* %tmp6607, i64 1
+ %tmp6609 = getelementptr inbounds float, float* %tmp6608, i64 1
+ %tmp6610 = getelementptr inbounds float, float* %tmp6609, i64 1
+ %tmp6611 = getelementptr inbounds float, float* %tmp6610, i64 1
+ %tmp6612 = getelementptr inbounds float, float* %tmp6611, i64 1
+ %tmp6613 = getelementptr inbounds float, float* %tmp6612, i64 1
+ %tmp6614 = getelementptr inbounds float, float* %tmp6613, i64 1
+ %tmp6615 = getelementptr inbounds float, float* %tmp6614, i64 1
+ %tmp6616 = getelementptr inbounds float, float* %tmp6615, i64 1
+ %tmp6617 = getelementptr inbounds float, float* %tmp6616, i64 1
+ %tmp6618 = getelementptr inbounds float, float* %tmp6617, i64 1
+ %tmp6619 = getelementptr inbounds float, float* %tmp6618, i64 1
+ %tmp6620 = getelementptr inbounds float, float* %tmp6619, i64 1
+ %tmp6621 = getelementptr inbounds float, float* %tmp6620, i64 1
+ %tmp6622 = getelementptr inbounds float, float* %tmp6621, i64 1
+ %tmp6623 = getelementptr inbounds float, float* %tmp6622, i64 1
+ %tmp6624 = getelementptr inbounds float, float* %tmp6623, i64 1
+ %tmp6625 = getelementptr inbounds float, float* %tmp6624, i64 1
+ %tmp6626 = getelementptr inbounds float, float* %tmp6625, i64 1
+ %tmp6627 = getelementptr inbounds float, float* %tmp6626, i64 1
+ %tmp6628 = getelementptr inbounds float, float* %tmp6627, i64 1
+ %tmp6629 = getelementptr inbounds float, float* %tmp6628, i64 1
+ %tmp6630 = getelementptr inbounds float, float* %tmp6629, i64 1
+ %tmp6631 = getelementptr inbounds float, float* %tmp6630, i64 1
+ %tmp6632 = getelementptr inbounds float, float* %tmp6631, i64 1
+ %tmp6633 = getelementptr inbounds float, float* %tmp6632, i64 1
+ %tmp6634 = getelementptr inbounds float, float* %tmp6633, i64 1
+ %tmp6635 = getelementptr inbounds float, float* %tmp6634, i64 1
+ %tmp6636 = getelementptr inbounds float, float* %tmp6635, i64 1
+ %tmp6637 = getelementptr inbounds float, float* %tmp6636, i64 1
+ %tmp6638 = getelementptr inbounds float, float* %tmp6637, i64 1
+ %tmp6639 = getelementptr inbounds float, float* %tmp6638, i64 1
+ %tmp6640 = getelementptr inbounds float, float* %tmp6639, i64 1
+ %tmp6641 = getelementptr inbounds float, float* %tmp6640, i64 1
+ %tmp6642 = getelementptr inbounds float, float* %tmp6641, i64 1
+ %tmp6643 = getelementptr inbounds float, float* %tmp6642, i64 1
+ %tmp6644 = getelementptr inbounds float, float* %tmp6643, i64 1
+ %tmp6645 = getelementptr inbounds float, float* %tmp6644, i64 1
+ %tmp6646 = getelementptr inbounds float, float* %tmp6645, i64 1
+ %tmp6647 = getelementptr inbounds float, float* %tmp6646, i64 1
+ %tmp6648 = getelementptr inbounds float, float* %tmp6647, i64 1
+ %tmp6649 = getelementptr inbounds float, float* %tmp6648, i64 1
+ %tmp6650 = getelementptr inbounds float, float* %tmp6649, i64 1
+ %tmp6651 = getelementptr inbounds float, float* %tmp6650, i64 1
+ %tmp6652 = getelementptr inbounds float, float* %tmp6651, i64 1
+ %tmp6653 = getelementptr inbounds float, float* %tmp6652, i64 1
+ %tmp6654 = getelementptr inbounds float, float* %tmp6653, i64 1
+ %tmp6655 = getelementptr inbounds float, float* %tmp6654, i64 1
+ %tmp6656 = getelementptr inbounds float, float* %tmp6655, i64 1
+ %tmp6657 = getelementptr inbounds float, float* %tmp6656, i64 1
+ %tmp6658 = getelementptr inbounds float, float* %tmp6657, i64 1
+ %tmp6659 = getelementptr inbounds float, float* %tmp6658, i64 1
+ %tmp6660 = getelementptr inbounds float, float* %tmp6659, i64 1
+ %tmp6661 = getelementptr inbounds float, float* %tmp6660, i64 1
+ %tmp6662 = getelementptr inbounds float, float* %tmp6661, i64 1
+ %tmp6663 = getelementptr inbounds float, float* %tmp6662, i64 1
+ %tmp6664 = getelementptr inbounds float, float* %tmp6663, i64 1
+ %tmp6665 = getelementptr inbounds float, float* %tmp6664, i64 1
+ %tmp6666 = getelementptr inbounds float, float* %tmp6665, i64 1
+ %tmp6667 = getelementptr inbounds float, float* %tmp6666, i64 1
+ %tmp6668 = getelementptr inbounds float, float* %tmp6667, i64 1
+ %tmp6669 = getelementptr inbounds float, float* %tmp6668, i64 1
+ %tmp6670 = getelementptr inbounds float, float* %tmp6669, i64 1
+ %tmp6671 = getelementptr inbounds float, float* %tmp6670, i64 1
+ %tmp6672 = getelementptr inbounds float, float* %tmp6671, i64 1
+ %tmp6673 = getelementptr inbounds float, float* %tmp6672, i64 1
+ %tmp6674 = getelementptr inbounds float, float* %tmp6673, i64 1
+ %tmp6675 = getelementptr inbounds float, float* %tmp6674, i64 1
+ %tmp6676 = getelementptr inbounds float, float* %tmp6675, i64 1
+ %tmp6677 = getelementptr inbounds float, float* %tmp6676, i64 1
+ %tmp6678 = getelementptr inbounds float, float* %tmp6677, i64 1
+ %tmp6679 = getelementptr inbounds float, float* %tmp6678, i64 1
+ %tmp6680 = getelementptr inbounds float, float* %tmp6679, i64 1
+ %tmp6681 = getelementptr inbounds float, float* %tmp6680, i64 1
+ %tmp6682 = getelementptr inbounds float, float* %tmp6681, i64 1
+ %tmp6683 = getelementptr inbounds float, float* %tmp6682, i64 1
+ %tmp6684 = getelementptr inbounds float, float* %tmp6683, i64 1
+ %tmp6685 = getelementptr inbounds float, float* %tmp6684, i64 1
+ %tmp6686 = getelementptr inbounds float, float* %tmp6685, i64 1
+ %tmp6687 = getelementptr inbounds float, float* %tmp6686, i64 1
+ %tmp6688 = getelementptr inbounds float, float* %tmp6687, i64 1
+ %tmp6689 = getelementptr inbounds float, float* %tmp6688, i64 1
+ %tmp6690 = getelementptr inbounds float, float* %tmp6689, i64 1
+ %tmp6691 = getelementptr inbounds float, float* %tmp6690, i64 1
+ %tmp6692 = getelementptr inbounds float, float* %tmp6691, i64 1
+ %tmp6693 = getelementptr inbounds float, float* %tmp6692, i64 1
+ %tmp6694 = getelementptr inbounds float, float* %tmp6693, i64 1
+ %tmp6695 = getelementptr inbounds float, float* %tmp6694, i64 1
+ %tmp6696 = getelementptr inbounds float, float* %tmp6695, i64 1
+ %tmp6697 = getelementptr inbounds float, float* %tmp6696, i64 1
+ %tmp6698 = getelementptr inbounds float, float* %tmp6697, i64 1
+ %tmp6699 = getelementptr inbounds float, float* %tmp6698, i64 1
+ %tmp6700 = getelementptr inbounds float, float* %tmp6699, i64 1
+ %tmp6701 = getelementptr inbounds float, float* %tmp6700, i64 1
+ %tmp6702 = getelementptr inbounds float, float* %tmp6701, i64 1
+ %tmp6703 = getelementptr inbounds float, float* %tmp6702, i64 1
+ %tmp6704 = getelementptr inbounds float, float* %tmp6703, i64 1
+ %tmp6705 = getelementptr inbounds float, float* %tmp6704, i64 1
+ %tmp6706 = getelementptr inbounds float, float* %tmp6705, i64 1
+ %tmp6707 = getelementptr inbounds float, float* %tmp6706, i64 1
+ %tmp6708 = getelementptr inbounds float, float* %tmp6707, i64 1
+ %tmp6709 = getelementptr inbounds float, float* %tmp6708, i64 1
+ %tmp6710 = getelementptr inbounds float, float* %tmp6709, i64 1
+ %tmp6711 = getelementptr inbounds float, float* %tmp6710, i64 1
+ %tmp6712 = getelementptr inbounds float, float* %tmp6711, i64 1
+ %tmp6713 = getelementptr inbounds float, float* %tmp6712, i64 1
+ %tmp6714 = getelementptr inbounds float, float* %tmp6713, i64 1
+ %tmp6715 = getelementptr inbounds float, float* %tmp6714, i64 1
+ %tmp6716 = getelementptr inbounds float, float* %tmp6715, i64 1
+ %tmp6717 = getelementptr inbounds float, float* %tmp6716, i64 1
+ %tmp6718 = getelementptr inbounds float, float* %tmp6717, i64 1
+ %tmp6719 = getelementptr inbounds float, float* %tmp6718, i64 1
+ %tmp6720 = getelementptr inbounds float, float* %tmp6719, i64 1
+ %tmp6721 = getelementptr inbounds float, float* %tmp6720, i64 1
+ %tmp6722 = getelementptr inbounds float, float* %tmp6721, i64 1
+ %tmp6723 = getelementptr inbounds float, float* %tmp6722, i64 1
+ %tmp6724 = getelementptr inbounds float, float* %tmp6723, i64 1
+ %tmp6725 = getelementptr inbounds float, float* %tmp6724, i64 1
+ %tmp6726 = getelementptr inbounds float, float* %tmp6725, i64 1
+ %tmp6727 = getelementptr inbounds float, float* %tmp6726, i64 1
+ %tmp6728 = getelementptr inbounds float, float* %tmp6727, i64 1
+ %tmp6729 = getelementptr inbounds float, float* %tmp6728, i64 1
+ %tmp6730 = getelementptr inbounds float, float* %tmp6729, i64 1
+ %tmp6731 = getelementptr inbounds float, float* %tmp6730, i64 1
+ %tmp6732 = getelementptr inbounds float, float* %tmp6731, i64 1
+ %tmp6733 = getelementptr inbounds float, float* %tmp6732, i64 1
+ %tmp6734 = getelementptr inbounds float, float* %tmp6733, i64 1
+ %tmp6735 = getelementptr inbounds float, float* %tmp6734, i64 1
+ %tmp6736 = getelementptr inbounds float, float* %tmp6735, i64 1
+ %tmp6737 = getelementptr inbounds float, float* %tmp6736, i64 1
+ %tmp6738 = getelementptr inbounds float, float* %tmp6737, i64 1
+ %tmp6739 = getelementptr inbounds float, float* %tmp6738, i64 1
+ %tmp6740 = getelementptr inbounds float, float* %tmp6739, i64 1
+ %tmp6741 = getelementptr inbounds float, float* %tmp6740, i64 1
+ %tmp6742 = getelementptr inbounds float, float* %tmp6741, i64 1
+ %tmp6743 = getelementptr inbounds float, float* %tmp6742, i64 1
+ %tmp6744 = getelementptr inbounds float, float* %tmp6743, i64 1
+ %tmp6745 = getelementptr inbounds float, float* %tmp6744, i64 1
+ %tmp6746 = getelementptr inbounds float, float* %tmp6745, i64 1
+ %tmp6747 = getelementptr inbounds float, float* %tmp6746, i64 1
+ %tmp6748 = getelementptr inbounds float, float* %tmp6747, i64 1
+ %tmp6749 = getelementptr inbounds float, float* %tmp6748, i64 1
+ %tmp6750 = getelementptr inbounds float, float* %tmp6749, i64 1
+ %tmp6751 = getelementptr inbounds float, float* %tmp6750, i64 1
+ %tmp6752 = getelementptr inbounds float, float* %tmp6751, i64 1
+ %tmp6753 = getelementptr inbounds float, float* %tmp6752, i64 1
+ %tmp6754 = getelementptr inbounds float, float* %tmp6753, i64 1
+ %tmp6755 = getelementptr inbounds float, float* %tmp6754, i64 1
+ %tmp6756 = getelementptr inbounds float, float* %tmp6755, i64 1
+ %tmp6757 = getelementptr inbounds float, float* %tmp6756, i64 1
+ %tmp6758 = getelementptr inbounds float, float* %tmp6757, i64 1
+ %tmp6759 = getelementptr inbounds float, float* %tmp6758, i64 1
+ %tmp6760 = getelementptr inbounds float, float* %tmp6759, i64 1
+ %tmp6761 = getelementptr inbounds float, float* %tmp6760, i64 1
+ %tmp6762 = getelementptr inbounds float, float* %tmp6761, i64 1
+ %tmp6763 = getelementptr inbounds float, float* %tmp6762, i64 1
+ %tmp6764 = getelementptr inbounds float, float* %tmp6763, i64 1
+ %tmp6765 = getelementptr inbounds float, float* %tmp6764, i64 1
+ %tmp6766 = getelementptr inbounds float, float* %tmp6765, i64 1
+ %tmp6767 = getelementptr inbounds float, float* %tmp6766, i64 1
+ %tmp6768 = getelementptr inbounds float, float* %tmp6767, i64 1
+ %tmp6769 = getelementptr inbounds float, float* %tmp6768, i64 1
+ %tmp6770 = getelementptr inbounds float, float* %tmp6769, i64 1
+ %tmp6771 = getelementptr inbounds float, float* %tmp6770, i64 1
+ %tmp6772 = getelementptr inbounds float, float* %tmp6771, i64 1
+ %tmp6773 = getelementptr inbounds float, float* %tmp6772, i64 1
+ %tmp6774 = getelementptr inbounds float, float* %tmp6773, i64 1
+ %tmp6775 = getelementptr inbounds float, float* %tmp6774, i64 1
+ %tmp6776 = getelementptr inbounds float, float* %tmp6775, i64 1
+ %tmp6777 = getelementptr inbounds float, float* %tmp6776, i64 1
+ %tmp6778 = getelementptr inbounds float, float* %tmp6777, i64 1
+ %tmp6779 = getelementptr inbounds float, float* %tmp6778, i64 1
+ %tmp6780 = getelementptr inbounds float, float* %tmp6779, i64 1
+ %tmp6781 = getelementptr inbounds float, float* %tmp6780, i64 1
+ %tmp6782 = getelementptr inbounds float, float* %tmp6781, i64 1
+ %tmp6783 = getelementptr inbounds float, float* %tmp6782, i64 1
+ %tmp6784 = getelementptr inbounds float, float* %tmp6783, i64 1
+ %tmp6785 = getelementptr inbounds float, float* %tmp6784, i64 1
+ %tmp6786 = getelementptr inbounds float, float* %tmp6785, i64 1
+ %tmp6787 = getelementptr inbounds float, float* %tmp6786, i64 1
+ %tmp6788 = getelementptr inbounds float, float* %tmp6787, i64 1
+ %tmp6789 = getelementptr inbounds float, float* %tmp6788, i64 1
+ %tmp6790 = getelementptr inbounds float, float* %tmp6789, i64 1
+ %tmp6791 = getelementptr inbounds float, float* %tmp6790, i64 1
+ %tmp6792 = getelementptr inbounds float, float* %tmp6791, i64 1
+ %tmp6793 = getelementptr inbounds float, float* %tmp6792, i64 1
+ %tmp6794 = getelementptr inbounds float, float* %tmp6793, i64 1
+ %tmp6795 = getelementptr inbounds float, float* %tmp6794, i64 1
+ %tmp6796 = getelementptr inbounds float, float* %tmp6795, i64 1
+ %tmp6797 = getelementptr inbounds float, float* %tmp6796, i64 1
+ %tmp6798 = getelementptr inbounds float, float* %tmp6797, i64 1
+ %tmp6799 = getelementptr inbounds float, float* %tmp6798, i64 1
+ %tmp6800 = getelementptr inbounds float, float* %tmp6799, i64 1
+ %tmp6801 = getelementptr inbounds float, float* %tmp6800, i64 1
+ %tmp6802 = getelementptr inbounds float, float* %tmp6801, i64 1
+ %tmp6803 = getelementptr inbounds float, float* %tmp6802, i64 1
+ %tmp6804 = getelementptr inbounds float, float* %tmp6803, i64 1
+ %tmp6805 = getelementptr inbounds float, float* %tmp6804, i64 1
+ %tmp6806 = getelementptr inbounds float, float* %tmp6805, i64 1
+ %tmp6807 = getelementptr inbounds float, float* %tmp6806, i64 1
+ %tmp6808 = getelementptr inbounds float, float* %tmp6807, i64 1
+ %tmp6809 = getelementptr inbounds float, float* %tmp6808, i64 1
+ %tmp6810 = getelementptr inbounds float, float* %tmp6809, i64 1
+ %tmp6811 = getelementptr inbounds float, float* %tmp6810, i64 1
+ %tmp6812 = getelementptr inbounds float, float* %tmp6811, i64 1
+ %tmp6813 = getelementptr inbounds float, float* %tmp6812, i64 1
+ %tmp6814 = getelementptr inbounds float, float* %tmp6813, i64 1
+ %tmp6815 = getelementptr inbounds float, float* %tmp6814, i64 1
+ %tmp6816 = getelementptr inbounds float, float* %tmp6815, i64 1
+ %tmp6817 = getelementptr inbounds float, float* %tmp6816, i64 1
+ %tmp6818 = getelementptr inbounds float, float* %tmp6817, i64 1
+ %tmp6819 = getelementptr inbounds float, float* %tmp6818, i64 1
+ %tmp6820 = getelementptr inbounds float, float* %tmp6819, i64 1
+ %tmp6821 = getelementptr inbounds float, float* %tmp6820, i64 1
+ %tmp6822 = getelementptr inbounds float, float* %tmp6821, i64 1
+ %tmp6823 = getelementptr inbounds float, float* %tmp6822, i64 1
+ %tmp6824 = getelementptr inbounds float, float* %tmp6823, i64 1
+ %tmp6825 = getelementptr inbounds float, float* %tmp6824, i64 1
+ %tmp6826 = getelementptr inbounds float, float* %tmp6825, i64 1
+ %tmp6827 = getelementptr inbounds float, float* %tmp6826, i64 1
+ %tmp6828 = getelementptr inbounds float, float* %tmp6827, i64 1
+ %tmp6829 = getelementptr inbounds float, float* %tmp6828, i64 1
+ %tmp6830 = getelementptr inbounds float, float* %tmp6829, i64 1
+ %tmp6831 = getelementptr inbounds float, float* %tmp6830, i64 1
+ %tmp6832 = getelementptr inbounds float, float* %tmp6831, i64 1
+ %tmp6833 = getelementptr inbounds float, float* %tmp6832, i64 1
+ %tmp6834 = getelementptr inbounds float, float* %tmp6833, i64 1
+ %tmp6835 = getelementptr inbounds float, float* %tmp6834, i64 1
+ %tmp6836 = getelementptr inbounds float, float* %tmp6835, i64 1
+ %tmp6837 = getelementptr inbounds float, float* %tmp6836, i64 1
+ %tmp6838 = getelementptr inbounds float, float* %tmp6837, i64 1
+ %tmp6839 = getelementptr inbounds float, float* %tmp6838, i64 1
+ %tmp6840 = getelementptr inbounds float, float* %tmp6839, i64 1
+ %tmp6841 = getelementptr inbounds float, float* %tmp6840, i64 1
+ %tmp6842 = getelementptr inbounds float, float* %tmp6841, i64 1
+ %tmp6843 = getelementptr inbounds float, float* %tmp6842, i64 1
+ %tmp6844 = getelementptr inbounds float, float* %tmp6843, i64 1
+ %tmp6845 = getelementptr inbounds float, float* %tmp6844, i64 1
+ %tmp6846 = getelementptr inbounds float, float* %tmp6845, i64 1
+ %tmp6847 = getelementptr inbounds float, float* %tmp6846, i64 1
+ %tmp6848 = getelementptr inbounds float, float* %tmp6847, i64 1
+ %tmp6849 = getelementptr inbounds float, float* %tmp6848, i64 1
+ %tmp6850 = getelementptr inbounds float, float* %tmp6849, i64 1
+ %tmp6851 = getelementptr inbounds float, float* %tmp6850, i64 1
+ %tmp6852 = getelementptr inbounds float, float* %tmp6851, i64 1
+ %tmp6853 = getelementptr inbounds float, float* %tmp6852, i64 1
+ %tmp6854 = getelementptr inbounds float, float* %tmp6853, i64 1
+ %tmp6855 = getelementptr inbounds float, float* %tmp6854, i64 1
+ %tmp6856 = getelementptr inbounds float, float* %tmp6855, i64 1
+ %tmp6857 = getelementptr inbounds float, float* %tmp6856, i64 1
+ %tmp6858 = getelementptr inbounds float, float* %tmp6857, i64 1
+ %tmp6859 = getelementptr inbounds float, float* %tmp6858, i64 1
+ %tmp6860 = getelementptr inbounds float, float* %tmp6859, i64 1
+ %tmp6861 = getelementptr inbounds float, float* %tmp6860, i64 1
+ %tmp6862 = getelementptr inbounds float, float* %tmp6861, i64 1
+ %tmp6863 = getelementptr inbounds float, float* %tmp6862, i64 1
+ %tmp6864 = getelementptr inbounds float, float* %tmp6863, i64 1
+ %tmp6865 = getelementptr inbounds float, float* %tmp6864, i64 1
+ %tmp6866 = getelementptr inbounds float, float* %tmp6865, i64 1
+ %tmp6867 = getelementptr inbounds float, float* %tmp6866, i64 1
+ %tmp6868 = getelementptr inbounds float, float* %tmp6867, i64 1
+ %tmp6869 = getelementptr inbounds float, float* %tmp6868, i64 1
+ %tmp6870 = getelementptr inbounds float, float* %tmp6869, i64 1
+ %tmp6871 = getelementptr inbounds float, float* %tmp6870, i64 1
+ %tmp6872 = getelementptr inbounds float, float* %tmp6871, i64 1
+ %tmp6873 = getelementptr inbounds float, float* %tmp6872, i64 1
+ %tmp6874 = getelementptr inbounds float, float* %tmp6873, i64 1
+ %tmp6875 = getelementptr inbounds float, float* %tmp6874, i64 1
+ %tmp6876 = getelementptr inbounds float, float* %tmp6875, i64 1
+ %tmp6877 = getelementptr inbounds float, float* %tmp6876, i64 1
+ %tmp6878 = getelementptr inbounds float, float* %tmp6877, i64 1
+ %tmp6879 = getelementptr inbounds float, float* %tmp6878, i64 1
+ %tmp6880 = getelementptr inbounds float, float* %tmp6879, i64 1
+ %tmp6881 = getelementptr inbounds float, float* %tmp6880, i64 1
+ %tmp6882 = getelementptr inbounds float, float* %tmp6881, i64 1
+ %tmp6883 = getelementptr inbounds float, float* %tmp6882, i64 1
+ %tmp6884 = getelementptr inbounds float, float* %tmp6883, i64 1
+ %tmp6885 = getelementptr inbounds float, float* %tmp6884, i64 1
+ %tmp6886 = getelementptr inbounds float, float* %tmp6885, i64 1
+ %tmp6887 = getelementptr inbounds float, float* %tmp6886, i64 1
+ %tmp6888 = getelementptr inbounds float, float* %tmp6887, i64 1
+ %tmp6889 = getelementptr inbounds float, float* %tmp6888, i64 1
+ %tmp6890 = getelementptr inbounds float, float* %tmp6889, i64 1
+ %tmp6891 = getelementptr inbounds float, float* %tmp6890, i64 1
+ %tmp6892 = getelementptr inbounds float, float* %tmp6891, i64 1
+ %tmp6893 = getelementptr inbounds float, float* %tmp6892, i64 1
+ %tmp6894 = getelementptr inbounds float, float* %tmp6893, i64 1
+ %tmp6895 = getelementptr inbounds float, float* %tmp6894, i64 1
+ %tmp6896 = getelementptr inbounds float, float* %tmp6895, i64 1
+ %tmp6897 = getelementptr inbounds float, float* %tmp6896, i64 1
+ %tmp6898 = getelementptr inbounds float, float* %tmp6897, i64 1
+ %tmp6899 = getelementptr inbounds float, float* %tmp6898, i64 1
+ %tmp6900 = getelementptr inbounds float, float* %tmp6899, i64 1
+ %tmp6901 = getelementptr inbounds float, float* %tmp6900, i64 1
+ %tmp6902 = getelementptr inbounds float, float* %tmp6901, i64 1
+ %tmp6903 = getelementptr inbounds float, float* %tmp6902, i64 1
+ %tmp6904 = getelementptr inbounds float, float* %tmp6903, i64 1
+ %tmp6905 = getelementptr inbounds float, float* %tmp6904, i64 1
+ %tmp6906 = getelementptr inbounds float, float* %tmp6905, i64 1
+ %tmp6907 = getelementptr inbounds float, float* %tmp6906, i64 1
+ %tmp6908 = getelementptr inbounds float, float* %tmp6907, i64 1
+ %tmp6909 = getelementptr inbounds float, float* %tmp6908, i64 1
+ %tmp6910 = getelementptr inbounds float, float* %tmp6909, i64 1
+ %tmp6911 = getelementptr inbounds float, float* %tmp6910, i64 1
+ %tmp6912 = getelementptr inbounds float, float* %tmp6911, i64 1
+ %tmp6913 = getelementptr inbounds float, float* %tmp6912, i64 1
+ %tmp6914 = getelementptr inbounds float, float* %tmp6913, i64 1
+ %tmp6915 = getelementptr inbounds float, float* %tmp6914, i64 1
+ %tmp6916 = getelementptr inbounds float, float* %tmp6915, i64 1
+ %tmp6917 = getelementptr inbounds float, float* %tmp6916, i64 1
+ %tmp6918 = getelementptr inbounds float, float* %tmp6917, i64 1
+ %tmp6919 = getelementptr inbounds float, float* %tmp6918, i64 1
+ %tmp6920 = getelementptr inbounds float, float* %tmp6919, i64 1
+ %tmp6921 = getelementptr inbounds float, float* %tmp6920, i64 1
+ %tmp6922 = getelementptr inbounds float, float* %tmp6921, i64 1
+ %tmp6923 = getelementptr inbounds float, float* %tmp6922, i64 1
+ %tmp6924 = getelementptr inbounds float, float* %tmp6923, i64 1
+ %tmp6925 = getelementptr inbounds float, float* %tmp6924, i64 1
+ %tmp6926 = getelementptr inbounds float, float* %tmp6925, i64 1
+ %tmp6927 = getelementptr inbounds float, float* %tmp6926, i64 1
+ %tmp6928 = getelementptr inbounds float, float* %tmp6927, i64 1
+ %tmp6929 = getelementptr inbounds float, float* %tmp6928, i64 1
+ %tmp6930 = getelementptr inbounds float, float* %tmp6929, i64 1
+ %tmp6931 = getelementptr inbounds float, float* %tmp6930, i64 1
+ %tmp6932 = getelementptr inbounds float, float* %tmp6931, i64 1
+ %tmp6933 = getelementptr inbounds float, float* %tmp6932, i64 1
+ %tmp6934 = getelementptr inbounds float, float* %tmp6933, i64 1
+ %tmp6935 = getelementptr inbounds float, float* %tmp6934, i64 1
+ %tmp6936 = getelementptr inbounds float, float* %tmp6935, i64 1
+ %tmp6937 = getelementptr inbounds float, float* %tmp6936, i64 1
+ %tmp6938 = getelementptr inbounds float, float* %tmp6937, i64 1
+ %tmp6939 = getelementptr inbounds float, float* %tmp6938, i64 1
+ %tmp6940 = getelementptr inbounds float, float* %tmp6939, i64 1
+ %tmp6941 = getelementptr inbounds float, float* %tmp6940, i64 1
+ %tmp6942 = getelementptr inbounds float, float* %tmp6941, i64 1
+ %tmp6943 = getelementptr inbounds float, float* %tmp6942, i64 1
+ %tmp6944 = getelementptr inbounds float, float* %tmp6943, i64 1
+ %tmp6945 = getelementptr inbounds float, float* %tmp6944, i64 1
+ %tmp6946 = getelementptr inbounds float, float* %tmp6945, i64 1
+ %tmp6947 = getelementptr inbounds float, float* %tmp6946, i64 1
+ %tmp6948 = getelementptr inbounds float, float* %tmp6947, i64 1
+ %tmp6949 = getelementptr inbounds float, float* %tmp6948, i64 1
+ %tmp6950 = getelementptr inbounds float, float* %tmp6949, i64 1
+ %tmp6951 = getelementptr inbounds float, float* %tmp6950, i64 1
+ %tmp6952 = getelementptr inbounds float, float* %tmp6951, i64 1
+ %tmp6953 = getelementptr inbounds float, float* %tmp6952, i64 1
+ %tmp6954 = getelementptr inbounds float, float* %tmp6953, i64 1
+ %tmp6955 = getelementptr inbounds float, float* %tmp6954, i64 1
+ %tmp6956 = getelementptr inbounds float, float* %tmp6955, i64 1
+ %tmp6957 = getelementptr inbounds float, float* %tmp6956, i64 1
+ %tmp6958 = getelementptr inbounds float, float* %tmp6957, i64 1
+ %tmp6959 = getelementptr inbounds float, float* %tmp6958, i64 1
+ %tmp6960 = getelementptr inbounds float, float* %tmp6959, i64 1
+ %tmp6961 = getelementptr inbounds float, float* %tmp6960, i64 1
+ %tmp6962 = getelementptr inbounds float, float* %tmp6961, i64 1
+ %tmp6963 = getelementptr inbounds float, float* %tmp6962, i64 1
+ %tmp6964 = getelementptr inbounds float, float* %tmp6963, i64 1
+ %tmp6965 = getelementptr inbounds float, float* %tmp6964, i64 1
+ %tmp6966 = getelementptr inbounds float, float* %tmp6965, i64 1
+ %tmp6967 = getelementptr inbounds float, float* %tmp6966, i64 1
+ %tmp6968 = getelementptr inbounds float, float* %tmp6967, i64 1
+ %tmp6969 = getelementptr inbounds float, float* %tmp6968, i64 1
+ %tmp6970 = getelementptr inbounds float, float* %tmp6969, i64 1
+ %tmp6971 = getelementptr inbounds float, float* %tmp6970, i64 1
+ %tmp6972 = getelementptr inbounds float, float* %tmp6971, i64 1
+ %tmp6973 = getelementptr inbounds float, float* %tmp6972, i64 1
+ %tmp6974 = getelementptr inbounds float, float* %tmp6973, i64 1
+ %tmp6975 = getelementptr inbounds float, float* %tmp6974, i64 1
+ %tmp6976 = getelementptr inbounds float, float* %tmp6975, i64 1
+ %tmp6977 = getelementptr inbounds float, float* %tmp6976, i64 1
+ %tmp6978 = getelementptr inbounds float, float* %tmp6977, i64 1
+ %tmp6979 = getelementptr inbounds float, float* %tmp6978, i64 1
+ %tmp6980 = getelementptr inbounds float, float* %tmp6979, i64 1
+ %tmp6981 = getelementptr inbounds float, float* %tmp6980, i64 1
+ %tmp6982 = getelementptr inbounds float, float* %tmp6981, i64 1
+ %tmp6983 = getelementptr inbounds float, float* %tmp6982, i64 1
+ %tmp6984 = getelementptr inbounds float, float* %tmp6983, i64 1
+ %tmp6985 = getelementptr inbounds float, float* %tmp6984, i64 1
+ %tmp6986 = getelementptr inbounds float, float* %tmp6985, i64 1
+ %tmp6987 = getelementptr inbounds float, float* %tmp6986, i64 1
+ %tmp6988 = getelementptr inbounds float, float* %tmp6987, i64 1
+ %tmp6989 = getelementptr inbounds float, float* %tmp6988, i64 1
+ %tmp6990 = getelementptr inbounds float, float* %tmp6989, i64 1
+ %tmp6991 = getelementptr inbounds float, float* %tmp6990, i64 1
+ %tmp6992 = getelementptr inbounds float, float* %tmp6991, i64 1
+ %tmp6993 = getelementptr inbounds float, float* %tmp6992, i64 1
+ %tmp6994 = getelementptr inbounds float, float* %tmp6993, i64 1
+ %tmp6995 = getelementptr inbounds float, float* %tmp6994, i64 1
+ %tmp6996 = getelementptr inbounds float, float* %tmp6995, i64 1
+ %tmp6997 = getelementptr inbounds float, float* %tmp6996, i64 1
+ %tmp6998 = getelementptr inbounds float, float* %tmp6997, i64 1
+ %tmp6999 = getelementptr inbounds float, float* %tmp6998, i64 1
+ %tmp7000 = getelementptr inbounds float, float* %tmp6999, i64 1
+ %tmp7001 = getelementptr inbounds float, float* %tmp7000, i64 1
+ %tmp7002 = getelementptr inbounds float, float* %tmp7001, i64 1
+ %tmp7003 = getelementptr inbounds float, float* %tmp7002, i64 1
+ %tmp7004 = getelementptr inbounds float, float* %tmp7003, i64 1
+ %tmp7005 = getelementptr inbounds float, float* %tmp7004, i64 1
+ %tmp7006 = getelementptr inbounds float, float* %tmp7005, i64 1
+ %tmp7007 = getelementptr inbounds float, float* %tmp7006, i64 1
+ %tmp7008 = getelementptr inbounds float, float* %tmp7007, i64 1
+ %tmp7009 = getelementptr inbounds float, float* %tmp7008, i64 1
+ %tmp7010 = getelementptr inbounds float, float* %tmp7009, i64 1
+ %tmp7011 = getelementptr inbounds float, float* %tmp7010, i64 1
+ %tmp7012 = getelementptr inbounds float, float* %tmp7011, i64 1
+ %tmp7013 = getelementptr inbounds float, float* %tmp7012, i64 1
+ %tmp7014 = getelementptr inbounds float, float* %tmp7013, i64 1
+ %tmp7015 = getelementptr inbounds float, float* %tmp7014, i64 1
+ %tmp7016 = getelementptr inbounds float, float* %tmp7015, i64 1
+ %tmp7017 = getelementptr inbounds float, float* %tmp7016, i64 1
+ %tmp7018 = getelementptr inbounds float, float* %tmp7017, i64 1
+ %tmp7019 = getelementptr inbounds float, float* %tmp7018, i64 1
+ %tmp7020 = getelementptr inbounds float, float* %tmp7019, i64 1
+ %tmp7021 = getelementptr inbounds float, float* %tmp7020, i64 1
+ %tmp7022 = getelementptr inbounds float, float* %tmp7021, i64 1
+ %tmp7023 = getelementptr inbounds float, float* %tmp7022, i64 1
+ %tmp7024 = getelementptr inbounds float, float* %tmp7023, i64 1
+ %tmp7025 = getelementptr inbounds float, float* %tmp7024, i64 1
+ %tmp7026 = getelementptr inbounds float, float* %tmp7025, i64 1
+ %tmp7027 = getelementptr inbounds float, float* %tmp7026, i64 1
+ %tmp7028 = getelementptr inbounds float, float* %tmp7027, i64 1
+ %tmp7029 = getelementptr inbounds float, float* %tmp7028, i64 1
+ %tmp7030 = getelementptr inbounds float, float* %tmp7029, i64 1
+ %tmp7031 = getelementptr inbounds float, float* %tmp7030, i64 1
+ %tmp7032 = getelementptr inbounds float, float* %tmp7031, i64 1
+ %tmp7033 = getelementptr inbounds float, float* %tmp7032, i64 1
+ %tmp7034 = getelementptr inbounds float, float* %tmp7033, i64 1
+ %tmp7035 = getelementptr inbounds float, float* %tmp7034, i64 1
+ %tmp7036 = getelementptr inbounds float, float* %tmp7035, i64 1
+ %tmp7037 = getelementptr inbounds float, float* %tmp7036, i64 1
+ %tmp7038 = getelementptr inbounds float, float* %tmp7037, i64 1
+ %tmp7039 = getelementptr inbounds float, float* %tmp7038, i64 1
+ %tmp7040 = getelementptr inbounds float, float* %tmp7039, i64 1
+ %tmp7041 = getelementptr inbounds float, float* %tmp7040, i64 1
+ %tmp7042 = getelementptr inbounds float, float* %tmp7041, i64 1
+ %tmp7043 = getelementptr inbounds float, float* %tmp7042, i64 1
+ %tmp7044 = getelementptr inbounds float, float* %tmp7043, i64 1
+ %tmp7045 = getelementptr inbounds float, float* %tmp7044, i64 1
+ %tmp7046 = getelementptr inbounds float, float* %tmp7045, i64 1
+ %tmp7047 = getelementptr inbounds float, float* %tmp7046, i64 1
+ %tmp7048 = getelementptr inbounds float, float* %tmp7047, i64 1
+ %tmp7049 = getelementptr inbounds float, float* %tmp7048, i64 1
+ %tmp7050 = getelementptr inbounds float, float* %tmp7049, i64 1
+ %tmp7051 = getelementptr inbounds float, float* %tmp7050, i64 1
+ %tmp7052 = getelementptr inbounds float, float* %tmp7051, i64 1
+ %tmp7053 = getelementptr inbounds float, float* %tmp7052, i64 1
+ %tmp7054 = getelementptr inbounds float, float* %tmp7053, i64 1
+ %tmp7055 = getelementptr inbounds float, float* %tmp7054, i64 1
+ %tmp7056 = getelementptr inbounds float, float* %tmp7055, i64 1
+ %tmp7057 = getelementptr inbounds float, float* %tmp7056, i64 1
+ %tmp7058 = getelementptr inbounds float, float* %tmp7057, i64 1
+ %tmp7059 = getelementptr inbounds float, float* %tmp7058, i64 1
+ %tmp7060 = getelementptr inbounds float, float* %tmp7059, i64 1
+ %tmp7061 = getelementptr inbounds float, float* %tmp7060, i64 1
+ %tmp7062 = getelementptr inbounds float, float* %tmp7061, i64 1
+ %tmp7063 = getelementptr inbounds float, float* %tmp7062, i64 1
+ %tmp7064 = getelementptr inbounds float, float* %tmp7063, i64 1
+ %tmp7065 = getelementptr inbounds float, float* %tmp7064, i64 1
+ %tmp7066 = getelementptr inbounds float, float* %tmp7065, i64 1
+ %tmp7067 = getelementptr inbounds float, float* %tmp7066, i64 1
+ %tmp7068 = getelementptr inbounds float, float* %tmp7067, i64 1
+ %tmp7069 = getelementptr inbounds float, float* %tmp7068, i64 1
+ %tmp7070 = getelementptr inbounds float, float* %tmp7069, i64 1
+ %tmp7071 = getelementptr inbounds float, float* %tmp7070, i64 1
+ %tmp7072 = getelementptr inbounds float, float* %tmp7071, i64 1
+ %tmp7073 = getelementptr inbounds float, float* %tmp7072, i64 1
+ %tmp7074 = getelementptr inbounds float, float* %tmp7073, i64 1
+ %tmp7075 = getelementptr inbounds float, float* %tmp7074, i64 1
+ %tmp7076 = getelementptr inbounds float, float* %tmp7075, i64 1
+ %tmp7077 = getelementptr inbounds float, float* %tmp7076, i64 1
+ %tmp7078 = getelementptr inbounds float, float* %tmp7077, i64 1
+ %tmp7079 = getelementptr inbounds float, float* %tmp7078, i64 1
+ %tmp7080 = getelementptr inbounds float, float* %tmp7079, i64 1
+ %tmp7081 = getelementptr inbounds float, float* %tmp7080, i64 1
+ %tmp7082 = getelementptr inbounds float, float* %tmp7081, i64 1
+ %tmp7083 = getelementptr inbounds float, float* %tmp7082, i64 1
+ %tmp7084 = getelementptr inbounds float, float* %tmp7083, i64 1
+ %tmp7085 = getelementptr inbounds float, float* %tmp7084, i64 1
+ %tmp7086 = getelementptr inbounds float, float* %tmp7085, i64 1
+ %tmp7087 = getelementptr inbounds float, float* %tmp7086, i64 1
+ %tmp7088 = getelementptr inbounds float, float* %tmp7087, i64 1
+ %tmp7089 = getelementptr inbounds float, float* %tmp7088, i64 1
+ %tmp7090 = getelementptr inbounds float, float* %tmp7089, i64 1
+ %tmp7091 = getelementptr inbounds float, float* %tmp7090, i64 1
+ %tmp7092 = getelementptr inbounds float, float* %tmp7091, i64 1
+ %tmp7093 = getelementptr inbounds float, float* %tmp7092, i64 1
+ %tmp7094 = getelementptr inbounds float, float* %tmp7093, i64 1
+ %tmp7095 = getelementptr inbounds float, float* %tmp7094, i64 1
+ %tmp7096 = getelementptr inbounds float, float* %tmp7095, i64 1
+ %tmp7097 = getelementptr inbounds float, float* %tmp7096, i64 1
+ %tmp7098 = getelementptr inbounds float, float* %tmp7097, i64 1
+ %tmp7099 = getelementptr inbounds float, float* %tmp7098, i64 1
+ %tmp7100 = getelementptr inbounds float, float* %tmp7099, i64 1
+ %tmp7101 = getelementptr inbounds float, float* %tmp7100, i64 1
+ %tmp7102 = getelementptr inbounds float, float* %tmp7101, i64 1
+ %tmp7103 = getelementptr inbounds float, float* %tmp7102, i64 1
+ %tmp7104 = getelementptr inbounds float, float* %tmp7103, i64 1
+ %tmp7105 = getelementptr inbounds float, float* %tmp7104, i64 1
+ %tmp7106 = getelementptr inbounds float, float* %tmp7105, i64 1
+ %tmp7107 = getelementptr inbounds float, float* %tmp7106, i64 1
+ %tmp7108 = getelementptr inbounds float, float* %tmp7107, i64 1
+ %tmp7109 = getelementptr inbounds float, float* %tmp7108, i64 1
+ %tmp7110 = getelementptr inbounds float, float* %tmp7109, i64 1
+ %tmp7111 = getelementptr inbounds float, float* %tmp7110, i64 1
+ %tmp7112 = getelementptr inbounds float, float* %tmp7111, i64 1
+ %tmp7113 = getelementptr inbounds float, float* %tmp7112, i64 1
+ %tmp7114 = getelementptr inbounds float, float* %tmp7113, i64 1
+ %tmp7115 = getelementptr inbounds float, float* %tmp7114, i64 1
+ %tmp7116 = getelementptr inbounds float, float* %tmp7115, i64 1
+ %tmp7117 = getelementptr inbounds float, float* %tmp7116, i64 1
+ %tmp7118 = getelementptr inbounds float, float* %tmp7117, i64 1
+ %tmp7119 = getelementptr inbounds float, float* %tmp7118, i64 1
+ %tmp7120 = getelementptr inbounds float, float* %tmp7119, i64 1
+ %tmp7121 = getelementptr inbounds float, float* %tmp7120, i64 1
+ %tmp7122 = getelementptr inbounds float, float* %tmp7121, i64 1
+ %tmp7123 = getelementptr inbounds float, float* %tmp7122, i64 1
+ %tmp7124 = getelementptr inbounds float, float* %tmp7123, i64 1
+ %tmp7125 = getelementptr inbounds float, float* %tmp7124, i64 1
+ %tmp7126 = getelementptr inbounds float, float* %tmp7125, i64 1
+ %tmp7127 = getelementptr inbounds float, float* %tmp7126, i64 1
+ %tmp7128 = getelementptr inbounds float, float* %tmp7127, i64 1
+ %tmp7129 = getelementptr inbounds float, float* %tmp7128, i64 1
+ %tmp7130 = getelementptr inbounds float, float* %tmp7129, i64 1
+ %tmp7131 = getelementptr inbounds float, float* %tmp7130, i64 1
+ %tmp7132 = getelementptr inbounds float, float* %tmp7131, i64 1
+ %tmp7133 = getelementptr inbounds float, float* %tmp7132, i64 1
+ %tmp7134 = getelementptr inbounds float, float* %tmp7133, i64 1
+ %tmp7135 = getelementptr inbounds float, float* %tmp7134, i64 1
+ %tmp7136 = getelementptr inbounds float, float* %tmp7135, i64 1
+ %tmp7137 = getelementptr inbounds float, float* %tmp7136, i64 1
+ %tmp7138 = getelementptr inbounds float, float* %tmp7137, i64 1
+ %tmp7139 = getelementptr inbounds float, float* %tmp7138, i64 1
+ %tmp7140 = getelementptr inbounds float, float* %tmp7139, i64 1
+ %tmp7141 = getelementptr inbounds float, float* %tmp7140, i64 1
+ %tmp7142 = getelementptr inbounds float, float* %tmp7141, i64 1
+ %tmp7143 = getelementptr inbounds float, float* %tmp7142, i64 1
+ %tmp7144 = getelementptr inbounds float, float* %tmp7143, i64 1
+ %tmp7145 = getelementptr inbounds float, float* %tmp7144, i64 1
+ %tmp7146 = getelementptr inbounds float, float* %tmp7145, i64 1
+ %tmp7147 = getelementptr inbounds float, float* %tmp7146, i64 1
+ %tmp7148 = getelementptr inbounds float, float* %tmp7147, i64 1
+ %tmp7149 = getelementptr inbounds float, float* %tmp7148, i64 1
+ %tmp7150 = getelementptr inbounds float, float* %tmp7149, i64 1
+ %tmp7151 = getelementptr inbounds float, float* %tmp7150, i64 1
+ %tmp7152 = getelementptr inbounds float, float* %tmp7151, i64 1
+ %tmp7153 = getelementptr inbounds float, float* %tmp7152, i64 1
+ %tmp7154 = getelementptr inbounds float, float* %tmp7153, i64 1
+ %tmp7155 = getelementptr inbounds float, float* %tmp7154, i64 1
+ %tmp7156 = getelementptr inbounds float, float* %tmp7155, i64 1
+ %tmp7157 = getelementptr inbounds float, float* %tmp7156, i64 1
+ %tmp7158 = getelementptr inbounds float, float* %tmp7157, i64 1
+ %tmp7159 = getelementptr inbounds float, float* %tmp7158, i64 1
+ %tmp7160 = getelementptr inbounds float, float* %tmp7159, i64 1
+ %tmp7161 = getelementptr inbounds float, float* %tmp7160, i64 1
+ %tmp7162 = getelementptr inbounds float, float* %tmp7161, i64 1
+ %tmp7163 = getelementptr inbounds float, float* %tmp7162, i64 1
+ %tmp7164 = getelementptr inbounds float, float* %tmp7163, i64 1
+ %tmp7165 = getelementptr inbounds float, float* %tmp7164, i64 1
+ %tmp7166 = getelementptr inbounds float, float* %tmp7165, i64 1
+ %tmp7167 = getelementptr inbounds float, float* %tmp7166, i64 1
+ %tmp7168 = getelementptr inbounds float, float* %tmp7167, i64 1
+ %tmp7169 = getelementptr inbounds float, float* %tmp7168, i64 1
+ %tmp7170 = getelementptr inbounds float, float* %tmp7169, i64 1
+ %tmp7171 = getelementptr inbounds float, float* %tmp7170, i64 1
+ %tmp7172 = getelementptr inbounds float, float* %tmp7171, i64 1
+ %tmp7173 = getelementptr inbounds float, float* %tmp7172, i64 1
+ %tmp7174 = getelementptr inbounds float, float* %tmp7173, i64 1
+ %tmp7175 = getelementptr inbounds float, float* %tmp7174, i64 1
+ %tmp7176 = getelementptr inbounds float, float* %tmp7175, i64 1
+ %tmp7177 = getelementptr inbounds float, float* %tmp7176, i64 1
+ %tmp7178 = getelementptr inbounds float, float* %tmp7177, i64 1
+ %tmp7179 = getelementptr inbounds float, float* %tmp7178, i64 1
+ %tmp7180 = getelementptr inbounds float, float* %tmp7179, i64 1
+ %tmp7181 = getelementptr inbounds float, float* %tmp7180, i64 1
+ %tmp7182 = getelementptr inbounds float, float* %tmp7181, i64 1
+ %tmp7183 = getelementptr inbounds float, float* %tmp7182, i64 1
+ %tmp7184 = getelementptr inbounds float, float* %tmp7183, i64 1
+ %tmp7185 = getelementptr inbounds float, float* %tmp7184, i64 1
+ %tmp7186 = getelementptr inbounds float, float* %tmp7185, i64 1
+ %tmp7187 = getelementptr inbounds float, float* %tmp7186, i64 1
+ %tmp7188 = getelementptr inbounds float, float* %tmp7187, i64 1
+ %tmp7189 = getelementptr inbounds float, float* %tmp7188, i64 1
+ %tmp7190 = getelementptr inbounds float, float* %tmp7189, i64 1
+ %tmp7191 = getelementptr inbounds float, float* %tmp7190, i64 1
+ %tmp7192 = getelementptr inbounds float, float* %tmp7191, i64 1
+ %tmp7193 = getelementptr inbounds float, float* %tmp7192, i64 1
+ %tmp7194 = getelementptr inbounds float, float* %tmp7193, i64 1
+ %tmp7195 = getelementptr inbounds float, float* %tmp7194, i64 1
+ %tmp7196 = getelementptr inbounds float, float* %tmp7195, i64 1
+ %tmp7197 = getelementptr inbounds float, float* %tmp7196, i64 1
+ %tmp7198 = getelementptr inbounds float, float* %tmp7197, i64 1
+ %tmp7199 = getelementptr inbounds float, float* %tmp7198, i64 1
+ %tmp7200 = getelementptr inbounds float, float* %tmp7199, i64 1
+ %tmp7201 = getelementptr inbounds float, float* %tmp7200, i64 1
+ %tmp7202 = getelementptr inbounds float, float* %tmp7201, i64 1
+ %tmp7203 = getelementptr inbounds float, float* %tmp7202, i64 1
+ %tmp7204 = getelementptr inbounds float, float* %tmp7203, i64 1
+ %tmp7205 = getelementptr inbounds float, float* %tmp7204, i64 1
+ %tmp7206 = getelementptr inbounds float, float* %tmp7205, i64 1
+ %tmp7207 = getelementptr inbounds float, float* %tmp7206, i64 1
+ %tmp7208 = getelementptr inbounds float, float* %tmp7207, i64 1
+ %tmp7209 = getelementptr inbounds float, float* %tmp7208, i64 1
+ %tmp7210 = getelementptr inbounds float, float* %tmp7209, i64 1
+ %tmp7211 = getelementptr inbounds float, float* %tmp7210, i64 1
+ %tmp7212 = getelementptr inbounds float, float* %tmp7211, i64 1
+ %tmp7213 = getelementptr inbounds float, float* %tmp7212, i64 1
+ %tmp7214 = getelementptr inbounds float, float* %tmp7213, i64 1
+ %tmp7215 = getelementptr inbounds float, float* %tmp7214, i64 1
+ %tmp7216 = getelementptr inbounds float, float* %tmp7215, i64 1
+ %tmp7217 = getelementptr inbounds float, float* %tmp7216, i64 1
+ %tmp7218 = getelementptr inbounds float, float* %tmp7217, i64 1
+ %tmp7219 = getelementptr inbounds float, float* %tmp7218, i64 1
+ %tmp7220 = getelementptr inbounds float, float* %tmp7219, i64 1
+ %tmp7221 = getelementptr inbounds float, float* %tmp7220, i64 1
+ %tmp7222 = getelementptr inbounds float, float* %tmp7221, i64 1
+ %tmp7223 = getelementptr inbounds float, float* %tmp7222, i64 1
+ %tmp7224 = getelementptr inbounds float, float* %tmp7223, i64 1
+ %tmp7225 = getelementptr inbounds float, float* %tmp7224, i64 1
+ %tmp7226 = getelementptr inbounds float, float* %tmp7225, i64 1
+ %tmp7227 = getelementptr inbounds float, float* %tmp7226, i64 1
+ %tmp7228 = getelementptr inbounds float, float* %tmp7227, i64 1
+ %tmp7229 = getelementptr inbounds float, float* %tmp7228, i64 1
+ %tmp7230 = getelementptr inbounds float, float* %tmp7229, i64 1
+ %tmp7231 = getelementptr inbounds float, float* %tmp7230, i64 1
+ %tmp7232 = getelementptr inbounds float, float* %tmp7231, i64 1
+ %tmp7233 = getelementptr inbounds float, float* %tmp7232, i64 1
+ %tmp7234 = getelementptr inbounds float, float* %tmp7233, i64 1
+ %tmp7235 = getelementptr inbounds float, float* %tmp7234, i64 1
+ %tmp7236 = getelementptr inbounds float, float* %tmp7235, i64 1
+ %tmp7237 = getelementptr inbounds float, float* %tmp7236, i64 1
+ %tmp7238 = getelementptr inbounds float, float* %tmp7237, i64 1
+ %tmp7239 = getelementptr inbounds float, float* %tmp7238, i64 1
+ %tmp7240 = getelementptr inbounds float, float* %tmp7239, i64 1
+ %tmp7241 = getelementptr inbounds float, float* %tmp7240, i64 1
+ %tmp7242 = getelementptr inbounds float, float* %tmp7241, i64 1
+ %tmp7243 = getelementptr inbounds float, float* %tmp7242, i64 1
+ %tmp7244 = getelementptr inbounds float, float* %tmp7243, i64 1
+ %tmp7245 = getelementptr inbounds float, float* %tmp7244, i64 1
+ %tmp7246 = getelementptr inbounds float, float* %tmp7245, i64 1
+ %tmp7247 = getelementptr inbounds float, float* %tmp7246, i64 1
+ %tmp7248 = getelementptr inbounds float, float* %tmp7247, i64 1
+ %tmp7249 = getelementptr inbounds float, float* %tmp7248, i64 1
+ %tmp7250 = getelementptr inbounds float, float* %tmp7249, i64 1
+ %tmp7251 = getelementptr inbounds float, float* %tmp7250, i64 1
+ %tmp7252 = getelementptr inbounds float, float* %tmp7251, i64 1
+ %tmp7253 = getelementptr inbounds float, float* %tmp7252, i64 1
+ %tmp7254 = getelementptr inbounds float, float* %tmp7253, i64 1
+ %tmp7255 = getelementptr inbounds float, float* %tmp7254, i64 1
+ %tmp7256 = getelementptr inbounds float, float* %tmp7255, i64 1
+ %tmp7257 = getelementptr inbounds float, float* %tmp7256, i64 1
+ %tmp7258 = getelementptr inbounds float, float* %tmp7257, i64 1
+ %tmp7259 = getelementptr inbounds float, float* %tmp7258, i64 1
+ %tmp7260 = getelementptr inbounds float, float* %tmp7259, i64 1
+ %tmp7261 = getelementptr inbounds float, float* %tmp7260, i64 1
+ %tmp7262 = getelementptr inbounds float, float* %tmp7261, i64 1
+ %tmp7263 = getelementptr inbounds float, float* %tmp7262, i64 1
+ %tmp7264 = getelementptr inbounds float, float* %tmp7263, i64 1
+ %tmp7265 = getelementptr inbounds float, float* %tmp7264, i64 1
+ %tmp7266 = getelementptr inbounds float, float* %tmp7265, i64 1
+ %tmp7267 = getelementptr inbounds float, float* %tmp7266, i64 1
+ %tmp7268 = getelementptr inbounds float, float* %tmp7267, i64 1
+ %tmp7269 = getelementptr inbounds float, float* %tmp7268, i64 1
+ %tmp7270 = getelementptr inbounds float, float* %tmp7269, i64 1
+ %tmp7271 = getelementptr inbounds float, float* %tmp7270, i64 1
+ %tmp7272 = getelementptr inbounds float, float* %tmp7271, i64 1
+ %tmp7273 = getelementptr inbounds float, float* %tmp7272, i64 1
+ %tmp7274 = getelementptr inbounds float, float* %tmp7273, i64 1
+ %tmp7275 = getelementptr inbounds float, float* %tmp7274, i64 1
+ %tmp7276 = getelementptr inbounds float, float* %tmp7275, i64 1
+ %tmp7277 = getelementptr inbounds float, float* %tmp7276, i64 1
+ %tmp7278 = getelementptr inbounds float, float* %tmp7277, i64 1
+ %tmp7279 = getelementptr inbounds float, float* %tmp7278, i64 1
+ %tmp7280 = getelementptr inbounds float, float* %tmp7279, i64 1
+ %tmp7281 = getelementptr inbounds float, float* %tmp7280, i64 1
+ %tmp7282 = getelementptr inbounds float, float* %tmp7281, i64 1
+ %tmp7283 = getelementptr inbounds float, float* %tmp7282, i64 1
+ %tmp7284 = getelementptr inbounds float, float* %tmp7283, i64 1
+ %tmp7285 = getelementptr inbounds float, float* %tmp7284, i64 1
+ %tmp7286 = getelementptr inbounds float, float* %tmp7285, i64 1
+ %tmp7287 = getelementptr inbounds float, float* %tmp7286, i64 1
+ %tmp7288 = getelementptr inbounds float, float* %tmp7287, i64 1
+ %tmp7289 = getelementptr inbounds float, float* %tmp7288, i64 1
+ %tmp7290 = getelementptr inbounds float, float* %tmp7289, i64 1
+ %tmp7291 = getelementptr inbounds float, float* %tmp7290, i64 1
+ %tmp7292 = getelementptr inbounds float, float* %tmp7291, i64 1
+ %tmp7293 = getelementptr inbounds float, float* %tmp7292, i64 1
+ %tmp7294 = getelementptr inbounds float, float* %tmp7293, i64 1
+ %tmp7295 = getelementptr inbounds float, float* %tmp7294, i64 1
+ %tmp7296 = getelementptr inbounds float, float* %tmp7295, i64 1
+ %tmp7297 = getelementptr inbounds float, float* %tmp7296, i64 1
+ %tmp7298 = getelementptr inbounds float, float* %tmp7297, i64 1
+ %tmp7299 = getelementptr inbounds float, float* %tmp7298, i64 1
+ %tmp7300 = getelementptr inbounds float, float* %tmp7299, i64 1
+ %tmp7301 = getelementptr inbounds float, float* %tmp7300, i64 1
+ %tmp7302 = getelementptr inbounds float, float* %tmp7301, i64 1
+ %tmp7303 = getelementptr inbounds float, float* %tmp7302, i64 1
+ %tmp7304 = getelementptr inbounds float, float* %tmp7303, i64 1
+ %tmp7305 = getelementptr inbounds float, float* %tmp7304, i64 1
+ %tmp7306 = getelementptr inbounds float, float* %tmp7305, i64 1
+ %tmp7307 = getelementptr inbounds float, float* %tmp7306, i64 1
+ %tmp7308 = getelementptr inbounds float, float* %tmp7307, i64 1
+ %tmp7309 = getelementptr inbounds float, float* %tmp7308, i64 1
+ %tmp7310 = getelementptr inbounds float, float* %tmp7309, i64 1
+ %tmp7311 = getelementptr inbounds float, float* %tmp7310, i64 1
+ %tmp7312 = getelementptr inbounds float, float* %tmp7311, i64 1
+ %tmp7313 = getelementptr inbounds float, float* %tmp7312, i64 1
+ %tmp7314 = getelementptr inbounds float, float* %tmp7313, i64 1
+ %tmp7315 = getelementptr inbounds float, float* %tmp7314, i64 1
+ %tmp7316 = getelementptr inbounds float, float* %tmp7315, i64 1
+ %tmp7317 = getelementptr inbounds float, float* %tmp7316, i64 1
+ %tmp7318 = getelementptr inbounds float, float* %tmp7317, i64 1
+ %tmp7319 = getelementptr inbounds float, float* %tmp7318, i64 1
+ %tmp7320 = getelementptr inbounds float, float* %tmp7319, i64 1
+ %tmp7321 = getelementptr inbounds float, float* %tmp7320, i64 1
+ %tmp7322 = getelementptr inbounds float, float* %tmp7321, i64 1
+ %tmp7323 = getelementptr inbounds float, float* %tmp7322, i64 1
+ %tmp7324 = getelementptr inbounds float, float* %tmp7323, i64 1
+ %tmp7325 = getelementptr inbounds float, float* %tmp7324, i64 1
+ %tmp7326 = getelementptr inbounds float, float* %tmp7325, i64 1
+ %tmp7327 = getelementptr inbounds float, float* %tmp7326, i64 1
+ %tmp7328 = getelementptr inbounds float, float* %tmp7327, i64 1
+ %tmp7329 = getelementptr inbounds float, float* %tmp7328, i64 1
+ %tmp7330 = getelementptr inbounds float, float* %tmp7329, i64 1
+ %tmp7331 = getelementptr inbounds float, float* %tmp7330, i64 1
+ %tmp7332 = getelementptr inbounds float, float* %tmp7331, i64 1
+ %tmp7333 = getelementptr inbounds float, float* %tmp7332, i64 1
+ %tmp7334 = getelementptr inbounds float, float* %tmp7333, i64 1
+ %tmp7335 = getelementptr inbounds float, float* %tmp7334, i64 1
+ %tmp7336 = getelementptr inbounds float, float* %tmp7335, i64 1
+ %tmp7337 = getelementptr inbounds float, float* %tmp7336, i64 1
+ %tmp7338 = getelementptr inbounds float, float* %tmp7337, i64 1
+ %tmp7339 = getelementptr inbounds float, float* %tmp7338, i64 1
+ %tmp7340 = getelementptr inbounds float, float* %tmp7339, i64 1
+ %tmp7341 = getelementptr inbounds float, float* %tmp7340, i64 1
+ %tmp7342 = getelementptr inbounds float, float* %tmp7341, i64 1
+ %tmp7343 = getelementptr inbounds float, float* %tmp7342, i64 1
+ %tmp7344 = getelementptr inbounds float, float* %tmp7343, i64 1
+ %tmp7345 = getelementptr inbounds float, float* %tmp7344, i64 1
+ %tmp7346 = getelementptr inbounds float, float* %tmp7345, i64 1
+ %tmp7347 = getelementptr inbounds float, float* %tmp7346, i64 1
+ %tmp7348 = getelementptr inbounds float, float* %tmp7347, i64 1
+ %tmp7349 = getelementptr inbounds float, float* %tmp7348, i64 1
+ %tmp7350 = getelementptr inbounds float, float* %tmp7349, i64 1
+ %tmp7351 = getelementptr inbounds float, float* %tmp7350, i64 1
+ %tmp7352 = getelementptr inbounds float, float* %tmp7351, i64 1
+ %tmp7353 = getelementptr inbounds float, float* %tmp7352, i64 1
+ %tmp7354 = getelementptr inbounds float, float* %tmp7353, i64 1
+ %tmp7355 = getelementptr inbounds float, float* %tmp7354, i64 1
+ %tmp7356 = getelementptr inbounds float, float* %tmp7355, i64 1
+ %tmp7357 = getelementptr inbounds float, float* %tmp7356, i64 1
+ %tmp7358 = getelementptr inbounds float, float* %tmp7357, i64 1
+ %tmp7359 = getelementptr inbounds float, float* %tmp7358, i64 1
+ %tmp7360 = getelementptr inbounds float, float* %tmp7359, i64 1
+ %tmp7361 = getelementptr inbounds float, float* %tmp7360, i64 1
+ %tmp7362 = getelementptr inbounds float, float* %tmp7361, i64 1
+ %tmp7363 = getelementptr inbounds float, float* %tmp7362, i64 1
+ %tmp7364 = getelementptr inbounds float, float* %tmp7363, i64 1
+ %tmp7365 = getelementptr inbounds float, float* %tmp7364, i64 1
+ %tmp7366 = getelementptr inbounds float, float* %tmp7365, i64 1
+ %tmp7367 = getelementptr inbounds float, float* %tmp7366, i64 1
+ %tmp7368 = getelementptr inbounds float, float* %tmp7367, i64 1
+ %tmp7369 = getelementptr inbounds float, float* %tmp7368, i64 1
+ %tmp7370 = getelementptr inbounds float, float* %tmp7369, i64 1
+ %tmp7371 = getelementptr inbounds float, float* %tmp7370, i64 1
+ %tmp7372 = getelementptr inbounds float, float* %tmp7371, i64 1
+ %tmp7373 = getelementptr inbounds float, float* %tmp7372, i64 1
+ %tmp7374 = getelementptr inbounds float, float* %tmp7373, i64 1
+ %tmp7375 = getelementptr inbounds float, float* %tmp7374, i64 1
+ %tmp7376 = getelementptr inbounds float, float* %tmp7375, i64 1
+ %tmp7377 = getelementptr inbounds float, float* %tmp7376, i64 1
+ %tmp7378 = getelementptr inbounds float, float* %tmp7377, i64 1
+ %tmp7379 = getelementptr inbounds float, float* %tmp7378, i64 1
+ %tmp7380 = getelementptr inbounds float, float* %tmp7379, i64 1
+ %tmp7381 = getelementptr inbounds float, float* %tmp7380, i64 1
+ %tmp7382 = getelementptr inbounds float, float* %tmp7381, i64 1
+ %tmp7383 = getelementptr inbounds float, float* %tmp7382, i64 1
+ %tmp7384 = getelementptr inbounds float, float* %tmp7383, i64 1
+ %tmp7385 = getelementptr inbounds float, float* %tmp7384, i64 1
+ %tmp7386 = getelementptr inbounds float, float* %tmp7385, i64 1
+ %tmp7387 = getelementptr inbounds float, float* %tmp7386, i64 1
+ %tmp7388 = getelementptr inbounds float, float* %tmp7387, i64 1
+ %tmp7389 = getelementptr inbounds float, float* %tmp7388, i64 1
+ %tmp7390 = getelementptr inbounds float, float* %tmp7389, i64 1
+ %tmp7391 = getelementptr inbounds float, float* %tmp7390, i64 1
+ %tmp7392 = getelementptr inbounds float, float* %tmp7391, i64 1
+ %tmp7393 = getelementptr inbounds float, float* %tmp7392, i64 1
+ %tmp7394 = getelementptr inbounds float, float* %tmp7393, i64 1
+ %tmp7395 = getelementptr inbounds float, float* %tmp7394, i64 1
+ %tmp7396 = getelementptr inbounds float, float* %tmp7395, i64 1
+ %tmp7397 = getelementptr inbounds float, float* %tmp7396, i64 1
+ %tmp7398 = getelementptr inbounds float, float* %tmp7397, i64 1
+ %tmp7399 = getelementptr inbounds float, float* %tmp7398, i64 1
+ %tmp7400 = getelementptr inbounds float, float* %tmp7399, i64 1
+ %tmp7401 = getelementptr inbounds float, float* %tmp7400, i64 1
+ %tmp7402 = getelementptr inbounds float, float* %tmp7401, i64 1
+ %tmp7403 = getelementptr inbounds float, float* %tmp7402, i64 1
+ %tmp7404 = getelementptr inbounds float, float* %tmp7403, i64 1
+ %tmp7405 = getelementptr inbounds float, float* %tmp7404, i64 1
+ %tmp7406 = getelementptr inbounds float, float* %tmp7405, i64 1
+ %tmp7407 = getelementptr inbounds float, float* %tmp7406, i64 1
+ %tmp7408 = getelementptr inbounds float, float* %tmp7407, i64 1
+ %tmp7409 = getelementptr inbounds float, float* %tmp7408, i64 1
+ %tmp7410 = getelementptr inbounds float, float* %tmp7409, i64 1
+ %tmp7411 = getelementptr inbounds float, float* %tmp7410, i64 1
+ %tmp7412 = getelementptr inbounds float, float* %tmp7411, i64 1
+ %tmp7413 = getelementptr inbounds float, float* %tmp7412, i64 1
+ %tmp7414 = getelementptr inbounds float, float* %tmp7413, i64 1
+ %tmp7415 = getelementptr inbounds float, float* %tmp7414, i64 1
+ %tmp7416 = getelementptr inbounds float, float* %tmp7415, i64 1
+ %tmp7417 = getelementptr inbounds float, float* %tmp7416, i64 1
+ %tmp7418 = getelementptr inbounds float, float* %tmp7417, i64 1
+ %tmp7419 = getelementptr inbounds float, float* %tmp7418, i64 1
+ %tmp7420 = getelementptr inbounds float, float* %tmp7419, i64 1
+ %tmp7421 = getelementptr inbounds float, float* %tmp7420, i64 1
+ %tmp7422 = getelementptr inbounds float, float* %tmp7421, i64 1
+ %tmp7423 = getelementptr inbounds float, float* %tmp7422, i64 1
+ %tmp7424 = getelementptr inbounds float, float* %tmp7423, i64 1
+ %tmp7425 = getelementptr inbounds float, float* %tmp7424, i64 1
+ %tmp7426 = getelementptr inbounds float, float* %tmp7425, i64 1
+ %tmp7427 = getelementptr inbounds float, float* %tmp7426, i64 1
+ %tmp7428 = getelementptr inbounds float, float* %tmp7427, i64 1
+ %tmp7429 = getelementptr inbounds float, float* %tmp7428, i64 1
+ %tmp7430 = getelementptr inbounds float, float* %tmp7429, i64 1
+ %tmp7431 = getelementptr inbounds float, float* %tmp7430, i64 1
+ %tmp7432 = getelementptr inbounds float, float* %tmp7431, i64 1
+ %tmp7433 = getelementptr inbounds float, float* %tmp7432, i64 1
+ %tmp7434 = getelementptr inbounds float, float* %tmp7433, i64 1
+ %tmp7435 = getelementptr inbounds float, float* %tmp7434, i64 1
+ %tmp7436 = getelementptr inbounds float, float* %tmp7435, i64 1
+ %tmp7437 = getelementptr inbounds float, float* %tmp7436, i64 1
+ %tmp7438 = getelementptr inbounds float, float* %tmp7437, i64 1
+ %tmp7439 = getelementptr inbounds float, float* %tmp7438, i64 1
+ %tmp7440 = getelementptr inbounds float, float* %tmp7439, i64 1
+ %tmp7441 = getelementptr inbounds float, float* %tmp7440, i64 1
+ %tmp7442 = getelementptr inbounds float, float* %tmp7441, i64 1
+ %tmp7443 = getelementptr inbounds float, float* %tmp7442, i64 1
+ %tmp7444 = getelementptr inbounds float, float* %tmp7443, i64 1
+ %tmp7445 = getelementptr inbounds float, float* %tmp7444, i64 1
+ %tmp7446 = getelementptr inbounds float, float* %tmp7445, i64 1
+ %tmp7447 = getelementptr inbounds float, float* %tmp7446, i64 1
+ %tmp7448 = getelementptr inbounds float, float* %tmp7447, i64 1
+ %tmp7449 = getelementptr inbounds float, float* %tmp7448, i64 1
+ %tmp7450 = getelementptr inbounds float, float* %tmp7449, i64 1
+ %tmp7451 = getelementptr inbounds float, float* %tmp7450, i64 1
+ %tmp7452 = getelementptr inbounds float, float* %tmp7451, i64 1
+ %tmp7453 = getelementptr inbounds float, float* %tmp7452, i64 1
+ %tmp7454 = getelementptr inbounds float, float* %tmp7453, i64 1
+ %tmp7455 = getelementptr inbounds float, float* %tmp7454, i64 1
+ %tmp7456 = getelementptr inbounds float, float* %tmp7455, i64 1
+ %tmp7457 = getelementptr inbounds float, float* %tmp7456, i64 1
+ %tmp7458 = getelementptr inbounds float, float* %tmp7457, i64 1
+ %tmp7459 = getelementptr inbounds float, float* %tmp7458, i64 1
+ %tmp7460 = getelementptr inbounds float, float* %tmp7459, i64 1
+ %tmp7461 = getelementptr inbounds float, float* %tmp7460, i64 1
+ %tmp7462 = getelementptr inbounds float, float* %tmp7461, i64 1
+ %tmp7463 = getelementptr inbounds float, float* %tmp7462, i64 1
+ %tmp7464 = getelementptr inbounds float, float* %tmp7463, i64 1
+ %tmp7465 = getelementptr inbounds float, float* %tmp7464, i64 1
+ %tmp7466 = getelementptr inbounds float, float* %tmp7465, i64 1
+ %tmp7467 = getelementptr inbounds float, float* %tmp7466, i64 1
+ %tmp7468 = getelementptr inbounds float, float* %tmp7467, i64 1
+ %tmp7469 = getelementptr inbounds float, float* %tmp7468, i64 1
+ %tmp7470 = getelementptr inbounds float, float* %tmp7469, i64 1
+ %tmp7471 = getelementptr inbounds float, float* %tmp7470, i64 1
+ %tmp7472 = getelementptr inbounds float, float* %tmp7471, i64 1
+ %tmp7473 = getelementptr inbounds float, float* %tmp7472, i64 1
+ %tmp7474 = getelementptr inbounds float, float* %tmp7473, i64 1
+ %tmp7475 = getelementptr inbounds float, float* %tmp7474, i64 1
+ %tmp7476 = getelementptr inbounds float, float* %tmp7475, i64 1
+ %tmp7477 = getelementptr inbounds float, float* %tmp7476, i64 1
+ %tmp7478 = getelementptr inbounds float, float* %tmp7477, i64 1
+ %tmp7479 = getelementptr inbounds float, float* %tmp7478, i64 1
+ %tmp7480 = getelementptr inbounds float, float* %tmp7479, i64 1
+ %tmp7481 = getelementptr inbounds float, float* %tmp7480, i64 1
+ %tmp7482 = getelementptr inbounds float, float* %tmp7481, i64 1
+ %tmp7483 = getelementptr inbounds float, float* %tmp7482, i64 1
+ %tmp7484 = getelementptr inbounds float, float* %tmp7483, i64 1
+ %tmp7485 = getelementptr inbounds float, float* %tmp7484, i64 1
+ %tmp7486 = getelementptr inbounds float, float* %tmp7485, i64 1
+ %tmp7487 = getelementptr inbounds float, float* %tmp7486, i64 1
+ %tmp7488 = getelementptr inbounds float, float* %tmp7487, i64 1
+ %tmp7489 = getelementptr inbounds float, float* %tmp7488, i64 1
+ %tmp7490 = getelementptr inbounds float, float* %tmp7489, i64 1
+ %tmp7491 = getelementptr inbounds float, float* %tmp7490, i64 1
+ %tmp7492 = getelementptr inbounds float, float* %tmp7491, i64 1
+ %tmp7493 = getelementptr inbounds float, float* %tmp7492, i64 1
+ %tmp7494 = getelementptr inbounds float, float* %tmp7493, i64 1
+ %tmp7495 = getelementptr inbounds float, float* %tmp7494, i64 1
+ %tmp7496 = getelementptr inbounds float, float* %tmp7495, i64 1
+ %tmp7497 = getelementptr inbounds float, float* %tmp7496, i64 1
+ %tmp7498 = getelementptr inbounds float, float* %tmp7497, i64 1
+ %tmp7499 = getelementptr inbounds float, float* %tmp7498, i64 1
+ %tmp7500 = getelementptr inbounds float, float* %tmp7499, i64 1
+ %tmp7501 = getelementptr inbounds float, float* %tmp7500, i64 1
+ %tmp7502 = getelementptr inbounds float, float* %tmp7501, i64 1
+ %tmp7503 = getelementptr inbounds float, float* %tmp7502, i64 1
+ %tmp7504 = getelementptr inbounds float, float* %tmp7503, i64 1
+ %tmp7505 = getelementptr inbounds float, float* %tmp7504, i64 1
+ %tmp7506 = getelementptr inbounds float, float* %tmp7505, i64 1
+ %tmp7507 = getelementptr inbounds float, float* %tmp7506, i64 1
+ %tmp7508 = getelementptr inbounds float, float* %tmp7507, i64 1
+ %tmp7509 = getelementptr inbounds float, float* %tmp7508, i64 1
+ %tmp7510 = getelementptr inbounds float, float* %tmp7509, i64 1
+ %tmp7511 = getelementptr inbounds float, float* %tmp7510, i64 1
+ %tmp7512 = getelementptr inbounds float, float* %tmp7511, i64 1
+ %tmp7513 = getelementptr inbounds float, float* %tmp7512, i64 1
+ %tmp7514 = getelementptr inbounds float, float* %tmp7513, i64 1
+ %tmp7515 = getelementptr inbounds float, float* %tmp7514, i64 1
+ %tmp7516 = getelementptr inbounds float, float* %tmp7515, i64 1
+ %tmp7517 = getelementptr inbounds float, float* %tmp7516, i64 1
+ %tmp7518 = getelementptr inbounds float, float* %tmp7517, i64 1
+ %tmp7519 = getelementptr inbounds float, float* %tmp7518, i64 1
+ %tmp7520 = getelementptr inbounds float, float* %tmp7519, i64 1
+ %tmp7521 = getelementptr inbounds float, float* %tmp7520, i64 1
+ %tmp7522 = getelementptr inbounds float, float* %tmp7521, i64 1
+ %tmp7523 = getelementptr inbounds float, float* %tmp7522, i64 1
+ %tmp7524 = getelementptr inbounds float, float* %tmp7523, i64 1
+ %tmp7525 = getelementptr inbounds float, float* %tmp7524, i64 1
+ %tmp7526 = getelementptr inbounds float, float* %tmp7525, i64 1
+ %tmp7527 = getelementptr inbounds float, float* %tmp7526, i64 1
+ %tmp7528 = getelementptr inbounds float, float* %tmp7527, i64 1
+ %tmp7529 = getelementptr inbounds float, float* %tmp7528, i64 1
+ %tmp7530 = getelementptr inbounds float, float* %tmp7529, i64 1
+ %tmp7531 = getelementptr inbounds float, float* %tmp7530, i64 1
+ %tmp7532 = getelementptr inbounds float, float* %tmp7531, i64 1
+ %tmp7533 = getelementptr inbounds float, float* %tmp7532, i64 1
+ %tmp7534 = getelementptr inbounds float, float* %tmp7533, i64 1
+ %tmp7535 = getelementptr inbounds float, float* %tmp7534, i64 1
+ %tmp7536 = getelementptr inbounds float, float* %tmp7535, i64 1
+ %tmp7537 = getelementptr inbounds float, float* %tmp7536, i64 1
+ %tmp7538 = getelementptr inbounds float, float* %tmp7537, i64 1
+ %tmp7539 = getelementptr inbounds float, float* %tmp7538, i64 1
+ %tmp7540 = getelementptr inbounds float, float* %tmp7539, i64 1
+ %tmp7541 = getelementptr inbounds float, float* %tmp7540, i64 1
+ %tmp7542 = getelementptr inbounds float, float* %tmp7541, i64 1
+ %tmp7543 = getelementptr inbounds float, float* %tmp7542, i64 1
+ %tmp7544 = getelementptr inbounds float, float* %tmp7543, i64 1
+ %tmp7545 = getelementptr inbounds float, float* %tmp7544, i64 1
+ %tmp7546 = getelementptr inbounds float, float* %tmp7545, i64 1
+ %tmp7547 = getelementptr inbounds float, float* %tmp7546, i64 1
+ %tmp7548 = getelementptr inbounds float, float* %tmp7547, i64 1
+ %tmp7549 = getelementptr inbounds float, float* %tmp7548, i64 1
+ %tmp7550 = getelementptr inbounds float, float* %tmp7549, i64 1
+ %tmp7551 = getelementptr inbounds float, float* %tmp7550, i64 1
+ %tmp7552 = getelementptr inbounds float, float* %tmp7551, i64 1
+ %tmp7553 = getelementptr inbounds float, float* %tmp7552, i64 1
+ %tmp7554 = getelementptr inbounds float, float* %tmp7553, i64 1
+ %tmp7555 = getelementptr inbounds float, float* %tmp7554, i64 1
+ %tmp7556 = getelementptr inbounds float, float* %tmp7555, i64 1
+ %tmp7557 = getelementptr inbounds float, float* %tmp7556, i64 1
+ %tmp7558 = getelementptr inbounds float, float* %tmp7557, i64 1
+ %tmp7559 = getelementptr inbounds float, float* %tmp7558, i64 1
+ %tmp7560 = getelementptr inbounds float, float* %tmp7559, i64 1
+ %tmp7561 = getelementptr inbounds float, float* %tmp7560, i64 1
+ %tmp7562 = getelementptr inbounds float, float* %tmp7561, i64 1
+ %tmp7563 = getelementptr inbounds float, float* %tmp7562, i64 1
+ %tmp7564 = getelementptr inbounds float, float* %tmp7563, i64 1
+ %tmp7565 = getelementptr inbounds float, float* %tmp7564, i64 1
+ %tmp7566 = getelementptr inbounds float, float* %tmp7565, i64 1
+ %tmp7567 = getelementptr inbounds float, float* %tmp7566, i64 1
+ %tmp7568 = getelementptr inbounds float, float* %tmp7567, i64 1
+ %tmp7569 = getelementptr inbounds float, float* %tmp7568, i64 1
+ %tmp7570 = getelementptr inbounds float, float* %tmp7569, i64 1
+ %tmp7571 = getelementptr inbounds float, float* %tmp7570, i64 1
+ %tmp7572 = getelementptr inbounds float, float* %tmp7571, i64 1
+ %tmp7573 = getelementptr inbounds float, float* %tmp7572, i64 1
+ %tmp7574 = getelementptr inbounds float, float* %tmp7573, i64 1
+ %tmp7575 = getelementptr inbounds float, float* %tmp7574, i64 1
+ %tmp7576 = getelementptr inbounds float, float* %tmp7575, i64 1
+ %tmp7577 = getelementptr inbounds float, float* %tmp7576, i64 1
+ %tmp7578 = getelementptr inbounds float, float* %tmp7577, i64 1
+ %tmp7579 = getelementptr inbounds float, float* %tmp7578, i64 1
+ %tmp7580 = getelementptr inbounds float, float* %tmp7579, i64 1
+ %tmp7581 = getelementptr inbounds float, float* %tmp7580, i64 1
+ %tmp7582 = getelementptr inbounds float, float* %tmp7581, i64 1
+ %tmp7583 = getelementptr inbounds float, float* %tmp7582, i64 1
+ %tmp7584 = getelementptr inbounds float, float* %tmp7583, i64 1
+ %tmp7585 = getelementptr inbounds float, float* %tmp7584, i64 1
+ %tmp7586 = getelementptr inbounds float, float* %tmp7585, i64 1
+ %tmp7587 = getelementptr inbounds float, float* %tmp7586, i64 1
+ %tmp7588 = getelementptr inbounds float, float* %tmp7587, i64 1
+ %tmp7589 = getelementptr inbounds float, float* %tmp7588, i64 1
+ %tmp7590 = getelementptr inbounds float, float* %tmp7589, i64 1
+ %tmp7591 = getelementptr inbounds float, float* %tmp7590, i64 1
+ %tmp7592 = getelementptr inbounds float, float* %tmp7591, i64 1
+ %tmp7593 = getelementptr inbounds float, float* %tmp7592, i64 1
+ %tmp7594 = getelementptr inbounds float, float* %tmp7593, i64 1
+ %tmp7595 = getelementptr inbounds float, float* %tmp7594, i64 1
+ %tmp7596 = getelementptr inbounds float, float* %tmp7595, i64 1
+ %tmp7597 = getelementptr inbounds float, float* %tmp7596, i64 1
+ %tmp7598 = getelementptr inbounds float, float* %tmp7597, i64 1
+ %tmp7599 = getelementptr inbounds float, float* %tmp7598, i64 1
+ %tmp7600 = getelementptr inbounds float, float* %tmp7599, i64 1
+ %tmp7601 = getelementptr inbounds float, float* %tmp7600, i64 1
+ %tmp7602 = getelementptr inbounds float, float* %tmp7601, i64 1
+ %tmp7603 = getelementptr inbounds float, float* %tmp7602, i64 1
+ %tmp7604 = getelementptr inbounds float, float* %tmp7603, i64 1
+ %tmp7605 = getelementptr inbounds float, float* %tmp7604, i64 1
+ %tmp7606 = getelementptr inbounds float, float* %tmp7605, i64 1
+ %tmp7607 = getelementptr inbounds float, float* %tmp7606, i64 1
+ %tmp7608 = getelementptr inbounds float, float* %tmp7607, i64 1
+ %tmp7609 = getelementptr inbounds float, float* %tmp7608, i64 1
+ %tmp7610 = getelementptr inbounds float, float* %tmp7609, i64 1
+ %tmp7611 = getelementptr inbounds float, float* %tmp7610, i64 1
+ %tmp7612 = getelementptr inbounds float, float* %tmp7611, i64 1
+ %tmp7613 = getelementptr inbounds float, float* %tmp7612, i64 1
+ %tmp7614 = getelementptr inbounds float, float* %tmp7613, i64 1
+ %tmp7615 = getelementptr inbounds float, float* %tmp7614, i64 1
+ %tmp7616 = getelementptr inbounds float, float* %tmp7615, i64 1
+ %tmp7617 = getelementptr inbounds float, float* %tmp7616, i64 1
+ %tmp7618 = getelementptr inbounds float, float* %tmp7617, i64 1
+ %tmp7619 = getelementptr inbounds float, float* %tmp7618, i64 1
+ %tmp7620 = getelementptr inbounds float, float* %tmp7619, i64 1
+ %tmp7621 = getelementptr inbounds float, float* %tmp7620, i64 1
+ %tmp7622 = getelementptr inbounds float, float* %tmp7621, i64 1
+ %tmp7623 = getelementptr inbounds float, float* %tmp7622, i64 1
+ %tmp7624 = getelementptr inbounds float, float* %tmp7623, i64 1
+ %tmp7625 = getelementptr inbounds float, float* %tmp7624, i64 1
+ %tmp7626 = getelementptr inbounds float, float* %tmp7625, i64 1
+ %tmp7627 = getelementptr inbounds float, float* %tmp7626, i64 1
+ %tmp7628 = getelementptr inbounds float, float* %tmp7627, i64 1
+ %tmp7629 = getelementptr inbounds float, float* %tmp7628, i64 1
+ %tmp7630 = getelementptr inbounds float, float* %tmp7629, i64 1
+ %tmp7631 = getelementptr inbounds float, float* %tmp7630, i64 1
+ %tmp7632 = getelementptr inbounds float, float* %tmp7631, i64 1
+ %tmp7633 = getelementptr inbounds float, float* %tmp7632, i64 1
+ %tmp7634 = getelementptr inbounds float, float* %tmp7633, i64 1
+ %tmp7635 = getelementptr inbounds float, float* %tmp7634, i64 1
+ %tmp7636 = getelementptr inbounds float, float* %tmp7635, i64 1
+ %tmp7637 = getelementptr inbounds float, float* %tmp7636, i64 1
+ %tmp7638 = getelementptr inbounds float, float* %tmp7637, i64 1
+ %tmp7639 = getelementptr inbounds float, float* %tmp7638, i64 1
+ %tmp7640 = getelementptr inbounds float, float* %tmp7639, i64 1
+ %tmp7641 = getelementptr inbounds float, float* %tmp7640, i64 1
+ %tmp7642 = getelementptr inbounds float, float* %tmp7641, i64 1
+ %tmp7643 = getelementptr inbounds float, float* %tmp7642, i64 1
+ %tmp7644 = getelementptr inbounds float, float* %tmp7643, i64 1
+ %tmp7645 = getelementptr inbounds float, float* %tmp7644, i64 1
+ %tmp7646 = getelementptr inbounds float, float* %tmp7645, i64 1
+ %tmp7647 = getelementptr inbounds float, float* %tmp7646, i64 1
+ %tmp7648 = getelementptr inbounds float, float* %tmp7647, i64 1
+ %tmp7649 = getelementptr inbounds float, float* %tmp7648, i64 1
+ %tmp7650 = getelementptr inbounds float, float* %tmp7649, i64 1
+ %tmp7651 = getelementptr inbounds float, float* %tmp7650, i64 1
+ %tmp7652 = getelementptr inbounds float, float* %tmp7651, i64 1
+ %tmp7653 = getelementptr inbounds float, float* %tmp7652, i64 1
+ %tmp7654 = getelementptr inbounds float, float* %tmp7653, i64 1
+ %tmp7655 = getelementptr inbounds float, float* %tmp7654, i64 1
+ %tmp7656 = getelementptr inbounds float, float* %tmp7655, i64 1
+ %tmp7657 = getelementptr inbounds float, float* %tmp7656, i64 1
+ %tmp7658 = getelementptr inbounds float, float* %tmp7657, i64 1
+ %tmp7659 = getelementptr inbounds float, float* %tmp7658, i64 1
+ %tmp7660 = getelementptr inbounds float, float* %tmp7659, i64 1
+ %tmp7661 = getelementptr inbounds float, float* %tmp7660, i64 1
+ %tmp7662 = getelementptr inbounds float, float* %tmp7661, i64 1
+ %tmp7663 = getelementptr inbounds float, float* %tmp7662, i64 1
+ %tmp7664 = getelementptr inbounds float, float* %tmp7663, i64 1
+ %tmp7665 = getelementptr inbounds float, float* %tmp7664, i64 1
+ %tmp7666 = getelementptr inbounds float, float* %tmp7665, i64 1
+ %tmp7667 = getelementptr inbounds float, float* %tmp7666, i64 1
+ %tmp7668 = getelementptr inbounds float, float* %tmp7667, i64 1
+ %tmp7669 = getelementptr inbounds float, float* %tmp7668, i64 1
+ %tmp7670 = getelementptr inbounds float, float* %tmp7669, i64 1
+ %tmp7671 = getelementptr inbounds float, float* %tmp7670, i64 1
+ %tmp7672 = getelementptr inbounds float, float* %tmp7671, i64 1
+ %tmp7673 = getelementptr inbounds float, float* %tmp7672, i64 1
+ %tmp7674 = getelementptr inbounds float, float* %tmp7673, i64 1
+ %tmp7675 = getelementptr inbounds float, float* %tmp7674, i64 1
+ %tmp7676 = getelementptr inbounds float, float* %tmp7675, i64 1
+ %tmp7677 = getelementptr inbounds float, float* %tmp7676, i64 1
+ %tmp7678 = getelementptr inbounds float, float* %tmp7677, i64 1
+ %tmp7679 = getelementptr inbounds float, float* %tmp7678, i64 1
+ %tmp7680 = getelementptr inbounds float, float* %tmp7679, i64 1
+ %tmp7681 = getelementptr inbounds float, float* %tmp7680, i64 1
+ %tmp7682 = getelementptr inbounds float, float* %tmp7681, i64 1
+ %tmp7683 = getelementptr inbounds float, float* %tmp7682, i64 1
+ %tmp7684 = getelementptr inbounds float, float* %tmp7683, i64 1
+ %tmp7685 = getelementptr inbounds float, float* %tmp7684, i64 1
+ %tmp7686 = getelementptr inbounds float, float* %tmp7685, i64 1
+ %tmp7687 = getelementptr inbounds float, float* %tmp7686, i64 1
+ %tmp7688 = getelementptr inbounds float, float* %tmp7687, i64 1
+ %tmp7689 = getelementptr inbounds float, float* %tmp7688, i64 1
+ %tmp7690 = getelementptr inbounds float, float* %tmp7689, i64 1
+ %tmp7691 = getelementptr inbounds float, float* %tmp7690, i64 1
+ %tmp7692 = getelementptr inbounds float, float* %tmp7691, i64 1
+ %tmp7693 = getelementptr inbounds float, float* %tmp7692, i64 1
+ %tmp7694 = getelementptr inbounds float, float* %tmp7693, i64 1
+ %tmp7695 = getelementptr inbounds float, float* %tmp7694, i64 1
+ %tmp7696 = getelementptr inbounds float, float* %tmp7695, i64 1
+ %tmp7697 = getelementptr inbounds float, float* %tmp7696, i64 1
+ %tmp7698 = getelementptr inbounds float, float* %tmp7697, i64 1
+ %tmp7699 = getelementptr inbounds float, float* %tmp7698, i64 1
+ %tmp7700 = getelementptr inbounds float, float* %tmp7699, i64 1
+ %tmp7701 = getelementptr inbounds float, float* %tmp7700, i64 1
+ %tmp7702 = getelementptr inbounds float, float* %tmp7701, i64 1
+ %tmp7703 = getelementptr inbounds float, float* %tmp7702, i64 1
+ %tmp7704 = getelementptr inbounds float, float* %tmp7703, i64 1
+ %tmp7705 = getelementptr inbounds float, float* %tmp7704, i64 1
+ %tmp7706 = getelementptr inbounds float, float* %tmp7705, i64 1
+ %tmp7707 = getelementptr inbounds float, float* %tmp7706, i64 1
+ %tmp7708 = getelementptr inbounds float, float* %tmp7707, i64 1
+ %tmp7709 = getelementptr inbounds float, float* %tmp7708, i64 1
+ %tmp7710 = getelementptr inbounds float, float* %tmp7709, i64 1
+ %tmp7711 = getelementptr inbounds float, float* %tmp7710, i64 1
+ %tmp7712 = getelementptr inbounds float, float* %tmp7711, i64 1
+ %tmp7713 = getelementptr inbounds float, float* %tmp7712, i64 1
+ %tmp7714 = getelementptr inbounds float, float* %tmp7713, i64 1
+ %tmp7715 = getelementptr inbounds float, float* %tmp7714, i64 1
+ %tmp7716 = getelementptr inbounds float, float* %tmp7715, i64 1
+ %tmp7717 = getelementptr inbounds float, float* %tmp7716, i64 1
+ %tmp7718 = getelementptr inbounds float, float* %tmp7717, i64 1
+ %tmp7719 = getelementptr inbounds float, float* %tmp7718, i64 1
+ %tmp7720 = getelementptr inbounds float, float* %tmp7719, i64 1
+ %tmp7721 = getelementptr inbounds float, float* %tmp7720, i64 1
+ %tmp7722 = getelementptr inbounds float, float* %tmp7721, i64 1
+ %tmp7723 = getelementptr inbounds float, float* %tmp7722, i64 1
+ %tmp7724 = getelementptr inbounds float, float* %tmp7723, i64 1
+ %tmp7725 = getelementptr inbounds float, float* %tmp7724, i64 1
+ %tmp7726 = getelementptr inbounds float, float* %tmp7725, i64 1
+ %tmp7727 = getelementptr inbounds float, float* %tmp7726, i64 1
+ %tmp7728 = getelementptr inbounds float, float* %tmp7727, i64 1
+ %tmp7729 = getelementptr inbounds float, float* %tmp7728, i64 1
+ %tmp7730 = getelementptr inbounds float, float* %tmp7729, i64 1
+ %tmp7731 = getelementptr inbounds float, float* %tmp7730, i64 1
+ %tmp7732 = getelementptr inbounds float, float* %tmp7731, i64 1
+ %tmp7733 = getelementptr inbounds float, float* %tmp7732, i64 1
+ %tmp7734 = getelementptr inbounds float, float* %tmp7733, i64 1
+ %tmp7735 = getelementptr inbounds float, float* %tmp7734, i64 1
+ %tmp7736 = getelementptr inbounds float, float* %tmp7735, i64 1
+ %tmp7737 = getelementptr inbounds float, float* %tmp7736, i64 1
+ %tmp7738 = getelementptr inbounds float, float* %tmp7737, i64 1
+ %tmp7739 = getelementptr inbounds float, float* %tmp7738, i64 1
+ %tmp7740 = getelementptr inbounds float, float* %tmp7739, i64 1
+ %tmp7741 = getelementptr inbounds float, float* %tmp7740, i64 1
+ %tmp7742 = getelementptr inbounds float, float* %tmp7741, i64 1
+ %tmp7743 = getelementptr inbounds float, float* %tmp7742, i64 1
+ %tmp7744 = getelementptr inbounds float, float* %tmp7743, i64 1
+ %tmp7745 = getelementptr inbounds float, float* %tmp7744, i64 1
+ %tmp7746 = getelementptr inbounds float, float* %tmp7745, i64 1
+ %tmp7747 = getelementptr inbounds float, float* %tmp7746, i64 1
+ %tmp7748 = getelementptr inbounds float, float* %tmp7747, i64 1
+ %tmp7749 = getelementptr inbounds float, float* %tmp7748, i64 1
+ %tmp7750 = getelementptr inbounds float, float* %tmp7749, i64 1
+ %tmp7751 = getelementptr inbounds float, float* %tmp7750, i64 1
+ %tmp7752 = getelementptr inbounds float, float* %tmp7751, i64 1
+ %tmp7753 = getelementptr inbounds float, float* %tmp7752, i64 1
+ %tmp7754 = getelementptr inbounds float, float* %tmp7753, i64 1
+ %tmp7755 = getelementptr inbounds float, float* %tmp7754, i64 1
+ %tmp7756 = getelementptr inbounds float, float* %tmp7755, i64 1
+ %tmp7757 = getelementptr inbounds float, float* %tmp7756, i64 1
+ %tmp7758 = getelementptr inbounds float, float* %tmp7757, i64 1
+ %tmp7759 = getelementptr inbounds float, float* %tmp7758, i64 1
+ %tmp7760 = getelementptr inbounds float, float* %tmp7759, i64 1
+ %tmp7761 = getelementptr inbounds float, float* %tmp7760, i64 1
+ %tmp7762 = getelementptr inbounds float, float* %tmp7761, i64 1
+ %tmp7763 = getelementptr inbounds float, float* %tmp7762, i64 1
+ %tmp7764 = getelementptr inbounds float, float* %tmp7763, i64 1
+ %tmp7765 = getelementptr inbounds float, float* %tmp7764, i64 1
+ %tmp7766 = getelementptr inbounds float, float* %tmp7765, i64 1
+ %tmp7767 = getelementptr inbounds float, float* %tmp7766, i64 1
+ %tmp7768 = getelementptr inbounds float, float* %tmp7767, i64 1
+ %tmp7769 = getelementptr inbounds float, float* %tmp7768, i64 1
+ %tmp7770 = getelementptr inbounds float, float* %tmp7769, i64 1
+ %tmp7771 = getelementptr inbounds float, float* %tmp7770, i64 1
+ %tmp7772 = getelementptr inbounds float, float* %tmp7771, i64 1
+ %tmp7773 = getelementptr inbounds float, float* %tmp7772, i64 1
+ %tmp7774 = getelementptr inbounds float, float* %tmp7773, i64 1
+ %tmp7775 = getelementptr inbounds float, float* %tmp7774, i64 1
+ %tmp7776 = getelementptr inbounds float, float* %tmp7775, i64 1
+ %tmp7777 = getelementptr inbounds float, float* %tmp7776, i64 1
+ %tmp7778 = getelementptr inbounds float, float* %tmp7777, i64 1
+ %tmp7779 = getelementptr inbounds float, float* %tmp7778, i64 1
+ %tmp7780 = getelementptr inbounds float, float* %tmp7779, i64 1
+ %tmp7781 = getelementptr inbounds float, float* %tmp7780, i64 1
+ %tmp7782 = getelementptr inbounds float, float* %tmp7781, i64 1
+ %tmp7783 = getelementptr inbounds float, float* %tmp7782, i64 1
+ %tmp7784 = getelementptr inbounds float, float* %tmp7783, i64 1
+ %tmp7785 = getelementptr inbounds float, float* %tmp7784, i64 1
+ %tmp7786 = getelementptr inbounds float, float* %tmp7785, i64 1
+ %tmp7787 = getelementptr inbounds float, float* %tmp7786, i64 1
+ %tmp7788 = getelementptr inbounds float, float* %tmp7787, i64 1
+ %tmp7789 = getelementptr inbounds float, float* %tmp7788, i64 1
+ %tmp7790 = getelementptr inbounds float, float* %tmp7789, i64 1
+ %tmp7791 = getelementptr inbounds float, float* %tmp7790, i64 1
+ %tmp7792 = getelementptr inbounds float, float* %tmp7791, i64 1
+ %tmp7793 = getelementptr inbounds float, float* %tmp7792, i64 1
+ %tmp7794 = getelementptr inbounds float, float* %tmp7793, i64 1
+ %tmp7795 = getelementptr inbounds float, float* %tmp7794, i64 1
+ %tmp7796 = getelementptr inbounds float, float* %tmp7795, i64 1
+ %tmp7797 = getelementptr inbounds float, float* %tmp7796, i64 1
+ %tmp7798 = getelementptr inbounds float, float* %tmp7797, i64 1
+ %tmp7799 = getelementptr inbounds float, float* %tmp7798, i64 1
+ %tmp7800 = getelementptr inbounds float, float* %tmp7799, i64 1
+ %tmp7801 = getelementptr inbounds float, float* %tmp7800, i64 1
+ %tmp7802 = getelementptr inbounds float, float* %tmp7801, i64 1
+ %tmp7803 = getelementptr inbounds float, float* %tmp7802, i64 1
+ %tmp7804 = getelementptr inbounds float, float* %tmp7803, i64 1
+ %tmp7805 = getelementptr inbounds float, float* %tmp7804, i64 1
+ %tmp7806 = getelementptr inbounds float, float* %tmp7805, i64 1
+ %tmp7807 = getelementptr inbounds float, float* %tmp7806, i64 1
+ %tmp7808 = getelementptr inbounds float, float* %tmp7807, i64 1
+ %tmp7809 = getelementptr inbounds float, float* %tmp7808, i64 1
+ %tmp7810 = getelementptr inbounds float, float* %tmp7809, i64 1
+ %tmp7811 = getelementptr inbounds float, float* %tmp7810, i64 1
+ %tmp7812 = getelementptr inbounds float, float* %tmp7811, i64 1
+ %tmp7813 = getelementptr inbounds float, float* %tmp7812, i64 1
+ %tmp7814 = getelementptr inbounds float, float* %tmp7813, i64 1
+ %tmp7815 = getelementptr inbounds float, float* %tmp7814, i64 1
+ %tmp7816 = getelementptr inbounds float, float* %tmp7815, i64 1
+ %tmp7817 = getelementptr inbounds float, float* %tmp7816, i64 1
+ %tmp7818 = getelementptr inbounds float, float* %tmp7817, i64 1
+ %tmp7819 = getelementptr inbounds float, float* %tmp7818, i64 1
+ %tmp7820 = getelementptr inbounds float, float* %tmp7819, i64 1
+ %tmp7821 = getelementptr inbounds float, float* %tmp7820, i64 1
+ %tmp7822 = getelementptr inbounds float, float* %tmp7821, i64 1
+ %tmp7823 = getelementptr inbounds float, float* %tmp7822, i64 1
+ %tmp7824 = getelementptr inbounds float, float* %tmp7823, i64 1
+ %tmp7825 = getelementptr inbounds float, float* %tmp7824, i64 1
+ %tmp7826 = getelementptr inbounds float, float* %tmp7825, i64 1
+ %tmp7827 = getelementptr inbounds float, float* %tmp7826, i64 1
+ %tmp7828 = getelementptr inbounds float, float* %tmp7827, i64 1
+ %tmp7829 = getelementptr inbounds float, float* %tmp7828, i64 1
+ %tmp7830 = getelementptr inbounds float, float* %tmp7829, i64 1
+ %tmp7831 = getelementptr inbounds float, float* %tmp7830, i64 1
+ %tmp7832 = getelementptr inbounds float, float* %tmp7831, i64 1
+ %tmp7833 = getelementptr inbounds float, float* %tmp7832, i64 1
+ %tmp7834 = getelementptr inbounds float, float* %tmp7833, i64 1
+ %tmp7835 = getelementptr inbounds float, float* %tmp7834, i64 1
+ %tmp7836 = getelementptr inbounds float, float* %tmp7835, i64 1
+ %tmp7837 = getelementptr inbounds float, float* %tmp7836, i64 1
+ %tmp7838 = getelementptr inbounds float, float* %tmp7837, i64 1
+ %tmp7839 = getelementptr inbounds float, float* %tmp7838, i64 1
+ %tmp7840 = getelementptr inbounds float, float* %tmp7839, i64 1
+ %tmp7841 = getelementptr inbounds float, float* %tmp7840, i64 1
+ %tmp7842 = getelementptr inbounds float, float* %tmp7841, i64 1
+ %tmp7843 = getelementptr inbounds float, float* %tmp7842, i64 1
+ %tmp7844 = getelementptr inbounds float, float* %tmp7843, i64 1
+ %tmp7845 = getelementptr inbounds float, float* %tmp7844, i64 1
+ %tmp7846 = getelementptr inbounds float, float* %tmp7845, i64 1
+ %tmp7847 = getelementptr inbounds float, float* %tmp7846, i64 1
+ %tmp7848 = getelementptr inbounds float, float* %tmp7847, i64 1
+ %tmp7849 = getelementptr inbounds float, float* %tmp7848, i64 1
+ %tmp7850 = getelementptr inbounds float, float* %tmp7849, i64 1
+ %tmp7851 = getelementptr inbounds float, float* %tmp7850, i64 1
+ %tmp7852 = getelementptr inbounds float, float* %tmp7851, i64 1
+ %tmp7853 = getelementptr inbounds float, float* %tmp7852, i64 1
+ %tmp7854 = getelementptr inbounds float, float* %tmp7853, i64 1
+ %tmp7855 = getelementptr inbounds float, float* %tmp7854, i64 1
+ %tmp7856 = getelementptr inbounds float, float* %tmp7855, i64 1
+ %tmp7857 = getelementptr inbounds float, float* %tmp7856, i64 1
+ %tmp7858 = getelementptr inbounds float, float* %tmp7857, i64 1
+ %tmp7859 = getelementptr inbounds float, float* %tmp7858, i64 1
+ %tmp7860 = getelementptr inbounds float, float* %tmp7859, i64 1
+ %tmp7861 = getelementptr inbounds float, float* %tmp7860, i64 1
+ %tmp7862 = getelementptr inbounds float, float* %tmp7861, i64 1
+ %tmp7863 = getelementptr inbounds float, float* %tmp7862, i64 1
+ %tmp7864 = getelementptr inbounds float, float* %tmp7863, i64 1
+ %tmp7865 = getelementptr inbounds float, float* %tmp7864, i64 1
+ %tmp7866 = getelementptr inbounds float, float* %tmp7865, i64 1
+ %tmp7867 = getelementptr inbounds float, float* %tmp7866, i64 1
+ %tmp7868 = getelementptr inbounds float, float* %tmp7867, i64 1
+ %tmp7869 = getelementptr inbounds float, float* %tmp7868, i64 1
+ %tmp7870 = getelementptr inbounds float, float* %tmp7869, i64 1
+ %tmp7871 = getelementptr inbounds float, float* %tmp7870, i64 1
+ %tmp7872 = getelementptr inbounds float, float* %tmp7871, i64 1
+ %tmp7873 = getelementptr inbounds float, float* %tmp7872, i64 1
+ %tmp7874 = getelementptr inbounds float, float* %tmp7873, i64 1
+ %tmp7875 = getelementptr inbounds float, float* %tmp7874, i64 1
+ %tmp7876 = getelementptr inbounds float, float* %tmp7875, i64 1
+ %tmp7877 = getelementptr inbounds float, float* %tmp7876, i64 1
+ %tmp7878 = getelementptr inbounds float, float* %tmp7877, i64 1
+ %tmp7879 = getelementptr inbounds float, float* %tmp7878, i64 1
+ %tmp7880 = getelementptr inbounds float, float* %tmp7879, i64 1
+ %tmp7881 = getelementptr inbounds float, float* %tmp7880, i64 1
+ %tmp7882 = getelementptr inbounds float, float* %tmp7881, i64 1
+ %tmp7883 = getelementptr inbounds float, float* %tmp7882, i64 1
+ %tmp7884 = getelementptr inbounds float, float* %tmp7883, i64 1
+ %tmp7885 = getelementptr inbounds float, float* %tmp7884, i64 1
+ %tmp7886 = getelementptr inbounds float, float* %tmp7885, i64 1
+ %tmp7887 = getelementptr inbounds float, float* %tmp7886, i64 1
+ %tmp7888 = getelementptr inbounds float, float* %tmp7887, i64 1
+ %tmp7889 = getelementptr inbounds float, float* %tmp7888, i64 1
+ %tmp7890 = getelementptr inbounds float, float* %tmp7889, i64 1
+ %tmp7891 = getelementptr inbounds float, float* %tmp7890, i64 1
+ %tmp7892 = getelementptr inbounds float, float* %tmp7891, i64 1
+ %tmp7893 = getelementptr inbounds float, float* %tmp7892, i64 1
+ %tmp7894 = getelementptr inbounds float, float* %tmp7893, i64 1
+ %tmp7895 = getelementptr inbounds float, float* %tmp7894, i64 1
+ %tmp7896 = getelementptr inbounds float, float* %tmp7895, i64 1
+ %tmp7897 = getelementptr inbounds float, float* %tmp7896, i64 1
+ %tmp7898 = getelementptr inbounds float, float* %tmp7897, i64 1
+ %tmp7899 = getelementptr inbounds float, float* %tmp7898, i64 1
+ %tmp7900 = getelementptr inbounds float, float* %tmp7899, i64 1
+ %tmp7901 = getelementptr inbounds float, float* %tmp7900, i64 1
+ %tmp7902 = getelementptr inbounds float, float* %tmp7901, i64 1
+ %tmp7903 = getelementptr inbounds float, float* %tmp7902, i64 1
+ %tmp7904 = getelementptr inbounds float, float* %tmp7903, i64 1
+ %tmp7905 = getelementptr inbounds float, float* %tmp7904, i64 1
+ %tmp7906 = getelementptr inbounds float, float* %tmp7905, i64 1
+ %tmp7907 = getelementptr inbounds float, float* %tmp7906, i64 1
+ %tmp7908 = getelementptr inbounds float, float* %tmp7907, i64 1
+ %tmp7909 = getelementptr inbounds float, float* %tmp7908, i64 1
+ %tmp7910 = getelementptr inbounds float, float* %tmp7909, i64 1
+ %tmp7911 = getelementptr inbounds float, float* %tmp7910, i64 1
+ %tmp7912 = getelementptr inbounds float, float* %tmp7911, i64 1
+ %tmp7913 = getelementptr inbounds float, float* %tmp7912, i64 1
+ %tmp7914 = getelementptr inbounds float, float* %tmp7913, i64 1
+ %tmp7915 = getelementptr inbounds float, float* %tmp7914, i64 1
+ %tmp7916 = getelementptr inbounds float, float* %tmp7915, i64 1
+ %tmp7917 = getelementptr inbounds float, float* %tmp7916, i64 1
+ %tmp7918 = getelementptr inbounds float, float* %tmp7917, i64 1
+ %tmp7919 = getelementptr inbounds float, float* %tmp7918, i64 1
+ %tmp7920 = getelementptr inbounds float, float* %tmp7919, i64 1
+ %tmp7921 = getelementptr inbounds float, float* %tmp7920, i64 1
+ %tmp7922 = getelementptr inbounds float, float* %tmp7921, i64 1
+ %tmp7923 = getelementptr inbounds float, float* %tmp7922, i64 1
+ %tmp7924 = getelementptr inbounds float, float* %tmp7923, i64 1
+ %tmp7925 = getelementptr inbounds float, float* %tmp7924, i64 1
+ %tmp7926 = getelementptr inbounds float, float* %tmp7925, i64 1
+ %tmp7927 = getelementptr inbounds float, float* %tmp7926, i64 1
+ %tmp7928 = getelementptr inbounds float, float* %tmp7927, i64 1
+ %tmp7929 = getelementptr inbounds float, float* %tmp7928, i64 1
+ %tmp7930 = getelementptr inbounds float, float* %tmp7929, i64 1
+ %tmp7931 = getelementptr inbounds float, float* %tmp7930, i64 1
+ %tmp7932 = getelementptr inbounds float, float* %tmp7931, i64 1
+ %tmp7933 = getelementptr inbounds float, float* %tmp7932, i64 1
+ %tmp7934 = getelementptr inbounds float, float* %tmp7933, i64 1
+ %tmp7935 = getelementptr inbounds float, float* %tmp7934, i64 1
+ %tmp7936 = getelementptr inbounds float, float* %tmp7935, i64 1
+ %tmp7937 = getelementptr inbounds float, float* %tmp7936, i64 1
+ %tmp7938 = getelementptr inbounds float, float* %tmp7937, i64 1
+ %tmp7939 = getelementptr inbounds float, float* %tmp7938, i64 1
+ %tmp7940 = getelementptr inbounds float, float* %tmp7939, i64 1
+ %tmp7941 = getelementptr inbounds float, float* %tmp7940, i64 1
+ %tmp7942 = getelementptr inbounds float, float* %tmp7941, i64 1
+ %tmp7943 = getelementptr inbounds float, float* %tmp7942, i64 1
+ %tmp7944 = getelementptr inbounds float, float* %tmp7943, i64 1
+ %tmp7945 = getelementptr inbounds float, float* %tmp7944, i64 1
+ %tmp7946 = getelementptr inbounds float, float* %tmp7945, i64 1
+ %tmp7947 = getelementptr inbounds float, float* %tmp7946, i64 1
+ %tmp7948 = getelementptr inbounds float, float* %tmp7947, i64 1
+ %tmp7949 = getelementptr inbounds float, float* %tmp7948, i64 1
+ %tmp7950 = getelementptr inbounds float, float* %tmp7949, i64 1
+ %tmp7951 = getelementptr inbounds float, float* %tmp7950, i64 1
+ %tmp7952 = getelementptr inbounds float, float* %tmp7951, i64 1
+ %tmp7953 = getelementptr inbounds float, float* %tmp7952, i64 1
+ %tmp7954 = getelementptr inbounds float, float* %tmp7953, i64 1
+ %tmp7955 = getelementptr inbounds float, float* %tmp7954, i64 1
+ %tmp7956 = getelementptr inbounds float, float* %tmp7955, i64 1
+ %tmp7957 = getelementptr inbounds float, float* %tmp7956, i64 1
+ %tmp7958 = getelementptr inbounds float, float* %tmp7957, i64 1
+ %tmp7959 = getelementptr inbounds float, float* %tmp7958, i64 1
+ %tmp7960 = getelementptr inbounds float, float* %tmp7959, i64 1
+ %tmp7961 = getelementptr inbounds float, float* %tmp7960, i64 1
+ %tmp7962 = getelementptr inbounds float, float* %tmp7961, i64 1
+ %tmp7963 = getelementptr inbounds float, float* %tmp7962, i64 1
+ %tmp7964 = getelementptr inbounds float, float* %tmp7963, i64 1
+ %tmp7965 = getelementptr inbounds float, float* %tmp7964, i64 1
+ %tmp7966 = getelementptr inbounds float, float* %tmp7965, i64 1
+ %tmp7967 = getelementptr inbounds float, float* %tmp7966, i64 1
+ %tmp7968 = getelementptr inbounds float, float* %tmp7967, i64 1
+ %tmp7969 = getelementptr inbounds float, float* %tmp7968, i64 1
+ %tmp7970 = getelementptr inbounds float, float* %tmp7969, i64 1
+ %tmp7971 = getelementptr inbounds float, float* %tmp7970, i64 1
+ %tmp7972 = getelementptr inbounds float, float* %tmp7971, i64 1
+ %tmp7973 = getelementptr inbounds float, float* %tmp7972, i64 1
+ %tmp7974 = getelementptr inbounds float, float* %tmp7973, i64 1
+ %tmp7975 = getelementptr inbounds float, float* %tmp7974, i64 1
+ %tmp7976 = getelementptr inbounds float, float* %tmp7975, i64 1
+ %tmp7977 = getelementptr inbounds float, float* %tmp7976, i64 1
+ %tmp7978 = getelementptr inbounds float, float* %tmp7977, i64 1
+ %tmp7979 = getelementptr inbounds float, float* %tmp7978, i64 1
+ %tmp7980 = getelementptr inbounds float, float* %tmp7979, i64 1
+ %tmp7981 = getelementptr inbounds float, float* %tmp7980, i64 1
+ %tmp7982 = getelementptr inbounds float, float* %tmp7981, i64 1
+ %tmp7983 = getelementptr inbounds float, float* %tmp7982, i64 1
+ %tmp7984 = getelementptr inbounds float, float* %tmp7983, i64 1
+ %tmp7985 = getelementptr inbounds float, float* %tmp7984, i64 1
+ %tmp7986 = getelementptr inbounds float, float* %tmp7985, i64 1
+ %tmp7987 = getelementptr inbounds float, float* %tmp7986, i64 1
+ %tmp7988 = getelementptr inbounds float, float* %tmp7987, i64 1
+ %tmp7989 = getelementptr inbounds float, float* %tmp7988, i64 1
+ %tmp7990 = getelementptr inbounds float, float* %tmp7989, i64 1
+ %tmp7991 = getelementptr inbounds float, float* %tmp7990, i64 1
+ %tmp7992 = getelementptr inbounds float, float* %tmp7991, i64 1
+ %tmp7993 = getelementptr inbounds float, float* %tmp7992, i64 1
+ %tmp7994 = getelementptr inbounds float, float* %tmp7993, i64 1
+ %tmp7995 = getelementptr inbounds float, float* %tmp7994, i64 1
+ %tmp7996 = getelementptr inbounds float, float* %tmp7995, i64 1
+ %tmp7997 = getelementptr inbounds float, float* %tmp7996, i64 1
+ %tmp7998 = getelementptr inbounds float, float* %tmp7997, i64 1
+ %tmp7999 = getelementptr inbounds float, float* %tmp7998, i64 1
+ %tmp8000 = getelementptr inbounds float, float* %tmp7999, i64 1
+ %tmp8001 = getelementptr inbounds float, float* %tmp8000, i64 1
+ %tmp8002 = getelementptr inbounds float, float* %tmp8001, i64 1
+ %tmp8003 = getelementptr inbounds float, float* %tmp8002, i64 1
+ %tmp8004 = getelementptr inbounds float, float* %tmp8003, i64 1
+ %tmp8005 = getelementptr inbounds float, float* %tmp8004, i64 1
+ %tmp8006 = getelementptr inbounds float, float* %tmp8005, i64 1
+ %tmp8007 = getelementptr inbounds float, float* %tmp8006, i64 1
+ %tmp8008 = getelementptr inbounds float, float* %tmp8007, i64 1
+ %tmp8009 = getelementptr inbounds float, float* %tmp8008, i64 1
+ %tmp8010 = getelementptr inbounds float, float* %tmp8009, i64 1
+ %tmp8011 = getelementptr inbounds float, float* %tmp8010, i64 1
+ %tmp8012 = getelementptr inbounds float, float* %tmp8011, i64 1
+ %tmp8013 = getelementptr inbounds float, float* %tmp8012, i64 1
+ %tmp8014 = getelementptr inbounds float, float* %tmp8013, i64 1
+ %tmp8015 = getelementptr inbounds float, float* %tmp8014, i64 1
+ %tmp8016 = getelementptr inbounds float, float* %tmp8015, i64 1
+ %tmp8017 = getelementptr inbounds float, float* %tmp8016, i64 1
+ %tmp8018 = getelementptr inbounds float, float* %tmp8017, i64 1
+ %tmp8019 = getelementptr inbounds float, float* %tmp8018, i64 1
+ %tmp8020 = getelementptr inbounds float, float* %tmp8019, i64 1
+ %tmp8021 = getelementptr inbounds float, float* %tmp8020, i64 1
+ %tmp8022 = getelementptr inbounds float, float* %tmp8021, i64 1
+ %tmp8023 = getelementptr inbounds float, float* %tmp8022, i64 1
+ %tmp8024 = getelementptr inbounds float, float* %tmp8023, i64 1
+ %tmp8025 = getelementptr inbounds float, float* %tmp8024, i64 1
+ %tmp8026 = getelementptr inbounds float, float* %tmp8025, i64 1
+ %tmp8027 = getelementptr inbounds float, float* %tmp8026, i64 1
+ %tmp8028 = getelementptr inbounds float, float* %tmp8027, i64 1
+ %tmp8029 = getelementptr inbounds float, float* %tmp8028, i64 1
+ %tmp8030 = getelementptr inbounds float, float* %tmp8029, i64 1
+ %tmp8031 = getelementptr inbounds float, float* %tmp8030, i64 1
+ %tmp8032 = getelementptr inbounds float, float* %tmp8031, i64 1
+ %tmp8033 = getelementptr inbounds float, float* %tmp8032, i64 1
+ %tmp8034 = getelementptr inbounds float, float* %tmp8033, i64 1
+ %tmp8035 = getelementptr inbounds float, float* %tmp8034, i64 1
+ %tmp8036 = getelementptr inbounds float, float* %tmp8035, i64 1
+ %tmp8037 = getelementptr inbounds float, float* %tmp8036, i64 1
+ %tmp8038 = getelementptr inbounds float, float* %tmp8037, i64 1
+ %tmp8039 = getelementptr inbounds float, float* %tmp8038, i64 1
+ %tmp8040 = getelementptr inbounds float, float* %tmp8039, i64 1
+ %tmp8041 = getelementptr inbounds float, float* %tmp8040, i64 1
+ %tmp8042 = getelementptr inbounds float, float* %tmp8041, i64 1
+ %tmp8043 = getelementptr inbounds float, float* %tmp8042, i64 1
+ %tmp8044 = getelementptr inbounds float, float* %tmp8043, i64 1
+ %tmp8045 = getelementptr inbounds float, float* %tmp8044, i64 1
+ %tmp8046 = getelementptr inbounds float, float* %tmp8045, i64 1
+ %tmp8047 = getelementptr inbounds float, float* %tmp8046, i64 1
+ %tmp8048 = getelementptr inbounds float, float* %tmp8047, i64 1
+ %tmp8049 = getelementptr inbounds float, float* %tmp8048, i64 1
+ %tmp8050 = getelementptr inbounds float, float* %tmp8049, i64 1
+ %tmp8051 = getelementptr inbounds float, float* %tmp8050, i64 1
+ %tmp8052 = getelementptr inbounds float, float* %tmp8051, i64 1
+ %tmp8053 = getelementptr inbounds float, float* %tmp8052, i64 1
+ %tmp8054 = getelementptr inbounds float, float* %tmp8053, i64 1
+ %tmp8055 = getelementptr inbounds float, float* %tmp8054, i64 1
+ %tmp8056 = getelementptr inbounds float, float* %tmp8055, i64 1
+ %tmp8057 = getelementptr inbounds float, float* %tmp8056, i64 1
+ %tmp8058 = getelementptr inbounds float, float* %tmp8057, i64 1
+ %tmp8059 = getelementptr inbounds float, float* %tmp8058, i64 1
+ %tmp8060 = getelementptr inbounds float, float* %tmp8059, i64 1
+ %tmp8061 = getelementptr inbounds float, float* %tmp8060, i64 1
+ %tmp8062 = getelementptr inbounds float, float* %tmp8061, i64 1
+ %tmp8063 = getelementptr inbounds float, float* %tmp8062, i64 1
+ %tmp8064 = getelementptr inbounds float, float* %tmp8063, i64 1
+ %tmp8065 = getelementptr inbounds float, float* %tmp8064, i64 1
+ %tmp8066 = getelementptr inbounds float, float* %tmp8065, i64 1
+ %tmp8067 = getelementptr inbounds float, float* %tmp8066, i64 1
+ %tmp8068 = getelementptr inbounds float, float* %tmp8067, i64 1
+ %tmp8069 = getelementptr inbounds float, float* %tmp8068, i64 1
+ %tmp8070 = getelementptr inbounds float, float* %tmp8069, i64 1
+ %tmp8071 = getelementptr inbounds float, float* %tmp8070, i64 1
+ %tmp8072 = getelementptr inbounds float, float* %tmp8071, i64 1
+ %tmp8073 = getelementptr inbounds float, float* %tmp8072, i64 1
+ %tmp8074 = getelementptr inbounds float, float* %tmp8073, i64 1
+ %tmp8075 = getelementptr inbounds float, float* %tmp8074, i64 1
+ %tmp8076 = getelementptr inbounds float, float* %tmp8075, i64 1
+ %tmp8077 = getelementptr inbounds float, float* %tmp8076, i64 1
+ %tmp8078 = getelementptr inbounds float, float* %tmp8077, i64 1
+ %tmp8079 = getelementptr inbounds float, float* %tmp8078, i64 1
+ %tmp8080 = getelementptr inbounds float, float* %tmp8079, i64 1
+ %tmp8081 = getelementptr inbounds float, float* %tmp8080, i64 1
+ %tmp8082 = getelementptr inbounds float, float* %tmp8081, i64 1
+ %tmp8083 = getelementptr inbounds float, float* %tmp8082, i64 1
+ %tmp8084 = getelementptr inbounds float, float* %tmp8083, i64 1
+ %tmp8085 = getelementptr inbounds float, float* %tmp8084, i64 1
+ %tmp8086 = getelementptr inbounds float, float* %tmp8085, i64 1
+ %tmp8087 = getelementptr inbounds float, float* %tmp8086, i64 1
+ %tmp8088 = getelementptr inbounds float, float* %tmp8087, i64 1
+ %tmp8089 = getelementptr inbounds float, float* %tmp8088, i64 1
+ %tmp8090 = getelementptr inbounds float, float* %tmp8089, i64 1
+ %tmp8091 = getelementptr inbounds float, float* %tmp8090, i64 1
+ %tmp8092 = getelementptr inbounds float, float* %tmp8091, i64 1
+ %tmp8093 = getelementptr inbounds float, float* %tmp8092, i64 1
+ %tmp8094 = getelementptr inbounds float, float* %tmp8093, i64 1
+ %tmp8095 = getelementptr inbounds float, float* %tmp8094, i64 1
+ %tmp8096 = getelementptr inbounds float, float* %tmp8095, i64 1
+ %tmp8097 = getelementptr inbounds float, float* %tmp8096, i64 1
+ %tmp8098 = getelementptr inbounds float, float* %tmp8097, i64 1
+ %tmp8099 = getelementptr inbounds float, float* %tmp8098, i64 1
+ %tmp8100 = getelementptr inbounds float, float* %tmp8099, i64 1
+ %tmp8101 = getelementptr inbounds float, float* %tmp8100, i64 1
+ %tmp8102 = getelementptr inbounds float, float* %tmp8101, i64 1
+ %tmp8103 = getelementptr inbounds float, float* %tmp8102, i64 1
+ %tmp8104 = getelementptr inbounds float, float* %tmp8103, i64 1
+ %tmp8105 = getelementptr inbounds float, float* %tmp8104, i64 1
+ %tmp8106 = getelementptr inbounds float, float* %tmp8105, i64 1
+ %tmp8107 = getelementptr inbounds float, float* %tmp8106, i64 1
+ %tmp8108 = getelementptr inbounds float, float* %tmp8107, i64 1
+ %tmp8109 = getelementptr inbounds float, float* %tmp8108, i64 1
+ %tmp8110 = getelementptr inbounds float, float* %tmp8109, i64 1
+ %tmp8111 = getelementptr inbounds float, float* %tmp8110, i64 1
+ %tmp8112 = getelementptr inbounds float, float* %tmp8111, i64 1
+ %tmp8113 = getelementptr inbounds float, float* %tmp8112, i64 1
+ %tmp8114 = getelementptr inbounds float, float* %tmp8113, i64 1
+ %tmp8115 = getelementptr inbounds float, float* %tmp8114, i64 1
+ %tmp8116 = getelementptr inbounds float, float* %tmp8115, i64 1
+ %tmp8117 = getelementptr inbounds float, float* %tmp8116, i64 1
+ %tmp8118 = getelementptr inbounds float, float* %tmp8117, i64 1
+ %tmp8119 = getelementptr inbounds float, float* %tmp8118, i64 1
+ %tmp8120 = getelementptr inbounds float, float* %tmp8119, i64 1
+ %tmp8121 = getelementptr inbounds float, float* %tmp8120, i64 1
+ %tmp8122 = getelementptr inbounds float, float* %tmp8121, i64 1
+ %tmp8123 = getelementptr inbounds float, float* %tmp8122, i64 1
+ %tmp8124 = getelementptr inbounds float, float* %tmp8123, i64 1
+ %tmp8125 = getelementptr inbounds float, float* %tmp8124, i64 1
+ %tmp8126 = getelementptr inbounds float, float* %tmp8125, i64 1
+ %tmp8127 = getelementptr inbounds float, float* %tmp8126, i64 1
+ %tmp8128 = getelementptr inbounds float, float* %tmp8127, i64 1
+ %tmp8129 = getelementptr inbounds float, float* %tmp8128, i64 1
+ %tmp8130 = getelementptr inbounds float, float* %tmp8129, i64 1
+ %tmp8131 = getelementptr inbounds float, float* %tmp8130, i64 1
+ %tmp8132 = getelementptr inbounds float, float* %tmp8131, i64 1
+ %tmp8133 = getelementptr inbounds float, float* %tmp8132, i64 1
+ %tmp8134 = getelementptr inbounds float, float* %tmp8133, i64 1
+ %tmp8135 = getelementptr inbounds float, float* %tmp8134, i64 1
+ %tmp8136 = getelementptr inbounds float, float* %tmp8135, i64 1
+ %tmp8137 = getelementptr inbounds float, float* %tmp8136, i64 1
+ %tmp8138 = getelementptr inbounds float, float* %tmp8137, i64 1
+ %tmp8139 = getelementptr inbounds float, float* %tmp8138, i64 1
+ %tmp8140 = getelementptr inbounds float, float* %tmp8139, i64 1
+ %tmp8141 = getelementptr inbounds float, float* %tmp8140, i64 1
+ %tmp8142 = getelementptr inbounds float, float* %tmp8141, i64 1
+ %tmp8143 = getelementptr inbounds float, float* %tmp8142, i64 1
+ %tmp8144 = getelementptr inbounds float, float* %tmp8143, i64 1
+ %tmp8145 = getelementptr inbounds float, float* %tmp8144, i64 1
+ %tmp8146 = getelementptr inbounds float, float* %tmp8145, i64 1
+ %tmp8147 = getelementptr inbounds float, float* %tmp8146, i64 1
+ %tmp8148 = getelementptr inbounds float, float* %tmp8147, i64 1
+ %tmp8149 = getelementptr inbounds float, float* %tmp8148, i64 1
+ %tmp8150 = getelementptr inbounds float, float* %tmp8149, i64 1
+ %tmp8151 = getelementptr inbounds float, float* %tmp8150, i64 1
+ %tmp8152 = getelementptr inbounds float, float* %tmp8151, i64 1
+ %tmp8153 = getelementptr inbounds float, float* %tmp8152, i64 1
+ %tmp8154 = getelementptr inbounds float, float* %tmp8153, i64 1
+ %tmp8155 = getelementptr inbounds float, float* %tmp8154, i64 1
+ %tmp8156 = getelementptr inbounds float, float* %tmp8155, i64 1
+ %tmp8157 = getelementptr inbounds float, float* %tmp8156, i64 1
+ %tmp8158 = getelementptr inbounds float, float* %tmp8157, i64 1
+ %tmp8159 = getelementptr inbounds float, float* %tmp8158, i64 1
+ %tmp8160 = getelementptr inbounds float, float* %tmp8159, i64 1
+ %tmp8161 = getelementptr inbounds float, float* %tmp8160, i64 1
+ %tmp8162 = getelementptr inbounds float, float* %tmp8161, i64 1
+ %tmp8163 = getelementptr inbounds float, float* %tmp8162, i64 1
+ %tmp8164 = getelementptr inbounds float, float* %tmp8163, i64 1
+ %tmp8165 = getelementptr inbounds float, float* %tmp8164, i64 1
+ %tmp8166 = getelementptr inbounds float, float* %tmp8165, i64 1
+ %tmp8167 = getelementptr inbounds float, float* %tmp8166, i64 1
+ %tmp8168 = getelementptr inbounds float, float* %tmp8167, i64 1
+ %tmp8169 = getelementptr inbounds float, float* %tmp8168, i64 1
+ %tmp8170 = getelementptr inbounds float, float* %tmp8169, i64 1
+ %tmp8171 = getelementptr inbounds float, float* %tmp8170, i64 1
+ %tmp8172 = getelementptr inbounds float, float* %tmp8171, i64 1
+ %tmp8173 = getelementptr inbounds float, float* %tmp8172, i64 1
+ %tmp8174 = getelementptr inbounds float, float* %tmp8173, i64 1
+ %tmp8175 = getelementptr inbounds float, float* %tmp8174, i64 1
+ %tmp8176 = getelementptr inbounds float, float* %tmp8175, i64 1
+ %tmp8177 = getelementptr inbounds float, float* %tmp8176, i64 1
+ %tmp8178 = getelementptr inbounds float, float* %tmp8177, i64 1
+ %tmp8179 = getelementptr inbounds float, float* %tmp8178, i64 1
+ %tmp8180 = getelementptr inbounds float, float* %tmp8179, i64 1
+ %tmp8181 = getelementptr inbounds float, float* %tmp8180, i64 1
+ %tmp8182 = getelementptr inbounds float, float* %tmp8181, i64 1
+ %tmp8183 = getelementptr inbounds float, float* %tmp8182, i64 1
+ %tmp8184 = getelementptr inbounds float, float* %tmp8183, i64 1
+ %tmp8185 = getelementptr inbounds float, float* %tmp8184, i64 1
+ %tmp8186 = getelementptr inbounds float, float* %tmp8185, i64 1
+ %tmp8187 = getelementptr inbounds float, float* %tmp8186, i64 1
+ %tmp8188 = getelementptr inbounds float, float* %tmp8187, i64 1
+ %tmp8189 = getelementptr inbounds float, float* %tmp8188, i64 1
+ %tmp8190 = getelementptr inbounds float, float* %tmp8189, i64 1
+ %tmp8191 = getelementptr inbounds float, float* %tmp8190, i64 1
+ %tmp8192 = getelementptr inbounds float, float* %tmp8191, i64 1
+ %tmp8193 = getelementptr inbounds float, float* %tmp8192, i64 1
+ %tmp8194 = getelementptr inbounds float, float* %tmp8193, i64 1
+ %tmp8195 = getelementptr inbounds float, float* %tmp8194, i64 1
+ %tmp8196 = getelementptr inbounds float, float* %tmp8195, i64 1
+ %tmp8197 = getelementptr inbounds float, float* %tmp8196, i64 1
+ %tmp8198 = getelementptr inbounds float, float* %tmp8197, i64 1
+ %tmp8199 = getelementptr inbounds float, float* %tmp8198, i64 1
+ %tmp8200 = getelementptr inbounds float, float* %tmp8199, i64 1
+ %tmp8201 = getelementptr inbounds float, float* %tmp8200, i64 1
+ %tmp8202 = getelementptr inbounds float, float* %tmp8201, i64 1
+ %tmp8203 = getelementptr inbounds float, float* %tmp8202, i64 1
+ %tmp8204 = getelementptr inbounds float, float* %tmp8203, i64 1
+ %tmp8205 = getelementptr inbounds float, float* %tmp8204, i64 1
+ %tmp8206 = getelementptr inbounds float, float* %tmp8205, i64 1
+ %tmp8207 = getelementptr inbounds float, float* %tmp8206, i64 1
+ %tmp8208 = getelementptr inbounds float, float* %tmp8207, i64 1
+ %tmp8209 = getelementptr inbounds float, float* %tmp8208, i64 1
+ %tmp8210 = getelementptr inbounds float, float* %tmp8209, i64 1
+ %tmp8211 = getelementptr inbounds float, float* %tmp8210, i64 1
+ %tmp8212 = getelementptr inbounds float, float* %tmp8211, i64 1
+ %tmp8213 = getelementptr inbounds float, float* %tmp8212, i64 1
+ %tmp8214 = getelementptr inbounds float, float* %tmp8213, i64 1
+ %tmp8215 = getelementptr inbounds float, float* %tmp8214, i64 1
+ %tmp8216 = getelementptr inbounds float, float* %tmp8215, i64 1
+ %tmp8217 = getelementptr inbounds float, float* %tmp8216, i64 1
+ %tmp8218 = getelementptr inbounds float, float* %tmp8217, i64 1
+ %tmp8219 = getelementptr inbounds float, float* %tmp8218, i64 1
+ %tmp8220 = getelementptr inbounds float, float* %tmp8219, i64 1
+ %tmp8221 = getelementptr inbounds float, float* %tmp8220, i64 1
+ %tmp8222 = getelementptr inbounds float, float* %tmp8221, i64 1
+ %tmp8223 = getelementptr inbounds float, float* %tmp8222, i64 1
+ %tmp8224 = getelementptr inbounds float, float* %tmp8223, i64 1
+ %tmp8225 = getelementptr inbounds float, float* %tmp8224, i64 1
+ %tmp8226 = getelementptr inbounds float, float* %tmp8225, i64 1
+ %tmp8227 = getelementptr inbounds float, float* %tmp8226, i64 1
+ %tmp8228 = getelementptr inbounds float, float* %tmp8227, i64 1
+ %tmp8229 = getelementptr inbounds float, float* %tmp8228, i64 1
+ %tmp8230 = getelementptr inbounds float, float* %tmp8229, i64 1
+ %tmp8231 = getelementptr inbounds float, float* %tmp8230, i64 1
+ %tmp8232 = getelementptr inbounds float, float* %tmp8231, i64 1
+ %tmp8233 = getelementptr inbounds float, float* %tmp8232, i64 1
+ %tmp8234 = getelementptr inbounds float, float* %tmp8233, i64 1
+ %tmp8235 = getelementptr inbounds float, float* %tmp8234, i64 1
+ %tmp8236 = getelementptr inbounds float, float* %tmp8235, i64 1
+ %tmp8237 = getelementptr inbounds float, float* %tmp8236, i64 1
+ %tmp8238 = getelementptr inbounds float, float* %tmp8237, i64 1
+ %tmp8239 = getelementptr inbounds float, float* %tmp8238, i64 1
+ %tmp8240 = getelementptr inbounds float, float* %tmp8239, i64 1
+ %tmp8241 = getelementptr inbounds float, float* %tmp8240, i64 1
+ %tmp8242 = getelementptr inbounds float, float* %tmp8241, i64 1
+ %tmp8243 = getelementptr inbounds float, float* %tmp8242, i64 1
+ %tmp8244 = getelementptr inbounds float, float* %tmp8243, i64 1
+ %tmp8245 = getelementptr inbounds float, float* %tmp8244, i64 1
+ %tmp8246 = getelementptr inbounds float, float* %tmp8245, i64 1
+ %tmp8247 = getelementptr inbounds float, float* %tmp8246, i64 1
+ %tmp8248 = getelementptr inbounds float, float* %tmp8247, i64 1
+ %tmp8249 = getelementptr inbounds float, float* %tmp8248, i64 1
+ %tmp8250 = getelementptr inbounds float, float* %tmp8249, i64 1
+ %tmp8251 = getelementptr inbounds float, float* %tmp8250, i64 1
+ %tmp8252 = getelementptr inbounds float, float* %tmp8251, i64 1
+ %tmp8253 = getelementptr inbounds float, float* %tmp8252, i64 1
+ %tmp8254 = getelementptr inbounds float, float* %tmp8253, i64 1
+ %tmp8255 = getelementptr inbounds float, float* %tmp8254, i64 1
+ %tmp8256 = getelementptr inbounds float, float* %tmp8255, i64 1
+ %tmp8257 = getelementptr inbounds float, float* %tmp8256, i64 1
+ %tmp8258 = getelementptr inbounds float, float* %tmp8257, i64 1
+ %tmp8259 = getelementptr inbounds float, float* %tmp8258, i64 1
+ %tmp8260 = getelementptr inbounds float, float* %tmp8259, i64 1
+ %tmp8261 = getelementptr inbounds float, float* %tmp8260, i64 1
+ %tmp8262 = getelementptr inbounds float, float* %tmp8261, i64 1
+ %tmp8263 = getelementptr inbounds float, float* %tmp8262, i64 1
+ %tmp8264 = getelementptr inbounds float, float* %tmp8263, i64 1
+ %tmp8265 = getelementptr inbounds float, float* %tmp8264, i64 1
+ %tmp8266 = getelementptr inbounds float, float* %tmp8265, i64 1
+ %tmp8267 = getelementptr inbounds float, float* %tmp8266, i64 1
+ %tmp8268 = getelementptr inbounds float, float* %tmp8267, i64 1
+ %tmp8269 = getelementptr inbounds float, float* %tmp8268, i64 1
+ %tmp8270 = getelementptr inbounds float, float* %tmp8269, i64 1
+ %tmp8271 = getelementptr inbounds float, float* %tmp8270, i64 1
+ %tmp8272 = getelementptr inbounds float, float* %tmp8271, i64 1
+ %tmp8273 = getelementptr inbounds float, float* %tmp8272, i64 1
+ %tmp8274 = getelementptr inbounds float, float* %tmp8273, i64 1
+ %tmp8275 = getelementptr inbounds float, float* %tmp8274, i64 1
+ %tmp8276 = getelementptr inbounds float, float* %tmp8275, i64 1
+ %tmp8277 = getelementptr inbounds float, float* %tmp8276, i64 1
+ %tmp8278 = getelementptr inbounds float, float* %tmp8277, i64 1
+ %tmp8279 = getelementptr inbounds float, float* %tmp8278, i64 1
+ %tmp8280 = getelementptr inbounds float, float* %tmp8279, i64 1
+ %tmp8281 = getelementptr inbounds float, float* %tmp8280, i64 1
+ %tmp8282 = getelementptr inbounds float, float* %tmp8281, i64 1
+ %tmp8283 = getelementptr inbounds float, float* %tmp8282, i64 1
+ %tmp8284 = getelementptr inbounds float, float* %tmp8283, i64 1
+ %tmp8285 = getelementptr inbounds float, float* %tmp8284, i64 1
+ %tmp8286 = getelementptr inbounds float, float* %tmp8285, i64 1
+ %tmp8287 = getelementptr inbounds float, float* %tmp8286, i64 1
+ %tmp8288 = getelementptr inbounds float, float* %tmp8287, i64 1
+ %tmp8289 = getelementptr inbounds float, float* %tmp8288, i64 1
+ %tmp8290 = getelementptr inbounds float, float* %tmp8289, i64 1
+ %tmp8291 = getelementptr inbounds float, float* %tmp8290, i64 1
+ %tmp8292 = getelementptr inbounds float, float* %tmp8291, i64 1
+ %tmp8293 = getelementptr inbounds float, float* %tmp8292, i64 1
+ %tmp8294 = getelementptr inbounds float, float* %tmp8293, i64 1
+ %tmp8295 = getelementptr inbounds float, float* %tmp8294, i64 1
+ %tmp8296 = getelementptr inbounds float, float* %tmp8295, i64 1
+ %tmp8297 = getelementptr inbounds float, float* %tmp8296, i64 1
+ %tmp8298 = getelementptr inbounds float, float* %tmp8297, i64 1
+ %tmp8299 = getelementptr inbounds float, float* %tmp8298, i64 1
+ %tmp8300 = getelementptr inbounds float, float* %tmp8299, i64 1
+ %tmp8301 = getelementptr inbounds float, float* %tmp8300, i64 1
+ %tmp8302 = getelementptr inbounds float, float* %tmp8301, i64 1
+ %tmp8303 = getelementptr inbounds float, float* %tmp8302, i64 1
+ %tmp8304 = getelementptr inbounds float, float* %tmp8303, i64 1
+ %tmp8305 = getelementptr inbounds float, float* %tmp8304, i64 1
+ %tmp8306 = getelementptr inbounds float, float* %tmp8305, i64 1
+ %tmp8307 = getelementptr inbounds float, float* %tmp8306, i64 1
+ %tmp8308 = getelementptr inbounds float, float* %tmp8307, i64 1
+ %tmp8309 = getelementptr inbounds float, float* %tmp8308, i64 1
+ %tmp8310 = getelementptr inbounds float, float* %tmp8309, i64 1
+ %tmp8311 = getelementptr inbounds float, float* %tmp8310, i64 1
+ %tmp8312 = getelementptr inbounds float, float* %tmp8311, i64 1
+ %tmp8313 = getelementptr inbounds float, float* %tmp8312, i64 1
+ %tmp8314 = getelementptr inbounds float, float* %tmp8313, i64 1
+ %tmp8315 = getelementptr inbounds float, float* %tmp8314, i64 1
+ %tmp8316 = getelementptr inbounds float, float* %tmp8315, i64 1
+ %tmp8317 = getelementptr inbounds float, float* %tmp8316, i64 1
+ %tmp8318 = getelementptr inbounds float, float* %tmp8317, i64 1
+ %tmp8319 = getelementptr inbounds float, float* %tmp8318, i64 1
+ %tmp8320 = getelementptr inbounds float, float* %tmp8319, i64 1
+ %tmp8321 = getelementptr inbounds float, float* %tmp8320, i64 1
+ %tmp8322 = getelementptr inbounds float, float* %tmp8321, i64 1
+ %tmp8323 = getelementptr inbounds float, float* %tmp8322, i64 1
+ %tmp8324 = getelementptr inbounds float, float* %tmp8323, i64 1
+ %tmp8325 = getelementptr inbounds float, float* %tmp8324, i64 1
+ %tmp8326 = getelementptr inbounds float, float* %tmp8325, i64 1
+ %tmp8327 = getelementptr inbounds float, float* %tmp8326, i64 1
+ %tmp8328 = getelementptr inbounds float, float* %tmp8327, i64 1
+ %tmp8329 = getelementptr inbounds float, float* %tmp8328, i64 1
+ %tmp8330 = getelementptr inbounds float, float* %tmp8329, i64 1
+ %tmp8331 = getelementptr inbounds float, float* %tmp8330, i64 1
+ %tmp8332 = getelementptr inbounds float, float* %tmp8331, i64 1
+ %tmp8333 = getelementptr inbounds float, float* %tmp8332, i64 1
+ %tmp8334 = getelementptr inbounds float, float* %tmp8333, i64 1
+ %tmp8335 = getelementptr inbounds float, float* %tmp8334, i64 1
+ %tmp8336 = getelementptr inbounds float, float* %tmp8335, i64 1
+ %tmp8337 = getelementptr inbounds float, float* %tmp8336, i64 1
+ %tmp8338 = getelementptr inbounds float, float* %tmp8337, i64 1
+ %tmp8339 = getelementptr inbounds float, float* %tmp8338, i64 1
+ %tmp8340 = getelementptr inbounds float, float* %tmp8339, i64 1
+ %tmp8341 = getelementptr inbounds float, float* %tmp8340, i64 1
+ %tmp8342 = getelementptr inbounds float, float* %tmp8341, i64 1
+ %tmp8343 = getelementptr inbounds float, float* %tmp8342, i64 1
+ %tmp8344 = getelementptr inbounds float, float* %tmp8343, i64 1
+ %tmp8345 = getelementptr inbounds float, float* %tmp8344, i64 1
+ %tmp8346 = getelementptr inbounds float, float* %tmp8345, i64 1
+ %tmp8347 = getelementptr inbounds float, float* %tmp8346, i64 1
+ %tmp8348 = getelementptr inbounds float, float* %tmp8347, i64 1
+ %tmp8349 = getelementptr inbounds float, float* %tmp8348, i64 1
+ %tmp8350 = getelementptr inbounds float, float* %tmp8349, i64 1
+ %tmp8351 = getelementptr inbounds float, float* %tmp8350, i64 1
+ %tmp8352 = getelementptr inbounds float, float* %tmp8351, i64 1
+ %tmp8353 = getelementptr inbounds float, float* %tmp8352, i64 1
+ %tmp8354 = getelementptr inbounds float, float* %tmp8353, i64 1
+ %tmp8355 = getelementptr inbounds float, float* %tmp8354, i64 1
+ %tmp8356 = getelementptr inbounds float, float* %tmp8355, i64 1
+ %tmp8357 = getelementptr inbounds float, float* %tmp8356, i64 1
+ %tmp8358 = getelementptr inbounds float, float* %tmp8357, i64 1
+ %tmp8359 = getelementptr inbounds float, float* %tmp8358, i64 1
+ %tmp8360 = getelementptr inbounds float, float* %tmp8359, i64 1
+ %tmp8361 = getelementptr inbounds float, float* %tmp8360, i64 1
+ %tmp8362 = getelementptr inbounds float, float* %tmp8361, i64 1
+ %tmp8363 = getelementptr inbounds float, float* %tmp8362, i64 1
+ %tmp8364 = getelementptr inbounds float, float* %tmp8363, i64 1
+ %tmp8365 = getelementptr inbounds float, float* %tmp8364, i64 1
+ %tmp8366 = getelementptr inbounds float, float* %tmp8365, i64 1
+ %tmp8367 = getelementptr inbounds float, float* %tmp8366, i64 1
+ %tmp8368 = getelementptr inbounds float, float* %tmp8367, i64 1
+ %tmp8369 = getelementptr inbounds float, float* %tmp8368, i64 1
+ %tmp8370 = getelementptr inbounds float, float* %tmp8369, i64 1
+ %tmp8371 = getelementptr inbounds float, float* %tmp8370, i64 1
+ %tmp8372 = getelementptr inbounds float, float* %tmp8371, i64 1
+ %tmp8373 = getelementptr inbounds float, float* %tmp8372, i64 1
+ %tmp8374 = getelementptr inbounds float, float* %tmp8373, i64 1
+ %tmp8375 = getelementptr inbounds float, float* %tmp8374, i64 1
+ %tmp8376 = getelementptr inbounds float, float* %tmp8375, i64 1
+ %tmp8377 = getelementptr inbounds float, float* %tmp8376, i64 1
+ %tmp8378 = getelementptr inbounds float, float* %tmp8377, i64 1
+ %tmp8379 = getelementptr inbounds float, float* %tmp8378, i64 1
+ %tmp8380 = getelementptr inbounds float, float* %tmp8379, i64 1
+ %tmp8381 = getelementptr inbounds float, float* %tmp8380, i64 1
+ %tmp8382 = getelementptr inbounds float, float* %tmp8381, i64 1
+ %tmp8383 = getelementptr inbounds float, float* %tmp8382, i64 1
+ %tmp8384 = getelementptr inbounds float, float* %tmp8383, i64 1
+ %tmp8385 = getelementptr inbounds float, float* %tmp8384, i64 1
+ %tmp8386 = getelementptr inbounds float, float* %tmp8385, i64 1
+ %tmp8387 = getelementptr inbounds float, float* %tmp8386, i64 1
+ %tmp8388 = getelementptr inbounds float, float* %tmp8387, i64 1
+ %tmp8389 = getelementptr inbounds float, float* %tmp8388, i64 1
+ %tmp8390 = getelementptr inbounds float, float* %tmp8389, i64 1
+ %tmp8391 = getelementptr inbounds float, float* %tmp8390, i64 1
+ %tmp8392 = getelementptr inbounds float, float* %tmp8391, i64 1
+ %tmp8393 = getelementptr inbounds float, float* %tmp8392, i64 1
+ %tmp8394 = getelementptr inbounds float, float* %tmp8393, i64 1
+ %tmp8395 = getelementptr inbounds float, float* %tmp8394, i64 1
+ %tmp8396 = getelementptr inbounds float, float* %tmp8395, i64 1
+ %tmp8397 = getelementptr inbounds float, float* %tmp8396, i64 1
+ %tmp8398 = getelementptr inbounds float, float* %tmp8397, i64 1
+ %tmp8399 = getelementptr inbounds float, float* %tmp8398, i64 1
+ %tmp8400 = getelementptr inbounds float, float* %tmp8399, i64 1
+ %tmp8401 = getelementptr inbounds float, float* %tmp8400, i64 1
+ %tmp8402 = getelementptr inbounds float, float* %tmp8401, i64 1
+ %tmp8403 = getelementptr inbounds float, float* %tmp8402, i64 1
+ %tmp8404 = getelementptr inbounds float, float* %tmp8403, i64 1
+ %tmp8405 = getelementptr inbounds float, float* %tmp8404, i64 1
+ %tmp8406 = getelementptr inbounds float, float* %tmp8405, i64 1
+ %tmp8407 = getelementptr inbounds float, float* %tmp8406, i64 1
+ %tmp8408 = getelementptr inbounds float, float* %tmp8407, i64 1
+ %tmp8409 = getelementptr inbounds float, float* %tmp8408, i64 1
+ %tmp8410 = getelementptr inbounds float, float* %tmp8409, i64 1
+ %tmp8411 = getelementptr inbounds float, float* %tmp8410, i64 1
+ %tmp8412 = getelementptr inbounds float, float* %tmp8411, i64 1
+ %tmp8413 = getelementptr inbounds float, float* %tmp8412, i64 1
+ %tmp8414 = getelementptr inbounds float, float* %tmp8413, i64 1
+ %tmp8415 = getelementptr inbounds float, float* %tmp8414, i64 1
+ %tmp8416 = getelementptr inbounds float, float* %tmp8415, i64 1
+ %tmp8417 = getelementptr inbounds float, float* %tmp8416, i64 1
+ %tmp8418 = getelementptr inbounds float, float* %tmp8417, i64 1
+ %tmp8419 = getelementptr inbounds float, float* %tmp8418, i64 1
+ %tmp8420 = getelementptr inbounds float, float* %tmp8419, i64 1
+ %tmp8421 = getelementptr inbounds float, float* %tmp8420, i64 1
+ %tmp8422 = getelementptr inbounds float, float* %tmp8421, i64 1
+ %tmp8423 = getelementptr inbounds float, float* %tmp8422, i64 1
+ %tmp8424 = getelementptr inbounds float, float* %tmp8423, i64 1
+ %tmp8425 = getelementptr inbounds float, float* %tmp8424, i64 1
+ %tmp8426 = getelementptr inbounds float, float* %tmp8425, i64 1
+ %tmp8427 = getelementptr inbounds float, float* %tmp8426, i64 1
+ %tmp8428 = getelementptr inbounds float, float* %tmp8427, i64 1
+ %tmp8429 = getelementptr inbounds float, float* %tmp8428, i64 1
+ %tmp8430 = getelementptr inbounds float, float* %tmp8429, i64 1
+ %tmp8431 = getelementptr inbounds float, float* %tmp8430, i64 1
+ %tmp8432 = getelementptr inbounds float, float* %tmp8431, i64 1
+ %tmp8433 = getelementptr inbounds float, float* %tmp8432, i64 1
+ %tmp8434 = getelementptr inbounds float, float* %tmp8433, i64 1
+ %tmp8435 = getelementptr inbounds float, float* %tmp8434, i64 1
+ %tmp8436 = getelementptr inbounds float, float* %tmp8435, i64 1
+ %tmp8437 = getelementptr inbounds float, float* %tmp8436, i64 1
+ %tmp8438 = getelementptr inbounds float, float* %tmp8437, i64 1
+ %tmp8439 = getelementptr inbounds float, float* %tmp8438, i64 1
+ %tmp8440 = getelementptr inbounds float, float* %tmp8439, i64 1
+ %tmp8441 = getelementptr inbounds float, float* %tmp8440, i64 1
+ %tmp8442 = getelementptr inbounds float, float* %tmp8441, i64 1
+ %tmp8443 = getelementptr inbounds float, float* %tmp8442, i64 1
+ %tmp8444 = getelementptr inbounds float, float* %tmp8443, i64 1
+ %tmp8445 = getelementptr inbounds float, float* %tmp8444, i64 1
+ %tmp8446 = getelementptr inbounds float, float* %tmp8445, i64 1
+ %tmp8447 = getelementptr inbounds float, float* %tmp8446, i64 1
+ %tmp8448 = getelementptr inbounds float, float* %tmp8447, i64 1
+ %tmp8449 = getelementptr inbounds float, float* %tmp8448, i64 1
+ %tmp8450 = getelementptr inbounds float, float* %tmp8449, i64 1
+ %tmp8451 = getelementptr inbounds float, float* %tmp8450, i64 1
+ %tmp8452 = getelementptr inbounds float, float* %tmp8451, i64 1
+ %tmp8453 = getelementptr inbounds float, float* %tmp8452, i64 1
+ %tmp8454 = getelementptr inbounds float, float* %tmp8453, i64 1
+ %tmp8455 = getelementptr inbounds float, float* %tmp8454, i64 1
+ %tmp8456 = getelementptr inbounds float, float* %tmp8455, i64 1
+ %tmp8457 = getelementptr inbounds float, float* %tmp8456, i64 1
+ %tmp8458 = getelementptr inbounds float, float* %tmp8457, i64 1
+ %tmp8459 = getelementptr inbounds float, float* %tmp8458, i64 1
+ %tmp8460 = getelementptr inbounds float, float* %tmp8459, i64 1
+ %tmp8461 = getelementptr inbounds float, float* %tmp8460, i64 1
+ %tmp8462 = getelementptr inbounds float, float* %tmp8461, i64 1
+ %tmp8463 = getelementptr inbounds float, float* %tmp8462, i64 1
+ %tmp8464 = getelementptr inbounds float, float* %tmp8463, i64 1
+ %tmp8465 = getelementptr inbounds float, float* %tmp8464, i64 1
+ %tmp8466 = getelementptr inbounds float, float* %tmp8465, i64 1
+ %tmp8467 = getelementptr inbounds float, float* %tmp8466, i64 1
+ %tmp8468 = getelementptr inbounds float, float* %tmp8467, i64 1
+ %tmp8469 = getelementptr inbounds float, float* %tmp8468, i64 1
+ %tmp8470 = getelementptr inbounds float, float* %tmp8469, i64 1
+ %tmp8471 = getelementptr inbounds float, float* %tmp8470, i64 1
+ %tmp8472 = getelementptr inbounds float, float* %tmp8471, i64 1
+ %tmp8473 = getelementptr inbounds float, float* %tmp8472, i64 1
+ %tmp8474 = getelementptr inbounds float, float* %tmp8473, i64 1
+ %tmp8475 = getelementptr inbounds float, float* %tmp8474, i64 1
+ %tmp8476 = getelementptr inbounds float, float* %tmp8475, i64 1
+ %tmp8477 = getelementptr inbounds float, float* %tmp8476, i64 1
+ %tmp8478 = getelementptr inbounds float, float* %tmp8477, i64 1
+ %tmp8479 = getelementptr inbounds float, float* %tmp8478, i64 1
+ %tmp8480 = getelementptr inbounds float, float* %tmp8479, i64 1
+ %tmp8481 = getelementptr inbounds float, float* %tmp8480, i64 1
+ %tmp8482 = getelementptr inbounds float, float* %tmp8481, i64 1
+ %tmp8483 = getelementptr inbounds float, float* %tmp8482, i64 1
+ %tmp8484 = getelementptr inbounds float, float* %tmp8483, i64 1
+ %tmp8485 = getelementptr inbounds float, float* %tmp8484, i64 1
+ %tmp8486 = getelementptr inbounds float, float* %tmp8485, i64 1
+ %tmp8487 = getelementptr inbounds float, float* %tmp8486, i64 1
+ %tmp8488 = getelementptr inbounds float, float* %tmp8487, i64 1
+ %tmp8489 = getelementptr inbounds float, float* %tmp8488, i64 1
+ %tmp8490 = getelementptr inbounds float, float* %tmp8489, i64 1
+ %tmp8491 = getelementptr inbounds float, float* %tmp8490, i64 1
+ %tmp8492 = getelementptr inbounds float, float* %tmp8491, i64 1
+ %tmp8493 = getelementptr inbounds float, float* %tmp8492, i64 1
+ %tmp8494 = getelementptr inbounds float, float* %tmp8493, i64 1
+ %tmp8495 = getelementptr inbounds float, float* %tmp8494, i64 1
+ %tmp8496 = getelementptr inbounds float, float* %tmp8495, i64 1
+ %tmp8497 = getelementptr inbounds float, float* %tmp8496, i64 1
+ %tmp8498 = getelementptr inbounds float, float* %tmp8497, i64 1
+ %tmp8499 = getelementptr inbounds float, float* %tmp8498, i64 1
+ %tmp8500 = getelementptr inbounds float, float* %tmp8499, i64 1
+ %tmp8501 = getelementptr inbounds float, float* %tmp8500, i64 1
+ %tmp8502 = getelementptr inbounds float, float* %tmp8501, i64 1
+ %tmp8503 = getelementptr inbounds float, float* %tmp8502, i64 1
+ %tmp8504 = getelementptr inbounds float, float* %tmp8503, i64 1
+ %tmp8505 = getelementptr inbounds float, float* %tmp8504, i64 1
+ %tmp8506 = getelementptr inbounds float, float* %tmp8505, i64 1
+ %tmp8507 = getelementptr inbounds float, float* %tmp8506, i64 1
+ %tmp8508 = getelementptr inbounds float, float* %tmp8507, i64 1
+ %tmp8509 = getelementptr inbounds float, float* %tmp8508, i64 1
+ %tmp8510 = getelementptr inbounds float, float* %tmp8509, i64 1
+ %tmp8511 = getelementptr inbounds float, float* %tmp8510, i64 1
+ %tmp8512 = getelementptr inbounds float, float* %tmp8511, i64 1
+ %tmp8513 = getelementptr inbounds float, float* %tmp8512, i64 1
+ %tmp8514 = getelementptr inbounds float, float* %tmp8513, i64 1
+ %tmp8515 = getelementptr inbounds float, float* %tmp8514, i64 1
+ %tmp8516 = getelementptr inbounds float, float* %tmp8515, i64 1
+ %tmp8517 = getelementptr inbounds float, float* %tmp8516, i64 1
+ %tmp8518 = getelementptr inbounds float, float* %tmp8517, i64 1
+ %tmp8519 = getelementptr inbounds float, float* %tmp8518, i64 1
+ %tmp8520 = getelementptr inbounds float, float* %tmp8519, i64 1
+ %tmp8521 = getelementptr inbounds float, float* %tmp8520, i64 1
+ %tmp8522 = getelementptr inbounds float, float* %tmp8521, i64 1
+ %tmp8523 = getelementptr inbounds float, float* %tmp8522, i64 1
+ %tmp8524 = getelementptr inbounds float, float* %tmp8523, i64 1
+ %tmp8525 = getelementptr inbounds float, float* %tmp8524, i64 1
+ %tmp8526 = getelementptr inbounds float, float* %tmp8525, i64 1
+ %tmp8527 = getelementptr inbounds float, float* %tmp8526, i64 1
+ %tmp8528 = getelementptr inbounds float, float* %tmp8527, i64 1
+ %tmp8529 = getelementptr inbounds float, float* %tmp8528, i64 1
+ %tmp8530 = getelementptr inbounds float, float* %tmp8529, i64 1
+ %tmp8531 = getelementptr inbounds float, float* %tmp8530, i64 1
+ %tmp8532 = getelementptr inbounds float, float* %tmp8531, i64 1
+ %tmp8533 = getelementptr inbounds float, float* %tmp8532, i64 1
+ %tmp8534 = getelementptr inbounds float, float* %tmp8533, i64 1
+ %tmp8535 = getelementptr inbounds float, float* %tmp8534, i64 1
+ %tmp8536 = getelementptr inbounds float, float* %tmp8535, i64 1
+ %tmp8537 = getelementptr inbounds float, float* %tmp8536, i64 1
+ %tmp8538 = getelementptr inbounds float, float* %tmp8537, i64 1
+ %tmp8539 = getelementptr inbounds float, float* %tmp8538, i64 1
+ %tmp8540 = getelementptr inbounds float, float* %tmp8539, i64 1
+ %tmp8541 = getelementptr inbounds float, float* %tmp8540, i64 1
+ %tmp8542 = getelementptr inbounds float, float* %tmp8541, i64 1
+ %tmp8543 = getelementptr inbounds float, float* %tmp8542, i64 1
+ %tmp8544 = getelementptr inbounds float, float* %tmp8543, i64 1
+ %tmp8545 = getelementptr inbounds float, float* %tmp8544, i64 1
+ %tmp8546 = getelementptr inbounds float, float* %tmp8545, i64 1
+ %tmp8547 = getelementptr inbounds float, float* %tmp8546, i64 1
+ %tmp8548 = getelementptr inbounds float, float* %tmp8547, i64 1
+ %tmp8549 = getelementptr inbounds float, float* %tmp8548, i64 1
+ %tmp8550 = getelementptr inbounds float, float* %tmp8549, i64 1
+ %tmp8551 = getelementptr inbounds float, float* %tmp8550, i64 1
+ %tmp8552 = getelementptr inbounds float, float* %tmp8551, i64 1
+ %tmp8553 = getelementptr inbounds float, float* %tmp8552, i64 1
+ %tmp8554 = getelementptr inbounds float, float* %tmp8553, i64 1
+ %tmp8555 = getelementptr inbounds float, float* %tmp8554, i64 1
+ %tmp8556 = getelementptr inbounds float, float* %tmp8555, i64 1
+ %tmp8557 = getelementptr inbounds float, float* %tmp8556, i64 1
+ %tmp8558 = getelementptr inbounds float, float* %tmp8557, i64 1
+ %tmp8559 = getelementptr inbounds float, float* %tmp8558, i64 1
+ %tmp8560 = getelementptr inbounds float, float* %tmp8559, i64 1
+ %tmp8561 = getelementptr inbounds float, float* %tmp8560, i64 1
+ %tmp8562 = getelementptr inbounds float, float* %tmp8561, i64 1
+ %tmp8563 = getelementptr inbounds float, float* %tmp8562, i64 1
+ %tmp8564 = getelementptr inbounds float, float* %tmp8563, i64 1
+ %tmp8565 = getelementptr inbounds float, float* %tmp8564, i64 1
+ %tmp8566 = getelementptr inbounds float, float* %tmp8565, i64 1
+ %tmp8567 = getelementptr inbounds float, float* %tmp8566, i64 1
+ %tmp8568 = getelementptr inbounds float, float* %tmp8567, i64 1
+ %tmp8569 = getelementptr inbounds float, float* %tmp8568, i64 1
+ %tmp8570 = getelementptr inbounds float, float* %tmp8569, i64 1
+ %tmp8571 = getelementptr inbounds float, float* %tmp8570, i64 1
+ %tmp8572 = getelementptr inbounds float, float* %tmp8571, i64 1
+ %tmp8573 = getelementptr inbounds float, float* %tmp8572, i64 1
+ %tmp8574 = getelementptr inbounds float, float* %tmp8573, i64 1
+ %tmp8575 = getelementptr inbounds float, float* %tmp8574, i64 1
+ %tmp8576 = getelementptr inbounds float, float* %tmp8575, i64 1
+ %tmp8577 = getelementptr inbounds float, float* %tmp8576, i64 1
+ %tmp8578 = getelementptr inbounds float, float* %tmp8577, i64 1
+ %tmp8579 = getelementptr inbounds float, float* %tmp8578, i64 1
+ %tmp8580 = getelementptr inbounds float, float* %tmp8579, i64 1
+ %tmp8581 = getelementptr inbounds float, float* %tmp8580, i64 1
+ %tmp8582 = getelementptr inbounds float, float* %tmp8581, i64 1
+ %tmp8583 = getelementptr inbounds float, float* %tmp8582, i64 1
+ %tmp8584 = getelementptr inbounds float, float* %tmp8583, i64 1
+ %tmp8585 = getelementptr inbounds float, float* %tmp8584, i64 1
+ %tmp8586 = getelementptr inbounds float, float* %tmp8585, i64 1
+ %tmp8587 = getelementptr inbounds float, float* %tmp8586, i64 1
+ %tmp8588 = getelementptr inbounds float, float* %tmp8587, i64 1
+ %tmp8589 = getelementptr inbounds float, float* %tmp8588, i64 1
+ %tmp8590 = getelementptr inbounds float, float* %tmp8589, i64 1
+ %tmp8591 = getelementptr inbounds float, float* %tmp8590, i64 1
+ %tmp8592 = getelementptr inbounds float, float* %tmp8591, i64 1
+ %tmp8593 = getelementptr inbounds float, float* %tmp8592, i64 1
+ %tmp8594 = getelementptr inbounds float, float* %tmp8593, i64 1
+ %tmp8595 = getelementptr inbounds float, float* %tmp8594, i64 1
+ %tmp8596 = getelementptr inbounds float, float* %tmp8595, i64 1
+ %tmp8597 = getelementptr inbounds float, float* %tmp8596, i64 1
+ %tmp8598 = getelementptr inbounds float, float* %tmp8597, i64 1
+ %tmp8599 = getelementptr inbounds float, float* %tmp8598, i64 1
+ %tmp8600 = getelementptr inbounds float, float* %tmp8599, i64 1
+ %tmp8601 = getelementptr inbounds float, float* %tmp8600, i64 1
+ %tmp8602 = getelementptr inbounds float, float* %tmp8601, i64 1
+ %tmp8603 = getelementptr inbounds float, float* %tmp8602, i64 1
+ %tmp8604 = getelementptr inbounds float, float* %tmp8603, i64 1
+ %tmp8605 = getelementptr inbounds float, float* %tmp8604, i64 1
+ %tmp8606 = getelementptr inbounds float, float* %tmp8605, i64 1
+ %tmp8607 = getelementptr inbounds float, float* %tmp8606, i64 1
+ %tmp8608 = getelementptr inbounds float, float* %tmp8607, i64 1
+ %tmp8609 = getelementptr inbounds float, float* %tmp8608, i64 1
+ %tmp8610 = getelementptr inbounds float, float* %tmp8609, i64 1
+ %tmp8611 = getelementptr inbounds float, float* %tmp8610, i64 1
+ %tmp8612 = getelementptr inbounds float, float* %tmp8611, i64 1
+ %tmp8613 = getelementptr inbounds float, float* %tmp8612, i64 1
+ %tmp8614 = getelementptr inbounds float, float* %tmp8613, i64 1
+ %tmp8615 = getelementptr inbounds float, float* %tmp8614, i64 1
+ %tmp8616 = getelementptr inbounds float, float* %tmp8615, i64 1
+ %tmp8617 = getelementptr inbounds float, float* %tmp8616, i64 1
+ %tmp8618 = getelementptr inbounds float, float* %tmp8617, i64 1
+ %tmp8619 = getelementptr inbounds float, float* %tmp8618, i64 1
+ %tmp8620 = getelementptr inbounds float, float* %tmp8619, i64 1
+ %tmp8621 = getelementptr inbounds float, float* %tmp8620, i64 1
+ %tmp8622 = getelementptr inbounds float, float* %tmp8621, i64 1
+ %tmp8623 = getelementptr inbounds float, float* %tmp8622, i64 1
+ %tmp8624 = getelementptr inbounds float, float* %tmp8623, i64 1
+ %tmp8625 = getelementptr inbounds float, float* %tmp8624, i64 1
+ %tmp8626 = getelementptr inbounds float, float* %tmp8625, i64 1
+ %tmp8627 = getelementptr inbounds float, float* %tmp8626, i64 1
+ %tmp8628 = getelementptr inbounds float, float* %tmp8627, i64 1
+ %tmp8629 = getelementptr inbounds float, float* %tmp8628, i64 1
+ %tmp8630 = getelementptr inbounds float, float* %tmp8629, i64 1
+ %tmp8631 = getelementptr inbounds float, float* %tmp8630, i64 1
+ %tmp8632 = getelementptr inbounds float, float* %tmp8631, i64 1
+ %tmp8633 = getelementptr inbounds float, float* %tmp8632, i64 1
+ %tmp8634 = getelementptr inbounds float, float* %tmp8633, i64 1
+ %tmp8635 = getelementptr inbounds float, float* %tmp8634, i64 1
+ %tmp8636 = getelementptr inbounds float, float* %tmp8635, i64 1
+ %tmp8637 = getelementptr inbounds float, float* %tmp8636, i64 1
+ %tmp8638 = getelementptr inbounds float, float* %tmp8637, i64 1
+ %tmp8639 = getelementptr inbounds float, float* %tmp8638, i64 1
+ %tmp8640 = getelementptr inbounds float, float* %tmp8639, i64 1
+ %tmp8641 = getelementptr inbounds float, float* %tmp8640, i64 1
+ %tmp8642 = getelementptr inbounds float, float* %tmp8641, i64 1
+ %tmp8643 = getelementptr inbounds float, float* %tmp8642, i64 1
+ %tmp8644 = getelementptr inbounds float, float* %tmp8643, i64 1
+ %tmp8645 = getelementptr inbounds float, float* %tmp8644, i64 1
+ %tmp8646 = getelementptr inbounds float, float* %tmp8645, i64 1
+ %tmp8647 = getelementptr inbounds float, float* %tmp8646, i64 1
+ %tmp8648 = getelementptr inbounds float, float* %tmp8647, i64 1
+ %tmp8649 = getelementptr inbounds float, float* %tmp8648, i64 1
+ %tmp8650 = getelementptr inbounds float, float* %tmp8649, i64 1
+ %tmp8651 = getelementptr inbounds float, float* %tmp8650, i64 1
+ %tmp8652 = getelementptr inbounds float, float* %tmp8651, i64 1
+ %tmp8653 = getelementptr inbounds float, float* %tmp8652, i64 1
+ %tmp8654 = getelementptr inbounds float, float* %tmp8653, i64 1
+ %tmp8655 = getelementptr inbounds float, float* %tmp8654, i64 1
+ %tmp8656 = getelementptr inbounds float, float* %tmp8655, i64 1
+ %tmp8657 = getelementptr inbounds float, float* %tmp8656, i64 1
+ %tmp8658 = getelementptr inbounds float, float* %tmp8657, i64 1
+ %tmp8659 = getelementptr inbounds float, float* %tmp8658, i64 1
+ %tmp8660 = getelementptr inbounds float, float* %tmp8659, i64 1
+ %tmp8661 = getelementptr inbounds float, float* %tmp8660, i64 1
+ %tmp8662 = getelementptr inbounds float, float* %tmp8661, i64 1
+ %tmp8663 = getelementptr inbounds float, float* %tmp8662, i64 1
+ %tmp8664 = getelementptr inbounds float, float* %tmp8663, i64 1
+ %tmp8665 = getelementptr inbounds float, float* %tmp8664, i64 1
+ %tmp8666 = getelementptr inbounds float, float* %tmp8665, i64 1
+ %tmp8667 = getelementptr inbounds float, float* %tmp8666, i64 1
+ %tmp8668 = getelementptr inbounds float, float* %tmp8667, i64 1
+ %tmp8669 = getelementptr inbounds float, float* %tmp8668, i64 1
+ %tmp8670 = getelementptr inbounds float, float* %tmp8669, i64 1
+ %tmp8671 = getelementptr inbounds float, float* %tmp8670, i64 1
+ %tmp8672 = getelementptr inbounds float, float* %tmp8671, i64 1
+ %tmp8673 = getelementptr inbounds float, float* %tmp8672, i64 1
+ %tmp8674 = getelementptr inbounds float, float* %tmp8673, i64 1
+ %tmp8675 = getelementptr inbounds float, float* %tmp8674, i64 1
+ %tmp8676 = getelementptr inbounds float, float* %tmp8675, i64 1
+ %tmp8677 = getelementptr inbounds float, float* %tmp8676, i64 1
+ %tmp8678 = getelementptr inbounds float, float* %tmp8677, i64 1
+ %tmp8679 = getelementptr inbounds float, float* %tmp8678, i64 1
+ %tmp8680 = getelementptr inbounds float, float* %tmp8679, i64 1
+ %tmp8681 = getelementptr inbounds float, float* %tmp8680, i64 1
+ %tmp8682 = getelementptr inbounds float, float* %tmp8681, i64 1
+ %tmp8683 = getelementptr inbounds float, float* %tmp8682, i64 1
+ %tmp8684 = getelementptr inbounds float, float* %tmp8683, i64 1
+ %tmp8685 = getelementptr inbounds float, float* %tmp8684, i64 1
+ %tmp8686 = getelementptr inbounds float, float* %tmp8685, i64 1
+ %tmp8687 = getelementptr inbounds float, float* %tmp8686, i64 1
+ %tmp8688 = getelementptr inbounds float, float* %tmp8687, i64 1
+ %tmp8689 = getelementptr inbounds float, float* %tmp8688, i64 1
+ %tmp8690 = getelementptr inbounds float, float* %tmp8689, i64 1
+ %tmp8691 = getelementptr inbounds float, float* %tmp8690, i64 1
+ %tmp8692 = getelementptr inbounds float, float* %tmp8691, i64 1
+ %tmp8693 = getelementptr inbounds float, float* %tmp8692, i64 1
+ %tmp8694 = getelementptr inbounds float, float* %tmp8693, i64 1
+ %tmp8695 = getelementptr inbounds float, float* %tmp8694, i64 1
+ %tmp8696 = getelementptr inbounds float, float* %tmp8695, i64 1
+ %tmp8697 = getelementptr inbounds float, float* %tmp8696, i64 1
+ %tmp8698 = getelementptr inbounds float, float* %tmp8697, i64 1
+ %tmp8699 = getelementptr inbounds float, float* %tmp8698, i64 1
+ %tmp8700 = getelementptr inbounds float, float* %tmp8699, i64 1
+ %tmp8701 = getelementptr inbounds float, float* %tmp8700, i64 1
+ %tmp8702 = getelementptr inbounds float, float* %tmp8701, i64 1
+ %tmp8703 = getelementptr inbounds float, float* %tmp8702, i64 1
+ %tmp8704 = getelementptr inbounds float, float* %tmp8703, i64 1
+ %tmp8705 = getelementptr inbounds float, float* %tmp8704, i64 1
+ %tmp8706 = getelementptr inbounds float, float* %tmp8705, i64 1
+ %tmp8707 = getelementptr inbounds float, float* %tmp8706, i64 1
+ %tmp8708 = getelementptr inbounds float, float* %tmp8707, i64 1
+ %tmp8709 = getelementptr inbounds float, float* %tmp8708, i64 1
+ %tmp8710 = getelementptr inbounds float, float* %tmp8709, i64 1
+ %tmp8711 = getelementptr inbounds float, float* %tmp8710, i64 1
+ %tmp8712 = getelementptr inbounds float, float* %tmp8711, i64 1
+ %tmp8713 = getelementptr inbounds float, float* %tmp8712, i64 1
+ %tmp8714 = getelementptr inbounds float, float* %tmp8713, i64 1
+ %tmp8715 = getelementptr inbounds float, float* %tmp8714, i64 1
+ %tmp8716 = getelementptr inbounds float, float* %tmp8715, i64 1
+ %tmp8717 = getelementptr inbounds float, float* %tmp8716, i64 1
+ %tmp8718 = getelementptr inbounds float, float* %tmp8717, i64 1
+ %tmp8719 = getelementptr inbounds float, float* %tmp8718, i64 1
+ %tmp8720 = getelementptr inbounds float, float* %tmp8719, i64 1
+ %tmp8721 = getelementptr inbounds float, float* %tmp8720, i64 1
+ %tmp8722 = getelementptr inbounds float, float* %tmp8721, i64 1
+ %tmp8723 = getelementptr inbounds float, float* %tmp8722, i64 1
+ %tmp8724 = getelementptr inbounds float, float* %tmp8723, i64 1
+ %tmp8725 = getelementptr inbounds float, float* %tmp8724, i64 1
+ %tmp8726 = getelementptr inbounds float, float* %tmp8725, i64 1
+ %tmp8727 = getelementptr inbounds float, float* %tmp8726, i64 1
+ %tmp8728 = getelementptr inbounds float, float* %tmp8727, i64 1
+ %tmp8729 = getelementptr inbounds float, float* %tmp8728, i64 1
+ %tmp8730 = getelementptr inbounds float, float* %tmp8729, i64 1
+ %tmp8731 = getelementptr inbounds float, float* %tmp8730, i64 1
+ %tmp8732 = getelementptr inbounds float, float* %tmp8731, i64 1
+ %tmp8733 = getelementptr inbounds float, float* %tmp8732, i64 1
+ %tmp8734 = getelementptr inbounds float, float* %tmp8733, i64 1
+ %tmp8735 = getelementptr inbounds float, float* %tmp8734, i64 1
+ %tmp8736 = getelementptr inbounds float, float* %tmp8735, i64 1
+ %tmp8737 = getelementptr inbounds float, float* %tmp8736, i64 1
+ %tmp8738 = getelementptr inbounds float, float* %tmp8737, i64 1
+ %tmp8739 = getelementptr inbounds float, float* %tmp8738, i64 1
+ %tmp8740 = getelementptr inbounds float, float* %tmp8739, i64 1
+ %tmp8741 = getelementptr inbounds float, float* %tmp8740, i64 1
+ %tmp8742 = getelementptr inbounds float, float* %tmp8741, i64 1
+ %tmp8743 = getelementptr inbounds float, float* %tmp8742, i64 1
+ %tmp8744 = getelementptr inbounds float, float* %tmp8743, i64 1
+ %tmp8745 = getelementptr inbounds float, float* %tmp8744, i64 1
+ %tmp8746 = getelementptr inbounds float, float* %tmp8745, i64 1
+ %tmp8747 = getelementptr inbounds float, float* %tmp8746, i64 1
+ %tmp8748 = getelementptr inbounds float, float* %tmp8747, i64 1
+ %tmp8749 = getelementptr inbounds float, float* %tmp8748, i64 1
+ %tmp8750 = getelementptr inbounds float, float* %tmp8749, i64 1
+ %tmp8751 = getelementptr inbounds float, float* %tmp8750, i64 1
+ %tmp8752 = getelementptr inbounds float, float* %tmp8751, i64 1
+ %tmp8753 = getelementptr inbounds float, float* %tmp8752, i64 1
+ %tmp8754 = getelementptr inbounds float, float* %tmp8753, i64 1
+ %tmp8755 = getelementptr inbounds float, float* %tmp8754, i64 1
+ %tmp8756 = getelementptr inbounds float, float* %tmp8755, i64 1
+ %tmp8757 = getelementptr inbounds float, float* %tmp8756, i64 1
+ %tmp8758 = getelementptr inbounds float, float* %tmp8757, i64 1
+ %tmp8759 = getelementptr inbounds float, float* %tmp8758, i64 1
+ %tmp8760 = getelementptr inbounds float, float* %tmp8759, i64 1
+ %tmp8761 = getelementptr inbounds float, float* %tmp8760, i64 1
+ %tmp8762 = getelementptr inbounds float, float* %tmp8761, i64 1
+ %tmp8763 = getelementptr inbounds float, float* %tmp8762, i64 1
+ %tmp8764 = getelementptr inbounds float, float* %tmp8763, i64 1
+ %tmp8765 = getelementptr inbounds float, float* %tmp8764, i64 1
+ %tmp8766 = getelementptr inbounds float, float* %tmp8765, i64 1
+ %tmp8767 = getelementptr inbounds float, float* %tmp8766, i64 1
+ %tmp8768 = getelementptr inbounds float, float* %tmp8767, i64 1
+ %tmp8769 = getelementptr inbounds float, float* %tmp8768, i64 1
+ %tmp8770 = getelementptr inbounds float, float* %tmp8769, i64 1
+ %tmp8771 = getelementptr inbounds float, float* %tmp8770, i64 1
+ %tmp8772 = getelementptr inbounds float, float* %tmp8771, i64 1
+ %tmp8773 = getelementptr inbounds float, float* %tmp8772, i64 1
+ %tmp8774 = getelementptr inbounds float, float* %tmp8773, i64 1
+ %tmp8775 = getelementptr inbounds float, float* %tmp8774, i64 1
+ %tmp8776 = getelementptr inbounds float, float* %tmp8775, i64 1
+ %tmp8777 = getelementptr inbounds float, float* %tmp8776, i64 1
+ %tmp8778 = getelementptr inbounds float, float* %tmp8777, i64 1
+ %tmp8779 = getelementptr inbounds float, float* %tmp8778, i64 1
+ %tmp8780 = getelementptr inbounds float, float* %tmp8779, i64 1
+ %tmp8781 = getelementptr inbounds float, float* %tmp8780, i64 1
+ %tmp8782 = getelementptr inbounds float, float* %tmp8781, i64 1
+ %tmp8783 = getelementptr inbounds float, float* %tmp8782, i64 1
+ %tmp8784 = getelementptr inbounds float, float* %tmp8783, i64 1
+ %tmp8785 = getelementptr inbounds float, float* %tmp8784, i64 1
+ %tmp8786 = getelementptr inbounds float, float* %tmp8785, i64 1
+ %tmp8787 = getelementptr inbounds float, float* %tmp8786, i64 1
+ %tmp8788 = getelementptr inbounds float, float* %tmp8787, i64 1
+ %tmp8789 = getelementptr inbounds float, float* %tmp8788, i64 1
+ %tmp8790 = getelementptr inbounds float, float* %tmp8789, i64 1
+ %tmp8791 = getelementptr inbounds float, float* %tmp8790, i64 1
+ %tmp8792 = getelementptr inbounds float, float* %tmp8791, i64 1
+ %tmp8793 = getelementptr inbounds float, float* %tmp8792, i64 1
+ %tmp8794 = getelementptr inbounds float, float* %tmp8793, i64 1
+ %tmp8795 = getelementptr inbounds float, float* %tmp8794, i64 1
+ %tmp8796 = getelementptr inbounds float, float* %tmp8795, i64 1
+ %tmp8797 = getelementptr inbounds float, float* %tmp8796, i64 1
+ %tmp8798 = getelementptr inbounds float, float* %tmp8797, i64 1
+ %tmp8799 = getelementptr inbounds float, float* %tmp8798, i64 1
+ %tmp8800 = getelementptr inbounds float, float* %tmp8799, i64 1
+ %tmp8801 = getelementptr inbounds float, float* %tmp8800, i64 1
+ %tmp8802 = getelementptr inbounds float, float* %tmp8801, i64 1
+ %tmp8803 = getelementptr inbounds float, float* %tmp8802, i64 1
+ %tmp8804 = getelementptr inbounds float, float* %tmp8803, i64 1
+ %tmp8805 = getelementptr inbounds float, float* %tmp8804, i64 1
+ %tmp8806 = getelementptr inbounds float, float* %tmp8805, i64 1
+ %tmp8807 = getelementptr inbounds float, float* %tmp8806, i64 1
+ %tmp8808 = getelementptr inbounds float, float* %tmp8807, i64 1
+ %tmp8809 = getelementptr inbounds float, float* %tmp8808, i64 1
+ %tmp8810 = getelementptr inbounds float, float* %tmp8809, i64 1
+ %tmp8811 = getelementptr inbounds float, float* %tmp8810, i64 1
+ %tmp8812 = getelementptr inbounds float, float* %tmp8811, i64 1
+ %tmp8813 = getelementptr inbounds float, float* %tmp8812, i64 1
+ %tmp8814 = getelementptr inbounds float, float* %tmp8813, i64 1
+ %tmp8815 = getelementptr inbounds float, float* %tmp8814, i64 1
+ %tmp8816 = getelementptr inbounds float, float* %tmp8815, i64 1
+ %tmp8817 = getelementptr inbounds float, float* %tmp8816, i64 1
+ %tmp8818 = getelementptr inbounds float, float* %tmp8817, i64 1
+ %tmp8819 = getelementptr inbounds float, float* %tmp8818, i64 1
+ %tmp8820 = getelementptr inbounds float, float* %tmp8819, i64 1
+ %tmp8821 = getelementptr inbounds float, float* %tmp8820, i64 1
+ %tmp8822 = getelementptr inbounds float, float* %tmp8821, i64 1
+ %tmp8823 = getelementptr inbounds float, float* %tmp8822, i64 1
+ %tmp8824 = getelementptr inbounds float, float* %tmp8823, i64 1
+ %tmp8825 = getelementptr inbounds float, float* %tmp8824, i64 1
+ %tmp8826 = getelementptr inbounds float, float* %tmp8825, i64 1
+ %tmp8827 = getelementptr inbounds float, float* %tmp8826, i64 1
+ %tmp8828 = getelementptr inbounds float, float* %tmp8827, i64 1
+ %tmp8829 = getelementptr inbounds float, float* %tmp8828, i64 1
+ %tmp8830 = getelementptr inbounds float, float* %tmp8829, i64 1
+ %tmp8831 = getelementptr inbounds float, float* %tmp8830, i64 1
+ %tmp8832 = getelementptr inbounds float, float* %tmp8831, i64 1
+ %tmp8833 = getelementptr inbounds float, float* %tmp8832, i64 1
+ %tmp8834 = getelementptr inbounds float, float* %tmp8833, i64 1
+ %tmp8835 = getelementptr inbounds float, float* %tmp8834, i64 1
+ %tmp8836 = getelementptr inbounds float, float* %tmp8835, i64 1
+ %tmp8837 = getelementptr inbounds float, float* %tmp8836, i64 1
+ %tmp8838 = getelementptr inbounds float, float* %tmp8837, i64 1
+ %tmp8839 = getelementptr inbounds float, float* %tmp8838, i64 1
+ %tmp8840 = getelementptr inbounds float, float* %tmp8839, i64 1
+ %tmp8841 = getelementptr inbounds float, float* %tmp8840, i64 1
+ %tmp8842 = getelementptr inbounds float, float* %tmp8841, i64 1
+ %tmp8843 = getelementptr inbounds float, float* %tmp8842, i64 1
+ %tmp8844 = getelementptr inbounds float, float* %tmp8843, i64 1
+ %tmp8845 = getelementptr inbounds float, float* %tmp8844, i64 1
+ %tmp8846 = getelementptr inbounds float, float* %tmp8845, i64 1
+ %tmp8847 = getelementptr inbounds float, float* %tmp8846, i64 1
+ %tmp8848 = getelementptr inbounds float, float* %tmp8847, i64 1
+ %tmp8849 = getelementptr inbounds float, float* %tmp8848, i64 1
+ %tmp8850 = getelementptr inbounds float, float* %tmp8849, i64 1
+ %tmp8851 = getelementptr inbounds float, float* %tmp8850, i64 1
+ %tmp8852 = getelementptr inbounds float, float* %tmp8851, i64 1
+ %tmp8853 = getelementptr inbounds float, float* %tmp8852, i64 1
+ %tmp8854 = getelementptr inbounds float, float* %tmp8853, i64 1
+ %tmp8855 = getelementptr inbounds float, float* %tmp8854, i64 1
+ %tmp8856 = getelementptr inbounds float, float* %tmp8855, i64 1
+ %tmp8857 = getelementptr inbounds float, float* %tmp8856, i64 1
+ %tmp8858 = getelementptr inbounds float, float* %tmp8857, i64 1
+ %tmp8859 = getelementptr inbounds float, float* %tmp8858, i64 1
+ %tmp8860 = getelementptr inbounds float, float* %tmp8859, i64 1
+ %tmp8861 = getelementptr inbounds float, float* %tmp8860, i64 1
+ %tmp8862 = getelementptr inbounds float, float* %tmp8861, i64 1
+ %tmp8863 = getelementptr inbounds float, float* %tmp8862, i64 1
+ %tmp8864 = getelementptr inbounds float, float* %tmp8863, i64 1
+ %tmp8865 = getelementptr inbounds float, float* %tmp8864, i64 1
+ %tmp8866 = getelementptr inbounds float, float* %tmp8865, i64 1
+ %tmp8867 = getelementptr inbounds float, float* %tmp8866, i64 1
+ %tmp8868 = getelementptr inbounds float, float* %tmp8867, i64 1
+ %tmp8869 = getelementptr inbounds float, float* %tmp8868, i64 1
+ %tmp8870 = getelementptr inbounds float, float* %tmp8869, i64 1
+ %tmp8871 = getelementptr inbounds float, float* %tmp8870, i64 1
+ %tmp8872 = getelementptr inbounds float, float* %tmp8871, i64 1
+ %tmp8873 = getelementptr inbounds float, float* %tmp8872, i64 1
+ %tmp8874 = getelementptr inbounds float, float* %tmp8873, i64 1
+ %tmp8875 = getelementptr inbounds float, float* %tmp8874, i64 1
+ %tmp8876 = getelementptr inbounds float, float* %tmp8875, i64 1
+ %tmp8877 = getelementptr inbounds float, float* %tmp8876, i64 1
+ %tmp8878 = getelementptr inbounds float, float* %tmp8877, i64 1
+ %tmp8879 = getelementptr inbounds float, float* %tmp8878, i64 1
+ %tmp8880 = getelementptr inbounds float, float* %tmp8879, i64 1
+ %tmp8881 = getelementptr inbounds float, float* %tmp8880, i64 1
+ %tmp8882 = getelementptr inbounds float, float* %tmp8881, i64 1
+ %tmp8883 = getelementptr inbounds float, float* %tmp8882, i64 1
+ %tmp8884 = getelementptr inbounds float, float* %tmp8883, i64 1
+ %tmp8885 = getelementptr inbounds float, float* %tmp8884, i64 1
+ %tmp8886 = getelementptr inbounds float, float* %tmp8885, i64 1
+ %tmp8887 = getelementptr inbounds float, float* %tmp8886, i64 1
+ %tmp8888 = getelementptr inbounds float, float* %tmp8887, i64 1
+ %tmp8889 = getelementptr inbounds float, float* %tmp8888, i64 1
+ %tmp8890 = getelementptr inbounds float, float* %tmp8889, i64 1
+ %tmp8891 = getelementptr inbounds float, float* %tmp8890, i64 1
+ %tmp8892 = getelementptr inbounds float, float* %tmp8891, i64 1
+ %tmp8893 = getelementptr inbounds float, float* %tmp8892, i64 1
+ %tmp8894 = getelementptr inbounds float, float* %tmp8893, i64 1
+ %tmp8895 = getelementptr inbounds float, float* %tmp8894, i64 1
+ %tmp8896 = getelementptr inbounds float, float* %tmp8895, i64 1
+ %tmp8897 = getelementptr inbounds float, float* %tmp8896, i64 1
+ %tmp8898 = getelementptr inbounds float, float* %tmp8897, i64 1
+ %tmp8899 = getelementptr inbounds float, float* %tmp8898, i64 1
+ %tmp8900 = getelementptr inbounds float, float* %tmp8899, i64 1
+ %tmp8901 = getelementptr inbounds float, float* %tmp8900, i64 1
+ %tmp8902 = getelementptr inbounds float, float* %tmp8901, i64 1
+ %tmp8903 = getelementptr inbounds float, float* %tmp8902, i64 1
+ %tmp8904 = getelementptr inbounds float, float* %tmp8903, i64 1
+ %tmp8905 = getelementptr inbounds float, float* %tmp8904, i64 1
+ %tmp8906 = getelementptr inbounds float, float* %tmp8905, i64 1
+ %tmp8907 = getelementptr inbounds float, float* %tmp8906, i64 1
+ %tmp8908 = getelementptr inbounds float, float* %tmp8907, i64 1
+ %tmp8909 = getelementptr inbounds float, float* %tmp8908, i64 1
+ %tmp8910 = getelementptr inbounds float, float* %tmp8909, i64 1
+ %tmp8911 = getelementptr inbounds float, float* %tmp8910, i64 1
+ %tmp8912 = getelementptr inbounds float, float* %tmp8911, i64 1
+ %tmp8913 = getelementptr inbounds float, float* %tmp8912, i64 1
+ %tmp8914 = getelementptr inbounds float, float* %tmp8913, i64 1
+ %tmp8915 = getelementptr inbounds float, float* %tmp8914, i64 1
+ %tmp8916 = getelementptr inbounds float, float* %tmp8915, i64 1
+ %tmp8917 = getelementptr inbounds float, float* %tmp8916, i64 1
+ %tmp8918 = getelementptr inbounds float, float* %tmp8917, i64 1
+ %tmp8919 = getelementptr inbounds float, float* %tmp8918, i64 1
+ %tmp8920 = getelementptr inbounds float, float* %tmp8919, i64 1
+ %tmp8921 = getelementptr inbounds float, float* %tmp8920, i64 1
+ %tmp8922 = getelementptr inbounds float, float* %tmp8921, i64 1
+ %tmp8923 = getelementptr inbounds float, float* %tmp8922, i64 1
+ %tmp8924 = getelementptr inbounds float, float* %tmp8923, i64 1
+ %tmp8925 = getelementptr inbounds float, float* %tmp8924, i64 1
+ %tmp8926 = getelementptr inbounds float, float* %tmp8925, i64 1
+ %tmp8927 = getelementptr inbounds float, float* %tmp8926, i64 1
+ %tmp8928 = getelementptr inbounds float, float* %tmp8927, i64 1
+ %tmp8929 = getelementptr inbounds float, float* %tmp8928, i64 1
+ %tmp8930 = getelementptr inbounds float, float* %tmp8929, i64 1
+ %tmp8931 = getelementptr inbounds float, float* %tmp8930, i64 1
+ %tmp8932 = getelementptr inbounds float, float* %tmp8931, i64 1
+ %tmp8933 = getelementptr inbounds float, float* %tmp8932, i64 1
+ %tmp8934 = getelementptr inbounds float, float* %tmp8933, i64 1
+ %tmp8935 = getelementptr inbounds float, float* %tmp8934, i64 1
+ %tmp8936 = getelementptr inbounds float, float* %tmp8935, i64 1
+ %tmp8937 = getelementptr inbounds float, float* %tmp8936, i64 1
+ %tmp8938 = getelementptr inbounds float, float* %tmp8937, i64 1
+ %tmp8939 = getelementptr inbounds float, float* %tmp8938, i64 1
+ %tmp8940 = getelementptr inbounds float, float* %tmp8939, i64 1
+ %tmp8941 = getelementptr inbounds float, float* %tmp8940, i64 1
+ %tmp8942 = getelementptr inbounds float, float* %tmp8941, i64 1
+ %tmp8943 = getelementptr inbounds float, float* %tmp8942, i64 1
+ %tmp8944 = getelementptr inbounds float, float* %tmp8943, i64 1
+ %tmp8945 = getelementptr inbounds float, float* %tmp8944, i64 1
+ %tmp8946 = getelementptr inbounds float, float* %tmp8945, i64 1
+ %tmp8947 = getelementptr inbounds float, float* %tmp8946, i64 1
+ %tmp8948 = getelementptr inbounds float, float* %tmp8947, i64 1
+ %tmp8949 = getelementptr inbounds float, float* %tmp8948, i64 1
+ %tmp8950 = getelementptr inbounds float, float* %tmp8949, i64 1
+ %tmp8951 = getelementptr inbounds float, float* %tmp8950, i64 1
+ %tmp8952 = getelementptr inbounds float, float* %tmp8951, i64 1
+ %tmp8953 = getelementptr inbounds float, float* %tmp8952, i64 1
+ %tmp8954 = getelementptr inbounds float, float* %tmp8953, i64 1
+ %tmp8955 = getelementptr inbounds float, float* %tmp8954, i64 1
+ %tmp8956 = getelementptr inbounds float, float* %tmp8955, i64 1
+ %tmp8957 = getelementptr inbounds float, float* %tmp8956, i64 1
+ %tmp8958 = getelementptr inbounds float, float* %tmp8957, i64 1
+ %tmp8959 = getelementptr inbounds float, float* %tmp8958, i64 1
+ %tmp8960 = getelementptr inbounds float, float* %tmp8959, i64 1
+ %tmp8961 = getelementptr inbounds float, float* %tmp8960, i64 1
+ %tmp8962 = getelementptr inbounds float, float* %tmp8961, i64 1
+ %tmp8963 = getelementptr inbounds float, float* %tmp8962, i64 1
+ %tmp8964 = getelementptr inbounds float, float* %tmp8963, i64 1
+ %tmp8965 = getelementptr inbounds float, float* %tmp8964, i64 1
+ %tmp8966 = getelementptr inbounds float, float* %tmp8965, i64 1
+ %tmp8967 = getelementptr inbounds float, float* %tmp8966, i64 1
+ %tmp8968 = getelementptr inbounds float, float* %tmp8967, i64 1
+ %tmp8969 = getelementptr inbounds float, float* %tmp8968, i64 1
+ %tmp8970 = getelementptr inbounds float, float* %tmp8969, i64 1
+ %tmp8971 = getelementptr inbounds float, float* %tmp8970, i64 1
+ %tmp8972 = getelementptr inbounds float, float* %tmp8971, i64 1
+ %tmp8973 = getelementptr inbounds float, float* %tmp8972, i64 1
+ %tmp8974 = getelementptr inbounds float, float* %tmp8973, i64 1
+ %tmp8975 = getelementptr inbounds float, float* %tmp8974, i64 1
+ %tmp8976 = getelementptr inbounds float, float* %tmp8975, i64 1
+ %tmp8977 = getelementptr inbounds float, float* %tmp8976, i64 1
+ %tmp8978 = getelementptr inbounds float, float* %tmp8977, i64 1
+ %tmp8979 = getelementptr inbounds float, float* %tmp8978, i64 1
+ %tmp8980 = getelementptr inbounds float, float* %tmp8979, i64 1
+ %tmp8981 = getelementptr inbounds float, float* %tmp8980, i64 1
+ %tmp8982 = getelementptr inbounds float, float* %tmp8981, i64 1
+ %tmp8983 = getelementptr inbounds float, float* %tmp8982, i64 1
+ %tmp8984 = getelementptr inbounds float, float* %tmp8983, i64 1
+ %tmp8985 = getelementptr inbounds float, float* %tmp8984, i64 1
+ %tmp8986 = getelementptr inbounds float, float* %tmp8985, i64 1
+ %tmp8987 = getelementptr inbounds float, float* %tmp8986, i64 1
+ %tmp8988 = getelementptr inbounds float, float* %tmp8987, i64 1
+ %tmp8989 = getelementptr inbounds float, float* %tmp8988, i64 1
+ %tmp8990 = getelementptr inbounds float, float* %tmp8989, i64 1
+ %tmp8991 = getelementptr inbounds float, float* %tmp8990, i64 1
+ %tmp8992 = getelementptr inbounds float, float* %tmp8991, i64 1
+ %tmp8993 = getelementptr inbounds float, float* %tmp8992, i64 1
+ %tmp8994 = getelementptr inbounds float, float* %tmp8993, i64 1
+ %tmp8995 = getelementptr inbounds float, float* %tmp8994, i64 1
+ %tmp8996 = getelementptr inbounds float, float* %tmp8995, i64 1
+ %tmp8997 = getelementptr inbounds float, float* %tmp8996, i64 1
+ %tmp8998 = getelementptr inbounds float, float* %tmp8997, i64 1
+ %tmp8999 = getelementptr inbounds float, float* %tmp8998, i64 1
+ %tmp9000 = getelementptr inbounds float, float* %tmp8999, i64 1
+ %tmp9001 = getelementptr inbounds float, float* %tmp9000, i64 1
+ %tmp9002 = getelementptr inbounds float, float* %tmp9001, i64 1
+ %tmp9003 = getelementptr inbounds float, float* %tmp9002, i64 1
+ %tmp9004 = getelementptr inbounds float, float* %tmp9003, i64 1
+ %tmp9005 = getelementptr inbounds float, float* %tmp9004, i64 1
+ %tmp9006 = getelementptr inbounds float, float* %tmp9005, i64 1
+ %tmp9007 = getelementptr inbounds float, float* %tmp9006, i64 1
+ %tmp9008 = getelementptr inbounds float, float* %tmp9007, i64 1
+ %tmp9009 = getelementptr inbounds float, float* %tmp9008, i64 1
+ %tmp9010 = getelementptr inbounds float, float* %tmp9009, i64 1
+ %tmp9011 = getelementptr inbounds float, float* %tmp9010, i64 1
+ %tmp9012 = getelementptr inbounds float, float* %tmp9011, i64 1
+ %tmp9013 = getelementptr inbounds float, float* %tmp9012, i64 1
+ %tmp9014 = getelementptr inbounds float, float* %tmp9013, i64 1
+ %tmp9015 = getelementptr inbounds float, float* %tmp9014, i64 1
+ %tmp9016 = getelementptr inbounds float, float* %tmp9015, i64 1
+ %tmp9017 = getelementptr inbounds float, float* %tmp9016, i64 1
+ %tmp9018 = getelementptr inbounds float, float* %tmp9017, i64 1
+ %tmp9019 = getelementptr inbounds float, float* %tmp9018, i64 1
+ %tmp9020 = getelementptr inbounds float, float* %tmp9019, i64 1
+ %tmp9021 = getelementptr inbounds float, float* %tmp9020, i64 1
+ %tmp9022 = getelementptr inbounds float, float* %tmp9021, i64 1
+ %tmp9023 = getelementptr inbounds float, float* %tmp9022, i64 1
+ %tmp9024 = getelementptr inbounds float, float* %tmp9023, i64 1
+ %tmp9025 = getelementptr inbounds float, float* %tmp9024, i64 1
+ %tmp9026 = getelementptr inbounds float, float* %tmp9025, i64 1
+ %tmp9027 = getelementptr inbounds float, float* %tmp9026, i64 1
+ %tmp9028 = getelementptr inbounds float, float* %tmp9027, i64 1
+ %tmp9029 = getelementptr inbounds float, float* %tmp9028, i64 1
+ %tmp9030 = getelementptr inbounds float, float* %tmp9029, i64 1
+ %tmp9031 = getelementptr inbounds float, float* %tmp9030, i64 1
+ %tmp9032 = getelementptr inbounds float, float* %tmp9031, i64 1
+ %tmp9033 = getelementptr inbounds float, float* %tmp9032, i64 1
+ %tmp9034 = getelementptr inbounds float, float* %tmp9033, i64 1
+ %tmp9035 = getelementptr inbounds float, float* %tmp9034, i64 1
+ %tmp9036 = getelementptr inbounds float, float* %tmp9035, i64 1
+ %tmp9037 = getelementptr inbounds float, float* %tmp9036, i64 1
+ %tmp9038 = getelementptr inbounds float, float* %tmp9037, i64 1
+ %tmp9039 = getelementptr inbounds float, float* %tmp9038, i64 1
+ %tmp9040 = getelementptr inbounds float, float* %tmp9039, i64 1
+ %tmp9041 = getelementptr inbounds float, float* %tmp9040, i64 1
+ %tmp9042 = getelementptr inbounds float, float* %tmp9041, i64 1
+ %tmp9043 = getelementptr inbounds float, float* %tmp9042, i64 1
+ %tmp9044 = getelementptr inbounds float, float* %tmp9043, i64 1
+ %tmp9045 = getelementptr inbounds float, float* %tmp9044, i64 1
+ %tmp9046 = getelementptr inbounds float, float* %tmp9045, i64 1
+ %tmp9047 = getelementptr inbounds float, float* %tmp9046, i64 1
+ %tmp9048 = getelementptr inbounds float, float* %tmp9047, i64 1
+ %tmp9049 = getelementptr inbounds float, float* %tmp9048, i64 1
+ %tmp9050 = getelementptr inbounds float, float* %tmp9049, i64 1
+ %tmp9051 = getelementptr inbounds float, float* %tmp9050, i64 1
+ %tmp9052 = getelementptr inbounds float, float* %tmp9051, i64 1
+ %tmp9053 = getelementptr inbounds float, float* %tmp9052, i64 1
+ %tmp9054 = getelementptr inbounds float, float* %tmp9053, i64 1
+ %tmp9055 = getelementptr inbounds float, float* %tmp9054, i64 1
+ %tmp9056 = getelementptr inbounds float, float* %tmp9055, i64 1
+ %tmp9057 = getelementptr inbounds float, float* %tmp9056, i64 1
+ %tmp9058 = getelementptr inbounds float, float* %tmp9057, i64 1
+ %tmp9059 = getelementptr inbounds float, float* %tmp9058, i64 1
+ %tmp9060 = getelementptr inbounds float, float* %tmp9059, i64 1
+ %tmp9061 = getelementptr inbounds float, float* %tmp9060, i64 1
+ %tmp9062 = getelementptr inbounds float, float* %tmp9061, i64 1
+ %tmp9063 = getelementptr inbounds float, float* %tmp9062, i64 1
+ %tmp9064 = getelementptr inbounds float, float* %tmp9063, i64 1
+ %tmp9065 = getelementptr inbounds float, float* %tmp9064, i64 1
+ %tmp9066 = getelementptr inbounds float, float* %tmp9065, i64 1
+ %tmp9067 = getelementptr inbounds float, float* %tmp9066, i64 1
+ %tmp9068 = getelementptr inbounds float, float* %tmp9067, i64 1
+ %tmp9069 = getelementptr inbounds float, float* %tmp9068, i64 1
+ %tmp9070 = getelementptr inbounds float, float* %tmp9069, i64 1
+ %tmp9071 = getelementptr inbounds float, float* %tmp9070, i64 1
+ %tmp9072 = getelementptr inbounds float, float* %tmp9071, i64 1
+ %tmp9073 = getelementptr inbounds float, float* %tmp9072, i64 1
+ %tmp9074 = getelementptr inbounds float, float* %tmp9073, i64 1
+ %tmp9075 = getelementptr inbounds float, float* %tmp9074, i64 1
+ %tmp9076 = getelementptr inbounds float, float* %tmp9075, i64 1
+ %tmp9077 = getelementptr inbounds float, float* %tmp9076, i64 1
+ %tmp9078 = getelementptr inbounds float, float* %tmp9077, i64 1
+ %tmp9079 = getelementptr inbounds float, float* %tmp9078, i64 1
+ %tmp9080 = getelementptr inbounds float, float* %tmp9079, i64 1
+ %tmp9081 = getelementptr inbounds float, float* %tmp9080, i64 1
+ %tmp9082 = getelementptr inbounds float, float* %tmp9081, i64 1
+ %tmp9083 = getelementptr inbounds float, float* %tmp9082, i64 1
+ %tmp9084 = getelementptr inbounds float, float* %tmp9083, i64 1
+ %tmp9085 = getelementptr inbounds float, float* %tmp9084, i64 1
+ %tmp9086 = getelementptr inbounds float, float* %tmp9085, i64 1
+ %tmp9087 = getelementptr inbounds float, float* %tmp9086, i64 1
+ %tmp9088 = getelementptr inbounds float, float* %tmp9087, i64 1
+ %tmp9089 = getelementptr inbounds float, float* %tmp9088, i64 1
+ %tmp9090 = getelementptr inbounds float, float* %tmp9089, i64 1
+ %tmp9091 = getelementptr inbounds float, float* %tmp9090, i64 1
+ %tmp9092 = getelementptr inbounds float, float* %tmp9091, i64 1
+ %tmp9093 = getelementptr inbounds float, float* %tmp9092, i64 1
+ %tmp9094 = getelementptr inbounds float, float* %tmp9093, i64 1
+ %tmp9095 = getelementptr inbounds float, float* %tmp9094, i64 1
+ %tmp9096 = getelementptr inbounds float, float* %tmp9095, i64 1
+ %tmp9097 = getelementptr inbounds float, float* %tmp9096, i64 1
+ %tmp9098 = getelementptr inbounds float, float* %tmp9097, i64 1
+ %tmp9099 = getelementptr inbounds float, float* %tmp9098, i64 1
+ %tmp9100 = getelementptr inbounds float, float* %tmp9099, i64 1
+ %tmp9101 = getelementptr inbounds float, float* %tmp9100, i64 1
+ %tmp9102 = getelementptr inbounds float, float* %tmp9101, i64 1
+ %tmp9103 = getelementptr inbounds float, float* %tmp9102, i64 1
+ %tmp9104 = getelementptr inbounds float, float* %tmp9103, i64 1
+ %tmp9105 = getelementptr inbounds float, float* %tmp9104, i64 1
+ %tmp9106 = getelementptr inbounds float, float* %tmp9105, i64 1
+ %tmp9107 = getelementptr inbounds float, float* %tmp9106, i64 1
+ %tmp9108 = getelementptr inbounds float, float* %tmp9107, i64 1
+ %tmp9109 = getelementptr inbounds float, float* %tmp9108, i64 1
+ %tmp9110 = getelementptr inbounds float, float* %tmp9109, i64 1
+ %tmp9111 = getelementptr inbounds float, float* %tmp9110, i64 1
+ %tmp9112 = getelementptr inbounds float, float* %tmp9111, i64 1
+ %tmp9113 = getelementptr inbounds float, float* %tmp9112, i64 1
+ %tmp9114 = getelementptr inbounds float, float* %tmp9113, i64 1
+ %tmp9115 = getelementptr inbounds float, float* %tmp9114, i64 1
+ %tmp9116 = getelementptr inbounds float, float* %tmp9115, i64 1
+ %tmp9117 = getelementptr inbounds float, float* %tmp9116, i64 1
+ %tmp9118 = getelementptr inbounds float, float* %tmp9117, i64 1
+ %tmp9119 = getelementptr inbounds float, float* %tmp9118, i64 1
+ %tmp9120 = getelementptr inbounds float, float* %tmp9119, i64 1
+ %tmp9121 = getelementptr inbounds float, float* %tmp9120, i64 1
+ %tmp9122 = getelementptr inbounds float, float* %tmp9121, i64 1
+ %tmp9123 = getelementptr inbounds float, float* %tmp9122, i64 1
+ %tmp9124 = getelementptr inbounds float, float* %tmp9123, i64 1
+ %tmp9125 = getelementptr inbounds float, float* %tmp9124, i64 1
+ %tmp9126 = getelementptr inbounds float, float* %tmp9125, i64 1
+ %tmp9127 = getelementptr inbounds float, float* %tmp9126, i64 1
+ %tmp9128 = getelementptr inbounds float, float* %tmp9127, i64 1
+ %tmp9129 = getelementptr inbounds float, float* %tmp9128, i64 1
+ %tmp9130 = getelementptr inbounds float, float* %tmp9129, i64 1
+ %tmp9131 = getelementptr inbounds float, float* %tmp9130, i64 1
+ %tmp9132 = getelementptr inbounds float, float* %tmp9131, i64 1
+ %tmp9133 = getelementptr inbounds float, float* %tmp9132, i64 1
+ %tmp9134 = getelementptr inbounds float, float* %tmp9133, i64 1
+ %tmp9135 = getelementptr inbounds float, float* %tmp9134, i64 1
+ %tmp9136 = getelementptr inbounds float, float* %tmp9135, i64 1
+ %tmp9137 = getelementptr inbounds float, float* %tmp9136, i64 1
+ %tmp9138 = getelementptr inbounds float, float* %tmp9137, i64 1
+ %tmp9139 = getelementptr inbounds float, float* %tmp9138, i64 1
+ %tmp9140 = getelementptr inbounds float, float* %tmp9139, i64 1
+ %tmp9141 = getelementptr inbounds float, float* %tmp9140, i64 1
+ %tmp9142 = getelementptr inbounds float, float* %tmp9141, i64 1
+ %tmp9143 = getelementptr inbounds float, float* %tmp9142, i64 1
+ %tmp9144 = getelementptr inbounds float, float* %tmp9143, i64 1
+ %tmp9145 = getelementptr inbounds float, float* %tmp9144, i64 1
+ %tmp9146 = getelementptr inbounds float, float* %tmp9145, i64 1
+ %tmp9147 = getelementptr inbounds float, float* %tmp9146, i64 1
+ %tmp9148 = getelementptr inbounds float, float* %tmp9147, i64 1
+ %tmp9149 = getelementptr inbounds float, float* %tmp9148, i64 1
+ %tmp9150 = getelementptr inbounds float, float* %tmp9149, i64 1
+ %tmp9151 = getelementptr inbounds float, float* %tmp9150, i64 1
+ %tmp9152 = getelementptr inbounds float, float* %tmp9151, i64 1
+ %tmp9153 = getelementptr inbounds float, float* %tmp9152, i64 1
+ %tmp9154 = getelementptr inbounds float, float* %tmp9153, i64 1
+ %tmp9155 = getelementptr inbounds float, float* %tmp9154, i64 1
+ %tmp9156 = getelementptr inbounds float, float* %tmp9155, i64 1
+ %tmp9157 = getelementptr inbounds float, float* %tmp9156, i64 1
+ %tmp9158 = getelementptr inbounds float, float* %tmp9157, i64 1
+ %tmp9159 = getelementptr inbounds float, float* %tmp9158, i64 1
+ %tmp9160 = getelementptr inbounds float, float* %tmp9159, i64 1
+ %tmp9161 = getelementptr inbounds float, float* %tmp9160, i64 1
+ %tmp9162 = getelementptr inbounds float, float* %tmp9161, i64 1
+ %tmp9163 = getelementptr inbounds float, float* %tmp9162, i64 1
+ %tmp9164 = getelementptr inbounds float, float* %tmp9163, i64 1
+ %tmp9165 = getelementptr inbounds float, float* %tmp9164, i64 1
+ %tmp9166 = getelementptr inbounds float, float* %tmp9165, i64 1
+ %tmp9167 = getelementptr inbounds float, float* %tmp9166, i64 1
+ %tmp9168 = getelementptr inbounds float, float* %tmp9167, i64 1
+ %tmp9169 = getelementptr inbounds float, float* %tmp9168, i64 1
+ %tmp9170 = getelementptr inbounds float, float* %tmp9169, i64 1
+ %tmp9171 = getelementptr inbounds float, float* %tmp9170, i64 1
+ %tmp9172 = getelementptr inbounds float, float* %tmp9171, i64 1
+ %tmp9173 = getelementptr inbounds float, float* %tmp9172, i64 1
+ %tmp9174 = getelementptr inbounds float, float* %tmp9173, i64 1
+ %tmp9175 = getelementptr inbounds float, float* %tmp9174, i64 1
+ %tmp9176 = getelementptr inbounds float, float* %tmp9175, i64 1
+ %tmp9177 = getelementptr inbounds float, float* %tmp9176, i64 1
+ %tmp9178 = getelementptr inbounds float, float* %tmp9177, i64 1
+ %tmp9179 = getelementptr inbounds float, float* %tmp9178, i64 1
+ %tmp9180 = getelementptr inbounds float, float* %tmp9179, i64 1
+ %tmp9181 = getelementptr inbounds float, float* %tmp9180, i64 1
+ %tmp9182 = getelementptr inbounds float, float* %tmp9181, i64 1
+ %tmp9183 = getelementptr inbounds float, float* %tmp9182, i64 1
+ %tmp9184 = getelementptr inbounds float, float* %tmp9183, i64 1
+ %tmp9185 = getelementptr inbounds float, float* %tmp9184, i64 1
+ %tmp9186 = getelementptr inbounds float, float* %tmp9185, i64 1
+ %tmp9187 = getelementptr inbounds float, float* %tmp9186, i64 1
+ %tmp9188 = getelementptr inbounds float, float* %tmp9187, i64 1
+ %tmp9189 = getelementptr inbounds float, float* %tmp9188, i64 1
+ %tmp9190 = getelementptr inbounds float, float* %tmp9189, i64 1
+ %tmp9191 = getelementptr inbounds float, float* %tmp9190, i64 1
+ %tmp9192 = getelementptr inbounds float, float* %tmp9191, i64 1
+ %tmp9193 = getelementptr inbounds float, float* %tmp9192, i64 1
+ %tmp9194 = getelementptr inbounds float, float* %tmp9193, i64 1
+ %tmp9195 = getelementptr inbounds float, float* %tmp9194, i64 1
+ %tmp9196 = getelementptr inbounds float, float* %tmp9195, i64 1
+ %tmp9197 = getelementptr inbounds float, float* %tmp9196, i64 1
+ %tmp9198 = getelementptr inbounds float, float* %tmp9197, i64 1
+ %tmp9199 = getelementptr inbounds float, float* %tmp9198, i64 1
+ %tmp9200 = getelementptr inbounds float, float* %tmp9199, i64 1
+ %tmp9201 = getelementptr inbounds float, float* %tmp9200, i64 1
+ %tmp9202 = getelementptr inbounds float, float* %tmp9201, i64 1
+ %tmp9203 = getelementptr inbounds float, float* %tmp9202, i64 1
+ %tmp9204 = getelementptr inbounds float, float* %tmp9203, i64 1
+ %tmp9205 = getelementptr inbounds float, float* %tmp9204, i64 1
+ %tmp9206 = getelementptr inbounds float, float* %tmp9205, i64 1
+ %tmp9207 = getelementptr inbounds float, float* %tmp9206, i64 1
+ %tmp9208 = getelementptr inbounds float, float* %tmp9207, i64 1
+ %tmp9209 = getelementptr inbounds float, float* %tmp9208, i64 1
+ %tmp9210 = getelementptr inbounds float, float* %tmp9209, i64 1
+ %tmp9211 = getelementptr inbounds float, float* %tmp9210, i64 1
+ %tmp9212 = getelementptr inbounds float, float* %tmp9211, i64 1
+ %tmp9213 = getelementptr inbounds float, float* %tmp9212, i64 1
+ %tmp9214 = getelementptr inbounds float, float* %tmp9213, i64 1
+ %tmp9215 = getelementptr inbounds float, float* %tmp9214, i64 1
+ %tmp9216 = getelementptr inbounds float, float* %tmp9215, i64 1
+ %tmp9217 = getelementptr inbounds float, float* %tmp9216, i64 1
+ %tmp9218 = getelementptr inbounds float, float* %tmp9217, i64 1
+ %tmp9219 = getelementptr inbounds float, float* %tmp9218, i64 1
+ %tmp9220 = getelementptr inbounds float, float* %tmp9219, i64 1
+ %tmp9221 = getelementptr inbounds float, float* %tmp9220, i64 1
+ %tmp9222 = getelementptr inbounds float, float* %tmp9221, i64 1
+ %tmp9223 = getelementptr inbounds float, float* %tmp9222, i64 1
+ %tmp9224 = getelementptr inbounds float, float* %tmp9223, i64 1
+ %tmp9225 = getelementptr inbounds float, float* %tmp9224, i64 1
+ %tmp9226 = getelementptr inbounds float, float* %tmp9225, i64 1
+ %tmp9227 = getelementptr inbounds float, float* %tmp9226, i64 1
+ %tmp9228 = getelementptr inbounds float, float* %tmp9227, i64 1
+ %tmp9229 = getelementptr inbounds float, float* %tmp9228, i64 1
+ %tmp9230 = getelementptr inbounds float, float* %tmp9229, i64 1
+ %tmp9231 = getelementptr inbounds float, float* %tmp9230, i64 1
+ %tmp9232 = getelementptr inbounds float, float* %tmp9231, i64 1
+ %tmp9233 = getelementptr inbounds float, float* %tmp9232, i64 1
+ %tmp9234 = getelementptr inbounds float, float* %tmp9233, i64 1
+ %tmp9235 = getelementptr inbounds float, float* %tmp9234, i64 1
+ %tmp9236 = getelementptr inbounds float, float* %tmp9235, i64 1
+ %tmp9237 = getelementptr inbounds float, float* %tmp9236, i64 1
+ %tmp9238 = getelementptr inbounds float, float* %tmp9237, i64 1
+ %tmp9239 = getelementptr inbounds float, float* %tmp9238, i64 1
+ %tmp9240 = getelementptr inbounds float, float* %tmp9239, i64 1
+ %tmp9241 = getelementptr inbounds float, float* %tmp9240, i64 1
+ %tmp9242 = getelementptr inbounds float, float* %tmp9241, i64 1
+ %tmp9243 = getelementptr inbounds float, float* %tmp9242, i64 1
+ %tmp9244 = getelementptr inbounds float, float* %tmp9243, i64 1
+ %tmp9245 = getelementptr inbounds float, float* %tmp9244, i64 1
+ %tmp9246 = getelementptr inbounds float, float* %tmp9245, i64 1
+ %tmp9247 = getelementptr inbounds float, float* %tmp9246, i64 1
+ %tmp9248 = getelementptr inbounds float, float* %tmp9247, i64 1
+ %tmp9249 = getelementptr inbounds float, float* %tmp9248, i64 1
+ %tmp9250 = getelementptr inbounds float, float* %tmp9249, i64 1
+ %tmp9251 = getelementptr inbounds float, float* %tmp9250, i64 1
+ %tmp9252 = getelementptr inbounds float, float* %tmp9251, i64 1
+ %tmp9253 = getelementptr inbounds float, float* %tmp9252, i64 1
+ %tmp9254 = getelementptr inbounds float, float* %tmp9253, i64 1
+ %tmp9255 = getelementptr inbounds float, float* %tmp9254, i64 1
+ %tmp9256 = getelementptr inbounds float, float* %tmp9255, i64 1
+ %tmp9257 = getelementptr inbounds float, float* %tmp9256, i64 1
+ %tmp9258 = getelementptr inbounds float, float* %tmp9257, i64 1
+ %tmp9259 = getelementptr inbounds float, float* %tmp9258, i64 1
+ %tmp9260 = getelementptr inbounds float, float* %tmp9259, i64 1
+ %tmp9261 = getelementptr inbounds float, float* %tmp9260, i64 1
+ %tmp9262 = getelementptr inbounds float, float* %tmp9261, i64 1
+ %tmp9263 = getelementptr inbounds float, float* %tmp9262, i64 1
+ %tmp9264 = getelementptr inbounds float, float* %tmp9263, i64 1
+ %tmp9265 = getelementptr inbounds float, float* %tmp9264, i64 1
+ %tmp9266 = getelementptr inbounds float, float* %tmp9265, i64 1
+ %tmp9267 = getelementptr inbounds float, float* %tmp9266, i64 1
+ %tmp9268 = getelementptr inbounds float, float* %tmp9267, i64 1
+ %tmp9269 = getelementptr inbounds float, float* %tmp9268, i64 1
+ %tmp9270 = getelementptr inbounds float, float* %tmp9269, i64 1
+ %tmp9271 = getelementptr inbounds float, float* %tmp9270, i64 1
+ %tmp9272 = getelementptr inbounds float, float* %tmp9271, i64 1
+ %tmp9273 = getelementptr inbounds float, float* %tmp9272, i64 1
+ %tmp9274 = getelementptr inbounds float, float* %tmp9273, i64 1
+ %tmp9275 = getelementptr inbounds float, float* %tmp9274, i64 1
+ %tmp9276 = getelementptr inbounds float, float* %tmp9275, i64 1
+ %tmp9277 = getelementptr inbounds float, float* %tmp9276, i64 1
+ %tmp9278 = getelementptr inbounds float, float* %tmp9277, i64 1
+ %tmp9279 = getelementptr inbounds float, float* %tmp9278, i64 1
+ %tmp9280 = getelementptr inbounds float, float* %tmp9279, i64 1
+ %tmp9281 = getelementptr inbounds float, float* %tmp9280, i64 1
+ %tmp9282 = getelementptr inbounds float, float* %tmp9281, i64 1
+ %tmp9283 = getelementptr inbounds float, float* %tmp9282, i64 1
+ %tmp9284 = getelementptr inbounds float, float* %tmp9283, i64 1
+ %tmp9285 = getelementptr inbounds float, float* %tmp9284, i64 1
+ %tmp9286 = getelementptr inbounds float, float* %tmp9285, i64 1
+ %tmp9287 = getelementptr inbounds float, float* %tmp9286, i64 1
+ %tmp9288 = getelementptr inbounds float, float* %tmp9287, i64 1
+ %tmp9289 = getelementptr inbounds float, float* %tmp9288, i64 1
+ %tmp9290 = getelementptr inbounds float, float* %tmp9289, i64 1
+ %tmp9291 = getelementptr inbounds float, float* %tmp9290, i64 1
+ %tmp9292 = getelementptr inbounds float, float* %tmp9291, i64 1
+ %tmp9293 = getelementptr inbounds float, float* %tmp9292, i64 1
+ %tmp9294 = getelementptr inbounds float, float* %tmp9293, i64 1
+ %tmp9295 = getelementptr inbounds float, float* %tmp9294, i64 1
+ %tmp9296 = getelementptr inbounds float, float* %tmp9295, i64 1
+ %tmp9297 = getelementptr inbounds float, float* %tmp9296, i64 1
+ %tmp9298 = getelementptr inbounds float, float* %tmp9297, i64 1
+ %tmp9299 = getelementptr inbounds float, float* %tmp9298, i64 1
+ %tmp9300 = getelementptr inbounds float, float* %tmp9299, i64 1
+ %tmp9301 = getelementptr inbounds float, float* %tmp9300, i64 1
+ %tmp9302 = getelementptr inbounds float, float* %tmp9301, i64 1
+ %tmp9303 = getelementptr inbounds float, float* %tmp9302, i64 1
+ %tmp9304 = getelementptr inbounds float, float* %tmp9303, i64 1
+ %tmp9305 = getelementptr inbounds float, float* %tmp9304, i64 1
+ %tmp9306 = getelementptr inbounds float, float* %tmp9305, i64 1
+ %tmp9307 = getelementptr inbounds float, float* %tmp9306, i64 1
+ %tmp9308 = getelementptr inbounds float, float* %tmp9307, i64 1
+ %tmp9309 = getelementptr inbounds float, float* %tmp9308, i64 1
+ %tmp9310 = getelementptr inbounds float, float* %tmp9309, i64 1
+ %tmp9311 = getelementptr inbounds float, float* %tmp9310, i64 1
+ %tmp9312 = getelementptr inbounds float, float* %tmp9311, i64 1
+ %tmp9313 = getelementptr inbounds float, float* %tmp9312, i64 1
+ %tmp9314 = getelementptr inbounds float, float* %tmp9313, i64 1
+ %tmp9315 = getelementptr inbounds float, float* %tmp9314, i64 1
+ %tmp9316 = getelementptr inbounds float, float* %tmp9315, i64 1
+ %tmp9317 = getelementptr inbounds float, float* %tmp9316, i64 1
+ %tmp9318 = getelementptr inbounds float, float* %tmp9317, i64 1
+ %tmp9319 = getelementptr inbounds float, float* %tmp9318, i64 1
+ %tmp9320 = getelementptr inbounds float, float* %tmp9319, i64 1
+ %tmp9321 = getelementptr inbounds float, float* %tmp9320, i64 1
+ %tmp9322 = getelementptr inbounds float, float* %tmp9321, i64 1
+ %tmp9323 = getelementptr inbounds float, float* %tmp9322, i64 1
+ %tmp9324 = getelementptr inbounds float, float* %tmp9323, i64 1
+ %tmp9325 = getelementptr inbounds float, float* %tmp9324, i64 1
+ %tmp9326 = getelementptr inbounds float, float* %tmp9325, i64 1
+ %tmp9327 = getelementptr inbounds float, float* %tmp9326, i64 1
+ %tmp9328 = getelementptr inbounds float, float* %tmp9327, i64 1
+ %tmp9329 = getelementptr inbounds float, float* %tmp9328, i64 1
+ %tmp9330 = getelementptr inbounds float, float* %tmp9329, i64 1
+ %tmp9331 = getelementptr inbounds float, float* %tmp9330, i64 1
+ %tmp9332 = getelementptr inbounds float, float* %tmp9331, i64 1
+ %tmp9333 = getelementptr inbounds float, float* %tmp9332, i64 1
+ %tmp9334 = getelementptr inbounds float, float* %tmp9333, i64 1
+ %tmp9335 = getelementptr inbounds float, float* %tmp9334, i64 1
+ %tmp9336 = getelementptr inbounds float, float* %tmp9335, i64 1
+ %tmp9337 = getelementptr inbounds float, float* %tmp9336, i64 1
+ %tmp9338 = getelementptr inbounds float, float* %tmp9337, i64 1
+ %tmp9339 = getelementptr inbounds float, float* %tmp9338, i64 1
+ %tmp9340 = getelementptr inbounds float, float* %tmp9339, i64 1
+ %tmp9341 = getelementptr inbounds float, float* %tmp9340, i64 1
+ %tmp9342 = getelementptr inbounds float, float* %tmp9341, i64 1
+ %tmp9343 = getelementptr inbounds float, float* %tmp9342, i64 1
+ %tmp9344 = getelementptr inbounds float, float* %tmp9343, i64 1
+ %tmp9345 = getelementptr inbounds float, float* %tmp9344, i64 1
+ %tmp9346 = getelementptr inbounds float, float* %tmp9345, i64 1
+ %tmp9347 = getelementptr inbounds float, float* %tmp9346, i64 1
+ %tmp9348 = getelementptr inbounds float, float* %tmp9347, i64 1
+ %tmp9349 = getelementptr inbounds float, float* %tmp9348, i64 1
+ %tmp9350 = getelementptr inbounds float, float* %tmp9349, i64 1
+ %tmp9351 = getelementptr inbounds float, float* %tmp9350, i64 1
+ %tmp9352 = getelementptr inbounds float, float* %tmp9351, i64 1
+ %tmp9353 = getelementptr inbounds float, float* %tmp9352, i64 1
+ %tmp9354 = getelementptr inbounds float, float* %tmp9353, i64 1
+ %tmp9355 = getelementptr inbounds float, float* %tmp9354, i64 1
+ %tmp9356 = getelementptr inbounds float, float* %tmp9355, i64 1
+ %tmp9357 = getelementptr inbounds float, float* %tmp9356, i64 1
+ %tmp9358 = getelementptr inbounds float, float* %tmp9357, i64 1
+ %tmp9359 = getelementptr inbounds float, float* %tmp9358, i64 1
+ %tmp9360 = getelementptr inbounds float, float* %tmp9359, i64 1
+ %tmp9361 = getelementptr inbounds float, float* %tmp9360, i64 1
+ %tmp9362 = getelementptr inbounds float, float* %tmp9361, i64 1
+ %tmp9363 = getelementptr inbounds float, float* %tmp9362, i64 1
+ %tmp9364 = getelementptr inbounds float, float* %tmp9363, i64 1
+ %tmp9365 = getelementptr inbounds float, float* %tmp9364, i64 1
+ %tmp9366 = getelementptr inbounds float, float* %tmp9365, i64 1
+ %tmp9367 = getelementptr inbounds float, float* %tmp9366, i64 1
+ %tmp9368 = getelementptr inbounds float, float* %tmp9367, i64 1
+ %tmp9369 = getelementptr inbounds float, float* %tmp9368, i64 1
+ %tmp9370 = getelementptr inbounds float, float* %tmp9369, i64 1
+ %tmp9371 = getelementptr inbounds float, float* %tmp9370, i64 1
+ %tmp9372 = getelementptr inbounds float, float* %tmp9371, i64 1
+ %tmp9373 = getelementptr inbounds float, float* %tmp9372, i64 1
+ %tmp9374 = getelementptr inbounds float, float* %tmp9373, i64 1
+ %tmp9375 = getelementptr inbounds float, float* %tmp9374, i64 1
+ %tmp9376 = getelementptr inbounds float, float* %tmp9375, i64 1
+ %tmp9377 = getelementptr inbounds float, float* %tmp9376, i64 1
+ %tmp9378 = getelementptr inbounds float, float* %tmp9377, i64 1
+ %tmp9379 = getelementptr inbounds float, float* %tmp9378, i64 1
+ %tmp9380 = getelementptr inbounds float, float* %tmp9379, i64 1
+ %tmp9381 = getelementptr inbounds float, float* %tmp9380, i64 1
+ %tmp9382 = getelementptr inbounds float, float* %tmp9381, i64 1
+ %tmp9383 = getelementptr inbounds float, float* %tmp9382, i64 1
+ %tmp9384 = getelementptr inbounds float, float* %tmp9383, i64 1
+ %tmp9385 = getelementptr inbounds float, float* %tmp9384, i64 1
+ %tmp9386 = getelementptr inbounds float, float* %tmp9385, i64 1
+ %tmp9387 = getelementptr inbounds float, float* %tmp9386, i64 1
+ %tmp9388 = getelementptr inbounds float, float* %tmp9387, i64 1
+ %tmp9389 = getelementptr inbounds float, float* %tmp9388, i64 1
+ %tmp9390 = getelementptr inbounds float, float* %tmp9389, i64 1
+ %tmp9391 = getelementptr inbounds float, float* %tmp9390, i64 1
+ %tmp9392 = getelementptr inbounds float, float* %tmp9391, i64 1
+ %tmp9393 = getelementptr inbounds float, float* %tmp9392, i64 1
+ %tmp9394 = getelementptr inbounds float, float* %tmp9393, i64 1
+ %tmp9395 = getelementptr inbounds float, float* %tmp9394, i64 1
+ %tmp9396 = getelementptr inbounds float, float* %tmp9395, i64 1
+ %tmp9397 = getelementptr inbounds float, float* %tmp9396, i64 1
+ %tmp9398 = getelementptr inbounds float, float* %tmp9397, i64 1
+ %tmp9399 = getelementptr inbounds float, float* %tmp9398, i64 1
+ %tmp9400 = getelementptr inbounds float, float* %tmp9399, i64 1
+ %tmp9401 = getelementptr inbounds float, float* %tmp9400, i64 1
+ %tmp9402 = getelementptr inbounds float, float* %tmp9401, i64 1
+ %tmp9403 = getelementptr inbounds float, float* %tmp9402, i64 1
+ %tmp9404 = getelementptr inbounds float, float* %tmp9403, i64 1
+ %tmp9405 = getelementptr inbounds float, float* %tmp9404, i64 1
+ %tmp9406 = getelementptr inbounds float, float* %tmp9405, i64 1
+ %tmp9407 = getelementptr inbounds float, float* %tmp9406, i64 1
+ %tmp9408 = getelementptr inbounds float, float* %tmp9407, i64 1
+ %tmp9409 = getelementptr inbounds float, float* %tmp9408, i64 1
+ %tmp9410 = getelementptr inbounds float, float* %tmp9409, i64 1
+ %tmp9411 = getelementptr inbounds float, float* %tmp9410, i64 1
+ %tmp9412 = getelementptr inbounds float, float* %tmp9411, i64 1
+ %tmp9413 = getelementptr inbounds float, float* %tmp9412, i64 1
+ %tmp9414 = getelementptr inbounds float, float* %tmp9413, i64 1
+ %tmp9415 = getelementptr inbounds float, float* %tmp9414, i64 1
+ %tmp9416 = getelementptr inbounds float, float* %tmp9415, i64 1
+ %tmp9417 = getelementptr inbounds float, float* %tmp9416, i64 1
+ %tmp9418 = getelementptr inbounds float, float* %tmp9417, i64 1
+ %tmp9419 = getelementptr inbounds float, float* %tmp9418, i64 1
+ %tmp9420 = getelementptr inbounds float, float* %tmp9419, i64 1
+ %tmp9421 = getelementptr inbounds float, float* %tmp9420, i64 1
+ %tmp9422 = getelementptr inbounds float, float* %tmp9421, i64 1
+ %tmp9423 = getelementptr inbounds float, float* %tmp9422, i64 1
+ %tmp9424 = getelementptr inbounds float, float* %tmp9423, i64 1
+ %tmp9425 = getelementptr inbounds float, float* %tmp9424, i64 1
+ %tmp9426 = getelementptr inbounds float, float* %tmp9425, i64 1
+ %tmp9427 = getelementptr inbounds float, float* %tmp9426, i64 1
+ %tmp9428 = getelementptr inbounds float, float* %tmp9427, i64 1
+ %tmp9429 = getelementptr inbounds float, float* %tmp9428, i64 1
+ %tmp9430 = getelementptr inbounds float, float* %tmp9429, i64 1
+ %tmp9431 = getelementptr inbounds float, float* %tmp9430, i64 1
+ %tmp9432 = getelementptr inbounds float, float* %tmp9431, i64 1
+ %tmp9433 = getelementptr inbounds float, float* %tmp9432, i64 1
+ %tmp9434 = getelementptr inbounds float, float* %tmp9433, i64 1
+ %tmp9435 = getelementptr inbounds float, float* %tmp9434, i64 1
+ %tmp9436 = getelementptr inbounds float, float* %tmp9435, i64 1
+ %tmp9437 = getelementptr inbounds float, float* %tmp9436, i64 1
+ %tmp9438 = getelementptr inbounds float, float* %tmp9437, i64 1
+ %tmp9439 = getelementptr inbounds float, float* %tmp9438, i64 1
+ %tmp9440 = getelementptr inbounds float, float* %tmp9439, i64 1
+ %tmp9441 = getelementptr inbounds float, float* %tmp9440, i64 1
+ %tmp9442 = getelementptr inbounds float, float* %tmp9441, i64 1
+ %tmp9443 = getelementptr inbounds float, float* %tmp9442, i64 1
+ %tmp9444 = getelementptr inbounds float, float* %tmp9443, i64 1
+ %tmp9445 = getelementptr inbounds float, float* %tmp9444, i64 1
+ %tmp9446 = getelementptr inbounds float, float* %tmp9445, i64 1
+ %tmp9447 = getelementptr inbounds float, float* %tmp9446, i64 1
+ %tmp9448 = getelementptr inbounds float, float* %tmp9447, i64 1
+ %tmp9449 = getelementptr inbounds float, float* %tmp9448, i64 1
+ %tmp9450 = getelementptr inbounds float, float* %tmp9449, i64 1
+ %tmp9451 = getelementptr inbounds float, float* %tmp9450, i64 1
+ %tmp9452 = getelementptr inbounds float, float* %tmp9451, i64 1
+ %tmp9453 = getelementptr inbounds float, float* %tmp9452, i64 1
+ %tmp9454 = getelementptr inbounds float, float* %tmp9453, i64 1
+ %tmp9455 = getelementptr inbounds float, float* %tmp9454, i64 1
+ %tmp9456 = getelementptr inbounds float, float* %tmp9455, i64 1
+ %tmp9457 = getelementptr inbounds float, float* %tmp9456, i64 1
+ %tmp9458 = getelementptr inbounds float, float* %tmp9457, i64 1
+ %tmp9459 = getelementptr inbounds float, float* %tmp9458, i64 1
+ %tmp9460 = getelementptr inbounds float, float* %tmp9459, i64 1
+ %tmp9461 = getelementptr inbounds float, float* %tmp9460, i64 1
+ %tmp9462 = getelementptr inbounds float, float* %tmp9461, i64 1
+ %tmp9463 = getelementptr inbounds float, float* %tmp9462, i64 1
+ %tmp9464 = getelementptr inbounds float, float* %tmp9463, i64 1
+ %tmp9465 = getelementptr inbounds float, float* %tmp9464, i64 1
+ %tmp9466 = getelementptr inbounds float, float* %tmp9465, i64 1
+ %tmp9467 = getelementptr inbounds float, float* %tmp9466, i64 1
+ %tmp9468 = getelementptr inbounds float, float* %tmp9467, i64 1
+ %tmp9469 = getelementptr inbounds float, float* %tmp9468, i64 1
+ %tmp9470 = getelementptr inbounds float, float* %tmp9469, i64 1
+ %tmp9471 = getelementptr inbounds float, float* %tmp9470, i64 1
+ %tmp9472 = getelementptr inbounds float, float* %tmp9471, i64 1
+ %tmp9473 = getelementptr inbounds float, float* %tmp9472, i64 1
+ %tmp9474 = getelementptr inbounds float, float* %tmp9473, i64 1
+ %tmp9475 = getelementptr inbounds float, float* %tmp9474, i64 1
+ %tmp9476 = getelementptr inbounds float, float* %tmp9475, i64 1
+ %tmp9477 = getelementptr inbounds float, float* %tmp9476, i64 1
+ %tmp9478 = getelementptr inbounds float, float* %tmp9477, i64 1
+ %tmp9479 = getelementptr inbounds float, float* %tmp9478, i64 1
+ %tmp9480 = getelementptr inbounds float, float* %tmp9479, i64 1
+ %tmp9481 = getelementptr inbounds float, float* %tmp9480, i64 1
+ %tmp9482 = getelementptr inbounds float, float* %tmp9481, i64 1
+ %tmp9483 = getelementptr inbounds float, float* %tmp9482, i64 1
+ %tmp9484 = getelementptr inbounds float, float* %tmp9483, i64 1
+ %tmp9485 = getelementptr inbounds float, float* %tmp9484, i64 1
+ %tmp9486 = getelementptr inbounds float, float* %tmp9485, i64 1
+ %tmp9487 = getelementptr inbounds float, float* %tmp9486, i64 1
+ %tmp9488 = getelementptr inbounds float, float* %tmp9487, i64 1
+ %tmp9489 = getelementptr inbounds float, float* %tmp9488, i64 1
+ %tmp9490 = getelementptr inbounds float, float* %tmp9489, i64 1
+ %tmp9491 = getelementptr inbounds float, float* %tmp9490, i64 1
+ %tmp9492 = getelementptr inbounds float, float* %tmp9491, i64 1
+ %tmp9493 = getelementptr inbounds float, float* %tmp9492, i64 1
+ %tmp9494 = getelementptr inbounds float, float* %tmp9493, i64 1
+ %tmp9495 = getelementptr inbounds float, float* %tmp9494, i64 1
+ %tmp9496 = getelementptr inbounds float, float* %tmp9495, i64 1
+ %tmp9497 = getelementptr inbounds float, float* %tmp9496, i64 1
+ %tmp9498 = getelementptr inbounds float, float* %tmp9497, i64 1
+ %tmp9499 = getelementptr inbounds float, float* %tmp9498, i64 1
+ %tmp9500 = getelementptr inbounds float, float* %tmp9499, i64 1
+ %tmp9501 = getelementptr inbounds float, float* %tmp9500, i64 1
+ %tmp9502 = getelementptr inbounds float, float* %tmp9501, i64 1
+ %tmp9503 = getelementptr inbounds float, float* %tmp9502, i64 1
+ %tmp9504 = getelementptr inbounds float, float* %tmp9503, i64 1
+ %tmp9505 = getelementptr inbounds float, float* %tmp9504, i64 1
+ %tmp9506 = getelementptr inbounds float, float* %tmp9505, i64 1
+ %tmp9507 = getelementptr inbounds float, float* %tmp9506, i64 1
+ %tmp9508 = getelementptr inbounds float, float* %tmp9507, i64 1
+ %tmp9509 = getelementptr inbounds float, float* %tmp9508, i64 1
+ %tmp9510 = getelementptr inbounds float, float* %tmp9509, i64 1
+ %tmp9511 = getelementptr inbounds float, float* %tmp9510, i64 1
+ %tmp9512 = getelementptr inbounds float, float* %tmp9511, i64 1
+ %tmp9513 = getelementptr inbounds float, float* %tmp9512, i64 1
+ %tmp9514 = getelementptr inbounds float, float* %tmp9513, i64 1
+ %tmp9515 = getelementptr inbounds float, float* %tmp9514, i64 1
+ %tmp9516 = getelementptr inbounds float, float* %tmp9515, i64 1
+ %tmp9517 = getelementptr inbounds float, float* %tmp9516, i64 1
+ %tmp9518 = getelementptr inbounds float, float* %tmp9517, i64 1
+ %tmp9519 = getelementptr inbounds float, float* %tmp9518, i64 1
+ %tmp9520 = getelementptr inbounds float, float* %tmp9519, i64 1
+ %tmp9521 = getelementptr inbounds float, float* %tmp9520, i64 1
+ %tmp9522 = getelementptr inbounds float, float* %tmp9521, i64 1
+ %tmp9523 = getelementptr inbounds float, float* %tmp9522, i64 1
+ %tmp9524 = getelementptr inbounds float, float* %tmp9523, i64 1
+ %tmp9525 = getelementptr inbounds float, float* %tmp9524, i64 1
+ %tmp9526 = getelementptr inbounds float, float* %tmp9525, i64 1
+ %tmp9527 = getelementptr inbounds float, float* %tmp9526, i64 1
+ %tmp9528 = getelementptr inbounds float, float* %tmp9527, i64 1
+ %tmp9529 = getelementptr inbounds float, float* %tmp9528, i64 1
+ %tmp9530 = getelementptr inbounds float, float* %tmp9529, i64 1
+ %tmp9531 = getelementptr inbounds float, float* %tmp9530, i64 1
+ %tmp9532 = getelementptr inbounds float, float* %tmp9531, i64 1
+ %tmp9533 = getelementptr inbounds float, float* %tmp9532, i64 1
+ %tmp9534 = getelementptr inbounds float, float* %tmp9533, i64 1
+ %tmp9535 = getelementptr inbounds float, float* %tmp9534, i64 1
+ %tmp9536 = getelementptr inbounds float, float* %tmp9535, i64 1
+ %tmp9537 = getelementptr inbounds float, float* %tmp9536, i64 1
+ %tmp9538 = getelementptr inbounds float, float* %tmp9537, i64 1
+ %tmp9539 = getelementptr inbounds float, float* %tmp9538, i64 1
+ %tmp9540 = getelementptr inbounds float, float* %tmp9539, i64 1
+ %tmp9541 = getelementptr inbounds float, float* %tmp9540, i64 1
+ %tmp9542 = getelementptr inbounds float, float* %tmp9541, i64 1
+ %tmp9543 = getelementptr inbounds float, float* %tmp9542, i64 1
+ %tmp9544 = getelementptr inbounds float, float* %tmp9543, i64 1
+ %tmp9545 = getelementptr inbounds float, float* %tmp9544, i64 1
+ %tmp9546 = getelementptr inbounds float, float* %tmp9545, i64 1
+ %tmp9547 = getelementptr inbounds float, float* %tmp9546, i64 1
+ %tmp9548 = getelementptr inbounds float, float* %tmp9547, i64 1
+ %tmp9549 = getelementptr inbounds float, float* %tmp9548, i64 1
+ %tmp9550 = getelementptr inbounds float, float* %tmp9549, i64 1
+ %tmp9551 = getelementptr inbounds float, float* %tmp9550, i64 1
+ %tmp9552 = getelementptr inbounds float, float* %tmp9551, i64 1
+ %tmp9553 = getelementptr inbounds float, float* %tmp9552, i64 1
+ %tmp9554 = getelementptr inbounds float, float* %tmp9553, i64 1
+ %tmp9555 = getelementptr inbounds float, float* %tmp9554, i64 1
+ %tmp9556 = getelementptr inbounds float, float* %tmp9555, i64 1
+ %tmp9557 = getelementptr inbounds float, float* %tmp9556, i64 1
+ %tmp9558 = getelementptr inbounds float, float* %tmp9557, i64 1
+ %tmp9559 = getelementptr inbounds float, float* %tmp9558, i64 1
+ %tmp9560 = getelementptr inbounds float, float* %tmp9559, i64 1
+ %tmp9561 = getelementptr inbounds float, float* %tmp9560, i64 1
+ %tmp9562 = getelementptr inbounds float, float* %tmp9561, i64 1
+ %tmp9563 = getelementptr inbounds float, float* %tmp9562, i64 1
+ %tmp9564 = getelementptr inbounds float, float* %tmp9563, i64 1
+ %tmp9565 = getelementptr inbounds float, float* %tmp9564, i64 1
+ %tmp9566 = getelementptr inbounds float, float* %tmp9565, i64 1
+ %tmp9567 = getelementptr inbounds float, float* %tmp9566, i64 1
+ %tmp9568 = getelementptr inbounds float, float* %tmp9567, i64 1
+ %tmp9569 = getelementptr inbounds float, float* %tmp9568, i64 1
+ %tmp9570 = getelementptr inbounds float, float* %tmp9569, i64 1
+ %tmp9571 = getelementptr inbounds float, float* %tmp9570, i64 1
+ %tmp9572 = getelementptr inbounds float, float* %tmp9571, i64 1
+ %tmp9573 = getelementptr inbounds float, float* %tmp9572, i64 1
+ %tmp9574 = getelementptr inbounds float, float* %tmp9573, i64 1
+ %tmp9575 = getelementptr inbounds float, float* %tmp9574, i64 1
+ %tmp9576 = getelementptr inbounds float, float* %tmp9575, i64 1
+ %tmp9577 = getelementptr inbounds float, float* %tmp9576, i64 1
+ %tmp9578 = getelementptr inbounds float, float* %tmp9577, i64 1
+ %tmp9579 = getelementptr inbounds float, float* %tmp9578, i64 1
+ %tmp9580 = getelementptr inbounds float, float* %tmp9579, i64 1
+ %tmp9581 = getelementptr inbounds float, float* %tmp9580, i64 1
+ %tmp9582 = getelementptr inbounds float, float* %tmp9581, i64 1
+ %tmp9583 = getelementptr inbounds float, float* %tmp9582, i64 1
+ %tmp9584 = getelementptr inbounds float, float* %tmp9583, i64 1
+ %tmp9585 = getelementptr inbounds float, float* %tmp9584, i64 1
+ %tmp9586 = getelementptr inbounds float, float* %tmp9585, i64 1
+ %tmp9587 = getelementptr inbounds float, float* %tmp9586, i64 1
+ %tmp9588 = getelementptr inbounds float, float* %tmp9587, i64 1
+ %tmp9589 = getelementptr inbounds float, float* %tmp9588, i64 1
+ %tmp9590 = getelementptr inbounds float, float* %tmp9589, i64 1
+ %tmp9591 = getelementptr inbounds float, float* %tmp9590, i64 1
+ %tmp9592 = getelementptr inbounds float, float* %tmp9591, i64 1
+ %tmp9593 = getelementptr inbounds float, float* %tmp9592, i64 1
+ %tmp9594 = getelementptr inbounds float, float* %tmp9593, i64 1
+ %tmp9595 = getelementptr inbounds float, float* %tmp9594, i64 1
+ %tmp9596 = getelementptr inbounds float, float* %tmp9595, i64 1
+ %tmp9597 = getelementptr inbounds float, float* %tmp9596, i64 1
+ %tmp9598 = getelementptr inbounds float, float* %tmp9597, i64 1
+ %tmp9599 = getelementptr inbounds float, float* %tmp9598, i64 1
+ %tmp9600 = getelementptr inbounds float, float* %tmp9599, i64 1
+ %tmp9601 = getelementptr inbounds float, float* %tmp9600, i64 1
+ %tmp9602 = getelementptr inbounds float, float* %tmp9601, i64 1
+ %tmp9603 = getelementptr inbounds float, float* %tmp9602, i64 1
+ %tmp9604 = getelementptr inbounds float, float* %tmp9603, i64 1
+ %tmp9605 = getelementptr inbounds float, float* %tmp9604, i64 1
+ %tmp9606 = getelementptr inbounds float, float* %tmp9605, i64 1
+ %tmp9607 = getelementptr inbounds float, float* %tmp9606, i64 1
+ %tmp9608 = getelementptr inbounds float, float* %tmp9607, i64 1
+ %tmp9609 = getelementptr inbounds float, float* %tmp9608, i64 1
+ %tmp9610 = getelementptr inbounds float, float* %tmp9609, i64 1
+ %tmp9611 = getelementptr inbounds float, float* %tmp9610, i64 1
+ %tmp9612 = getelementptr inbounds float, float* %tmp9611, i64 1
+ %tmp9613 = getelementptr inbounds float, float* %tmp9612, i64 1
+ %tmp9614 = getelementptr inbounds float, float* %tmp9613, i64 1
+ %tmp9615 = getelementptr inbounds float, float* %tmp9614, i64 1
+ %tmp9616 = getelementptr inbounds float, float* %tmp9615, i64 1
+ %tmp9617 = getelementptr inbounds float, float* %tmp9616, i64 1
+ %tmp9618 = getelementptr inbounds float, float* %tmp9617, i64 1
+ %tmp9619 = getelementptr inbounds float, float* %tmp9618, i64 1
+ %tmp9620 = getelementptr inbounds float, float* %tmp9619, i64 1
+ %tmp9621 = getelementptr inbounds float, float* %tmp9620, i64 1
+ %tmp9622 = getelementptr inbounds float, float* %tmp9621, i64 1
+ %tmp9623 = getelementptr inbounds float, float* %tmp9622, i64 1
+ %tmp9624 = getelementptr inbounds float, float* %tmp9623, i64 1
+ %tmp9625 = getelementptr inbounds float, float* %tmp9624, i64 1
+ %tmp9626 = getelementptr inbounds float, float* %tmp9625, i64 1
+ %tmp9627 = getelementptr inbounds float, float* %tmp9626, i64 1
+ %tmp9628 = getelementptr inbounds float, float* %tmp9627, i64 1
+ %tmp9629 = getelementptr inbounds float, float* %tmp9628, i64 1
+ %tmp9630 = getelementptr inbounds float, float* %tmp9629, i64 1
+ %tmp9631 = getelementptr inbounds float, float* %tmp9630, i64 1
+ %tmp9632 = getelementptr inbounds float, float* %tmp9631, i64 1
+ %tmp9633 = getelementptr inbounds float, float* %tmp9632, i64 1
+ %tmp9634 = getelementptr inbounds float, float* %tmp9633, i64 1
+ %tmp9635 = getelementptr inbounds float, float* %tmp9634, i64 1
+ %tmp9636 = getelementptr inbounds float, float* %tmp9635, i64 1
+ %tmp9637 = getelementptr inbounds float, float* %tmp9636, i64 1
+ %tmp9638 = getelementptr inbounds float, float* %tmp9637, i64 1
+ %tmp9639 = getelementptr inbounds float, float* %tmp9638, i64 1
+ %tmp9640 = getelementptr inbounds float, float* %tmp9639, i64 1
+ %tmp9641 = getelementptr inbounds float, float* %tmp9640, i64 1
+ %tmp9642 = getelementptr inbounds float, float* %tmp9641, i64 1
+ %tmp9643 = getelementptr inbounds float, float* %tmp9642, i64 1
+ %tmp9644 = getelementptr inbounds float, float* %tmp9643, i64 1
+ %tmp9645 = getelementptr inbounds float, float* %tmp9644, i64 1
+ %tmp9646 = getelementptr inbounds float, float* %tmp9645, i64 1
+ %tmp9647 = getelementptr inbounds float, float* %tmp9646, i64 1
+ %tmp9648 = getelementptr inbounds float, float* %tmp9647, i64 1
+ %tmp9649 = getelementptr inbounds float, float* %tmp9648, i64 1
+ %tmp9650 = getelementptr inbounds float, float* %tmp9649, i64 1
+ %tmp9651 = getelementptr inbounds float, float* %tmp9650, i64 1
+ %tmp9652 = getelementptr inbounds float, float* %tmp9651, i64 1
+ %tmp9653 = getelementptr inbounds float, float* %tmp9652, i64 1
+ %tmp9654 = getelementptr inbounds float, float* %tmp9653, i64 1
+ %tmp9655 = getelementptr inbounds float, float* %tmp9654, i64 1
+ %tmp9656 = getelementptr inbounds float, float* %tmp9655, i64 1
+ %tmp9657 = getelementptr inbounds float, float* %tmp9656, i64 1
+ %tmp9658 = getelementptr inbounds float, float* %tmp9657, i64 1
+ %tmp9659 = getelementptr inbounds float, float* %tmp9658, i64 1
+ %tmp9660 = getelementptr inbounds float, float* %tmp9659, i64 1
+ %tmp9661 = getelementptr inbounds float, float* %tmp9660, i64 1
+ %tmp9662 = getelementptr inbounds float, float* %tmp9661, i64 1
+ %tmp9663 = getelementptr inbounds float, float* %tmp9662, i64 1
+ %tmp9664 = getelementptr inbounds float, float* %tmp9663, i64 1
+ %tmp9665 = getelementptr inbounds float, float* %tmp9664, i64 1
+ %tmp9666 = getelementptr inbounds float, float* %tmp9665, i64 1
+ %tmp9667 = getelementptr inbounds float, float* %tmp9666, i64 1
+ %tmp9668 = getelementptr inbounds float, float* %tmp9667, i64 1
+ %tmp9669 = getelementptr inbounds float, float* %tmp9668, i64 1
+ %tmp9670 = getelementptr inbounds float, float* %tmp9669, i64 1
+ %tmp9671 = getelementptr inbounds float, float* %tmp9670, i64 1
+ %tmp9672 = getelementptr inbounds float, float* %tmp9671, i64 1
+ %tmp9673 = getelementptr inbounds float, float* %tmp9672, i64 1
+ %tmp9674 = getelementptr inbounds float, float* %tmp9673, i64 1
+ %tmp9675 = getelementptr inbounds float, float* %tmp9674, i64 1
+ %tmp9676 = getelementptr inbounds float, float* %tmp9675, i64 1
+ %tmp9677 = getelementptr inbounds float, float* %tmp9676, i64 1
+ %tmp9678 = getelementptr inbounds float, float* %tmp9677, i64 1
+ %tmp9679 = getelementptr inbounds float, float* %tmp9678, i64 1
+ %tmp9680 = getelementptr inbounds float, float* %tmp9679, i64 1
+ %tmp9681 = getelementptr inbounds float, float* %tmp9680, i64 1
+ %tmp9682 = getelementptr inbounds float, float* %tmp9681, i64 1
+ %tmp9683 = getelementptr inbounds float, float* %tmp9682, i64 1
+ %tmp9684 = getelementptr inbounds float, float* %tmp9683, i64 1
+ %tmp9685 = getelementptr inbounds float, float* %tmp9684, i64 1
+ %tmp9686 = getelementptr inbounds float, float* %tmp9685, i64 1
+ %tmp9687 = getelementptr inbounds float, float* %tmp9686, i64 1
+ %tmp9688 = getelementptr inbounds float, float* %tmp9687, i64 1
+ %tmp9689 = getelementptr inbounds float, float* %tmp9688, i64 1
+ %tmp9690 = getelementptr inbounds float, float* %tmp9689, i64 1
+ %tmp9691 = getelementptr inbounds float, float* %tmp9690, i64 1
+ %tmp9692 = getelementptr inbounds float, float* %tmp9691, i64 1
+ %tmp9693 = getelementptr inbounds float, float* %tmp9692, i64 1
+ %tmp9694 = getelementptr inbounds float, float* %tmp9693, i64 1
+ %tmp9695 = getelementptr inbounds float, float* %tmp9694, i64 1
+ %tmp9696 = getelementptr inbounds float, float* %tmp9695, i64 1
+ %tmp9697 = getelementptr inbounds float, float* %tmp9696, i64 1
+ %tmp9698 = getelementptr inbounds float, float* %tmp9697, i64 1
+ %tmp9699 = getelementptr inbounds float, float* %tmp9698, i64 1
+ %tmp9700 = getelementptr inbounds float, float* %tmp9699, i64 1
+ %tmp9701 = getelementptr inbounds float, float* %tmp9700, i64 1
+ %tmp9702 = getelementptr inbounds float, float* %tmp9701, i64 1
+ %tmp9703 = getelementptr inbounds float, float* %tmp9702, i64 1
+ %tmp9704 = getelementptr inbounds float, float* %tmp9703, i64 1
+ %tmp9705 = getelementptr inbounds float, float* %tmp9704, i64 1
+ %tmp9706 = getelementptr inbounds float, float* %tmp9705, i64 1
+ %tmp9707 = getelementptr inbounds float, float* %tmp9706, i64 1
+ %tmp9708 = getelementptr inbounds float, float* %tmp9707, i64 1
+ %tmp9709 = getelementptr inbounds float, float* %tmp9708, i64 1
+ %tmp9710 = getelementptr inbounds float, float* %tmp9709, i64 1
+ %tmp9711 = getelementptr inbounds float, float* %tmp9710, i64 1
+ %tmp9712 = getelementptr inbounds float, float* %tmp9711, i64 1
+ %tmp9713 = getelementptr inbounds float, float* %tmp9712, i64 1
+ %tmp9714 = getelementptr inbounds float, float* %tmp9713, i64 1
+ %tmp9715 = getelementptr inbounds float, float* %tmp9714, i64 1
+ %tmp9716 = getelementptr inbounds float, float* %tmp9715, i64 1
+ %tmp9717 = getelementptr inbounds float, float* %tmp9716, i64 1
+ %tmp9718 = getelementptr inbounds float, float* %tmp9717, i64 1
+ %tmp9719 = getelementptr inbounds float, float* %tmp9718, i64 1
+ %tmp9720 = getelementptr inbounds float, float* %tmp9719, i64 1
+ %tmp9721 = getelementptr inbounds float, float* %tmp9720, i64 1
+ %tmp9722 = getelementptr inbounds float, float* %tmp9721, i64 1
+ %tmp9723 = getelementptr inbounds float, float* %tmp9722, i64 1
+ %tmp9724 = getelementptr inbounds float, float* %tmp9723, i64 1
+ %tmp9725 = getelementptr inbounds float, float* %tmp9724, i64 1
+ %tmp9726 = getelementptr inbounds float, float* %tmp9725, i64 1
+ %tmp9727 = getelementptr inbounds float, float* %tmp9726, i64 1
+ %tmp9728 = getelementptr inbounds float, float* %tmp9727, i64 1
+ %tmp9729 = getelementptr inbounds float, float* %tmp9728, i64 1
+ %tmp9730 = getelementptr inbounds float, float* %tmp9729, i64 1
+ %tmp9731 = getelementptr inbounds float, float* %tmp9730, i64 1
+ %tmp9732 = getelementptr inbounds float, float* %tmp9731, i64 1
+ %tmp9733 = getelementptr inbounds float, float* %tmp9732, i64 1
+ %tmp9734 = getelementptr inbounds float, float* %tmp9733, i64 1
+ %tmp9735 = getelementptr inbounds float, float* %tmp9734, i64 1
+ %tmp9736 = getelementptr inbounds float, float* %tmp9735, i64 1
+ %tmp9737 = getelementptr inbounds float, float* %tmp9736, i64 1
+ %tmp9738 = getelementptr inbounds float, float* %tmp9737, i64 1
+ %tmp9739 = getelementptr inbounds float, float* %tmp9738, i64 1
+ %tmp9740 = getelementptr inbounds float, float* %tmp9739, i64 1
+ %tmp9741 = getelementptr inbounds float, float* %tmp9740, i64 1
+ %tmp9742 = getelementptr inbounds float, float* %tmp9741, i64 1
+ %tmp9743 = getelementptr inbounds float, float* %tmp9742, i64 1
+ %tmp9744 = getelementptr inbounds float, float* %tmp9743, i64 1
+ %tmp9745 = getelementptr inbounds float, float* %tmp9744, i64 1
+ %tmp9746 = getelementptr inbounds float, float* %tmp9745, i64 1
+ %tmp9747 = getelementptr inbounds float, float* %tmp9746, i64 1
+ %tmp9748 = getelementptr inbounds float, float* %tmp9747, i64 1
+ %tmp9749 = getelementptr inbounds float, float* %tmp9748, i64 1
+ %tmp9750 = getelementptr inbounds float, float* %tmp9749, i64 1
+ %tmp9751 = getelementptr inbounds float, float* %tmp9750, i64 1
+ %tmp9752 = getelementptr inbounds float, float* %tmp9751, i64 1
+ %tmp9753 = getelementptr inbounds float, float* %tmp9752, i64 1
+ %tmp9754 = getelementptr inbounds float, float* %tmp9753, i64 1
+ %tmp9755 = getelementptr inbounds float, float* %tmp9754, i64 1
+ %tmp9756 = getelementptr inbounds float, float* %tmp9755, i64 1
+ %tmp9757 = getelementptr inbounds float, float* %tmp9756, i64 1
+ %tmp9758 = getelementptr inbounds float, float* %tmp9757, i64 1
+ %tmp9759 = getelementptr inbounds float, float* %tmp9758, i64 1
+ %tmp9760 = getelementptr inbounds float, float* %tmp9759, i64 1
+ %tmp9761 = getelementptr inbounds float, float* %tmp9760, i64 1
+ %tmp9762 = getelementptr inbounds float, float* %tmp9761, i64 1
+ %tmp9763 = getelementptr inbounds float, float* %tmp9762, i64 1
+ %tmp9764 = getelementptr inbounds float, float* %tmp9763, i64 1
+ %tmp9765 = getelementptr inbounds float, float* %tmp9764, i64 1
+ %tmp9766 = getelementptr inbounds float, float* %tmp9765, i64 1
+ %tmp9767 = getelementptr inbounds float, float* %tmp9766, i64 1
+ %tmp9768 = getelementptr inbounds float, float* %tmp9767, i64 1
+ %tmp9769 = getelementptr inbounds float, float* %tmp9768, i64 1
+ %tmp9770 = getelementptr inbounds float, float* %tmp9769, i64 1
+ %tmp9771 = getelementptr inbounds float, float* %tmp9770, i64 1
+ %tmp9772 = getelementptr inbounds float, float* %tmp9771, i64 1
+ %tmp9773 = getelementptr inbounds float, float* %tmp9772, i64 1
+ %tmp9774 = getelementptr inbounds float, float* %tmp9773, i64 1
+ %tmp9775 = getelementptr inbounds float, float* %tmp9774, i64 1
+ %tmp9776 = getelementptr inbounds float, float* %tmp9775, i64 1
+ %tmp9777 = getelementptr inbounds float, float* %tmp9776, i64 1
+ %tmp9778 = getelementptr inbounds float, float* %tmp9777, i64 1
+ %tmp9779 = getelementptr inbounds float, float* %tmp9778, i64 1
+ %tmp9780 = getelementptr inbounds float, float* %tmp9779, i64 1
+ %tmp9781 = getelementptr inbounds float, float* %tmp9780, i64 1
+ %tmp9782 = getelementptr inbounds float, float* %tmp9781, i64 1
+ %tmp9783 = getelementptr inbounds float, float* %tmp9782, i64 1
+ %tmp9784 = getelementptr inbounds float, float* %tmp9783, i64 1
+ %tmp9785 = getelementptr inbounds float, float* %tmp9784, i64 1
+ %tmp9786 = getelementptr inbounds float, float* %tmp9785, i64 1
+ %tmp9787 = getelementptr inbounds float, float* %tmp9786, i64 1
+ %tmp9788 = getelementptr inbounds float, float* %tmp9787, i64 1
+ %tmp9789 = getelementptr inbounds float, float* %tmp9788, i64 1
+ %tmp9790 = getelementptr inbounds float, float* %tmp9789, i64 1
+ %tmp9791 = getelementptr inbounds float, float* %tmp9790, i64 1
+ %tmp9792 = getelementptr inbounds float, float* %tmp9791, i64 1
+ %tmp9793 = getelementptr inbounds float, float* %tmp9792, i64 1
+ %tmp9794 = getelementptr inbounds float, float* %tmp9793, i64 1
+ %tmp9795 = getelementptr inbounds float, float* %tmp9794, i64 1
+ %tmp9796 = getelementptr inbounds float, float* %tmp9795, i64 1
+ %tmp9797 = getelementptr inbounds float, float* %tmp9796, i64 1
+ %tmp9798 = getelementptr inbounds float, float* %tmp9797, i64 1
+ %tmp9799 = getelementptr inbounds float, float* %tmp9798, i64 1
+ %tmp9800 = getelementptr inbounds float, float* %tmp9799, i64 1
+ %tmp9801 = getelementptr inbounds float, float* %tmp9800, i64 1
+ %tmp9802 = getelementptr inbounds float, float* %tmp9801, i64 1
+ %tmp9803 = getelementptr inbounds float, float* %tmp9802, i64 1
+ %tmp9804 = getelementptr inbounds float, float* %tmp9803, i64 1
+ %tmp9805 = getelementptr inbounds float, float* %tmp9804, i64 1
+ %tmp9806 = getelementptr inbounds float, float* %tmp9805, i64 1
+ %tmp9807 = getelementptr inbounds float, float* %tmp9806, i64 1
+ %tmp9808 = getelementptr inbounds float, float* %tmp9807, i64 1
+ %tmp9809 = getelementptr inbounds float, float* %tmp9808, i64 1
+ %tmp9810 = getelementptr inbounds float, float* %tmp9809, i64 1
+ %tmp9811 = getelementptr inbounds float, float* %tmp9810, i64 1
+ %tmp9812 = getelementptr inbounds float, float* %tmp9811, i64 1
+ %tmp9813 = getelementptr inbounds float, float* %tmp9812, i64 1
+ %tmp9814 = getelementptr inbounds float, float* %tmp9813, i64 1
+ %tmp9815 = getelementptr inbounds float, float* %tmp9814, i64 1
+ %tmp9816 = getelementptr inbounds float, float* %tmp9815, i64 1
+ %tmp9817 = getelementptr inbounds float, float* %tmp9816, i64 1
+ %tmp9818 = getelementptr inbounds float, float* %tmp9817, i64 1
+ %tmp9819 = getelementptr inbounds float, float* %tmp9818, i64 1
+ %tmp9820 = getelementptr inbounds float, float* %tmp9819, i64 1
+ %tmp9821 = getelementptr inbounds float, float* %tmp9820, i64 1
+ %tmp9822 = getelementptr inbounds float, float* %tmp9821, i64 1
+ %tmp9823 = getelementptr inbounds float, float* %tmp9822, i64 1
+ %tmp9824 = getelementptr inbounds float, float* %tmp9823, i64 1
+ %tmp9825 = getelementptr inbounds float, float* %tmp9824, i64 1
+ %tmp9826 = getelementptr inbounds float, float* %tmp9825, i64 1
+ %tmp9827 = getelementptr inbounds float, float* %tmp9826, i64 1
+ %tmp9828 = getelementptr inbounds float, float* %tmp9827, i64 1
+ %tmp9829 = getelementptr inbounds float, float* %tmp9828, i64 1
+ %tmp9830 = getelementptr inbounds float, float* %tmp9829, i64 1
+ %tmp9831 = getelementptr inbounds float, float* %tmp9830, i64 1
+ %tmp9832 = getelementptr inbounds float, float* %tmp9831, i64 1
+ %tmp9833 = getelementptr inbounds float, float* %tmp9832, i64 1
+ %tmp9834 = getelementptr inbounds float, float* %tmp9833, i64 1
+ %tmp9835 = getelementptr inbounds float, float* %tmp9834, i64 1
+ %tmp9836 = getelementptr inbounds float, float* %tmp9835, i64 1
+ %tmp9837 = getelementptr inbounds float, float* %tmp9836, i64 1
+ %tmp9838 = getelementptr inbounds float, float* %tmp9837, i64 1
+ %tmp9839 = getelementptr inbounds float, float* %tmp9838, i64 1
+ %tmp9840 = getelementptr inbounds float, float* %tmp9839, i64 1
+ %tmp9841 = getelementptr inbounds float, float* %tmp9840, i64 1
+ %tmp9842 = getelementptr inbounds float, float* %tmp9841, i64 1
+ %tmp9843 = getelementptr inbounds float, float* %tmp9842, i64 1
+ %tmp9844 = getelementptr inbounds float, float* %tmp9843, i64 1
+ %tmp9845 = getelementptr inbounds float, float* %tmp9844, i64 1
+ %tmp9846 = getelementptr inbounds float, float* %tmp9845, i64 1
+ %tmp9847 = getelementptr inbounds float, float* %tmp9846, i64 1
+ %tmp9848 = getelementptr inbounds float, float* %tmp9847, i64 1
+ %tmp9849 = getelementptr inbounds float, float* %tmp9848, i64 1
+ %tmp9850 = getelementptr inbounds float, float* %tmp9849, i64 1
+ %tmp9851 = getelementptr inbounds float, float* %tmp9850, i64 1
+ %tmp9852 = getelementptr inbounds float, float* %tmp9851, i64 1
+ %tmp9853 = getelementptr inbounds float, float* %tmp9852, i64 1
+ %tmp9854 = getelementptr inbounds float, float* %tmp9853, i64 1
+ %tmp9855 = getelementptr inbounds float, float* %tmp9854, i64 1
+ %tmp9856 = getelementptr inbounds float, float* %tmp9855, i64 1
+ %tmp9857 = getelementptr inbounds float, float* %tmp9856, i64 1
+ %tmp9858 = getelementptr inbounds float, float* %tmp9857, i64 1
+ %tmp9859 = getelementptr inbounds float, float* %tmp9858, i64 1
+ %tmp9860 = getelementptr inbounds float, float* %tmp9859, i64 1
+ %tmp9861 = getelementptr inbounds float, float* %tmp9860, i64 1
+ %tmp9862 = getelementptr inbounds float, float* %tmp9861, i64 1
+ %tmp9863 = getelementptr inbounds float, float* %tmp9862, i64 1
+ %tmp9864 = getelementptr inbounds float, float* %tmp9863, i64 1
+ %tmp9865 = getelementptr inbounds float, float* %tmp9864, i64 1
+ %tmp9866 = getelementptr inbounds float, float* %tmp9865, i64 1
+ %tmp9867 = getelementptr inbounds float, float* %tmp9866, i64 1
+ %tmp9868 = getelementptr inbounds float, float* %tmp9867, i64 1
+ %tmp9869 = getelementptr inbounds float, float* %tmp9868, i64 1
+ %tmp9870 = getelementptr inbounds float, float* %tmp9869, i64 1
+ %tmp9871 = getelementptr inbounds float, float* %tmp9870, i64 1
+ %tmp9872 = getelementptr inbounds float, float* %tmp9871, i64 1
+ %tmp9873 = getelementptr inbounds float, float* %tmp9872, i64 1
+ %tmp9874 = getelementptr inbounds float, float* %tmp9873, i64 1
+ %tmp9875 = getelementptr inbounds float, float* %tmp9874, i64 1
+ %tmp9876 = getelementptr inbounds float, float* %tmp9875, i64 1
+ %tmp9877 = getelementptr inbounds float, float* %tmp9876, i64 1
+ %tmp9878 = getelementptr inbounds float, float* %tmp9877, i64 1
+ %tmp9879 = getelementptr inbounds float, float* %tmp9878, i64 1
+ %tmp9880 = getelementptr inbounds float, float* %tmp9879, i64 1
+ %tmp9881 = getelementptr inbounds float, float* %tmp9880, i64 1
+ %tmp9882 = getelementptr inbounds float, float* %tmp9881, i64 1
+ %tmp9883 = getelementptr inbounds float, float* %tmp9882, i64 1
+ %tmp9884 = getelementptr inbounds float, float* %tmp9883, i64 1
+ %tmp9885 = getelementptr inbounds float, float* %tmp9884, i64 1
+ %tmp9886 = getelementptr inbounds float, float* %tmp9885, i64 1
+ %tmp9887 = getelementptr inbounds float, float* %tmp9886, i64 1
+ %tmp9888 = getelementptr inbounds float, float* %tmp9887, i64 1
+ %tmp9889 = getelementptr inbounds float, float* %tmp9888, i64 1
+ %tmp9890 = getelementptr inbounds float, float* %tmp9889, i64 1
+ %tmp9891 = getelementptr inbounds float, float* %tmp9890, i64 1
+ %tmp9892 = getelementptr inbounds float, float* %tmp9891, i64 1
+ %tmp9893 = getelementptr inbounds float, float* %tmp9892, i64 1
+ %tmp9894 = getelementptr inbounds float, float* %tmp9893, i64 1
+ %tmp9895 = getelementptr inbounds float, float* %tmp9894, i64 1
+ %tmp9896 = getelementptr inbounds float, float* %tmp9895, i64 1
+ %tmp9897 = getelementptr inbounds float, float* %tmp9896, i64 1
+ %tmp9898 = getelementptr inbounds float, float* %tmp9897, i64 1
+ %tmp9899 = getelementptr inbounds float, float* %tmp9898, i64 1
+ %tmp9900 = getelementptr inbounds float, float* %tmp9899, i64 1
+ %tmp9901 = getelementptr inbounds float, float* %tmp9900, i64 1
+ %tmp9902 = getelementptr inbounds float, float* %tmp9901, i64 1
+ %tmp9903 = getelementptr inbounds float, float* %tmp9902, i64 1
+ %tmp9904 = getelementptr inbounds float, float* %tmp9903, i64 1
+ %tmp9905 = getelementptr inbounds float, float* %tmp9904, i64 1
+ %tmp9906 = getelementptr inbounds float, float* %tmp9905, i64 1
+ %tmp9907 = getelementptr inbounds float, float* %tmp9906, i64 1
+ %tmp9908 = getelementptr inbounds float, float* %tmp9907, i64 1
+ %tmp9909 = getelementptr inbounds float, float* %tmp9908, i64 1
+ %tmp9910 = getelementptr inbounds float, float* %tmp9909, i64 1
+ %tmp9911 = getelementptr inbounds float, float* %tmp9910, i64 1
+ %tmp9912 = getelementptr inbounds float, float* %tmp9911, i64 1
+ %tmp9913 = getelementptr inbounds float, float* %tmp9912, i64 1
+ %tmp9914 = getelementptr inbounds float, float* %tmp9913, i64 1
+ %tmp9915 = getelementptr inbounds float, float* %tmp9914, i64 1
+ %tmp9916 = getelementptr inbounds float, float* %tmp9915, i64 1
+ %tmp9917 = getelementptr inbounds float, float* %tmp9916, i64 1
+ %tmp9918 = getelementptr inbounds float, float* %tmp9917, i64 1
+ %tmp9919 = getelementptr inbounds float, float* %tmp9918, i64 1
+ %tmp9920 = getelementptr inbounds float, float* %tmp9919, i64 1
+ %tmp9921 = getelementptr inbounds float, float* %tmp9920, i64 1
+ %tmp9922 = getelementptr inbounds float, float* %tmp9921, i64 1
+ %tmp9923 = getelementptr inbounds float, float* %tmp9922, i64 1
+ %tmp9924 = getelementptr inbounds float, float* %tmp9923, i64 1
+ %tmp9925 = getelementptr inbounds float, float* %tmp9924, i64 1
+ %tmp9926 = getelementptr inbounds float, float* %tmp9925, i64 1
+ %tmp9927 = getelementptr inbounds float, float* %tmp9926, i64 1
+ %tmp9928 = getelementptr inbounds float, float* %tmp9927, i64 1
+ %tmp9929 = getelementptr inbounds float, float* %tmp9928, i64 1
+ %tmp9930 = getelementptr inbounds float, float* %tmp9929, i64 1
+ %tmp9931 = getelementptr inbounds float, float* %tmp9930, i64 1
+ %tmp9932 = getelementptr inbounds float, float* %tmp9931, i64 1
+ %tmp9933 = getelementptr inbounds float, float* %tmp9932, i64 1
+ %tmp9934 = getelementptr inbounds float, float* %tmp9933, i64 1
+ %tmp9935 = getelementptr inbounds float, float* %tmp9934, i64 1
+ %tmp9936 = getelementptr inbounds float, float* %tmp9935, i64 1
+ %tmp9937 = getelementptr inbounds float, float* %tmp9936, i64 1
+ %tmp9938 = getelementptr inbounds float, float* %tmp9937, i64 1
+ %tmp9939 = getelementptr inbounds float, float* %tmp9938, i64 1
+ %tmp9940 = getelementptr inbounds float, float* %tmp9939, i64 1
+ %tmp9941 = getelementptr inbounds float, float* %tmp9940, i64 1
+ %tmp9942 = getelementptr inbounds float, float* %tmp9941, i64 1
+ %tmp9943 = getelementptr inbounds float, float* %tmp9942, i64 1
+ %tmp9944 = getelementptr inbounds float, float* %tmp9943, i64 1
+ %tmp9945 = getelementptr inbounds float, float* %tmp9944, i64 1
+ %tmp9946 = getelementptr inbounds float, float* %tmp9945, i64 1
+ %tmp9947 = getelementptr inbounds float, float* %tmp9946, i64 1
+ %tmp9948 = getelementptr inbounds float, float* %tmp9947, i64 1
+ %tmp9949 = getelementptr inbounds float, float* %tmp9948, i64 1
+ %tmp9950 = getelementptr inbounds float, float* %tmp9949, i64 1
+ %tmp9951 = getelementptr inbounds float, float* %tmp9950, i64 1
+ %tmp9952 = getelementptr inbounds float, float* %tmp9951, i64 1
+ %tmp9953 = getelementptr inbounds float, float* %tmp9952, i64 1
+ %tmp9954 = getelementptr inbounds float, float* %tmp9953, i64 1
+ %tmp9955 = getelementptr inbounds float, float* %tmp9954, i64 1
+ %tmp9956 = getelementptr inbounds float, float* %tmp9955, i64 1
+ %tmp9957 = getelementptr inbounds float, float* %tmp9956, i64 1
+ %tmp9958 = getelementptr inbounds float, float* %tmp9957, i64 1
+ %tmp9959 = getelementptr inbounds float, float* %tmp9958, i64 1
+ %tmp9960 = getelementptr inbounds float, float* %tmp9959, i64 1
+ %tmp9961 = getelementptr inbounds float, float* %tmp9960, i64 1
+ %tmp9962 = getelementptr inbounds float, float* %tmp9961, i64 1
+ %tmp9963 = getelementptr inbounds float, float* %tmp9962, i64 1
+ %tmp9964 = getelementptr inbounds float, float* %tmp9963, i64 1
+ %tmp9965 = getelementptr inbounds float, float* %tmp9964, i64 1
+ %tmp9966 = getelementptr inbounds float, float* %tmp9965, i64 1
+ %tmp9967 = getelementptr inbounds float, float* %tmp9966, i64 1
+ %tmp9968 = getelementptr inbounds float, float* %tmp9967, i64 1
+ %tmp9969 = getelementptr inbounds float, float* %tmp9968, i64 1
+ %tmp9970 = getelementptr inbounds float, float* %tmp9969, i64 1
+ %tmp9971 = getelementptr inbounds float, float* %tmp9970, i64 1
+ %tmp9972 = getelementptr inbounds float, float* %tmp9971, i64 1
+ %tmp9973 = getelementptr inbounds float, float* %tmp9972, i64 1
+ %tmp9974 = getelementptr inbounds float, float* %tmp9973, i64 1
+ %tmp9975 = getelementptr inbounds float, float* %tmp9974, i64 1
+ %tmp9976 = getelementptr inbounds float, float* %tmp9975, i64 1
+ %tmp9977 = getelementptr inbounds float, float* %tmp9976, i64 1
+ %tmp9978 = getelementptr inbounds float, float* %tmp9977, i64 1
+ %tmp9979 = getelementptr inbounds float, float* %tmp9978, i64 1
+ %tmp9980 = getelementptr inbounds float, float* %tmp9979, i64 1
+ %tmp9981 = getelementptr inbounds float, float* %tmp9980, i64 1
+ %tmp9982 = getelementptr inbounds float, float* %tmp9981, i64 1
+ %tmp9983 = getelementptr inbounds float, float* %tmp9982, i64 1
+ %tmp9984 = getelementptr inbounds float, float* %tmp9983, i64 1
+ %tmp9985 = getelementptr inbounds float, float* %tmp9984, i64 1
+ %tmp9986 = getelementptr inbounds float, float* %tmp9985, i64 1
+ %tmp9987 = getelementptr inbounds float, float* %tmp9986, i64 1
+ %tmp9988 = getelementptr inbounds float, float* %tmp9987, i64 1
+ %tmp9989 = getelementptr inbounds float, float* %tmp9988, i64 1
+ %tmp9990 = getelementptr inbounds float, float* %tmp9989, i64 1
+ %tmp9991 = getelementptr inbounds float, float* %tmp9990, i64 1
+ %tmp9992 = getelementptr inbounds float, float* %tmp9991, i64 1
+ %tmp9993 = getelementptr inbounds float, float* %tmp9992, i64 1
+ %tmp9994 = getelementptr inbounds float, float* %tmp9993, i64 1
+ %tmp9995 = getelementptr inbounds float, float* %tmp9994, i64 1
+ %tmp9996 = getelementptr inbounds float, float* %tmp9995, i64 1
+ %tmp9997 = getelementptr inbounds float, float* %tmp9996, i64 1
+ %tmp9998 = getelementptr inbounds float, float* %tmp9997, i64 1
+ %tmp9999 = getelementptr inbounds float, float* %tmp9998, i64 1
+ %tmp10000 = getelementptr inbounds float, float* %tmp9999, i64 1
+ %tmp10001 = getelementptr inbounds float, float* %tmp10000, i64 1
+ %tmp10002 = getelementptr inbounds float, float* %tmp10001, i64 1
+ %tmp10003 = getelementptr inbounds float, float* %tmp10002, i64 1
+ %tmp10004 = getelementptr inbounds float, float* %tmp10003, i64 1
+ %tmp10005 = getelementptr inbounds float, float* %tmp10004, i64 1
+ %tmp10006 = getelementptr inbounds float, float* %tmp10005, i64 1
+ %tmp10007 = getelementptr inbounds float, float* %tmp10006, i64 1
+ %tmp10008 = getelementptr inbounds float, float* %tmp10007, i64 1
+ %tmp10009 = getelementptr inbounds float, float* %tmp10008, i64 1
+ %tmp10010 = getelementptr inbounds float, float* %tmp10009, i64 1
+ %tmp10011 = getelementptr inbounds float, float* %tmp10010, i64 1
+ %tmp10012 = getelementptr inbounds float, float* %tmp10011, i64 1
+ %tmp10013 = getelementptr inbounds float, float* %tmp10012, i64 1
+ %tmp10014 = getelementptr inbounds float, float* %tmp10013, i64 1
+ %tmp10015 = getelementptr inbounds float, float* %tmp10014, i64 1
+ %tmp10016 = getelementptr inbounds float, float* %tmp10015, i64 1
+ %tmp10017 = getelementptr inbounds float, float* %tmp10016, i64 1
+ %tmp10018 = getelementptr inbounds float, float* %tmp10017, i64 1
+ %tmp10019 = getelementptr inbounds float, float* %tmp10018, i64 1
+ %tmp10020 = getelementptr inbounds float, float* %tmp10019, i64 1
+ %tmp10021 = getelementptr inbounds float, float* %tmp10020, i64 1
+ %tmp10022 = getelementptr inbounds float, float* %tmp10021, i64 1
+ %tmp10023 = getelementptr inbounds float, float* %tmp10022, i64 1
+ %tmp10024 = getelementptr inbounds float, float* %tmp10023, i64 1
+ %tmp10025 = getelementptr inbounds float, float* %tmp10024, i64 1
+ %tmp10026 = getelementptr inbounds float, float* %tmp10025, i64 1
+ %tmp10027 = getelementptr inbounds float, float* %tmp10026, i64 1
+ %tmp10028 = getelementptr inbounds float, float* %tmp10027, i64 1
+ %tmp10029 = getelementptr inbounds float, float* %tmp10028, i64 1
+ %tmp10030 = getelementptr inbounds float, float* %tmp10029, i64 1
+ %tmp10031 = getelementptr inbounds float, float* %tmp10030, i64 1
+ %tmp10032 = getelementptr inbounds float, float* %tmp10031, i64 1
+ %tmp10033 = getelementptr inbounds float, float* %tmp10032, i64 1
+ %tmp10034 = getelementptr inbounds float, float* %tmp10033, i64 1
+ %tmp10035 = getelementptr inbounds float, float* %tmp10034, i64 1
+ %tmp10036 = getelementptr inbounds float, float* %tmp10035, i64 1
+ %tmp10037 = getelementptr inbounds float, float* %tmp10036, i64 1
+ %tmp10038 = getelementptr inbounds float, float* %tmp10037, i64 1
+ %tmp10039 = getelementptr inbounds float, float* %tmp10038, i64 1
+ %tmp10040 = getelementptr inbounds float, float* %tmp10039, i64 1
+ %tmp10041 = getelementptr inbounds float, float* %tmp10040, i64 1
+ %tmp10042 = getelementptr inbounds float, float* %tmp10041, i64 1
+ %tmp10043 = getelementptr inbounds float, float* %tmp10042, i64 1
+ %tmp10044 = getelementptr inbounds float, float* %tmp10043, i64 1
+ %tmp10045 = getelementptr inbounds float, float* %tmp10044, i64 1
+ %tmp10046 = getelementptr inbounds float, float* %tmp10045, i64 1
+ %tmp10047 = getelementptr inbounds float, float* %tmp10046, i64 1
+ %tmp10048 = getelementptr inbounds float, float* %tmp10047, i64 1
+ %tmp10049 = getelementptr inbounds float, float* %tmp10048, i64 1
+ %tmp10050 = getelementptr inbounds float, float* %tmp10049, i64 1
+ %tmp10051 = getelementptr inbounds float, float* %tmp10050, i64 1
+ %tmp10052 = getelementptr inbounds float, float* %tmp10051, i64 1
+ %tmp10053 = getelementptr inbounds float, float* %tmp10052, i64 1
+ %tmp10054 = getelementptr inbounds float, float* %tmp10053, i64 1
+ %tmp10055 = getelementptr inbounds float, float* %tmp10054, i64 1
+ %tmp10056 = getelementptr inbounds float, float* %tmp10055, i64 1
+ %tmp10057 = getelementptr inbounds float, float* %tmp10056, i64 1
+ %tmp10058 = getelementptr inbounds float, float* %tmp10057, i64 1
+ %tmp10059 = getelementptr inbounds float, float* %tmp10058, i64 1
+ %tmp10060 = getelementptr inbounds float, float* %tmp10059, i64 1
+ %tmp10061 = getelementptr inbounds float, float* %tmp10060, i64 1
+ %tmp10062 = getelementptr inbounds float, float* %tmp10061, i64 1
+ %tmp10063 = getelementptr inbounds float, float* %tmp10062, i64 1
+ %tmp10064 = getelementptr inbounds float, float* %tmp10063, i64 1
+ %tmp10065 = getelementptr inbounds float, float* %tmp10064, i64 1
+ %tmp10066 = getelementptr inbounds float, float* %tmp10065, i64 1
+ %tmp10067 = getelementptr inbounds float, float* %tmp10066, i64 1
+ %tmp10068 = getelementptr inbounds float, float* %tmp10067, i64 1
+ %tmp10069 = getelementptr inbounds float, float* %tmp10068, i64 1
+ %tmp10070 = getelementptr inbounds float, float* %tmp10069, i64 1
+ %tmp10071 = getelementptr inbounds float, float* %tmp10070, i64 1
+ %tmp10072 = getelementptr inbounds float, float* %tmp10071, i64 1
+ %tmp10073 = getelementptr inbounds float, float* %tmp10072, i64 1
+ %tmp10074 = getelementptr inbounds float, float* %tmp10073, i64 1
+ %tmp10075 = getelementptr inbounds float, float* %tmp10074, i64 1
+ %tmp10076 = getelementptr inbounds float, float* %tmp10075, i64 1
+ %tmp10077 = getelementptr inbounds float, float* %tmp10076, i64 1
+ %tmp10078 = getelementptr inbounds float, float* %tmp10077, i64 1
+ %tmp10079 = getelementptr inbounds float, float* %tmp10078, i64 1
+ %tmp10080 = getelementptr inbounds float, float* %tmp10079, i64 1
+ %tmp10081 = getelementptr inbounds float, float* %tmp10080, i64 1
+ %tmp10082 = getelementptr inbounds float, float* %tmp10081, i64 1
+ %tmp10083 = getelementptr inbounds float, float* %tmp10082, i64 1
+ %tmp10084 = getelementptr inbounds float, float* %tmp10083, i64 1
+ %tmp10085 = getelementptr inbounds float, float* %tmp10084, i64 1
+ %tmp10086 = getelementptr inbounds float, float* %tmp10085, i64 1
+ %tmp10087 = getelementptr inbounds float, float* %tmp10086, i64 1
+ %tmp10088 = getelementptr inbounds float, float* %tmp10087, i64 1
+ %tmp10089 = getelementptr inbounds float, float* %tmp10088, i64 1
+ %tmp10090 = getelementptr inbounds float, float* %tmp10089, i64 1
+ %tmp10091 = getelementptr inbounds float, float* %tmp10090, i64 1
+ %tmp10092 = getelementptr inbounds float, float* %tmp10091, i64 1
+ %tmp10093 = getelementptr inbounds float, float* %tmp10092, i64 1
+ %tmp10094 = getelementptr inbounds float, float* %tmp10093, i64 1
+ %tmp10095 = getelementptr inbounds float, float* %tmp10094, i64 1
+ %tmp10096 = getelementptr inbounds float, float* %tmp10095, i64 1
+ %tmp10097 = getelementptr inbounds float, float* %tmp10096, i64 1
+ %tmp10098 = getelementptr inbounds float, float* %tmp10097, i64 1
+ %tmp10099 = getelementptr inbounds float, float* %tmp10098, i64 1
+ %tmp10100 = getelementptr inbounds float, float* %tmp10099, i64 1
+ %tmp10101 = getelementptr inbounds float, float* %tmp10100, i64 1
+ %tmp10102 = getelementptr inbounds float, float* %tmp10101, i64 1
+ %tmp10103 = getelementptr inbounds float, float* %tmp10102, i64 1
+ %tmp10104 = getelementptr inbounds float, float* %tmp10103, i64 1
+ %tmp10105 = getelementptr inbounds float, float* %tmp10104, i64 1
+ %tmp10106 = getelementptr inbounds float, float* %tmp10105, i64 1
+ %tmp10107 = getelementptr inbounds float, float* %tmp10106, i64 1
+ %tmp10108 = getelementptr inbounds float, float* %tmp10107, i64 1
+ %tmp10109 = getelementptr inbounds float, float* %tmp10108, i64 1
+ %tmp10110 = getelementptr inbounds float, float* %tmp10109, i64 1
+ %tmp10111 = getelementptr inbounds float, float* %tmp10110, i64 1
+ %tmp10112 = getelementptr inbounds float, float* %tmp10111, i64 1
+ %tmp10113 = getelementptr inbounds float, float* %tmp10112, i64 1
+ %tmp10114 = getelementptr inbounds float, float* %tmp10113, i64 1
+ %tmp10115 = getelementptr inbounds float, float* %tmp10114, i64 1
+ %tmp10116 = getelementptr inbounds float, float* %tmp10115, i64 1
+ %tmp10117 = getelementptr inbounds float, float* %tmp10116, i64 1
+ %tmp10118 = getelementptr inbounds float, float* %tmp10117, i64 1
+ %tmp10119 = getelementptr inbounds float, float* %tmp10118, i64 1
+ %tmp10120 = getelementptr inbounds float, float* %tmp10119, i64 1
+ %tmp10121 = getelementptr inbounds float, float* %tmp10120, i64 1
+ %tmp10122 = getelementptr inbounds float, float* %tmp10121, i64 1
+ %tmp10123 = getelementptr inbounds float, float* %tmp10122, i64 1
+ %tmp10124 = getelementptr inbounds float, float* %tmp10123, i64 1
+ %tmp10125 = getelementptr inbounds float, float* %tmp10124, i64 1
+ %tmp10126 = getelementptr inbounds float, float* %tmp10125, i64 1
+ %tmp10127 = getelementptr inbounds float, float* %tmp10126, i64 1
+ %tmp10128 = getelementptr inbounds float, float* %tmp10127, i64 1
+ %tmp10129 = getelementptr inbounds float, float* %tmp10128, i64 1
+ %tmp10130 = getelementptr inbounds float, float* %tmp10129, i64 1
+ %tmp10131 = getelementptr inbounds float, float* %tmp10130, i64 1
+ %tmp10132 = getelementptr inbounds float, float* %tmp10131, i64 1
+ %tmp10133 = getelementptr inbounds float, float* %tmp10132, i64 1
+ %tmp10134 = getelementptr inbounds float, float* %tmp10133, i64 1
+ %tmp10135 = getelementptr inbounds float, float* %tmp10134, i64 1
+ %tmp10136 = getelementptr inbounds float, float* %tmp10135, i64 1
+ %tmp10137 = getelementptr inbounds float, float* %tmp10136, i64 1
+ %tmp10138 = getelementptr inbounds float, float* %tmp10137, i64 1
+ %tmp10139 = getelementptr inbounds float, float* %tmp10138, i64 1
+ %tmp10140 = getelementptr inbounds float, float* %tmp10139, i64 1
+ %tmp10141 = getelementptr inbounds float, float* %tmp10140, i64 1
+ %tmp10142 = getelementptr inbounds float, float* %tmp10141, i64 1
+ %tmp10143 = getelementptr inbounds float, float* %tmp10142, i64 1
+ %tmp10144 = getelementptr inbounds float, float* %tmp10143, i64 1
+ %tmp10145 = getelementptr inbounds float, float* %tmp10144, i64 1
+ %tmp10146 = getelementptr inbounds float, float* %tmp10145, i64 1
+ %tmp10147 = getelementptr inbounds float, float* %tmp10146, i64 1
+ %tmp10148 = getelementptr inbounds float, float* %tmp10147, i64 1
+ %tmp10149 = getelementptr inbounds float, float* %tmp10148, i64 1
+ %tmp10150 = getelementptr inbounds float, float* %tmp10149, i64 1
+ %tmp10151 = getelementptr inbounds float, float* %tmp10150, i64 1
+ %tmp10152 = getelementptr inbounds float, float* %tmp10151, i64 1
+ %tmp10153 = getelementptr inbounds float, float* %tmp10152, i64 1
+ %tmp10154 = getelementptr inbounds float, float* %tmp10153, i64 1
+ %tmp10155 = getelementptr inbounds float, float* %tmp10154, i64 1
+ %tmp10156 = getelementptr inbounds float, float* %tmp10155, i64 1
+ %tmp10157 = getelementptr inbounds float, float* %tmp10156, i64 1
+ %tmp10158 = getelementptr inbounds float, float* %tmp10157, i64 1
+ %tmp10159 = getelementptr inbounds float, float* %tmp10158, i64 1
+ %tmp10160 = getelementptr inbounds float, float* %tmp10159, i64 1
+ %tmp10161 = getelementptr inbounds float, float* %tmp10160, i64 1
+ %tmp10162 = getelementptr inbounds float, float* %tmp10161, i64 1
+ %tmp10163 = getelementptr inbounds float, float* %tmp10162, i64 1
+ %tmp10164 = getelementptr inbounds float, float* %tmp10163, i64 1
+ %tmp10165 = getelementptr inbounds float, float* %tmp10164, i64 1
+ %tmp10166 = getelementptr inbounds float, float* %tmp10165, i64 1
+ %tmp10167 = getelementptr inbounds float, float* %tmp10166, i64 1
+ %tmp10168 = getelementptr inbounds float, float* %tmp10167, i64 1
+ %tmp10169 = getelementptr inbounds float, float* %tmp10168, i64 1
+ %tmp10170 = getelementptr inbounds float, float* %tmp10169, i64 1
+ %tmp10171 = getelementptr inbounds float, float* %tmp10170, i64 1
+ %tmp10172 = getelementptr inbounds float, float* %tmp10171, i64 1
+ %tmp10173 = getelementptr inbounds float, float* %tmp10172, i64 1
+ %tmp10174 = getelementptr inbounds float, float* %tmp10173, i64 1
+ %tmp10175 = getelementptr inbounds float, float* %tmp10174, i64 1
+ %tmp10176 = getelementptr inbounds float, float* %tmp10175, i64 1
+ %tmp10177 = getelementptr inbounds float, float* %tmp10176, i64 1
+ %tmp10178 = getelementptr inbounds float, float* %tmp10177, i64 1
+ %tmp10179 = getelementptr inbounds float, float* %tmp10178, i64 1
+ %tmp10180 = getelementptr inbounds float, float* %tmp10179, i64 1
+ %tmp10181 = getelementptr inbounds float, float* %tmp10180, i64 1
+ %tmp10182 = getelementptr inbounds float, float* %tmp10181, i64 1
+ %tmp10183 = getelementptr inbounds float, float* %tmp10182, i64 1
+ %tmp10184 = getelementptr inbounds float, float* %tmp10183, i64 1
+ %tmp10185 = getelementptr inbounds float, float* %tmp10184, i64 1
+ %tmp10186 = getelementptr inbounds float, float* %tmp10185, i64 1
+ %tmp10187 = getelementptr inbounds float, float* %tmp10186, i64 1
+ %tmp10188 = getelementptr inbounds float, float* %tmp10187, i64 1
+ %tmp10189 = getelementptr inbounds float, float* %tmp10188, i64 1
+ %tmp10190 = getelementptr inbounds float, float* %tmp10189, i64 1
+ %tmp10191 = getelementptr inbounds float, float* %tmp10190, i64 1
+ %tmp10192 = getelementptr inbounds float, float* %tmp10191, i64 1
+ %tmp10193 = getelementptr inbounds float, float* %tmp10192, i64 1
+ %tmp10194 = getelementptr inbounds float, float* %tmp10193, i64 1
+ %tmp10195 = getelementptr inbounds float, float* %tmp10194, i64 1
+ %tmp10196 = getelementptr inbounds float, float* %tmp10195, i64 1
+ %tmp10197 = getelementptr inbounds float, float* %tmp10196, i64 1
+ %tmp10198 = getelementptr inbounds float, float* %tmp10197, i64 1
+ %tmp10199 = getelementptr inbounds float, float* %tmp10198, i64 1
+ %tmp10200 = getelementptr inbounds float, float* %tmp10199, i64 1
+ %tmp10201 = getelementptr inbounds float, float* %tmp10200, i64 1
+ %tmp10202 = getelementptr inbounds float, float* %tmp10201, i64 1
+ %tmp10203 = getelementptr inbounds float, float* %tmp10202, i64 1
+ %tmp10204 = getelementptr inbounds float, float* %tmp10203, i64 1
+ %tmp10205 = getelementptr inbounds float, float* %tmp10204, i64 1
+ %tmp10206 = getelementptr inbounds float, float* %tmp10205, i64 1
+ %tmp10207 = getelementptr inbounds float, float* %tmp10206, i64 1
+ %tmp10208 = getelementptr inbounds float, float* %tmp10207, i64 1
+ %tmp10209 = getelementptr inbounds float, float* %tmp10208, i64 1
+ %tmp10210 = getelementptr inbounds float, float* %tmp10209, i64 1
+ %tmp10211 = getelementptr inbounds float, float* %tmp10210, i64 1
+ %tmp10212 = getelementptr inbounds float, float* %tmp10211, i64 1
+ %tmp10213 = getelementptr inbounds float, float* %tmp10212, i64 1
+ %tmp10214 = getelementptr inbounds float, float* %tmp10213, i64 1
+ %tmp10215 = getelementptr inbounds float, float* %tmp10214, i64 1
+ %tmp10216 = getelementptr inbounds float, float* %tmp10215, i64 1
+ %tmp10217 = getelementptr inbounds float, float* %tmp10216, i64 1
+ %tmp10218 = getelementptr inbounds float, float* %tmp10217, i64 1
+ %tmp10219 = getelementptr inbounds float, float* %tmp10218, i64 1
+ %tmp10220 = getelementptr inbounds float, float* %tmp10219, i64 1
+ %tmp10221 = getelementptr inbounds float, float* %tmp10220, i64 1
+ %tmp10222 = getelementptr inbounds float, float* %tmp10221, i64 1
+ %tmp10223 = getelementptr inbounds float, float* %tmp10222, i64 1
+ %tmp10224 = getelementptr inbounds float, float* %tmp10223, i64 1
+ %tmp10225 = getelementptr inbounds float, float* %tmp10224, i64 1
+ %tmp10226 = getelementptr inbounds float, float* %tmp10225, i64 1
+ %tmp10227 = getelementptr inbounds float, float* %tmp10226, i64 1
+ %tmp10228 = getelementptr inbounds float, float* %tmp10227, i64 1
+ %tmp10229 = getelementptr inbounds float, float* %tmp10228, i64 1
+ %tmp10230 = getelementptr inbounds float, float* %tmp10229, i64 1
+ %tmp10231 = getelementptr inbounds float, float* %tmp10230, i64 1
+ %tmp10232 = getelementptr inbounds float, float* %tmp10231, i64 1
+ %tmp10233 = getelementptr inbounds float, float* %tmp10232, i64 1
+ %tmp10234 = getelementptr inbounds float, float* %tmp10233, i64 1
+ %tmp10235 = getelementptr inbounds float, float* %tmp10234, i64 1
+ %tmp10236 = getelementptr inbounds float, float* %tmp10235, i64 1
+ %tmp10237 = getelementptr inbounds float, float* %tmp10236, i64 1
+ %tmp10238 = getelementptr inbounds float, float* %tmp10237, i64 1
+ %tmp10239 = getelementptr inbounds float, float* %tmp10238, i64 1
+ %tmp10240 = getelementptr inbounds float, float* %tmp10239, i64 1
+ %tmp10241 = getelementptr inbounds float, float* %tmp10240, i64 1
+ %tmp10242 = getelementptr inbounds float, float* %tmp10241, i64 1
+ %tmp10243 = getelementptr inbounds float, float* %tmp10242, i64 1
+ %tmp10244 = getelementptr inbounds float, float* %tmp10243, i64 1
+ %tmp10245 = getelementptr inbounds float, float* %tmp10244, i64 1
+ %tmp10246 = getelementptr inbounds float, float* %tmp10245, i64 1
+ %tmp10247 = getelementptr inbounds float, float* %tmp10246, i64 1
+ %tmp10248 = getelementptr inbounds float, float* %tmp10247, i64 1
+ %tmp10249 = getelementptr inbounds float, float* %tmp10248, i64 1
+ %tmp10250 = getelementptr inbounds float, float* %tmp10249, i64 1
+ %tmp10251 = getelementptr inbounds float, float* %tmp10250, i64 1
+ %tmp10252 = getelementptr inbounds float, float* %tmp10251, i64 1
+ %tmp10253 = getelementptr inbounds float, float* %tmp10252, i64 1
+ %tmp10254 = getelementptr inbounds float, float* %tmp10253, i64 1
+ %tmp10255 = getelementptr inbounds float, float* %tmp10254, i64 1
+ %tmp10256 = getelementptr inbounds float, float* %tmp10255, i64 1
+ %tmp10257 = getelementptr inbounds float, float* %tmp10256, i64 1
+ %tmp10258 = getelementptr inbounds float, float* %tmp10257, i64 1
+ %tmp10259 = getelementptr inbounds float, float* %tmp10258, i64 1
+ %tmp10260 = getelementptr inbounds float, float* %tmp10259, i64 1
+ %tmp10261 = getelementptr inbounds float, float* %tmp10260, i64 1
+ %tmp10262 = getelementptr inbounds float, float* %tmp10261, i64 1
+ %tmp10263 = getelementptr inbounds float, float* %tmp10262, i64 1
+ %tmp10264 = getelementptr inbounds float, float* %tmp10263, i64 1
+ %tmp10265 = getelementptr inbounds float, float* %tmp10264, i64 1
+ %tmp10266 = getelementptr inbounds float, float* %tmp10265, i64 1
+ %tmp10267 = getelementptr inbounds float, float* %tmp10266, i64 1
+ %tmp10268 = getelementptr inbounds float, float* %tmp10267, i64 1
+ %tmp10269 = getelementptr inbounds float, float* %tmp10268, i64 1
+ %tmp10270 = getelementptr inbounds float, float* %tmp10269, i64 1
+ %tmp10271 = getelementptr inbounds float, float* %tmp10270, i64 1
+ %tmp10272 = getelementptr inbounds float, float* %tmp10271, i64 1
+ %tmp10273 = getelementptr inbounds float, float* %tmp10272, i64 1
+ %tmp10274 = getelementptr inbounds float, float* %tmp10273, i64 1
+ %tmp10275 = getelementptr inbounds float, float* %tmp10274, i64 1
+ %tmp10276 = getelementptr inbounds float, float* %tmp10275, i64 1
+ %tmp10277 = getelementptr inbounds float, float* %tmp10276, i64 1
+ %tmp10278 = getelementptr inbounds float, float* %tmp10277, i64 1
+ %tmp10279 = getelementptr inbounds float, float* %tmp10278, i64 1
+ %tmp10280 = getelementptr inbounds float, float* %tmp10279, i64 1
+ %tmp10281 = getelementptr inbounds float, float* %tmp10280, i64 1
+ %tmp10282 = getelementptr inbounds float, float* %tmp10281, i64 1
+ %tmp10283 = getelementptr inbounds float, float* %tmp10282, i64 1
+ %tmp10284 = getelementptr inbounds float, float* %tmp10283, i64 1
+ %tmp10285 = getelementptr inbounds float, float* %tmp10284, i64 1
+ %tmp10286 = getelementptr inbounds float, float* %tmp10285, i64 1
+ %tmp10287 = getelementptr inbounds float, float* %tmp10286, i64 1
+ %tmp10288 = getelementptr inbounds float, float* %tmp10287, i64 1
+ %tmp10289 = getelementptr inbounds float, float* %tmp10288, i64 1
+ %tmp10290 = getelementptr inbounds float, float* %tmp10289, i64 1
+ %tmp10291 = getelementptr inbounds float, float* %tmp10290, i64 1
+ %tmp10292 = getelementptr inbounds float, float* %tmp10291, i64 1
+ %tmp10293 = getelementptr inbounds float, float* %tmp10292, i64 1
+ %tmp10294 = getelementptr inbounds float, float* %tmp10293, i64 1
+ %tmp10295 = getelementptr inbounds float, float* %tmp10294, i64 1
+ %tmp10296 = getelementptr inbounds float, float* %tmp10295, i64 1
+ %tmp10297 = getelementptr inbounds float, float* %tmp10296, i64 1
+ %tmp10298 = getelementptr inbounds float, float* %tmp10297, i64 1
+ %tmp10299 = getelementptr inbounds float, float* %tmp10298, i64 1
+ %tmp10300 = getelementptr inbounds float, float* %tmp10299, i64 1
+ %tmp10301 = getelementptr inbounds float, float* %tmp10300, i64 1
+ %tmp10302 = getelementptr inbounds float, float* %tmp10301, i64 1
+ %tmp10303 = getelementptr inbounds float, float* %tmp10302, i64 1
+ %tmp10304 = getelementptr inbounds float, float* %tmp10303, i64 1
+ %tmp10305 = getelementptr inbounds float, float* %tmp10304, i64 1
+ %tmp10306 = getelementptr inbounds float, float* %tmp10305, i64 1
+ %tmp10307 = getelementptr inbounds float, float* %tmp10306, i64 1
+ %tmp10308 = getelementptr inbounds float, float* %tmp10307, i64 1
+ %tmp10309 = getelementptr inbounds float, float* %tmp10308, i64 1
+ %tmp10310 = getelementptr inbounds float, float* %tmp10309, i64 1
+ %tmp10311 = getelementptr inbounds float, float* %tmp10310, i64 1
+ %tmp10312 = getelementptr inbounds float, float* %tmp10311, i64 1
+ %tmp10313 = getelementptr inbounds float, float* %tmp10312, i64 1
+ %tmp10314 = getelementptr inbounds float, float* %tmp10313, i64 1
+ %tmp10315 = getelementptr inbounds float, float* %tmp10314, i64 1
+ %tmp10316 = getelementptr inbounds float, float* %tmp10315, i64 1
+ %tmp10317 = getelementptr inbounds float, float* %tmp10316, i64 1
+ %tmp10318 = getelementptr inbounds float, float* %tmp10317, i64 1
+ %tmp10319 = getelementptr inbounds float, float* %tmp10318, i64 1
+ %tmp10320 = getelementptr inbounds float, float* %tmp10319, i64 1
+ %tmp10321 = getelementptr inbounds float, float* %tmp10320, i64 1
+ %tmp10322 = getelementptr inbounds float, float* %tmp10321, i64 1
+ %tmp10323 = getelementptr inbounds float, float* %tmp10322, i64 1
+ %tmp10324 = getelementptr inbounds float, float* %tmp10323, i64 1
+ %tmp10325 = getelementptr inbounds float, float* %tmp10324, i64 1
+ %tmp10326 = getelementptr inbounds float, float* %tmp10325, i64 1
+ %tmp10327 = getelementptr inbounds float, float* %tmp10326, i64 1
+ %tmp10328 = getelementptr inbounds float, float* %tmp10327, i64 1
+ %tmp10329 = getelementptr inbounds float, float* %tmp10328, i64 1
+ %tmp10330 = getelementptr inbounds float, float* %tmp10329, i64 1
+ %tmp10331 = getelementptr inbounds float, float* %tmp10330, i64 1
+ %tmp10332 = getelementptr inbounds float, float* %tmp10331, i64 1
+ %tmp10333 = getelementptr inbounds float, float* %tmp10332, i64 1
+ %tmp10334 = getelementptr inbounds float, float* %tmp10333, i64 1
+ %tmp10335 = getelementptr inbounds float, float* %tmp10334, i64 1
+ %tmp10336 = getelementptr inbounds float, float* %tmp10335, i64 1
+ %tmp10337 = getelementptr inbounds float, float* %tmp10336, i64 1
+ %tmp10338 = getelementptr inbounds float, float* %tmp10337, i64 1
+ %tmp10339 = getelementptr inbounds float, float* %tmp10338, i64 1
+ %tmp10340 = getelementptr inbounds float, float* %tmp10339, i64 1
+ %tmp10341 = getelementptr inbounds float, float* %tmp10340, i64 1
+ %tmp10342 = getelementptr inbounds float, float* %tmp10341, i64 1
+ %tmp10343 = getelementptr inbounds float, float* %tmp10342, i64 1
+ %tmp10344 = getelementptr inbounds float, float* %tmp10343, i64 1
+ %tmp10345 = getelementptr inbounds float, float* %tmp10344, i64 1
+ %tmp10346 = getelementptr inbounds float, float* %tmp10345, i64 1
+ %tmp10347 = getelementptr inbounds float, float* %tmp10346, i64 1
+ %tmp10348 = getelementptr inbounds float, float* %tmp10347, i64 1
+ %tmp10349 = getelementptr inbounds float, float* %tmp10348, i64 1
+ %tmp10350 = getelementptr inbounds float, float* %tmp10349, i64 1
+ %tmp10351 = getelementptr inbounds float, float* %tmp10350, i64 1
+ %tmp10352 = getelementptr inbounds float, float* %tmp10351, i64 1
+ %tmp10353 = getelementptr inbounds float, float* %tmp10352, i64 1
+ %tmp10354 = getelementptr inbounds float, float* %tmp10353, i64 1
+ %tmp10355 = getelementptr inbounds float, float* %tmp10354, i64 1
+ %tmp10356 = getelementptr inbounds float, float* %tmp10355, i64 1
+ %tmp10357 = getelementptr inbounds float, float* %tmp10356, i64 1
+ %tmp10358 = getelementptr inbounds float, float* %tmp10357, i64 1
+ %tmp10359 = getelementptr inbounds float, float* %tmp10358, i64 1
+ %tmp10360 = getelementptr inbounds float, float* %tmp10359, i64 1
+ %tmp10361 = getelementptr inbounds float, float* %tmp10360, i64 1
+ %tmp10362 = getelementptr inbounds float, float* %tmp10361, i64 1
+ %tmp10363 = getelementptr inbounds float, float* %tmp10362, i64 1
+ %tmp10364 = getelementptr inbounds float, float* %tmp10363, i64 1
+ %tmp10365 = getelementptr inbounds float, float* %tmp10364, i64 1
+ %tmp10366 = getelementptr inbounds float, float* %tmp10365, i64 1
+ %tmp10367 = getelementptr inbounds float, float* %tmp10366, i64 1
+ %tmp10368 = getelementptr inbounds float, float* %tmp10367, i64 1
+ %tmp10369 = getelementptr inbounds float, float* %tmp10368, i64 1
+ %tmp10370 = getelementptr inbounds float, float* %tmp10369, i64 1
+ %tmp10371 = getelementptr inbounds float, float* %tmp10370, i64 1
+ %tmp10372 = getelementptr inbounds float, float* %tmp10371, i64 1
+ %tmp10373 = getelementptr inbounds float, float* %tmp10372, i64 1
+ %tmp10374 = getelementptr inbounds float, float* %tmp10373, i64 1
+ %tmp10375 = getelementptr inbounds float, float* %tmp10374, i64 1
+ %tmp10376 = getelementptr inbounds float, float* %tmp10375, i64 1
+ %tmp10377 = getelementptr inbounds float, float* %tmp10376, i64 1
+ %tmp10378 = getelementptr inbounds float, float* %tmp10377, i64 1
+ %tmp10379 = getelementptr inbounds float, float* %tmp10378, i64 1
+ %tmp10380 = getelementptr inbounds float, float* %tmp10379, i64 1
+ %tmp10381 = getelementptr inbounds float, float* %tmp10380, i64 1
+ %tmp10382 = getelementptr inbounds float, float* %tmp10381, i64 1
+ %tmp10383 = getelementptr inbounds float, float* %tmp10382, i64 1
+ %tmp10384 = getelementptr inbounds float, float* %tmp10383, i64 1
+ %tmp10385 = getelementptr inbounds float, float* %tmp10384, i64 1
+ %tmp10386 = getelementptr inbounds float, float* %tmp10385, i64 1
+ %tmp10387 = getelementptr inbounds float, float* %tmp10386, i64 1
+ %tmp10388 = getelementptr inbounds float, float* %tmp10387, i64 1
+ %tmp10389 = getelementptr inbounds float, float* %tmp10388, i64 1
+ %tmp10390 = getelementptr inbounds float, float* %tmp10389, i64 1
+ %tmp10391 = getelementptr inbounds float, float* %tmp10390, i64 1
+ %tmp10392 = getelementptr inbounds float, float* %tmp10391, i64 1
+ %tmp10393 = getelementptr inbounds float, float* %tmp10392, i64 1
+ %tmp10394 = getelementptr inbounds float, float* %tmp10393, i64 1
+ %tmp10395 = getelementptr inbounds float, float* %tmp10394, i64 1
+ %tmp10396 = getelementptr inbounds float, float* %tmp10395, i64 1
+ %tmp10397 = getelementptr inbounds float, float* %tmp10396, i64 1
+ %tmp10398 = getelementptr inbounds float, float* %tmp10397, i64 1
+ %tmp10399 = getelementptr inbounds float, float* %tmp10398, i64 1
+ %tmp10400 = getelementptr inbounds float, float* %tmp10399, i64 1
+ %tmp10401 = getelementptr inbounds float, float* %tmp10400, i64 1
+ %tmp10402 = getelementptr inbounds float, float* %tmp10401, i64 1
+ %tmp10403 = getelementptr inbounds float, float* %tmp10402, i64 1
+ %tmp10404 = getelementptr inbounds float, float* %tmp10403, i64 1
+ %tmp10405 = getelementptr inbounds float, float* %tmp10404, i64 1
+ %tmp10406 = getelementptr inbounds float, float* %tmp10405, i64 1
+ %tmp10407 = getelementptr inbounds float, float* %tmp10406, i64 1
+ %tmp10408 = getelementptr inbounds float, float* %tmp10407, i64 1
+ %tmp10409 = getelementptr inbounds float, float* %tmp10408, i64 1
+ %tmp10410 = getelementptr inbounds float, float* %tmp10409, i64 1
+ %tmp10411 = getelementptr inbounds float, float* %tmp10410, i64 1
+ %tmp10412 = getelementptr inbounds float, float* %tmp10411, i64 1
+ %tmp10413 = getelementptr inbounds float, float* %tmp10412, i64 1
+ %tmp10414 = getelementptr inbounds float, float* %tmp10413, i64 1
+ %tmp10415 = getelementptr inbounds float, float* %tmp10414, i64 1
+ %tmp10416 = getelementptr inbounds float, float* %tmp10415, i64 1
+ %tmp10417 = getelementptr inbounds float, float* %tmp10416, i64 1
+ %tmp10418 = getelementptr inbounds float, float* %tmp10417, i64 1
+ %tmp10419 = getelementptr inbounds float, float* %tmp10418, i64 1
+ %tmp10420 = getelementptr inbounds float, float* %tmp10419, i64 1
+ %tmp10421 = getelementptr inbounds float, float* %tmp10420, i64 1
+ %tmp10422 = getelementptr inbounds float, float* %tmp10421, i64 1
+ %tmp10423 = getelementptr inbounds float, float* %tmp10422, i64 1
+ %tmp10424 = getelementptr inbounds float, float* %tmp10423, i64 1
+ %tmp10425 = getelementptr inbounds float, float* %tmp10424, i64 1
+ %tmp10426 = getelementptr inbounds float, float* %tmp10425, i64 1
+ %tmp10427 = getelementptr inbounds float, float* %tmp10426, i64 1
+ %tmp10428 = getelementptr inbounds float, float* %tmp10427, i64 1
+ %tmp10429 = getelementptr inbounds float, float* %tmp10428, i64 1
+ %tmp10430 = getelementptr inbounds float, float* %tmp10429, i64 1
+ %tmp10431 = getelementptr inbounds float, float* %tmp10430, i64 1
+ %tmp10432 = getelementptr inbounds float, float* %tmp10431, i64 1
+ %tmp10433 = getelementptr inbounds float, float* %tmp10432, i64 1
+ %tmp10434 = getelementptr inbounds float, float* %tmp10433, i64 1
+ %tmp10435 = getelementptr inbounds float, float* %tmp10434, i64 1
+ %tmp10436 = getelementptr inbounds float, float* %tmp10435, i64 1
+ %tmp10437 = getelementptr inbounds float, float* %tmp10436, i64 1
+ %tmp10438 = getelementptr inbounds float, float* %tmp10437, i64 1
+ %tmp10439 = getelementptr inbounds float, float* %tmp10438, i64 1
+ %tmp10440 = getelementptr inbounds float, float* %tmp10439, i64 1
+ %tmp10441 = getelementptr inbounds float, float* %tmp10440, i64 1
+ %tmp10442 = getelementptr inbounds float, float* %tmp10441, i64 1
+ %tmp10443 = getelementptr inbounds float, float* %tmp10442, i64 1
+ %tmp10444 = getelementptr inbounds float, float* %tmp10443, i64 1
+ %tmp10445 = getelementptr inbounds float, float* %tmp10444, i64 1
+ %tmp10446 = getelementptr inbounds float, float* %tmp10445, i64 1
+ %tmp10447 = getelementptr inbounds float, float* %tmp10446, i64 1
+ %tmp10448 = getelementptr inbounds float, float* %tmp10447, i64 1
+ %tmp10449 = getelementptr inbounds float, float* %tmp10448, i64 1
+ %tmp10450 = getelementptr inbounds float, float* %tmp10449, i64 1
+ %tmp10451 = getelementptr inbounds float, float* %tmp10450, i64 1
+ %tmp10452 = getelementptr inbounds float, float* %tmp10451, i64 1
+ %tmp10453 = getelementptr inbounds float, float* %tmp10452, i64 1
+ %tmp10454 = getelementptr inbounds float, float* %tmp10453, i64 1
+ %tmp10455 = getelementptr inbounds float, float* %tmp10454, i64 1
+ %tmp10456 = getelementptr inbounds float, float* %tmp10455, i64 1
+ %tmp10457 = getelementptr inbounds float, float* %tmp10456, i64 1
+ %tmp10458 = getelementptr inbounds float, float* %tmp10457, i64 1
+ %tmp10459 = getelementptr inbounds float, float* %tmp10458, i64 1
+ %tmp10460 = getelementptr inbounds float, float* %tmp10459, i64 1
+ %tmp10461 = getelementptr inbounds float, float* %tmp10460, i64 1
+ %tmp10462 = getelementptr inbounds float, float* %tmp10461, i64 1
+ %tmp10463 = getelementptr inbounds float, float* %tmp10462, i64 1
+ %tmp10464 = getelementptr inbounds float, float* %tmp10463, i64 1
+ %tmp10465 = getelementptr inbounds float, float* %tmp10464, i64 1
+ %tmp10466 = getelementptr inbounds float, float* %tmp10465, i64 1
+ %tmp10467 = getelementptr inbounds float, float* %tmp10466, i64 1
+ %tmp10468 = getelementptr inbounds float, float* %tmp10467, i64 1
+ %tmp10469 = getelementptr inbounds float, float* %tmp10468, i64 1
+ %tmp10470 = getelementptr inbounds float, float* %tmp10469, i64 1
+ %tmp10471 = getelementptr inbounds float, float* %tmp10470, i64 1
+ %tmp10472 = getelementptr inbounds float, float* %tmp10471, i64 1
+ %tmp10473 = getelementptr inbounds float, float* %tmp10472, i64 1
+ %tmp10474 = getelementptr inbounds float, float* %tmp10473, i64 1
+ %tmp10475 = getelementptr inbounds float, float* %tmp10474, i64 1
+ %tmp10476 = getelementptr inbounds float, float* %tmp10475, i64 1
+ %tmp10477 = getelementptr inbounds float, float* %tmp10476, i64 1
+ %tmp10478 = getelementptr inbounds float, float* %tmp10477, i64 1
+ %tmp10479 = getelementptr inbounds float, float* %tmp10478, i64 1
+ %tmp10480 = getelementptr inbounds float, float* %tmp10479, i64 1
+ %tmp10481 = getelementptr inbounds float, float* %tmp10480, i64 1
+ %tmp10482 = getelementptr inbounds float, float* %tmp10481, i64 1
+ %tmp10483 = getelementptr inbounds float, float* %tmp10482, i64 1
+ %tmp10484 = getelementptr inbounds float, float* %tmp10483, i64 1
+ %tmp10485 = getelementptr inbounds float, float* %tmp10484, i64 1
+ %tmp10486 = getelementptr inbounds float, float* %tmp10485, i64 1
+ %tmp10487 = getelementptr inbounds float, float* %tmp10486, i64 1
+ %tmp10488 = getelementptr inbounds float, float* %tmp10487, i64 1
+ %tmp10489 = getelementptr inbounds float, float* %tmp10488, i64 1
+ %tmp10490 = getelementptr inbounds float, float* %tmp10489, i64 1
+ %tmp10491 = getelementptr inbounds float, float* %tmp10490, i64 1
+ %tmp10492 = getelementptr inbounds float, float* %tmp10491, i64 1
+ %tmp10493 = getelementptr inbounds float, float* %tmp10492, i64 1
+ %tmp10494 = getelementptr inbounds float, float* %tmp10493, i64 1
+ %tmp10495 = getelementptr inbounds float, float* %tmp10494, i64 1
+ %tmp10496 = getelementptr inbounds float, float* %tmp10495, i64 1
+ %tmp10497 = getelementptr inbounds float, float* %tmp10496, i64 1
+ %tmp10498 = getelementptr inbounds float, float* %tmp10497, i64 1
+ %tmp10499 = getelementptr inbounds float, float* %tmp10498, i64 1
+ %tmp10500 = getelementptr inbounds float, float* %tmp10499, i64 1
+ %tmp10501 = getelementptr inbounds float, float* %tmp10500, i64 1
+ %tmp10502 = getelementptr inbounds float, float* %tmp10501, i64 1
+ %tmp10503 = getelementptr inbounds float, float* %tmp10502, i64 1
+ %tmp10504 = getelementptr inbounds float, float* %tmp10503, i64 1
+ %tmp10505 = getelementptr inbounds float, float* %tmp10504, i64 1
+ %tmp10506 = getelementptr inbounds float, float* %tmp10505, i64 1
+ %tmp10507 = getelementptr inbounds float, float* %tmp10506, i64 1
+ %tmp10508 = getelementptr inbounds float, float* %tmp10507, i64 1
+ %tmp10509 = getelementptr inbounds float, float* %tmp10508, i64 1
+ %tmp10510 = getelementptr inbounds float, float* %tmp10509, i64 1
+ %tmp10511 = getelementptr inbounds float, float* %tmp10510, i64 1
+ %tmp10512 = getelementptr inbounds float, float* %tmp10511, i64 1
+ %tmp10513 = getelementptr inbounds float, float* %tmp10512, i64 1
+ %tmp10514 = getelementptr inbounds float, float* %tmp10513, i64 1
+ %tmp10515 = getelementptr inbounds float, float* %tmp10514, i64 1
+ %tmp10516 = getelementptr inbounds float, float* %tmp10515, i64 1
+ %tmp10517 = getelementptr inbounds float, float* %tmp10516, i64 1
+ %tmp10518 = getelementptr inbounds float, float* %tmp10517, i64 1
+ %tmp10519 = getelementptr inbounds float, float* %tmp10518, i64 1
+ %tmp10520 = getelementptr inbounds float, float* %tmp10519, i64 1
+ %tmp10521 = getelementptr inbounds float, float* %tmp10520, i64 1
+ %tmp10522 = getelementptr inbounds float, float* %tmp10521, i64 1
+ %tmp10523 = getelementptr inbounds float, float* %tmp10522, i64 1
+ %tmp10524 = getelementptr inbounds float, float* %tmp10523, i64 1
+ %tmp10525 = getelementptr inbounds float, float* %tmp10524, i64 1
+ %tmp10526 = getelementptr inbounds float, float* %tmp10525, i64 1
+ %tmp10527 = getelementptr inbounds float, float* %tmp10526, i64 1
+ %tmp10528 = getelementptr inbounds float, float* %tmp10527, i64 1
+ %tmp10529 = getelementptr inbounds float, float* %tmp10528, i64 1
+ %tmp10530 = getelementptr inbounds float, float* %tmp10529, i64 1
+ %tmp10531 = getelementptr inbounds float, float* %tmp10530, i64 1
+ %tmp10532 = getelementptr inbounds float, float* %tmp10531, i64 1
+ %tmp10533 = getelementptr inbounds float, float* %tmp10532, i64 1
+ %tmp10534 = getelementptr inbounds float, float* %tmp10533, i64 1
+ %tmp10535 = getelementptr inbounds float, float* %tmp10534, i64 1
+ %tmp10536 = getelementptr inbounds float, float* %tmp10535, i64 1
+ %tmp10537 = getelementptr inbounds float, float* %tmp10536, i64 1
+ %tmp10538 = getelementptr inbounds float, float* %tmp10537, i64 1
+ %tmp10539 = getelementptr inbounds float, float* %tmp10538, i64 1
+ %tmp10540 = getelementptr inbounds float, float* %tmp10539, i64 1
+ %tmp10541 = getelementptr inbounds float, float* %tmp10540, i64 1
+ %tmp10542 = getelementptr inbounds float, float* %tmp10541, i64 1
+ %tmp10543 = getelementptr inbounds float, float* %tmp10542, i64 1
+ %tmp10544 = getelementptr inbounds float, float* %tmp10543, i64 1
+ %tmp10545 = getelementptr inbounds float, float* %tmp10544, i64 1
+ %tmp10546 = getelementptr inbounds float, float* %tmp10545, i64 1
+ %tmp10547 = getelementptr inbounds float, float* %tmp10546, i64 1
+ %tmp10548 = getelementptr inbounds float, float* %tmp10547, i64 1
+ %tmp10549 = getelementptr inbounds float, float* %tmp10548, i64 1
+ %tmp10550 = getelementptr inbounds float, float* %tmp10549, i64 1
+ %tmp10551 = getelementptr inbounds float, float* %tmp10550, i64 1
+ %tmp10552 = getelementptr inbounds float, float* %tmp10551, i64 1
+ %tmp10553 = getelementptr inbounds float, float* %tmp10552, i64 1
+ %tmp10554 = getelementptr inbounds float, float* %tmp10553, i64 1
+ %tmp10555 = getelementptr inbounds float, float* %tmp10554, i64 1
+ %tmp10556 = getelementptr inbounds float, float* %tmp10555, i64 1
+ %tmp10557 = getelementptr inbounds float, float* %tmp10556, i64 1
+ %tmp10558 = getelementptr inbounds float, float* %tmp10557, i64 1
+ %tmp10559 = getelementptr inbounds float, float* %tmp10558, i64 1
+ %tmp10560 = getelementptr inbounds float, float* %tmp10559, i64 1
+ %tmp10561 = getelementptr inbounds float, float* %tmp10560, i64 1
+ %tmp10562 = getelementptr inbounds float, float* %tmp10561, i64 1
+ %tmp10563 = getelementptr inbounds float, float* %tmp10562, i64 1
+ %tmp10564 = getelementptr inbounds float, float* %tmp10563, i64 1
+ %tmp10565 = getelementptr inbounds float, float* %tmp10564, i64 1
+ %tmp10566 = getelementptr inbounds float, float* %tmp10565, i64 1
+ %tmp10567 = getelementptr inbounds float, float* %tmp10566, i64 1
+ %tmp10568 = getelementptr inbounds float, float* %tmp10567, i64 1
+ %tmp10569 = getelementptr inbounds float, float* %tmp10568, i64 1
+ %tmp10570 = getelementptr inbounds float, float* %tmp10569, i64 1
+ %tmp10571 = getelementptr inbounds float, float* %tmp10570, i64 1
+ %tmp10572 = getelementptr inbounds float, float* %tmp10571, i64 1
+ %tmp10573 = getelementptr inbounds float, float* %tmp10572, i64 1
+ %tmp10574 = getelementptr inbounds float, float* %tmp10573, i64 1
+ %tmp10575 = getelementptr inbounds float, float* %tmp10574, i64 1
+ %tmp10576 = getelementptr inbounds float, float* %tmp10575, i64 1
+ %tmp10577 = getelementptr inbounds float, float* %tmp10576, i64 1
+ %tmp10578 = getelementptr inbounds float, float* %tmp10577, i64 1
+ %tmp10579 = getelementptr inbounds float, float* %tmp10578, i64 1
+ %tmp10580 = getelementptr inbounds float, float* %tmp10579, i64 1
+ %tmp10581 = getelementptr inbounds float, float* %tmp10580, i64 1
+ %tmp10582 = getelementptr inbounds float, float* %tmp10581, i64 1
+ %tmp10583 = getelementptr inbounds float, float* %tmp10582, i64 1
+ %tmp10584 = getelementptr inbounds float, float* %tmp10583, i64 1
+ %tmp10585 = getelementptr inbounds float, float* %tmp10584, i64 1
+ %tmp10586 = getelementptr inbounds float, float* %tmp10585, i64 1
+ %tmp10587 = getelementptr inbounds float, float* %tmp10586, i64 1
+ %tmp10588 = getelementptr inbounds float, float* %tmp10587, i64 1
+ %tmp10589 = getelementptr inbounds float, float* %tmp10588, i64 1
+ %tmp10590 = getelementptr inbounds float, float* %tmp10589, i64 1
+ %tmp10591 = getelementptr inbounds float, float* %tmp10590, i64 1
+ %tmp10592 = getelementptr inbounds float, float* %tmp10591, i64 1
+ %tmp10593 = getelementptr inbounds float, float* %tmp10592, i64 1
+ %tmp10594 = getelementptr inbounds float, float* %tmp10593, i64 1
+ %tmp10595 = getelementptr inbounds float, float* %tmp10594, i64 1
+ %tmp10596 = getelementptr inbounds float, float* %tmp10595, i64 1
+ %tmp10597 = getelementptr inbounds float, float* %tmp10596, i64 1
+ %tmp10598 = getelementptr inbounds float, float* %tmp10597, i64 1
+ %tmp10599 = getelementptr inbounds float, float* %tmp10598, i64 1
+ %tmp10600 = getelementptr inbounds float, float* %tmp10599, i64 1
+ %tmp10601 = getelementptr inbounds float, float* %tmp10600, i64 1
+ %tmp10602 = getelementptr inbounds float, float* %tmp10601, i64 1
+ %tmp10603 = getelementptr inbounds float, float* %tmp10602, i64 1
+ %tmp10604 = getelementptr inbounds float, float* %tmp10603, i64 1
+ %tmp10605 = getelementptr inbounds float, float* %tmp10604, i64 1
+ %tmp10606 = getelementptr inbounds float, float* %tmp10605, i64 1
+ %tmp10607 = getelementptr inbounds float, float* %tmp10606, i64 1
+ %tmp10608 = getelementptr inbounds float, float* %tmp10607, i64 1
+ %tmp10609 = getelementptr inbounds float, float* %tmp10608, i64 1
+ %tmp10610 = getelementptr inbounds float, float* %tmp10609, i64 1
+ %tmp10611 = getelementptr inbounds float, float* %tmp10610, i64 1
+ %tmp10612 = getelementptr inbounds float, float* %tmp10611, i64 1
+ %tmp10613 = getelementptr inbounds float, float* %tmp10612, i64 1
+ %tmp10614 = getelementptr inbounds float, float* %tmp10613, i64 1
+ %tmp10615 = getelementptr inbounds float, float* %tmp10614, i64 1
+ %tmp10616 = getelementptr inbounds float, float* %tmp10615, i64 1
+ %tmp10617 = getelementptr inbounds float, float* %tmp10616, i64 1
+ %tmp10618 = getelementptr inbounds float, float* %tmp10617, i64 1
+ %tmp10619 = getelementptr inbounds float, float* %tmp10618, i64 1
+ %tmp10620 = getelementptr inbounds float, float* %tmp10619, i64 1
+ %tmp10621 = getelementptr inbounds float, float* %tmp10620, i64 1
+ %tmp10622 = getelementptr inbounds float, float* %tmp10621, i64 1
+ %tmp10623 = getelementptr inbounds float, float* %tmp10622, i64 1
+ %tmp10624 = getelementptr inbounds float, float* %tmp10623, i64 1
+ %tmp10625 = getelementptr inbounds float, float* %tmp10624, i64 1
+ %tmp10626 = getelementptr inbounds float, float* %tmp10625, i64 1
+ %tmp10627 = getelementptr inbounds float, float* %tmp10626, i64 1
+ %tmp10628 = getelementptr inbounds float, float* %tmp10627, i64 1
+ %tmp10629 = getelementptr inbounds float, float* %tmp10628, i64 1
+ %tmp10630 = getelementptr inbounds float, float* %tmp10629, i64 1
+ %tmp10631 = getelementptr inbounds float, float* %tmp10630, i64 1
+ %tmp10632 = getelementptr inbounds float, float* %tmp10631, i64 1
+ %tmp10633 = getelementptr inbounds float, float* %tmp10632, i64 1
+ %tmp10634 = getelementptr inbounds float, float* %tmp10633, i64 1
+ %tmp10635 = getelementptr inbounds float, float* %tmp10634, i64 1
+ %tmp10636 = getelementptr inbounds float, float* %tmp10635, i64 1
+ %tmp10637 = getelementptr inbounds float, float* %tmp10636, i64 1
+ %tmp10638 = getelementptr inbounds float, float* %tmp10637, i64 1
+ %tmp10639 = getelementptr inbounds float, float* %tmp10638, i64 1
+ %tmp10640 = getelementptr inbounds float, float* %tmp10639, i64 1
+ %tmp10641 = getelementptr inbounds float, float* %tmp10640, i64 1
+ %tmp10642 = getelementptr inbounds float, float* %tmp10641, i64 1
+ %tmp10643 = getelementptr inbounds float, float* %tmp10642, i64 1
+ %tmp10644 = getelementptr inbounds float, float* %tmp10643, i64 1
+ %tmp10645 = getelementptr inbounds float, float* %tmp10644, i64 1
+ %tmp10646 = getelementptr inbounds float, float* %tmp10645, i64 1
+ %tmp10647 = getelementptr inbounds float, float* %tmp10646, i64 1
+ %tmp10648 = getelementptr inbounds float, float* %tmp10647, i64 1
+ %tmp10649 = getelementptr inbounds float, float* %tmp10648, i64 1
+ %tmp10650 = getelementptr inbounds float, float* %tmp10649, i64 1
+ %tmp10651 = getelementptr inbounds float, float* %tmp10650, i64 1
+ %tmp10652 = getelementptr inbounds float, float* %tmp10651, i64 1
+ %tmp10653 = getelementptr inbounds float, float* %tmp10652, i64 1
+ %tmp10654 = getelementptr inbounds float, float* %tmp10653, i64 1
+ %tmp10655 = getelementptr inbounds float, float* %tmp10654, i64 1
+ %tmp10656 = getelementptr inbounds float, float* %tmp10655, i64 1
+ %tmp10657 = getelementptr inbounds float, float* %tmp10656, i64 1
+ %tmp10658 = getelementptr inbounds float, float* %tmp10657, i64 1
+ %tmp10659 = getelementptr inbounds float, float* %tmp10658, i64 1
+ %tmp10660 = getelementptr inbounds float, float* %tmp10659, i64 1
+ %tmp10661 = getelementptr inbounds float, float* %tmp10660, i64 1
+ %tmp10662 = getelementptr inbounds float, float* %tmp10661, i64 1
+ %tmp10663 = getelementptr inbounds float, float* %tmp10662, i64 1
+ %tmp10664 = getelementptr inbounds float, float* %tmp10663, i64 1
+ %tmp10665 = getelementptr inbounds float, float* %tmp10664, i64 1
+ %tmp10666 = getelementptr inbounds float, float* %tmp10665, i64 1
+ %tmp10667 = getelementptr inbounds float, float* %tmp10666, i64 1
+ %tmp10668 = getelementptr inbounds float, float* %tmp10667, i64 1
+ %tmp10669 = getelementptr inbounds float, float* %tmp10668, i64 1
+ %tmp10670 = getelementptr inbounds float, float* %tmp10669, i64 1
+ %tmp10671 = getelementptr inbounds float, float* %tmp10670, i64 1
+ %tmp10672 = getelementptr inbounds float, float* %tmp10671, i64 1
+ %tmp10673 = getelementptr inbounds float, float* %tmp10672, i64 1
+ %tmp10674 = getelementptr inbounds float, float* %tmp10673, i64 1
+ %tmp10675 = getelementptr inbounds float, float* %tmp10674, i64 1
+ %tmp10676 = getelementptr inbounds float, float* %tmp10675, i64 1
+ %tmp10677 = getelementptr inbounds float, float* %tmp10676, i64 1
+ %tmp10678 = getelementptr inbounds float, float* %tmp10677, i64 1
+ %tmp10679 = getelementptr inbounds float, float* %tmp10678, i64 1
+ %tmp10680 = getelementptr inbounds float, float* %tmp10679, i64 1
+ %tmp10681 = getelementptr inbounds float, float* %tmp10680, i64 1
+ %tmp10682 = getelementptr inbounds float, float* %tmp10681, i64 1
+ %tmp10683 = getelementptr inbounds float, float* %tmp10682, i64 1
+ %tmp10684 = getelementptr inbounds float, float* %tmp10683, i64 1
+ %tmp10685 = getelementptr inbounds float, float* %tmp10684, i64 1
+ %tmp10686 = getelementptr inbounds float, float* %tmp10685, i64 1
+ %tmp10687 = getelementptr inbounds float, float* %tmp10686, i64 1
+ %tmp10688 = getelementptr inbounds float, float* %tmp10687, i64 1
+ %tmp10689 = getelementptr inbounds float, float* %tmp10688, i64 1
+ %tmp10690 = getelementptr inbounds float, float* %tmp10689, i64 1
+ %tmp10691 = getelementptr inbounds float, float* %tmp10690, i64 1
+ %tmp10692 = getelementptr inbounds float, float* %tmp10691, i64 1
+ %tmp10693 = getelementptr inbounds float, float* %tmp10692, i64 1
+ %tmp10694 = getelementptr inbounds float, float* %tmp10693, i64 1
+ %tmp10695 = getelementptr inbounds float, float* %tmp10694, i64 1
+ %tmp10696 = getelementptr inbounds float, float* %tmp10695, i64 1
+ %tmp10697 = getelementptr inbounds float, float* %tmp10696, i64 1
+ %tmp10698 = getelementptr inbounds float, float* %tmp10697, i64 1
+ %tmp10699 = getelementptr inbounds float, float* %tmp10698, i64 1
+ %tmp10700 = getelementptr inbounds float, float* %tmp10699, i64 1
+ %tmp10701 = getelementptr inbounds float, float* %tmp10700, i64 1
+ %tmp10702 = getelementptr inbounds float, float* %tmp10701, i64 1
+ %tmp10703 = getelementptr inbounds float, float* %tmp10702, i64 1
+ %tmp10704 = getelementptr inbounds float, float* %tmp10703, i64 1
+ %tmp10705 = getelementptr inbounds float, float* %tmp10704, i64 1
+ %tmp10706 = getelementptr inbounds float, float* %tmp10705, i64 1
+ %tmp10707 = getelementptr inbounds float, float* %tmp10706, i64 1
+ %tmp10708 = getelementptr inbounds float, float* %tmp10707, i64 1
+ %tmp10709 = getelementptr inbounds float, float* %tmp10708, i64 1
+ %tmp10710 = getelementptr inbounds float, float* %tmp10709, i64 1
+ %tmp10711 = getelementptr inbounds float, float* %tmp10710, i64 1
+ %tmp10712 = getelementptr inbounds float, float* %tmp10711, i64 1
+ %tmp10713 = getelementptr inbounds float, float* %tmp10712, i64 1
+ %tmp10714 = getelementptr inbounds float, float* %tmp10713, i64 1
+ %tmp10715 = getelementptr inbounds float, float* %tmp10714, i64 1
+ %tmp10716 = getelementptr inbounds float, float* %tmp10715, i64 1
+ %tmp10717 = getelementptr inbounds float, float* %tmp10716, i64 1
+ %tmp10718 = getelementptr inbounds float, float* %tmp10717, i64 1
+ %tmp10719 = getelementptr inbounds float, float* %tmp10718, i64 1
+ %tmp10720 = getelementptr inbounds float, float* %tmp10719, i64 1
+ %tmp10721 = getelementptr inbounds float, float* %tmp10720, i64 1
+ %tmp10722 = getelementptr inbounds float, float* %tmp10721, i64 1
+ %tmp10723 = getelementptr inbounds float, float* %tmp10722, i64 1
+ %tmp10724 = getelementptr inbounds float, float* %tmp10723, i64 1
+ %tmp10725 = getelementptr inbounds float, float* %tmp10724, i64 1
+ %tmp10726 = getelementptr inbounds float, float* %tmp10725, i64 1
+ %tmp10727 = getelementptr inbounds float, float* %tmp10726, i64 1
+ %tmp10728 = getelementptr inbounds float, float* %tmp10727, i64 1
+ %tmp10729 = getelementptr inbounds float, float* %tmp10728, i64 1
+ %tmp10730 = getelementptr inbounds float, float* %tmp10729, i64 1
+ %tmp10731 = getelementptr inbounds float, float* %tmp10730, i64 1
+ %tmp10732 = getelementptr inbounds float, float* %tmp10731, i64 1
+ %tmp10733 = getelementptr inbounds float, float* %tmp10732, i64 1
+ %tmp10734 = getelementptr inbounds float, float* %tmp10733, i64 1
+ %tmp10735 = getelementptr inbounds float, float* %tmp10734, i64 1
+ %tmp10736 = getelementptr inbounds float, float* %tmp10735, i64 1
+ %tmp10737 = getelementptr inbounds float, float* %tmp10736, i64 1
+ %tmp10738 = getelementptr inbounds float, float* %tmp10737, i64 1
+ %tmp10739 = getelementptr inbounds float, float* %tmp10738, i64 1
+ %tmp10740 = getelementptr inbounds float, float* %tmp10739, i64 1
+ %tmp10741 = getelementptr inbounds float, float* %tmp10740, i64 1
+ %tmp10742 = getelementptr inbounds float, float* %tmp10741, i64 1
+ %tmp10743 = getelementptr inbounds float, float* %tmp10742, i64 1
+ %tmp10744 = getelementptr inbounds float, float* %tmp10743, i64 1
+ %tmp10745 = getelementptr inbounds float, float* %tmp10744, i64 1
+ %tmp10746 = getelementptr inbounds float, float* %tmp10745, i64 1
+ %tmp10747 = getelementptr inbounds float, float* %tmp10746, i64 1
+ %tmp10748 = getelementptr inbounds float, float* %tmp10747, i64 1
+ %tmp10749 = getelementptr inbounds float, float* %tmp10748, i64 1
+ %tmp10750 = getelementptr inbounds float, float* %tmp10749, i64 1
+ %tmp10751 = getelementptr inbounds float, float* %tmp10750, i64 1
+ %tmp10752 = getelementptr inbounds float, float* %tmp10751, i64 1
+ %tmp10753 = getelementptr inbounds float, float* %tmp10752, i64 1
+ %tmp10754 = getelementptr inbounds float, float* %tmp10753, i64 1
+ %tmp10755 = getelementptr inbounds float, float* %tmp10754, i64 1
+ %tmp10756 = getelementptr inbounds float, float* %tmp10755, i64 1
+ %tmp10757 = getelementptr inbounds float, float* %tmp10756, i64 1
+ %tmp10758 = getelementptr inbounds float, float* %tmp10757, i64 1
+ %tmp10759 = getelementptr inbounds float, float* %tmp10758, i64 1
+ %tmp10760 = getelementptr inbounds float, float* %tmp10759, i64 1
+ %tmp10761 = getelementptr inbounds float, float* %tmp10760, i64 1
+ %tmp10762 = getelementptr inbounds float, float* %tmp10761, i64 1
+ %tmp10763 = getelementptr inbounds float, float* %tmp10762, i64 1
+ %tmp10764 = getelementptr inbounds float, float* %tmp10763, i64 1
+ %tmp10765 = getelementptr inbounds float, float* %tmp10764, i64 1
+ %tmp10766 = getelementptr inbounds float, float* %tmp10765, i64 1
+ %tmp10767 = getelementptr inbounds float, float* %tmp10766, i64 1
+ %tmp10768 = getelementptr inbounds float, float* %tmp10767, i64 1
+ %tmp10769 = getelementptr inbounds float, float* %tmp10768, i64 1
+ %tmp10770 = getelementptr inbounds float, float* %tmp10769, i64 1
+ %tmp10771 = getelementptr inbounds float, float* %tmp10770, i64 1
+ %tmp10772 = getelementptr inbounds float, float* %tmp10771, i64 1
+ %tmp10773 = getelementptr inbounds float, float* %tmp10772, i64 1
+ %tmp10774 = getelementptr inbounds float, float* %tmp10773, i64 1
+ %tmp10775 = getelementptr inbounds float, float* %tmp10774, i64 1
+ %tmp10776 = getelementptr inbounds float, float* %tmp10775, i64 1
+ %tmp10777 = getelementptr inbounds float, float* %tmp10776, i64 1
+ %tmp10778 = getelementptr inbounds float, float* %tmp10777, i64 1
+ %tmp10779 = getelementptr inbounds float, float* %tmp10778, i64 1
+ %tmp10780 = getelementptr inbounds float, float* %tmp10779, i64 1
+ %tmp10781 = getelementptr inbounds float, float* %tmp10780, i64 1
+ %tmp10782 = getelementptr inbounds float, float* %tmp10781, i64 1
+ %tmp10783 = getelementptr inbounds float, float* %tmp10782, i64 1
+ %tmp10784 = getelementptr inbounds float, float* %tmp10783, i64 1
+ %tmp10785 = getelementptr inbounds float, float* %tmp10784, i64 1
+ %tmp10786 = getelementptr inbounds float, float* %tmp10785, i64 1
+ %tmp10787 = getelementptr inbounds float, float* %tmp10786, i64 1
+ %tmp10788 = getelementptr inbounds float, float* %tmp10787, i64 1
+ %tmp10789 = getelementptr inbounds float, float* %tmp10788, i64 1
+ %tmp10790 = getelementptr inbounds float, float* %tmp10789, i64 1
+ %tmp10791 = getelementptr inbounds float, float* %tmp10790, i64 1
+ %tmp10792 = getelementptr inbounds float, float* %tmp10791, i64 1
+ %tmp10793 = getelementptr inbounds float, float* %tmp10792, i64 1
+ %tmp10794 = getelementptr inbounds float, float* %tmp10793, i64 1
+ %tmp10795 = getelementptr inbounds float, float* %tmp10794, i64 1
+ %tmp10796 = getelementptr inbounds float, float* %tmp10795, i64 1
+ %tmp10797 = getelementptr inbounds float, float* %tmp10796, i64 1
+ %tmp10798 = getelementptr inbounds float, float* %tmp10797, i64 1
+ %tmp10799 = getelementptr inbounds float, float* %tmp10798, i64 1
+ %tmp10800 = getelementptr inbounds float, float* %tmp10799, i64 1
+ %tmp10801 = getelementptr inbounds float, float* %tmp10800, i64 1
+ %tmp10802 = getelementptr inbounds float, float* %tmp10801, i64 1
+ %tmp10803 = getelementptr inbounds float, float* %tmp10802, i64 1
+ %tmp10804 = getelementptr inbounds float, float* %tmp10803, i64 1
+ %tmp10805 = getelementptr inbounds float, float* %tmp10804, i64 1
+ %tmp10806 = getelementptr inbounds float, float* %tmp10805, i64 1
+ %tmp10807 = getelementptr inbounds float, float* %tmp10806, i64 1
+ %tmp10808 = getelementptr inbounds float, float* %tmp10807, i64 1
+ %tmp10809 = getelementptr inbounds float, float* %tmp10808, i64 1
+ %tmp10810 = getelementptr inbounds float, float* %tmp10809, i64 1
+ %tmp10811 = getelementptr inbounds float, float* %tmp10810, i64 1
+ %tmp10812 = getelementptr inbounds float, float* %tmp10811, i64 1
+ %tmp10813 = getelementptr inbounds float, float* %tmp10812, i64 1
+ %tmp10814 = getelementptr inbounds float, float* %tmp10813, i64 1
+ %tmp10815 = getelementptr inbounds float, float* %tmp10814, i64 1
+ %tmp10816 = getelementptr inbounds float, float* %tmp10815, i64 1
+ %tmp10817 = getelementptr inbounds float, float* %tmp10816, i64 1
+ %tmp10818 = getelementptr inbounds float, float* %tmp10817, i64 1
+ %tmp10819 = getelementptr inbounds float, float* %tmp10818, i64 1
+ %tmp10820 = getelementptr inbounds float, float* %tmp10819, i64 1
+ %tmp10821 = getelementptr inbounds float, float* %tmp10820, i64 1
+ %tmp10822 = getelementptr inbounds float, float* %tmp10821, i64 1
+ %tmp10823 = getelementptr inbounds float, float* %tmp10822, i64 1
+ %tmp10824 = getelementptr inbounds float, float* %tmp10823, i64 1
+ %tmp10825 = getelementptr inbounds float, float* %tmp10824, i64 1
+ %tmp10826 = getelementptr inbounds float, float* %tmp10825, i64 1
+ %tmp10827 = getelementptr inbounds float, float* %tmp10826, i64 1
+ %tmp10828 = getelementptr inbounds float, float* %tmp10827, i64 1
+ %tmp10829 = getelementptr inbounds float, float* %tmp10828, i64 1
+ %tmp10830 = getelementptr inbounds float, float* %tmp10829, i64 1
+ %tmp10831 = getelementptr inbounds float, float* %tmp10830, i64 1
+ %tmp10832 = getelementptr inbounds float, float* %tmp10831, i64 1
+ %tmp10833 = getelementptr inbounds float, float* %tmp10832, i64 1
+ %tmp10834 = getelementptr inbounds float, float* %tmp10833, i64 1
+ %tmp10835 = getelementptr inbounds float, float* %tmp10834, i64 1
+ %tmp10836 = getelementptr inbounds float, float* %tmp10835, i64 1
+ %tmp10837 = getelementptr inbounds float, float* %tmp10836, i64 1
+ %tmp10838 = getelementptr inbounds float, float* %tmp10837, i64 1
+ %tmp10839 = getelementptr inbounds float, float* %tmp10838, i64 1
+ %tmp10840 = getelementptr inbounds float, float* %tmp10839, i64 1
+ %tmp10841 = getelementptr inbounds float, float* %tmp10840, i64 1
+ %tmp10842 = getelementptr inbounds float, float* %tmp10841, i64 1
+ %tmp10843 = getelementptr inbounds float, float* %tmp10842, i64 1
+ %tmp10844 = getelementptr inbounds float, float* %tmp10843, i64 1
+ %tmp10845 = getelementptr inbounds float, float* %tmp10844, i64 1
+ %tmp10846 = getelementptr inbounds float, float* %tmp10845, i64 1
+ %tmp10847 = getelementptr inbounds float, float* %tmp10846, i64 1
+ %tmp10848 = getelementptr inbounds float, float* %tmp10847, i64 1
+ %tmp10849 = getelementptr inbounds float, float* %tmp10848, i64 1
+ %tmp10850 = getelementptr inbounds float, float* %tmp10849, i64 1
+ %tmp10851 = getelementptr inbounds float, float* %tmp10850, i64 1
+ %tmp10852 = getelementptr inbounds float, float* %tmp10851, i64 1
+ %tmp10853 = getelementptr inbounds float, float* %tmp10852, i64 1
+ %tmp10854 = getelementptr inbounds float, float* %tmp10853, i64 1
+ %tmp10855 = getelementptr inbounds float, float* %tmp10854, i64 1
+ %tmp10856 = getelementptr inbounds float, float* %tmp10855, i64 1
+ %tmp10857 = getelementptr inbounds float, float* %tmp10856, i64 1
+ %tmp10858 = getelementptr inbounds float, float* %tmp10857, i64 1
+ %tmp10859 = getelementptr inbounds float, float* %tmp10858, i64 1
+ %tmp10860 = getelementptr inbounds float, float* %tmp10859, i64 1
+ %tmp10861 = getelementptr inbounds float, float* %tmp10860, i64 1
+ %tmp10862 = getelementptr inbounds float, float* %tmp10861, i64 1
+ %tmp10863 = getelementptr inbounds float, float* %tmp10862, i64 1
+ %tmp10864 = getelementptr inbounds float, float* %tmp10863, i64 1
+ %tmp10865 = getelementptr inbounds float, float* %tmp10864, i64 1
+ %tmp10866 = getelementptr inbounds float, float* %tmp10865, i64 1
+ %tmp10867 = getelementptr inbounds float, float* %tmp10866, i64 1
+ %tmp10868 = getelementptr inbounds float, float* %tmp10867, i64 1
+ %tmp10869 = getelementptr inbounds float, float* %tmp10868, i64 1
+ %tmp10870 = getelementptr inbounds float, float* %tmp10869, i64 1
+ %tmp10871 = getelementptr inbounds float, float* %tmp10870, i64 1
+ %tmp10872 = getelementptr inbounds float, float* %tmp10871, i64 1
+ %tmp10873 = getelementptr inbounds float, float* %tmp10872, i64 1
+ %tmp10874 = getelementptr inbounds float, float* %tmp10873, i64 1
+ %tmp10875 = getelementptr inbounds float, float* %tmp10874, i64 1
+ %tmp10876 = getelementptr inbounds float, float* %tmp10875, i64 1
+ %tmp10877 = getelementptr inbounds float, float* %tmp10876, i64 1
+ %tmp10878 = getelementptr inbounds float, float* %tmp10877, i64 1
+ %tmp10879 = getelementptr inbounds float, float* %tmp10878, i64 1
+ %tmp10880 = getelementptr inbounds float, float* %tmp10879, i64 1
+ %tmp10881 = getelementptr inbounds float, float* %tmp10880, i64 1
+ %tmp10882 = getelementptr inbounds float, float* %tmp10881, i64 1
+ %tmp10883 = getelementptr inbounds float, float* %tmp10882, i64 1
+ %tmp10884 = getelementptr inbounds float, float* %tmp10883, i64 1
+ %tmp10885 = getelementptr inbounds float, float* %tmp10884, i64 1
+ %tmp10886 = getelementptr inbounds float, float* %tmp10885, i64 1
+ %tmp10887 = getelementptr inbounds float, float* %tmp10886, i64 1
+ %tmp10888 = getelementptr inbounds float, float* %tmp10887, i64 1
+ %tmp10889 = getelementptr inbounds float, float* %tmp10888, i64 1
+ %tmp10890 = getelementptr inbounds float, float* %tmp10889, i64 1
+ %tmp10891 = getelementptr inbounds float, float* %tmp10890, i64 1
+ %tmp10892 = getelementptr inbounds float, float* %tmp10891, i64 1
+ %tmp10893 = getelementptr inbounds float, float* %tmp10892, i64 1
+ %tmp10894 = getelementptr inbounds float, float* %tmp10893, i64 1
+ %tmp10895 = getelementptr inbounds float, float* %tmp10894, i64 1
+ %tmp10896 = getelementptr inbounds float, float* %tmp10895, i64 1
+ %tmp10897 = getelementptr inbounds float, float* %tmp10896, i64 1
+ %tmp10898 = getelementptr inbounds float, float* %tmp10897, i64 1
+ %tmp10899 = getelementptr inbounds float, float* %tmp10898, i64 1
+ %tmp10900 = getelementptr inbounds float, float* %tmp10899, i64 1
+ %tmp10901 = getelementptr inbounds float, float* %tmp10900, i64 1
+ %tmp10902 = getelementptr inbounds float, float* %tmp10901, i64 1
+ %tmp10903 = getelementptr inbounds float, float* %tmp10902, i64 1
+ %tmp10904 = getelementptr inbounds float, float* %tmp10903, i64 1
+ %tmp10905 = getelementptr inbounds float, float* %tmp10904, i64 1
+ %tmp10906 = getelementptr inbounds float, float* %tmp10905, i64 1
+ %tmp10907 = getelementptr inbounds float, float* %tmp10906, i64 1
+ %tmp10908 = getelementptr inbounds float, float* %tmp10907, i64 1
+ %tmp10909 = getelementptr inbounds float, float* %tmp10908, i64 1
+ %tmp10910 = getelementptr inbounds float, float* %tmp10909, i64 1
+ %tmp10911 = getelementptr inbounds float, float* %tmp10910, i64 1
+ %tmp10912 = getelementptr inbounds float, float* %tmp10911, i64 1
+ %tmp10913 = getelementptr inbounds float, float* %tmp10912, i64 1
+ %tmp10914 = getelementptr inbounds float, float* %tmp10913, i64 1
+ %tmp10915 = getelementptr inbounds float, float* %tmp10914, i64 1
+ %tmp10916 = getelementptr inbounds float, float* %tmp10915, i64 1
+ %tmp10917 = getelementptr inbounds float, float* %tmp10916, i64 1
+ %tmp10918 = getelementptr inbounds float, float* %tmp10917, i64 1
+ %tmp10919 = getelementptr inbounds float, float* %tmp10918, i64 1
+ %tmp10920 = getelementptr inbounds float, float* %tmp10919, i64 1
+ %tmp10921 = getelementptr inbounds float, float* %tmp10920, i64 1
+ %tmp10922 = getelementptr inbounds float, float* %tmp10921, i64 1
+ %tmp10923 = getelementptr inbounds float, float* %tmp10922, i64 1
+ %tmp10924 = getelementptr inbounds float, float* %tmp10923, i64 1
+ %tmp10925 = getelementptr inbounds float, float* %tmp10924, i64 1
+ %tmp10926 = getelementptr inbounds float, float* %tmp10925, i64 1
+ %tmp10927 = getelementptr inbounds float, float* %tmp10926, i64 1
+ %tmp10928 = getelementptr inbounds float, float* %tmp10927, i64 1
+ %tmp10929 = getelementptr inbounds float, float* %tmp10928, i64 1
+ %tmp10930 = getelementptr inbounds float, float* %tmp10929, i64 1
+ %tmp10931 = getelementptr inbounds float, float* %tmp10930, i64 1
+ %tmp10932 = getelementptr inbounds float, float* %tmp10931, i64 1
+ %tmp10933 = getelementptr inbounds float, float* %tmp10932, i64 1
+ %tmp10934 = getelementptr inbounds float, float* %tmp10933, i64 1
+ %tmp10935 = getelementptr inbounds float, float* %tmp10934, i64 1
+ %tmp10936 = getelementptr inbounds float, float* %tmp10935, i64 1
+ %tmp10937 = getelementptr inbounds float, float* %tmp10936, i64 1
+ %tmp10938 = getelementptr inbounds float, float* %tmp10937, i64 1
+ %tmp10939 = getelementptr inbounds float, float* %tmp10938, i64 1
+ %tmp10940 = getelementptr inbounds float, float* %tmp10939, i64 1
+ %tmp10941 = getelementptr inbounds float, float* %tmp10940, i64 1
+ %tmp10942 = getelementptr inbounds float, float* %tmp10941, i64 1
+ %tmp10943 = getelementptr inbounds float, float* %tmp10942, i64 1
+ %tmp10944 = getelementptr inbounds float, float* %tmp10943, i64 1
+ %tmp10945 = getelementptr inbounds float, float* %tmp10944, i64 1
+ %tmp10946 = getelementptr inbounds float, float* %tmp10945, i64 1
+ %tmp10947 = getelementptr inbounds float, float* %tmp10946, i64 1
+ %tmp10948 = getelementptr inbounds float, float* %tmp10947, i64 1
+ %tmp10949 = getelementptr inbounds float, float* %tmp10948, i64 1
+ %tmp10950 = getelementptr inbounds float, float* %tmp10949, i64 1
+ %tmp10951 = getelementptr inbounds float, float* %tmp10950, i64 1
+ %tmp10952 = getelementptr inbounds float, float* %tmp10951, i64 1
+ %tmp10953 = getelementptr inbounds float, float* %tmp10952, i64 1
+ %tmp10954 = getelementptr inbounds float, float* %tmp10953, i64 1
+ %tmp10955 = getelementptr inbounds float, float* %tmp10954, i64 1
+ %tmp10956 = getelementptr inbounds float, float* %tmp10955, i64 1
+ %tmp10957 = getelementptr inbounds float, float* %tmp10956, i64 1
+ %tmp10958 = getelementptr inbounds float, float* %tmp10957, i64 1
+ %tmp10959 = getelementptr inbounds float, float* %tmp10958, i64 1
+ %tmp10960 = getelementptr inbounds float, float* %tmp10959, i64 1
+ %tmp10961 = getelementptr inbounds float, float* %tmp10960, i64 1
+ %tmp10962 = getelementptr inbounds float, float* %tmp10961, i64 1
+ %tmp10963 = getelementptr inbounds float, float* %tmp10962, i64 1
+ %tmp10964 = getelementptr inbounds float, float* %tmp10963, i64 1
+ %tmp10965 = getelementptr inbounds float, float* %tmp10964, i64 1
+ %tmp10966 = getelementptr inbounds float, float* %tmp10965, i64 1
+ %tmp10967 = getelementptr inbounds float, float* %tmp10966, i64 1
+ %tmp10968 = getelementptr inbounds float, float* %tmp10967, i64 1
+ %tmp10969 = getelementptr inbounds float, float* %tmp10968, i64 1
+ %tmp10970 = getelementptr inbounds float, float* %tmp10969, i64 1
+ %tmp10971 = getelementptr inbounds float, float* %tmp10970, i64 1
+ %tmp10972 = getelementptr inbounds float, float* %tmp10971, i64 1
+ %tmp10973 = getelementptr inbounds float, float* %tmp10972, i64 1
+ %tmp10974 = getelementptr inbounds float, float* %tmp10973, i64 1
+ %tmp10975 = getelementptr inbounds float, float* %tmp10974, i64 1
+ %tmp10976 = getelementptr inbounds float, float* %tmp10975, i64 1
+ %tmp10977 = getelementptr inbounds float, float* %tmp10976, i64 1
+ %tmp10978 = getelementptr inbounds float, float* %tmp10977, i64 1
+ %tmp10979 = getelementptr inbounds float, float* %tmp10978, i64 1
+ %tmp10980 = getelementptr inbounds float, float* %tmp10979, i64 1
+ %tmp10981 = getelementptr inbounds float, float* %tmp10980, i64 1
+ %tmp10982 = getelementptr inbounds float, float* %tmp10981, i64 1
+ %tmp10983 = getelementptr inbounds float, float* %tmp10982, i64 1
+ %tmp10984 = getelementptr inbounds float, float* %tmp10983, i64 1
+ %tmp10985 = getelementptr inbounds float, float* %tmp10984, i64 1
+ %tmp10986 = getelementptr inbounds float, float* %tmp10985, i64 1
+ %tmp10987 = getelementptr inbounds float, float* %tmp10986, i64 1
+ %tmp10988 = getelementptr inbounds float, float* %tmp10987, i64 1
+ %tmp10989 = getelementptr inbounds float, float* %tmp10988, i64 1
+ %tmp10990 = getelementptr inbounds float, float* %tmp10989, i64 1
+ %tmp10991 = getelementptr inbounds float, float* %tmp10990, i64 1
+ %tmp10992 = getelementptr inbounds float, float* %tmp10991, i64 1
+ %tmp10993 = getelementptr inbounds float, float* %tmp10992, i64 1
+ %tmp10994 = getelementptr inbounds float, float* %tmp10993, i64 1
+ %tmp10995 = getelementptr inbounds float, float* %tmp10994, i64 1
+ %tmp10996 = getelementptr inbounds float, float* %tmp10995, i64 1
+ %tmp10997 = getelementptr inbounds float, float* %tmp10996, i64 1
+ %tmp10998 = getelementptr inbounds float, float* %tmp10997, i64 1
+ %tmp10999 = getelementptr inbounds float, float* %tmp10998, i64 1
+ %tmp11000 = getelementptr inbounds float, float* %tmp10999, i64 1
+ %tmp11001 = getelementptr inbounds float, float* %tmp11000, i64 1
+ %tmp11002 = getelementptr inbounds float, float* %tmp11001, i64 1
+ %tmp11003 = getelementptr inbounds float, float* %tmp11002, i64 1
+ %tmp11004 = getelementptr inbounds float, float* %tmp11003, i64 1
+ %tmp11005 = getelementptr inbounds float, float* %tmp11004, i64 1
+ %tmp11006 = getelementptr inbounds float, float* %tmp11005, i64 1
+ %tmp11007 = getelementptr inbounds float, float* %tmp11006, i64 1
+ %tmp11008 = getelementptr inbounds float, float* %tmp11007, i64 1
+ %tmp11009 = getelementptr inbounds float, float* %tmp11008, i64 1
+ %tmp11010 = getelementptr inbounds float, float* %tmp11009, i64 1
+ %tmp11011 = getelementptr inbounds float, float* %tmp11010, i64 1
+ %tmp11012 = getelementptr inbounds float, float* %tmp11011, i64 1
+ %tmp11013 = getelementptr inbounds float, float* %tmp11012, i64 1
+ %tmp11014 = getelementptr inbounds float, float* %tmp11013, i64 1
+ %tmp11015 = getelementptr inbounds float, float* %tmp11014, i64 1
+ %tmp11016 = getelementptr inbounds float, float* %tmp11015, i64 1
+ %tmp11017 = getelementptr inbounds float, float* %tmp11016, i64 1
+ %tmp11018 = getelementptr inbounds float, float* %tmp11017, i64 1
+ %tmp11019 = getelementptr inbounds float, float* %tmp11018, i64 1
+ %tmp11020 = getelementptr inbounds float, float* %tmp11019, i64 1
+ %tmp11021 = getelementptr inbounds float, float* %tmp11020, i64 1
+ %tmp11022 = getelementptr inbounds float, float* %tmp11021, i64 1
+ %tmp11023 = getelementptr inbounds float, float* %tmp11022, i64 1
+ %tmp11024 = getelementptr inbounds float, float* %tmp11023, i64 1
+ %tmp11025 = getelementptr inbounds float, float* %tmp11024, i64 1
+ %tmp11026 = getelementptr inbounds float, float* %tmp11025, i64 1
+ %tmp11027 = getelementptr inbounds float, float* %tmp11026, i64 1
+ %tmp11028 = getelementptr inbounds float, float* %tmp11027, i64 1
+ %tmp11029 = getelementptr inbounds float, float* %tmp11028, i64 1
+ %tmp11030 = getelementptr inbounds float, float* %tmp11029, i64 1
+ %tmp11031 = getelementptr inbounds float, float* %tmp11030, i64 1
+ %tmp11032 = getelementptr inbounds float, float* %tmp11031, i64 1
+ %tmp11033 = getelementptr inbounds float, float* %tmp11032, i64 1
+ %tmp11034 = getelementptr inbounds float, float* %tmp11033, i64 1
+ %tmp11035 = getelementptr inbounds float, float* %tmp11034, i64 1
+ %tmp11036 = getelementptr inbounds float, float* %tmp11035, i64 1
+ %tmp11037 = getelementptr inbounds float, float* %tmp11036, i64 1
+ %tmp11038 = getelementptr inbounds float, float* %tmp11037, i64 1
+ %tmp11039 = getelementptr inbounds float, float* %tmp11038, i64 1
+ %tmp11040 = getelementptr inbounds float, float* %tmp11039, i64 1
+ %tmp11041 = getelementptr inbounds float, float* %tmp11040, i64 1
+ %tmp11042 = getelementptr inbounds float, float* %tmp11041, i64 1
+ %tmp11043 = getelementptr inbounds float, float* %tmp11042, i64 1
+ %tmp11044 = getelementptr inbounds float, float* %tmp11043, i64 1
+ %tmp11045 = getelementptr inbounds float, float* %tmp11044, i64 1
+ %tmp11046 = getelementptr inbounds float, float* %tmp11045, i64 1
+ %tmp11047 = getelementptr inbounds float, float* %tmp11046, i64 1
+ %tmp11048 = getelementptr inbounds float, float* %tmp11047, i64 1
+ %tmp11049 = getelementptr inbounds float, float* %tmp11048, i64 1
+ %tmp11050 = getelementptr inbounds float, float* %tmp11049, i64 1
+ %tmp11051 = getelementptr inbounds float, float* %tmp11050, i64 1
+ %tmp11052 = getelementptr inbounds float, float* %tmp11051, i64 1
+ %tmp11053 = getelementptr inbounds float, float* %tmp11052, i64 1
+ %tmp11054 = getelementptr inbounds float, float* %tmp11053, i64 1
+ %tmp11055 = getelementptr inbounds float, float* %tmp11054, i64 1
+ %tmp11056 = getelementptr inbounds float, float* %tmp11055, i64 1
+ %tmp11057 = getelementptr inbounds float, float* %tmp11056, i64 1
+ %tmp11058 = getelementptr inbounds float, float* %tmp11057, i64 1
+ %tmp11059 = getelementptr inbounds float, float* %tmp11058, i64 1
+ %tmp11060 = getelementptr inbounds float, float* %tmp11059, i64 1
+ %tmp11061 = getelementptr inbounds float, float* %tmp11060, i64 1
+ %tmp11062 = getelementptr inbounds float, float* %tmp11061, i64 1
+ %tmp11063 = getelementptr inbounds float, float* %tmp11062, i64 1
+ %tmp11064 = getelementptr inbounds float, float* %tmp11063, i64 1
+ %tmp11065 = getelementptr inbounds float, float* %tmp11064, i64 1
+ %tmp11066 = getelementptr inbounds float, float* %tmp11065, i64 1
+ %tmp11067 = getelementptr inbounds float, float* %tmp11066, i64 1
+ %tmp11068 = getelementptr inbounds float, float* %tmp11067, i64 1
+ %tmp11069 = getelementptr inbounds float, float* %tmp11068, i64 1
+ %tmp11070 = getelementptr inbounds float, float* %tmp11069, i64 1
+ %tmp11071 = getelementptr inbounds float, float* %tmp11070, i64 1
+ %tmp11072 = getelementptr inbounds float, float* %tmp11071, i64 1
+ %tmp11073 = getelementptr inbounds float, float* %tmp11072, i64 1
+ %tmp11074 = getelementptr inbounds float, float* %tmp11073, i64 1
+ %tmp11075 = getelementptr inbounds float, float* %tmp11074, i64 1
+ %tmp11076 = getelementptr inbounds float, float* %tmp11075, i64 1
+ %tmp11077 = getelementptr inbounds float, float* %tmp11076, i64 1
+ %tmp11078 = getelementptr inbounds float, float* %tmp11077, i64 1
+ %tmp11079 = getelementptr inbounds float, float* %tmp11078, i64 1
+ %tmp11080 = getelementptr inbounds float, float* %tmp11079, i64 1
+ %tmp11081 = getelementptr inbounds float, float* %tmp11080, i64 1
+ %tmp11082 = getelementptr inbounds float, float* %tmp11081, i64 1
+ %tmp11083 = getelementptr inbounds float, float* %tmp11082, i64 1
+ %tmp11084 = getelementptr inbounds float, float* %tmp11083, i64 1
+ %tmp11085 = getelementptr inbounds float, float* %tmp11084, i64 1
+ %tmp11086 = getelementptr inbounds float, float* %tmp11085, i64 1
+ %tmp11087 = getelementptr inbounds float, float* %tmp11086, i64 1
+ %tmp11088 = getelementptr inbounds float, float* %tmp11087, i64 1
+ %tmp11089 = getelementptr inbounds float, float* %tmp11088, i64 1
+ %tmp11090 = getelementptr inbounds float, float* %tmp11089, i64 1
+ %tmp11091 = getelementptr inbounds float, float* %tmp11090, i64 1
+ %tmp11092 = getelementptr inbounds float, float* %tmp11091, i64 1
+ %tmp11093 = getelementptr inbounds float, float* %tmp11092, i64 1
+ %tmp11094 = getelementptr inbounds float, float* %tmp11093, i64 1
+ %tmp11095 = getelementptr inbounds float, float* %tmp11094, i64 1
+ %tmp11096 = getelementptr inbounds float, float* %tmp11095, i64 1
+ %tmp11097 = getelementptr inbounds float, float* %tmp11096, i64 1
+ %tmp11098 = getelementptr inbounds float, float* %tmp11097, i64 1
+ %tmp11099 = getelementptr inbounds float, float* %tmp11098, i64 1
+ %tmp11100 = getelementptr inbounds float, float* %tmp11099, i64 1
+ %tmp11101 = getelementptr inbounds float, float* %tmp11100, i64 1
+ %tmp11102 = getelementptr inbounds float, float* %tmp11101, i64 1
+ %tmp11103 = getelementptr inbounds float, float* %tmp11102, i64 1
+ %tmp11104 = getelementptr inbounds float, float* %tmp11103, i64 1
+ %tmp11105 = getelementptr inbounds float, float* %tmp11104, i64 1
+ %tmp11106 = getelementptr inbounds float, float* %tmp11105, i64 1
+ %tmp11107 = getelementptr inbounds float, float* %tmp11106, i64 1
+ %tmp11108 = getelementptr inbounds float, float* %tmp11107, i64 1
+ %tmp11109 = getelementptr inbounds float, float* %tmp11108, i64 1
+ %tmp11110 = getelementptr inbounds float, float* %tmp11109, i64 1
+ %tmp11111 = getelementptr inbounds float, float* %tmp11110, i64 1
+ %tmp11112 = getelementptr inbounds float, float* %tmp11111, i64 1
+ %tmp11113 = getelementptr inbounds float, float* %tmp11112, i64 1
+ %tmp11114 = getelementptr inbounds float, float* %tmp11113, i64 1
+ %tmp11115 = getelementptr inbounds float, float* %tmp11114, i64 1
+ %tmp11116 = getelementptr inbounds float, float* %tmp11115, i64 1
+ %tmp11117 = getelementptr inbounds float, float* %tmp11116, i64 1
+ %tmp11118 = getelementptr inbounds float, float* %tmp11117, i64 1
+ %tmp11119 = getelementptr inbounds float, float* %tmp11118, i64 1
+ %tmp11120 = getelementptr inbounds float, float* %tmp11119, i64 1
+ %tmp11121 = getelementptr inbounds float, float* %tmp11120, i64 1
+ %tmp11122 = getelementptr inbounds float, float* %tmp11121, i64 1
+ %tmp11123 = getelementptr inbounds float, float* %tmp11122, i64 1
+ %tmp11124 = getelementptr inbounds float, float* %tmp11123, i64 1
+ %tmp11125 = getelementptr inbounds float, float* %tmp11124, i64 1
+ %tmp11126 = getelementptr inbounds float, float* %tmp11125, i64 1
+ %tmp11127 = getelementptr inbounds float, float* %tmp11126, i64 1
+ %tmp11128 = getelementptr inbounds float, float* %tmp11127, i64 1
+ %tmp11129 = getelementptr inbounds float, float* %tmp11128, i64 1
+ %tmp11130 = getelementptr inbounds float, float* %tmp11129, i64 1
+ %tmp11131 = getelementptr inbounds float, float* %tmp11130, i64 1
+ %tmp11132 = getelementptr inbounds float, float* %tmp11131, i64 1
+ %tmp11133 = getelementptr inbounds float, float* %tmp11132, i64 1
+ %tmp11134 = getelementptr inbounds float, float* %tmp11133, i64 1
+ %tmp11135 = getelementptr inbounds float, float* %tmp11134, i64 1
+ %tmp11136 = getelementptr inbounds float, float* %tmp11135, i64 1
+ %tmp11137 = getelementptr inbounds float, float* %tmp11136, i64 1
+ %tmp11138 = getelementptr inbounds float, float* %tmp11137, i64 1
+ %tmp11139 = getelementptr inbounds float, float* %tmp11138, i64 1
+ %tmp11140 = getelementptr inbounds float, float* %tmp11139, i64 1
+ %tmp11141 = getelementptr inbounds float, float* %tmp11140, i64 1
+ %tmp11142 = getelementptr inbounds float, float* %tmp11141, i64 1
+ %tmp11143 = getelementptr inbounds float, float* %tmp11142, i64 1
+ %tmp11144 = getelementptr inbounds float, float* %tmp11143, i64 1
+ %tmp11145 = getelementptr inbounds float, float* %tmp11144, i64 1
+ %tmp11146 = getelementptr inbounds float, float* %tmp11145, i64 1
+ %tmp11147 = getelementptr inbounds float, float* %tmp11146, i64 1
+ %tmp11148 = getelementptr inbounds float, float* %tmp11147, i64 1
+ %tmp11149 = getelementptr inbounds float, float* %tmp11148, i64 1
+ %tmp11150 = getelementptr inbounds float, float* %tmp11149, i64 1
+ %tmp11151 = getelementptr inbounds float, float* %tmp11150, i64 1
+ %tmp11152 = getelementptr inbounds float, float* %tmp11151, i64 1
+ %tmp11153 = getelementptr inbounds float, float* %tmp11152, i64 1
+ %tmp11154 = getelementptr inbounds float, float* %tmp11153, i64 1
+ %tmp11155 = getelementptr inbounds float, float* %tmp11154, i64 1
+ %tmp11156 = getelementptr inbounds float, float* %tmp11155, i64 1
+ %tmp11157 = getelementptr inbounds float, float* %tmp11156, i64 1
+ %tmp11158 = getelementptr inbounds float, float* %tmp11157, i64 1
+ %tmp11159 = getelementptr inbounds float, float* %tmp11158, i64 1
+ %tmp11160 = getelementptr inbounds float, float* %tmp11159, i64 1
+ %tmp11161 = getelementptr inbounds float, float* %tmp11160, i64 1
+ %tmp11162 = getelementptr inbounds float, float* %tmp11161, i64 1
+ %tmp11163 = getelementptr inbounds float, float* %tmp11162, i64 1
+ %tmp11164 = getelementptr inbounds float, float* %tmp11163, i64 1
+ %tmp11165 = getelementptr inbounds float, float* %tmp11164, i64 1
+ %tmp11166 = getelementptr inbounds float, float* %tmp11165, i64 1
+ %tmp11167 = getelementptr inbounds float, float* %tmp11166, i64 1
+ %tmp11168 = getelementptr inbounds float, float* %tmp11167, i64 1
+ %tmp11169 = getelementptr inbounds float, float* %tmp11168, i64 1
+ %tmp11170 = getelementptr inbounds float, float* %tmp11169, i64 1
+ %tmp11171 = getelementptr inbounds float, float* %tmp11170, i64 1
+ %tmp11172 = getelementptr inbounds float, float* %tmp11171, i64 1
+ %tmp11173 = getelementptr inbounds float, float* %tmp11172, i64 1
+ %tmp11174 = getelementptr inbounds float, float* %tmp11173, i64 1
+ %tmp11175 = getelementptr inbounds float, float* %tmp11174, i64 1
+ %tmp11176 = getelementptr inbounds float, float* %tmp11175, i64 1
+ %tmp11177 = getelementptr inbounds float, float* %tmp11176, i64 1
+ %tmp11178 = getelementptr inbounds float, float* %tmp11177, i64 1
+ %tmp11179 = getelementptr inbounds float, float* %tmp11178, i64 1
+ %tmp11180 = getelementptr inbounds float, float* %tmp11179, i64 1
+ %tmp11181 = getelementptr inbounds float, float* %tmp11180, i64 1
+ %tmp11182 = getelementptr inbounds float, float* %tmp11181, i64 1
+ %tmp11183 = getelementptr inbounds float, float* %tmp11182, i64 1
+ %tmp11184 = getelementptr inbounds float, float* %tmp11183, i64 1
+ %tmp11185 = getelementptr inbounds float, float* %tmp11184, i64 1
+ %tmp11186 = getelementptr inbounds float, float* %tmp11185, i64 1
+ %tmp11187 = getelementptr inbounds float, float* %tmp11186, i64 1
+ %tmp11188 = getelementptr inbounds float, float* %tmp11187, i64 1
+ %tmp11189 = getelementptr inbounds float, float* %tmp11188, i64 1
+ %tmp11190 = getelementptr inbounds float, float* %tmp11189, i64 1
+ %tmp11191 = getelementptr inbounds float, float* %tmp11190, i64 1
+ %tmp11192 = getelementptr inbounds float, float* %tmp11191, i64 1
+ %tmp11193 = getelementptr inbounds float, float* %tmp11192, i64 1
+ %tmp11194 = getelementptr inbounds float, float* %tmp11193, i64 1
+ %tmp11195 = getelementptr inbounds float, float* %tmp11194, i64 1
+ %tmp11196 = getelementptr inbounds float, float* %tmp11195, i64 1
+ %tmp11197 = getelementptr inbounds float, float* %tmp11196, i64 1
+ %tmp11198 = getelementptr inbounds float, float* %tmp11197, i64 1
+ %tmp11199 = getelementptr inbounds float, float* %tmp11198, i64 1
+ %tmp11200 = getelementptr inbounds float, float* %tmp11199, i64 1
+ %tmp11201 = getelementptr inbounds float, float* %tmp11200, i64 1
+ %tmp11202 = getelementptr inbounds float, float* %tmp11201, i64 1
+ %tmp11203 = getelementptr inbounds float, float* %tmp11202, i64 1
+ %tmp11204 = getelementptr inbounds float, float* %tmp11203, i64 1
+ %tmp11205 = getelementptr inbounds float, float* %tmp11204, i64 1
+ %tmp11206 = getelementptr inbounds float, float* %tmp11205, i64 1
+ %tmp11207 = getelementptr inbounds float, float* %tmp11206, i64 1
+ %tmp11208 = getelementptr inbounds float, float* %tmp11207, i64 1
+ %tmp11209 = getelementptr inbounds float, float* %tmp11208, i64 1
+ %tmp11210 = getelementptr inbounds float, float* %tmp11209, i64 1
+ %tmp11211 = getelementptr inbounds float, float* %tmp11210, i64 1
+ %tmp11212 = getelementptr inbounds float, float* %tmp11211, i64 1
+ %tmp11213 = getelementptr inbounds float, float* %tmp11212, i64 1
+ %tmp11214 = getelementptr inbounds float, float* %tmp11213, i64 1
+ %tmp11215 = getelementptr inbounds float, float* %tmp11214, i64 1
+ %tmp11216 = getelementptr inbounds float, float* %tmp11215, i64 1
+ %tmp11217 = getelementptr inbounds float, float* %tmp11216, i64 1
+ %tmp11218 = getelementptr inbounds float, float* %tmp11217, i64 1
+ %tmp11219 = getelementptr inbounds float, float* %tmp11218, i64 1
+ %tmp11220 = getelementptr inbounds float, float* %tmp11219, i64 1
+ %tmp11221 = getelementptr inbounds float, float* %tmp11220, i64 1
+ %tmp11222 = getelementptr inbounds float, float* %tmp11221, i64 1
+ %tmp11223 = getelementptr inbounds float, float* %tmp11222, i64 1
+ %tmp11224 = getelementptr inbounds float, float* %tmp11223, i64 1
+ %tmp11225 = getelementptr inbounds float, float* %tmp11224, i64 1
+ %tmp11226 = getelementptr inbounds float, float* %tmp11225, i64 1
+ %tmp11227 = getelementptr inbounds float, float* %tmp11226, i64 1
+ %tmp11228 = getelementptr inbounds float, float* %tmp11227, i64 1
+ %tmp11229 = getelementptr inbounds float, float* %tmp11228, i64 1
+ %tmp11230 = getelementptr inbounds float, float* %tmp11229, i64 1
+ %tmp11231 = getelementptr inbounds float, float* %tmp11230, i64 1
+ %tmp11232 = getelementptr inbounds float, float* %tmp11231, i64 1
+ %tmp11233 = getelementptr inbounds float, float* %tmp11232, i64 1
+ %tmp11234 = getelementptr inbounds float, float* %tmp11233, i64 1
+ %tmp11235 = getelementptr inbounds float, float* %tmp11234, i64 1
+ %tmp11236 = getelementptr inbounds float, float* %tmp11235, i64 1
+ %tmp11237 = getelementptr inbounds float, float* %tmp11236, i64 1
+ %tmp11238 = getelementptr inbounds float, float* %tmp11237, i64 1
+ %tmp11239 = getelementptr inbounds float, float* %tmp11238, i64 1
+ %tmp11240 = getelementptr inbounds float, float* %tmp11239, i64 1
+ %tmp11241 = getelementptr inbounds float, float* %tmp11240, i64 1
+ %tmp11242 = getelementptr inbounds float, float* %tmp11241, i64 1
+ %tmp11243 = getelementptr inbounds float, float* %tmp11242, i64 1
+ %tmp11244 = getelementptr inbounds float, float* %tmp11243, i64 1
+ %tmp11245 = getelementptr inbounds float, float* %tmp11244, i64 1
+ %tmp11246 = getelementptr inbounds float, float* %tmp11245, i64 1
+ %tmp11247 = getelementptr inbounds float, float* %tmp11246, i64 1
+ %tmp11248 = getelementptr inbounds float, float* %tmp11247, i64 1
+ %tmp11249 = getelementptr inbounds float, float* %tmp11248, i64 1
+ %tmp11250 = getelementptr inbounds float, float* %tmp11249, i64 1
+ %tmp11251 = getelementptr inbounds float, float* %tmp11250, i64 1
+ %tmp11252 = getelementptr inbounds float, float* %tmp11251, i64 1
+ %tmp11253 = getelementptr inbounds float, float* %tmp11252, i64 1
+ %tmp11254 = getelementptr inbounds float, float* %tmp11253, i64 1
+ %tmp11255 = getelementptr inbounds float, float* %tmp11254, i64 1
+ %tmp11256 = getelementptr inbounds float, float* %tmp11255, i64 1
+ %tmp11257 = getelementptr inbounds float, float* %tmp11256, i64 1
+ %tmp11258 = getelementptr inbounds float, float* %tmp11257, i64 1
+ %tmp11259 = getelementptr inbounds float, float* %tmp11258, i64 1
+ %tmp11260 = getelementptr inbounds float, float* %tmp11259, i64 1
+ %tmp11261 = getelementptr inbounds float, float* %tmp11260, i64 1
+ %tmp11262 = getelementptr inbounds float, float* %tmp11261, i64 1
+ %tmp11263 = getelementptr inbounds float, float* %tmp11262, i64 1
+ %tmp11264 = getelementptr inbounds float, float* %tmp11263, i64 1
+ %tmp11265 = getelementptr inbounds float, float* %tmp11264, i64 1
+ %tmp11266 = getelementptr inbounds float, float* %tmp11265, i64 1
+ %tmp11267 = getelementptr inbounds float, float* %tmp11266, i64 1
+ %tmp11268 = getelementptr inbounds float, float* %tmp11267, i64 1
+ %tmp11269 = getelementptr inbounds float, float* %tmp11268, i64 1
+ %tmp11270 = getelementptr inbounds float, float* %tmp11269, i64 1
+ %tmp11271 = getelementptr inbounds float, float* %tmp11270, i64 1
+ %tmp11272 = getelementptr inbounds float, float* %tmp11271, i64 1
+ %tmp11273 = getelementptr inbounds float, float* %tmp11272, i64 1
+ %tmp11274 = getelementptr inbounds float, float* %tmp11273, i64 1
+ %tmp11275 = getelementptr inbounds float, float* %tmp11274, i64 1
+ %tmp11276 = getelementptr inbounds float, float* %tmp11275, i64 1
+ %tmp11277 = getelementptr inbounds float, float* %tmp11276, i64 1
+ %tmp11278 = getelementptr inbounds float, float* %tmp11277, i64 1
+ %tmp11279 = getelementptr inbounds float, float* %tmp11278, i64 1
+ %tmp11280 = getelementptr inbounds float, float* %tmp11279, i64 1
+ %tmp11281 = getelementptr inbounds float, float* %tmp11280, i64 1
+ %tmp11282 = getelementptr inbounds float, float* %tmp11281, i64 1
+ %tmp11283 = getelementptr inbounds float, float* %tmp11282, i64 1
+ %tmp11284 = getelementptr inbounds float, float* %tmp11283, i64 1
+ %tmp11285 = getelementptr inbounds float, float* %tmp11284, i64 1
+ %tmp11286 = getelementptr inbounds float, float* %tmp11285, i64 1
+ %tmp11287 = getelementptr inbounds float, float* %tmp11286, i64 1
+ %tmp11288 = getelementptr inbounds float, float* %tmp11287, i64 1
+ %tmp11289 = getelementptr inbounds float, float* %tmp11288, i64 1
+ %tmp11290 = getelementptr inbounds float, float* %tmp11289, i64 1
+ %tmp11291 = getelementptr inbounds float, float* %tmp11290, i64 1
+ %tmp11292 = getelementptr inbounds float, float* %tmp11291, i64 1
+ %tmp11293 = getelementptr inbounds float, float* %tmp11292, i64 1
+ %tmp11294 = getelementptr inbounds float, float* %tmp11293, i64 1
+ %tmp11295 = getelementptr inbounds float, float* %tmp11294, i64 1
+ %tmp11296 = getelementptr inbounds float, float* %tmp11295, i64 1
+ %tmp11297 = getelementptr inbounds float, float* %tmp11296, i64 1
+ %tmp11298 = getelementptr inbounds float, float* %tmp11297, i64 1
+ %tmp11299 = getelementptr inbounds float, float* %tmp11298, i64 1
+ %tmp11300 = getelementptr inbounds float, float* %tmp11299, i64 1
+ %tmp11301 = getelementptr inbounds float, float* %tmp11300, i64 1
+ %tmp11302 = getelementptr inbounds float, float* %tmp11301, i64 1
+ %tmp11303 = getelementptr inbounds float, float* %tmp11302, i64 1
+ %tmp11304 = getelementptr inbounds float, float* %tmp11303, i64 1
+ %tmp11305 = getelementptr inbounds float, float* %tmp11304, i64 1
+ %tmp11306 = getelementptr inbounds float, float* %tmp11305, i64 1
+ %tmp11307 = getelementptr inbounds float, float* %tmp11306, i64 1
+ %tmp11308 = getelementptr inbounds float, float* %tmp11307, i64 1
+ %tmp11309 = getelementptr inbounds float, float* %tmp11308, i64 1
+ %tmp11310 = getelementptr inbounds float, float* %tmp11309, i64 1
+ %tmp11311 = getelementptr inbounds float, float* %tmp11310, i64 1
+ %tmp11312 = getelementptr inbounds float, float* %tmp11311, i64 1
+ %tmp11313 = getelementptr inbounds float, float* %tmp11312, i64 1
+ %tmp11314 = getelementptr inbounds float, float* %tmp11313, i64 1
+ %tmp11315 = getelementptr inbounds float, float* %tmp11314, i64 1
+ %tmp11316 = getelementptr inbounds float, float* %tmp11315, i64 1
+ %tmp11317 = getelementptr inbounds float, float* %tmp11316, i64 1
+ %tmp11318 = getelementptr inbounds float, float* %tmp11317, i64 1
+ %tmp11319 = getelementptr inbounds float, float* %tmp11318, i64 1
+ %tmp11320 = getelementptr inbounds float, float* %tmp11319, i64 1
+ %tmp11321 = getelementptr inbounds float, float* %tmp11320, i64 1
+ %tmp11322 = getelementptr inbounds float, float* %tmp11321, i64 1
+ %tmp11323 = getelementptr inbounds float, float* %tmp11322, i64 1
+ %tmp11324 = getelementptr inbounds float, float* %tmp11323, i64 1
+ %tmp11325 = getelementptr inbounds float, float* %tmp11324, i64 1
+ %tmp11326 = getelementptr inbounds float, float* %tmp11325, i64 1
+ %tmp11327 = getelementptr inbounds float, float* %tmp11326, i64 1
+ %tmp11328 = getelementptr inbounds float, float* %tmp11327, i64 1
+ %tmp11329 = getelementptr inbounds float, float* %tmp11328, i64 1
+ %tmp11330 = getelementptr inbounds float, float* %tmp11329, i64 1
+ %tmp11331 = getelementptr inbounds float, float* %tmp11330, i64 1
+ %tmp11332 = getelementptr inbounds float, float* %tmp11331, i64 1
+ %tmp11333 = getelementptr inbounds float, float* %tmp11332, i64 1
+ %tmp11334 = getelementptr inbounds float, float* %tmp11333, i64 1
+ %tmp11335 = getelementptr inbounds float, float* %tmp11334, i64 1
+ %tmp11336 = getelementptr inbounds float, float* %tmp11335, i64 1
+ %tmp11337 = getelementptr inbounds float, float* %tmp11336, i64 1
+ %tmp11338 = getelementptr inbounds float, float* %tmp11337, i64 1
+ %tmp11339 = getelementptr inbounds float, float* %tmp11338, i64 1
+ %tmp11340 = getelementptr inbounds float, float* %tmp11339, i64 1
+ %tmp11341 = getelementptr inbounds float, float* %tmp11340, i64 1
+ %tmp11342 = getelementptr inbounds float, float* %tmp11341, i64 1
+ %tmp11343 = getelementptr inbounds float, float* %tmp11342, i64 1
+ %tmp11344 = getelementptr inbounds float, float* %tmp11343, i64 1
+ %tmp11345 = getelementptr inbounds float, float* %tmp11344, i64 1
+ %tmp11346 = getelementptr inbounds float, float* %tmp11345, i64 1
+ %tmp11347 = getelementptr inbounds float, float* %tmp11346, i64 1
+ %tmp11348 = getelementptr inbounds float, float* %tmp11347, i64 1
+ %tmp11349 = getelementptr inbounds float, float* %tmp11348, i64 1
+ %tmp11350 = getelementptr inbounds float, float* %tmp11349, i64 1
+ %tmp11351 = getelementptr inbounds float, float* %tmp11350, i64 1
+ %tmp11352 = getelementptr inbounds float, float* %tmp11351, i64 1
+ %tmp11353 = getelementptr inbounds float, float* %tmp11352, i64 1
+ %tmp11354 = getelementptr inbounds float, float* %tmp11353, i64 1
+ %tmp11355 = getelementptr inbounds float, float* %tmp11354, i64 1
+ %tmp11356 = getelementptr inbounds float, float* %tmp11355, i64 1
+ %tmp11357 = getelementptr inbounds float, float* %tmp11356, i64 1
+ %tmp11358 = getelementptr inbounds float, float* %tmp11357, i64 1
+ %tmp11359 = getelementptr inbounds float, float* %tmp11358, i64 1
+ %tmp11360 = getelementptr inbounds float, float* %tmp11359, i64 1
+ %tmp11361 = getelementptr inbounds float, float* %tmp11360, i64 1
+ %tmp11362 = getelementptr inbounds float, float* %tmp11361, i64 1
+ %tmp11363 = getelementptr inbounds float, float* %tmp11362, i64 1
+ %tmp11364 = getelementptr inbounds float, float* %tmp11363, i64 1
+ %tmp11365 = getelementptr inbounds float, float* %tmp11364, i64 1
+ %tmp11366 = getelementptr inbounds float, float* %tmp11365, i64 1
+ %tmp11367 = getelementptr inbounds float, float* %tmp11366, i64 1
+ %tmp11368 = getelementptr inbounds float, float* %tmp11367, i64 1
+ %tmp11369 = getelementptr inbounds float, float* %tmp11368, i64 1
+ %tmp11370 = getelementptr inbounds float, float* %tmp11369, i64 1
+ %tmp11371 = getelementptr inbounds float, float* %tmp11370, i64 1
+ %tmp11372 = getelementptr inbounds float, float* %tmp11371, i64 1
+ %tmp11373 = getelementptr inbounds float, float* %tmp11372, i64 1
+ %tmp11374 = getelementptr inbounds float, float* %tmp11373, i64 1
+ %tmp11375 = getelementptr inbounds float, float* %tmp11374, i64 1
+ %tmp11376 = getelementptr inbounds float, float* %tmp11375, i64 1
+ %tmp11377 = getelementptr inbounds float, float* %tmp11376, i64 1
+ %tmp11378 = getelementptr inbounds float, float* %tmp11377, i64 1
+ %tmp11379 = getelementptr inbounds float, float* %tmp11378, i64 1
+ %tmp11380 = getelementptr inbounds float, float* %tmp11379, i64 1
+ %tmp11381 = getelementptr inbounds float, float* %tmp11380, i64 1
+ %tmp11382 = getelementptr inbounds float, float* %tmp11381, i64 1
+ %tmp11383 = getelementptr inbounds float, float* %tmp11382, i64 1
+ %tmp11384 = getelementptr inbounds float, float* %tmp11383, i64 1
+ %tmp11385 = getelementptr inbounds float, float* %tmp11384, i64 1
+ %tmp11386 = getelementptr inbounds float, float* %tmp11385, i64 1
+ %tmp11387 = getelementptr inbounds float, float* %tmp11386, i64 1
+ %tmp11388 = getelementptr inbounds float, float* %tmp11387, i64 1
+ %tmp11389 = getelementptr inbounds float, float* %tmp11388, i64 1
+ %tmp11390 = getelementptr inbounds float, float* %tmp11389, i64 1
+ %tmp11391 = getelementptr inbounds float, float* %tmp11390, i64 1
+ %tmp11392 = getelementptr inbounds float, float* %tmp11391, i64 1
+ %tmp11393 = getelementptr inbounds float, float* %tmp11392, i64 1
+ %tmp11394 = getelementptr inbounds float, float* %tmp11393, i64 1
+ %tmp11395 = getelementptr inbounds float, float* %tmp11394, i64 1
+ %tmp11396 = getelementptr inbounds float, float* %tmp11395, i64 1
+ %tmp11397 = getelementptr inbounds float, float* %tmp11396, i64 1
+ %tmp11398 = getelementptr inbounds float, float* %tmp11397, i64 1
+ %tmp11399 = getelementptr inbounds float, float* %tmp11398, i64 1
+ %tmp11400 = getelementptr inbounds float, float* %tmp11399, i64 1
+ %tmp11401 = getelementptr inbounds float, float* %tmp11400, i64 1
+ %tmp11402 = getelementptr inbounds float, float* %tmp11401, i64 1
+ %tmp11403 = getelementptr inbounds float, float* %tmp11402, i64 1
+ %tmp11404 = getelementptr inbounds float, float* %tmp11403, i64 1
+ %tmp11405 = getelementptr inbounds float, float* %tmp11404, i64 1
+ %tmp11406 = getelementptr inbounds float, float* %tmp11405, i64 1
+ %tmp11407 = getelementptr inbounds float, float* %tmp11406, i64 1
+ %tmp11408 = getelementptr inbounds float, float* %tmp11407, i64 1
+ %tmp11409 = getelementptr inbounds float, float* %tmp11408, i64 1
+ %tmp11410 = getelementptr inbounds float, float* %tmp11409, i64 1
+ %tmp11411 = getelementptr inbounds float, float* %tmp11410, i64 1
+ %tmp11412 = getelementptr inbounds float, float* %tmp11411, i64 1
+ %tmp11413 = getelementptr inbounds float, float* %tmp11412, i64 1
+ %tmp11414 = getelementptr inbounds float, float* %tmp11413, i64 1
+ %tmp11415 = getelementptr inbounds float, float* %tmp11414, i64 1
+ %tmp11416 = getelementptr inbounds float, float* %tmp11415, i64 1
+ %tmp11417 = getelementptr inbounds float, float* %tmp11416, i64 1
+ %tmp11418 = getelementptr inbounds float, float* %tmp11417, i64 1
+ %tmp11419 = getelementptr inbounds float, float* %tmp11418, i64 1
+ %tmp11420 = getelementptr inbounds float, float* %tmp11419, i64 1
+ %tmp11421 = getelementptr inbounds float, float* %tmp11420, i64 1
+ %tmp11422 = getelementptr inbounds float, float* %tmp11421, i64 1
+ %tmp11423 = getelementptr inbounds float, float* %tmp11422, i64 1
+ %tmp11424 = getelementptr inbounds float, float* %tmp11423, i64 1
+ %tmp11425 = getelementptr inbounds float, float* %tmp11424, i64 1
+ %tmp11426 = getelementptr inbounds float, float* %tmp11425, i64 1
+ %tmp11427 = getelementptr inbounds float, float* %tmp11426, i64 1
+ %tmp11428 = getelementptr inbounds float, float* %tmp11427, i64 1
+ %tmp11429 = getelementptr inbounds float, float* %tmp11428, i64 1
+ %tmp11430 = getelementptr inbounds float, float* %tmp11429, i64 1
+ %tmp11431 = getelementptr inbounds float, float* %tmp11430, i64 1
+ %tmp11432 = getelementptr inbounds float, float* %tmp11431, i64 1
+ %tmp11433 = getelementptr inbounds float, float* %tmp11432, i64 1
+ %tmp11434 = getelementptr inbounds float, float* %tmp11433, i64 1
+ %tmp11435 = getelementptr inbounds float, float* %tmp11434, i64 1
+ %tmp11436 = getelementptr inbounds float, float* %tmp11435, i64 1
+ %tmp11437 = getelementptr inbounds float, float* %tmp11436, i64 1
+ %tmp11438 = getelementptr inbounds float, float* %tmp11437, i64 1
+ %tmp11439 = getelementptr inbounds float, float* %tmp11438, i64 1
+ %tmp11440 = getelementptr inbounds float, float* %tmp11439, i64 1
+ %tmp11441 = getelementptr inbounds float, float* %tmp11440, i64 1
+ %tmp11442 = getelementptr inbounds float, float* %tmp11441, i64 1
+ %tmp11443 = getelementptr inbounds float, float* %tmp11442, i64 1
+ %tmp11444 = getelementptr inbounds float, float* %tmp11443, i64 1
+ %tmp11445 = getelementptr inbounds float, float* %tmp11444, i64 1
+ %tmp11446 = getelementptr inbounds float, float* %tmp11445, i64 1
+ %tmp11447 = getelementptr inbounds float, float* %tmp11446, i64 1
+ %tmp11448 = getelementptr inbounds float, float* %tmp11447, i64 1
+ %tmp11449 = getelementptr inbounds float, float* %tmp11448, i64 1
+ %tmp11450 = getelementptr inbounds float, float* %tmp11449, i64 1
+ %tmp11451 = getelementptr inbounds float, float* %tmp11450, i64 1
+ %tmp11452 = getelementptr inbounds float, float* %tmp11451, i64 1
+ %tmp11453 = getelementptr inbounds float, float* %tmp11452, i64 1
+ %tmp11454 = getelementptr inbounds float, float* %tmp11453, i64 1
+ %tmp11455 = getelementptr inbounds float, float* %tmp11454, i64 1
+ %tmp11456 = getelementptr inbounds float, float* %tmp11455, i64 1
+ %tmp11457 = getelementptr inbounds float, float* %tmp11456, i64 1
+ %tmp11458 = getelementptr inbounds float, float* %tmp11457, i64 1
+ %tmp11459 = getelementptr inbounds float, float* %tmp11458, i64 1
+ %tmp11460 = getelementptr inbounds float, float* %tmp11459, i64 1
+ %tmp11461 = getelementptr inbounds float, float* %tmp11460, i64 1
+ %tmp11462 = getelementptr inbounds float, float* %tmp11461, i64 1
+ %tmp11463 = getelementptr inbounds float, float* %tmp11462, i64 1
+ %tmp11464 = getelementptr inbounds float, float* %tmp11463, i64 1
+ %tmp11465 = getelementptr inbounds float, float* %tmp11464, i64 1
+ %tmp11466 = getelementptr inbounds float, float* %tmp11465, i64 1
+ %tmp11467 = getelementptr inbounds float, float* %tmp11466, i64 1
+ %tmp11468 = getelementptr inbounds float, float* %tmp11467, i64 1
+ %tmp11469 = getelementptr inbounds float, float* %tmp11468, i64 1
+ %tmp11470 = getelementptr inbounds float, float* %tmp11469, i64 1
+ %tmp11471 = getelementptr inbounds float, float* %tmp11470, i64 1
+ %tmp11472 = getelementptr inbounds float, float* %tmp11471, i64 1
+ %tmp11473 = getelementptr inbounds float, float* %tmp11472, i64 1
+ %tmp11474 = getelementptr inbounds float, float* %tmp11473, i64 1
+ %tmp11475 = getelementptr inbounds float, float* %tmp11474, i64 1
+ %tmp11476 = getelementptr inbounds float, float* %tmp11475, i64 1
+ %tmp11477 = getelementptr inbounds float, float* %tmp11476, i64 1
+ %tmp11478 = getelementptr inbounds float, float* %tmp11477, i64 1
+ %tmp11479 = getelementptr inbounds float, float* %tmp11478, i64 1
+ %tmp11480 = getelementptr inbounds float, float* %tmp11479, i64 1
+ %tmp11481 = getelementptr inbounds float, float* %tmp11480, i64 1
+ %tmp11482 = getelementptr inbounds float, float* %tmp11481, i64 1
+ %tmp11483 = getelementptr inbounds float, float* %tmp11482, i64 1
+ %tmp11484 = getelementptr inbounds float, float* %tmp11483, i64 1
+ %tmp11485 = getelementptr inbounds float, float* %tmp11484, i64 1
+ %tmp11486 = getelementptr inbounds float, float* %tmp11485, i64 1
+ %tmp11487 = getelementptr inbounds float, float* %tmp11486, i64 1
+ %tmp11488 = getelementptr inbounds float, float* %tmp11487, i64 1
+ %tmp11489 = getelementptr inbounds float, float* %tmp11488, i64 1
+ %tmp11490 = getelementptr inbounds float, float* %tmp11489, i64 1
+ %tmp11491 = getelementptr inbounds float, float* %tmp11490, i64 1
+ %tmp11492 = getelementptr inbounds float, float* %tmp11491, i64 1
+ %tmp11493 = getelementptr inbounds float, float* %tmp11492, i64 1
+ %tmp11494 = getelementptr inbounds float, float* %tmp11493, i64 1
+ %tmp11495 = getelementptr inbounds float, float* %tmp11494, i64 1
+ %tmp11496 = getelementptr inbounds float, float* %tmp11495, i64 1
+ %tmp11497 = getelementptr inbounds float, float* %tmp11496, i64 1
+ %tmp11498 = getelementptr inbounds float, float* %tmp11497, i64 1
+ %tmp11499 = getelementptr inbounds float, float* %tmp11498, i64 1
+ %tmp11500 = getelementptr inbounds float, float* %tmp11499, i64 1
+ %tmp11501 = getelementptr inbounds float, float* %tmp11500, i64 1
+ %tmp11502 = getelementptr inbounds float, float* %tmp11501, i64 1
+ %tmp11503 = getelementptr inbounds float, float* %tmp11502, i64 1
+ %tmp11504 = getelementptr inbounds float, float* %tmp11503, i64 1
+ %tmp11505 = getelementptr inbounds float, float* %tmp11504, i64 1
+ %tmp11506 = getelementptr inbounds float, float* %tmp11505, i64 1
+ %tmp11507 = getelementptr inbounds float, float* %tmp11506, i64 1
+ %tmp11508 = getelementptr inbounds float, float* %tmp11507, i64 1
+ %tmp11509 = getelementptr inbounds float, float* %tmp11508, i64 1
+ %tmp11510 = getelementptr inbounds float, float* %tmp11509, i64 1
+ %tmp11511 = getelementptr inbounds float, float* %tmp11510, i64 1
+ %tmp11512 = getelementptr inbounds float, float* %tmp11511, i64 1
+ %tmp11513 = getelementptr inbounds float, float* %tmp11512, i64 1
+ %tmp11514 = getelementptr inbounds float, float* %tmp11513, i64 1
+ %tmp11515 = getelementptr inbounds float, float* %tmp11514, i64 1
+ %tmp11516 = getelementptr inbounds float, float* %tmp11515, i64 1
+ %tmp11517 = getelementptr inbounds float, float* %tmp11516, i64 1
+ %tmp11518 = getelementptr inbounds float, float* %tmp11517, i64 1
+ %tmp11519 = getelementptr inbounds float, float* %tmp11518, i64 1
+ %tmp11520 = getelementptr inbounds float, float* %tmp11519, i64 1
+ %tmp11521 = getelementptr inbounds float, float* %tmp11520, i64 1
+ %tmp11522 = getelementptr inbounds float, float* %tmp11521, i64 1
+ %tmp11523 = getelementptr inbounds float, float* %tmp11522, i64 1
+ %tmp11524 = getelementptr inbounds float, float* %tmp11523, i64 1
+ %tmp11525 = getelementptr inbounds float, float* %tmp11524, i64 1
+ %tmp11526 = getelementptr inbounds float, float* %tmp11525, i64 1
+ %tmp11527 = getelementptr inbounds float, float* %tmp11526, i64 1
+ %tmp11528 = getelementptr inbounds float, float* %tmp11527, i64 1
+ %tmp11529 = getelementptr inbounds float, float* %tmp11528, i64 1
+ %tmp11530 = getelementptr inbounds float, float* %tmp11529, i64 1
+ %tmp11531 = getelementptr inbounds float, float* %tmp11530, i64 1
+ %tmp11532 = getelementptr inbounds float, float* %tmp11531, i64 1
+ %tmp11533 = getelementptr inbounds float, float* %tmp11532, i64 1
+ %tmp11534 = getelementptr inbounds float, float* %tmp11533, i64 1
+ %tmp11535 = getelementptr inbounds float, float* %tmp11534, i64 1
+ %tmp11536 = getelementptr inbounds float, float* %tmp11535, i64 1
+ %tmp11537 = getelementptr inbounds float, float* %tmp11536, i64 1
+ %tmp11538 = getelementptr inbounds float, float* %tmp11537, i64 1
+ %tmp11539 = getelementptr inbounds float, float* %tmp11538, i64 1
+ %tmp11540 = getelementptr inbounds float, float* %tmp11539, i64 1
+ %tmp11541 = getelementptr inbounds float, float* %tmp11540, i64 1
+ %tmp11542 = getelementptr inbounds float, float* %tmp11541, i64 1
+ %tmp11543 = getelementptr inbounds float, float* %tmp11542, i64 1
+ %tmp11544 = getelementptr inbounds float, float* %tmp11543, i64 1
+ %tmp11545 = getelementptr inbounds float, float* %tmp11544, i64 1
+ %tmp11546 = getelementptr inbounds float, float* %tmp11545, i64 1
+ %tmp11547 = getelementptr inbounds float, float* %tmp11546, i64 1
+ %tmp11548 = getelementptr inbounds float, float* %tmp11547, i64 1
+ %tmp11549 = getelementptr inbounds float, float* %tmp11548, i64 1
+ %tmp11550 = getelementptr inbounds float, float* %tmp11549, i64 1
+ %tmp11551 = getelementptr inbounds float, float* %tmp11550, i64 1
+ %tmp11552 = getelementptr inbounds float, float* %tmp11551, i64 1
+ %tmp11553 = getelementptr inbounds float, float* %tmp11552, i64 1
+ %tmp11554 = getelementptr inbounds float, float* %tmp11553, i64 1
+ %tmp11555 = getelementptr inbounds float, float* %tmp11554, i64 1
+ %tmp11556 = getelementptr inbounds float, float* %tmp11555, i64 1
+ %tmp11557 = getelementptr inbounds float, float* %tmp11556, i64 1
+ %tmp11558 = getelementptr inbounds float, float* %tmp11557, i64 1
+ %tmp11559 = getelementptr inbounds float, float* %tmp11558, i64 1
+ %tmp11560 = getelementptr inbounds float, float* %tmp11559, i64 1
+ %tmp11561 = getelementptr inbounds float, float* %tmp11560, i64 1
+ %tmp11562 = getelementptr inbounds float, float* %tmp11561, i64 1
+ %tmp11563 = getelementptr inbounds float, float* %tmp11562, i64 1
+ %tmp11564 = getelementptr inbounds float, float* %tmp11563, i64 1
+ %tmp11565 = getelementptr inbounds float, float* %tmp11564, i64 1
+ %tmp11566 = getelementptr inbounds float, float* %tmp11565, i64 1
+ %tmp11567 = getelementptr inbounds float, float* %tmp11566, i64 1
+ %tmp11568 = getelementptr inbounds float, float* %tmp11567, i64 1
+ %tmp11569 = getelementptr inbounds float, float* %tmp11568, i64 1
+ %tmp11570 = getelementptr inbounds float, float* %tmp11569, i64 1
+ %tmp11571 = getelementptr inbounds float, float* %tmp11570, i64 1
+ %tmp11572 = getelementptr inbounds float, float* %tmp11571, i64 1
+ %tmp11573 = getelementptr inbounds float, float* %tmp11572, i64 1
+ %tmp11574 = getelementptr inbounds float, float* %tmp11573, i64 1
+ %tmp11575 = getelementptr inbounds float, float* %tmp11574, i64 1
+ %tmp11576 = getelementptr inbounds float, float* %tmp11575, i64 1
+ %tmp11577 = getelementptr inbounds float, float* %tmp11576, i64 1
+ %tmp11578 = getelementptr inbounds float, float* %tmp11577, i64 1
+ %tmp11579 = getelementptr inbounds float, float* %tmp11578, i64 1
+ %tmp11580 = getelementptr inbounds float, float* %tmp11579, i64 1
+ %tmp11581 = getelementptr inbounds float, float* %tmp11580, i64 1
+ %tmp11582 = getelementptr inbounds float, float* %tmp11581, i64 1
+ %tmp11583 = getelementptr inbounds float, float* %tmp11582, i64 1
+ %tmp11584 = getelementptr inbounds float, float* %tmp11583, i64 1
+ %tmp11585 = getelementptr inbounds float, float* %tmp11584, i64 1
+ %tmp11586 = getelementptr inbounds float, float* %tmp11585, i64 1
+ %tmp11587 = getelementptr inbounds float, float* %tmp11586, i64 1
+ %tmp11588 = getelementptr inbounds float, float* %tmp11587, i64 1
+ %tmp11589 = getelementptr inbounds float, float* %tmp11588, i64 1
+ %tmp11590 = getelementptr inbounds float, float* %tmp11589, i64 1
+ %tmp11591 = getelementptr inbounds float, float* %tmp11590, i64 1
+ %tmp11592 = getelementptr inbounds float, float* %tmp11591, i64 1
+ %tmp11593 = getelementptr inbounds float, float* %tmp11592, i64 1
+ %tmp11594 = getelementptr inbounds float, float* %tmp11593, i64 1
+ %tmp11595 = getelementptr inbounds float, float* %tmp11594, i64 1
+ %tmp11596 = getelementptr inbounds float, float* %tmp11595, i64 1
+ %tmp11597 = getelementptr inbounds float, float* %tmp11596, i64 1
+ %tmp11598 = getelementptr inbounds float, float* %tmp11597, i64 1
+ %tmp11599 = getelementptr inbounds float, float* %tmp11598, i64 1
+ %tmp11600 = getelementptr inbounds float, float* %tmp11599, i64 1
+ %tmp11601 = getelementptr inbounds float, float* %tmp11600, i64 1
+ %tmp11602 = getelementptr inbounds float, float* %tmp11601, i64 1
+ %tmp11603 = getelementptr inbounds float, float* %tmp11602, i64 1
+ %tmp11604 = getelementptr inbounds float, float* %tmp11603, i64 1
+ %tmp11605 = getelementptr inbounds float, float* %tmp11604, i64 1
+ %tmp11606 = getelementptr inbounds float, float* %tmp11605, i64 1
+ %tmp11607 = getelementptr inbounds float, float* %tmp11606, i64 1
+ %tmp11608 = getelementptr inbounds float, float* %tmp11607, i64 1
+ %tmp11609 = getelementptr inbounds float, float* %tmp11608, i64 1
+ %tmp11610 = getelementptr inbounds float, float* %tmp11609, i64 1
+ %tmp11611 = getelementptr inbounds float, float* %tmp11610, i64 1
+ %tmp11612 = getelementptr inbounds float, float* %tmp11611, i64 1
+ %tmp11613 = getelementptr inbounds float, float* %tmp11612, i64 1
+ %tmp11614 = getelementptr inbounds float, float* %tmp11613, i64 1
+ %tmp11615 = getelementptr inbounds float, float* %tmp11614, i64 1
+ %tmp11616 = getelementptr inbounds float, float* %tmp11615, i64 1
+ %tmp11617 = getelementptr inbounds float, float* %tmp11616, i64 1
+ %tmp11618 = getelementptr inbounds float, float* %tmp11617, i64 1
+ %tmp11619 = getelementptr inbounds float, float* %tmp11618, i64 1
+ %tmp11620 = getelementptr inbounds float, float* %tmp11619, i64 1
+ %tmp11621 = getelementptr inbounds float, float* %tmp11620, i64 1
+ %tmp11622 = getelementptr inbounds float, float* %tmp11621, i64 1
+ %tmp11623 = getelementptr inbounds float, float* %tmp11622, i64 1
+ %tmp11624 = getelementptr inbounds float, float* %tmp11623, i64 1
+ %tmp11625 = getelementptr inbounds float, float* %tmp11624, i64 1
+ %tmp11626 = getelementptr inbounds float, float* %tmp11625, i64 1
+ %tmp11627 = getelementptr inbounds float, float* %tmp11626, i64 1
+ %tmp11628 = getelementptr inbounds float, float* %tmp11627, i64 1
+ %tmp11629 = getelementptr inbounds float, float* %tmp11628, i64 1
+ %tmp11630 = getelementptr inbounds float, float* %tmp11629, i64 1
+ %tmp11631 = getelementptr inbounds float, float* %tmp11630, i64 1
+ %tmp11632 = getelementptr inbounds float, float* %tmp11631, i64 1
+ %tmp11633 = getelementptr inbounds float, float* %tmp11632, i64 1
+ %tmp11634 = getelementptr inbounds float, float* %tmp11633, i64 1
+ %tmp11635 = getelementptr inbounds float, float* %tmp11634, i64 1
+ %tmp11636 = getelementptr inbounds float, float* %tmp11635, i64 1
+ %tmp11637 = getelementptr inbounds float, float* %tmp11636, i64 1
+ %tmp11638 = getelementptr inbounds float, float* %tmp11637, i64 1
+ %tmp11639 = getelementptr inbounds float, float* %tmp11638, i64 1
+ %tmp11640 = getelementptr inbounds float, float* %tmp11639, i64 1
+ %tmp11641 = getelementptr inbounds float, float* %tmp11640, i64 1
+ %tmp11642 = getelementptr inbounds float, float* %tmp11641, i64 1
+ %tmp11643 = getelementptr inbounds float, float* %tmp11642, i64 1
+ %tmp11644 = getelementptr inbounds float, float* %tmp11643, i64 1
+ %tmp11645 = getelementptr inbounds float, float* %tmp11644, i64 1
+ %tmp11646 = getelementptr inbounds float, float* %tmp11645, i64 1
+ %tmp11647 = getelementptr inbounds float, float* %tmp11646, i64 1
+ %tmp11648 = getelementptr inbounds float, float* %tmp11647, i64 1
+ %tmp11649 = getelementptr inbounds float, float* %tmp11648, i64 1
+ %tmp11650 = getelementptr inbounds float, float* %tmp11649, i64 1
+ %tmp11651 = getelementptr inbounds float, float* %tmp11650, i64 1
+ %tmp11652 = getelementptr inbounds float, float* %tmp11651, i64 1
+ %tmp11653 = getelementptr inbounds float, float* %tmp11652, i64 1
+ %tmp11654 = getelementptr inbounds float, float* %tmp11653, i64 1
+ %tmp11655 = getelementptr inbounds float, float* %tmp11654, i64 1
+ %tmp11656 = getelementptr inbounds float, float* %tmp11655, i64 1
+ %tmp11657 = getelementptr inbounds float, float* %tmp11656, i64 1
+ %tmp11658 = getelementptr inbounds float, float* %tmp11657, i64 1
+ %tmp11659 = getelementptr inbounds float, float* %tmp11658, i64 1
+ %tmp11660 = getelementptr inbounds float, float* %tmp11659, i64 1
+ %tmp11661 = getelementptr inbounds float, float* %tmp11660, i64 1
+ %tmp11662 = getelementptr inbounds float, float* %tmp11661, i64 1
+ %tmp11663 = getelementptr inbounds float, float* %tmp11662, i64 1
+ %tmp11664 = getelementptr inbounds float, float* %tmp11663, i64 1
+ %tmp11665 = getelementptr inbounds float, float* %tmp11664, i64 1
+ %tmp11666 = getelementptr inbounds float, float* %tmp11665, i64 1
+ %tmp11667 = getelementptr inbounds float, float* %tmp11666, i64 1
+ %tmp11668 = getelementptr inbounds float, float* %tmp11667, i64 1
+ %tmp11669 = getelementptr inbounds float, float* %tmp11668, i64 1
+ %tmp11670 = getelementptr inbounds float, float* %tmp11669, i64 1
+ %tmp11671 = getelementptr inbounds float, float* %tmp11670, i64 1
+ %tmp11672 = getelementptr inbounds float, float* %tmp11671, i64 1
+ %tmp11673 = getelementptr inbounds float, float* %tmp11672, i64 1
+ %tmp11674 = getelementptr inbounds float, float* %tmp11673, i64 1
+ %tmp11675 = getelementptr inbounds float, float* %tmp11674, i64 1
+ %tmp11676 = getelementptr inbounds float, float* %tmp11675, i64 1
+ %tmp11677 = getelementptr inbounds float, float* %tmp11676, i64 1
+ %tmp11678 = getelementptr inbounds float, float* %tmp11677, i64 1
+ %tmp11679 = getelementptr inbounds float, float* %tmp11678, i64 1
+ %tmp11680 = getelementptr inbounds float, float* %tmp11679, i64 1
+ %tmp11681 = getelementptr inbounds float, float* %tmp11680, i64 1
+ %tmp11682 = getelementptr inbounds float, float* %tmp11681, i64 1
+ %tmp11683 = getelementptr inbounds float, float* %tmp11682, i64 1
+ %tmp11684 = getelementptr inbounds float, float* %tmp11683, i64 1
+ %tmp11685 = getelementptr inbounds float, float* %tmp11684, i64 1
+ %tmp11686 = getelementptr inbounds float, float* %tmp11685, i64 1
+ %tmp11687 = getelementptr inbounds float, float* %tmp11686, i64 1
+ %tmp11688 = getelementptr inbounds float, float* %tmp11687, i64 1
+ %tmp11689 = getelementptr inbounds float, float* %tmp11688, i64 1
+ %tmp11690 = getelementptr inbounds float, float* %tmp11689, i64 1
+ %tmp11691 = getelementptr inbounds float, float* %tmp11690, i64 1
+ %tmp11692 = getelementptr inbounds float, float* %tmp11691, i64 1
+ %tmp11693 = getelementptr inbounds float, float* %tmp11692, i64 1
+ %tmp11694 = getelementptr inbounds float, float* %tmp11693, i64 1
+ %tmp11695 = getelementptr inbounds float, float* %tmp11694, i64 1
+ %tmp11696 = getelementptr inbounds float, float* %tmp11695, i64 1
+ %tmp11697 = getelementptr inbounds float, float* %tmp11696, i64 1
+ %tmp11698 = getelementptr inbounds float, float* %tmp11697, i64 1
+ %tmp11699 = getelementptr inbounds float, float* %tmp11698, i64 1
+ %tmp11700 = getelementptr inbounds float, float* %tmp11699, i64 1
+ %tmp11701 = getelementptr inbounds float, float* %tmp11700, i64 1
+ %tmp11702 = getelementptr inbounds float, float* %tmp11701, i64 1
+ %tmp11703 = getelementptr inbounds float, float* %tmp11702, i64 1
+ %tmp11704 = getelementptr inbounds float, float* %tmp11703, i64 1
+ %tmp11705 = getelementptr inbounds float, float* %tmp11704, i64 1
+ %tmp11706 = getelementptr inbounds float, float* %tmp11705, i64 1
+ %tmp11707 = getelementptr inbounds float, float* %tmp11706, i64 1
+ %tmp11708 = getelementptr inbounds float, float* %tmp11707, i64 1
+ %tmp11709 = getelementptr inbounds float, float* %tmp11708, i64 1
+ %tmp11710 = getelementptr inbounds float, float* %tmp11709, i64 1
+ %tmp11711 = getelementptr inbounds float, float* %tmp11710, i64 1
+ %tmp11712 = getelementptr inbounds float, float* %tmp11711, i64 1
+ %tmp11713 = getelementptr inbounds float, float* %tmp11712, i64 1
+ %tmp11714 = getelementptr inbounds float, float* %tmp11713, i64 1
+ %tmp11715 = getelementptr inbounds float, float* %tmp11714, i64 1
+ %tmp11716 = getelementptr inbounds float, float* %tmp11715, i64 1
+ %tmp11717 = getelementptr inbounds float, float* %tmp11716, i64 1
+ %tmp11718 = getelementptr inbounds float, float* %tmp11717, i64 1
+ %tmp11719 = getelementptr inbounds float, float* %tmp11718, i64 1
+ %tmp11720 = getelementptr inbounds float, float* %tmp11719, i64 1
+ %tmp11721 = getelementptr inbounds float, float* %tmp11720, i64 1
+ %tmp11722 = getelementptr inbounds float, float* %tmp11721, i64 1
+ %tmp11723 = getelementptr inbounds float, float* %tmp11722, i64 1
+ %tmp11724 = getelementptr inbounds float, float* %tmp11723, i64 1
+ %tmp11725 = getelementptr inbounds float, float* %tmp11724, i64 1
+ %tmp11726 = getelementptr inbounds float, float* %tmp11725, i64 1
+ %tmp11727 = getelementptr inbounds float, float* %tmp11726, i64 1
+ %tmp11728 = getelementptr inbounds float, float* %tmp11727, i64 1
+ %tmp11729 = getelementptr inbounds float, float* %tmp11728, i64 1
+ %tmp11730 = getelementptr inbounds float, float* %tmp11729, i64 1
+ %tmp11731 = getelementptr inbounds float, float* %tmp11730, i64 1
+ %tmp11732 = getelementptr inbounds float, float* %tmp11731, i64 1
+ %tmp11733 = getelementptr inbounds float, float* %tmp11732, i64 1
+ %tmp11734 = getelementptr inbounds float, float* %tmp11733, i64 1
+ %tmp11735 = getelementptr inbounds float, float* %tmp11734, i64 1
+ %tmp11736 = getelementptr inbounds float, float* %tmp11735, i64 1
+ %tmp11737 = getelementptr inbounds float, float* %tmp11736, i64 1
+ %tmp11738 = getelementptr inbounds float, float* %tmp11737, i64 1
+ %tmp11739 = getelementptr inbounds float, float* %tmp11738, i64 1
+ %tmp11740 = getelementptr inbounds float, float* %tmp11739, i64 1
+ %tmp11741 = getelementptr inbounds float, float* %tmp11740, i64 1
+ %tmp11742 = getelementptr inbounds float, float* %tmp11741, i64 1
+ %tmp11743 = getelementptr inbounds float, float* %tmp11742, i64 1
+ %tmp11744 = getelementptr inbounds float, float* %tmp11743, i64 1
+ %tmp11745 = getelementptr inbounds float, float* %tmp11744, i64 1
+ %tmp11746 = getelementptr inbounds float, float* %tmp11745, i64 1
+ %tmp11747 = getelementptr inbounds float, float* %tmp11746, i64 1
+ %tmp11748 = getelementptr inbounds float, float* %tmp11747, i64 1
+ %tmp11749 = getelementptr inbounds float, float* %tmp11748, i64 1
+ %tmp11750 = getelementptr inbounds float, float* %tmp11749, i64 1
+ %tmp11751 = getelementptr inbounds float, float* %tmp11750, i64 1
+ %tmp11752 = getelementptr inbounds float, float* %tmp11751, i64 1
+ %tmp11753 = getelementptr inbounds float, float* %tmp11752, i64 1
+ %tmp11754 = getelementptr inbounds float, float* %tmp11753, i64 1
+ %tmp11755 = getelementptr inbounds float, float* %tmp11754, i64 1
+ %tmp11756 = getelementptr inbounds float, float* %tmp11755, i64 1
+ %tmp11757 = getelementptr inbounds float, float* %tmp11756, i64 1
+ %tmp11758 = getelementptr inbounds float, float* %tmp11757, i64 1
+ %tmp11759 = getelementptr inbounds float, float* %tmp11758, i64 1
+ %tmp11760 = getelementptr inbounds float, float* %tmp11759, i64 1
+ %tmp11761 = getelementptr inbounds float, float* %tmp11760, i64 1
+ %tmp11762 = getelementptr inbounds float, float* %tmp11761, i64 1
+ %tmp11763 = getelementptr inbounds float, float* %tmp11762, i64 1
+ %tmp11764 = getelementptr inbounds float, float* %tmp11763, i64 1
+ %tmp11765 = getelementptr inbounds float, float* %tmp11764, i64 1
+ %tmp11766 = getelementptr inbounds float, float* %tmp11765, i64 1
+ %tmp11767 = getelementptr inbounds float, float* %tmp11766, i64 1
+ %tmp11768 = getelementptr inbounds float, float* %tmp11767, i64 1
+ %tmp11769 = getelementptr inbounds float, float* %tmp11768, i64 1
+ %tmp11770 = getelementptr inbounds float, float* %tmp11769, i64 1
+ %tmp11771 = getelementptr inbounds float, float* %tmp11770, i64 1
+ %tmp11772 = getelementptr inbounds float, float* %tmp11771, i64 1
+ %tmp11773 = getelementptr inbounds float, float* %tmp11772, i64 1
+ %tmp11774 = getelementptr inbounds float, float* %tmp11773, i64 1
+ %tmp11775 = getelementptr inbounds float, float* %tmp11774, i64 1
+ %tmp11776 = getelementptr inbounds float, float* %tmp11775, i64 1
+ %tmp11777 = getelementptr inbounds float, float* %tmp11776, i64 1
+ %tmp11778 = getelementptr inbounds float, float* %tmp11777, i64 1
+ %tmp11779 = getelementptr inbounds float, float* %tmp11778, i64 1
+ %tmp11780 = getelementptr inbounds float, float* %tmp11779, i64 1
+ %tmp11781 = getelementptr inbounds float, float* %tmp11780, i64 1
+ %tmp11782 = getelementptr inbounds float, float* %tmp11781, i64 1
+ %tmp11783 = getelementptr inbounds float, float* %tmp11782, i64 1
+ %tmp11784 = getelementptr inbounds float, float* %tmp11783, i64 1
+ %tmp11785 = getelementptr inbounds float, float* %tmp11784, i64 1
+ %tmp11786 = getelementptr inbounds float, float* %tmp11785, i64 1
+ %tmp11787 = getelementptr inbounds float, float* %tmp11786, i64 1
+ %tmp11788 = getelementptr inbounds float, float* %tmp11787, i64 1
+ %tmp11789 = getelementptr inbounds float, float* %tmp11788, i64 1
+ %tmp11790 = getelementptr inbounds float, float* %tmp11789, i64 1
+ %tmp11791 = getelementptr inbounds float, float* %tmp11790, i64 1
+ %tmp11792 = getelementptr inbounds float, float* %tmp11791, i64 1
+ %tmp11793 = getelementptr inbounds float, float* %tmp11792, i64 1
+ %tmp11794 = getelementptr inbounds float, float* %tmp11793, i64 1
+ %tmp11795 = getelementptr inbounds float, float* %tmp11794, i64 1
+ %tmp11796 = getelementptr inbounds float, float* %tmp11795, i64 1
+ %tmp11797 = getelementptr inbounds float, float* %tmp11796, i64 1
+ %tmp11798 = getelementptr inbounds float, float* %tmp11797, i64 1
+ %tmp11799 = getelementptr inbounds float, float* %tmp11798, i64 1
+ %tmp11800 = getelementptr inbounds float, float* %tmp11799, i64 1
+ %tmp11801 = getelementptr inbounds float, float* %tmp11800, i64 1
+ %tmp11802 = getelementptr inbounds float, float* %tmp11801, i64 1
+ %tmp11803 = getelementptr inbounds float, float* %tmp11802, i64 1
+ %tmp11804 = getelementptr inbounds float, float* %tmp11803, i64 1
+ %tmp11805 = getelementptr inbounds float, float* %tmp11804, i64 1
+ %tmp11806 = getelementptr inbounds float, float* %tmp11805, i64 1
+ %tmp11807 = getelementptr inbounds float, float* %tmp11806, i64 1
+ %tmp11808 = getelementptr inbounds float, float* %tmp11807, i64 1
+ %tmp11809 = getelementptr inbounds float, float* %tmp11808, i64 1
+ %tmp11810 = getelementptr inbounds float, float* %tmp11809, i64 1
+ %tmp11811 = getelementptr inbounds float, float* %tmp11810, i64 1
+ %tmp11812 = getelementptr inbounds float, float* %tmp11811, i64 1
+ %tmp11813 = getelementptr inbounds float, float* %tmp11812, i64 1
+ %tmp11814 = getelementptr inbounds float, float* %tmp11813, i64 1
+ %tmp11815 = getelementptr inbounds float, float* %tmp11814, i64 1
+ %tmp11816 = getelementptr inbounds float, float* %tmp11815, i64 1
+ %tmp11817 = getelementptr inbounds float, float* %tmp11816, i64 1
+ %tmp11818 = getelementptr inbounds float, float* %tmp11817, i64 1
+ %tmp11819 = getelementptr inbounds float, float* %tmp11818, i64 1
+ %tmp11820 = getelementptr inbounds float, float* %tmp11819, i64 1
+ %tmp11821 = getelementptr inbounds float, float* %tmp11820, i64 1
+ %tmp11822 = getelementptr inbounds float, float* %tmp11821, i64 1
+ %tmp11823 = getelementptr inbounds float, float* %tmp11822, i64 1
+ %tmp11824 = getelementptr inbounds float, float* %tmp11823, i64 1
+ %tmp11825 = getelementptr inbounds float, float* %tmp11824, i64 1
+ %tmp11826 = getelementptr inbounds float, float* %tmp11825, i64 1
+ %tmp11827 = getelementptr inbounds float, float* %tmp11826, i64 1
+ %tmp11828 = getelementptr inbounds float, float* %tmp11827, i64 1
+ %tmp11829 = getelementptr inbounds float, float* %tmp11828, i64 1
+ %tmp11830 = getelementptr inbounds float, float* %tmp11829, i64 1
+ %tmp11831 = getelementptr inbounds float, float* %tmp11830, i64 1
+ %tmp11832 = getelementptr inbounds float, float* %tmp11831, i64 1
+ %tmp11833 = getelementptr inbounds float, float* %tmp11832, i64 1
+ %tmp11834 = getelementptr inbounds float, float* %tmp11833, i64 1
+ %tmp11835 = getelementptr inbounds float, float* %tmp11834, i64 1
+ %tmp11836 = getelementptr inbounds float, float* %tmp11835, i64 1
+ %tmp11837 = getelementptr inbounds float, float* %tmp11836, i64 1
+ %tmp11838 = getelementptr inbounds float, float* %tmp11837, i64 1
+ %tmp11839 = getelementptr inbounds float, float* %tmp11838, i64 1
+ %tmp11840 = getelementptr inbounds float, float* %tmp11839, i64 1
+ %tmp11841 = getelementptr inbounds float, float* %tmp11840, i64 1
+ %tmp11842 = getelementptr inbounds float, float* %tmp11841, i64 1
+ %tmp11843 = getelementptr inbounds float, float* %tmp11842, i64 1
+ %tmp11844 = getelementptr inbounds float, float* %tmp11843, i64 1
+ %tmp11845 = getelementptr inbounds float, float* %tmp11844, i64 1
+ %tmp11846 = getelementptr inbounds float, float* %tmp11845, i64 1
+ %tmp11847 = getelementptr inbounds float, float* %tmp11846, i64 1
+ %tmp11848 = getelementptr inbounds float, float* %tmp11847, i64 1
+ %tmp11849 = getelementptr inbounds float, float* %tmp11848, i64 1
+ %tmp11850 = getelementptr inbounds float, float* %tmp11849, i64 1
+ %tmp11851 = getelementptr inbounds float, float* %tmp11850, i64 1
+ %tmp11852 = getelementptr inbounds float, float* %tmp11851, i64 1
+ %tmp11853 = getelementptr inbounds float, float* %tmp11852, i64 1
+ %tmp11854 = getelementptr inbounds float, float* %tmp11853, i64 1
+ %tmp11855 = getelementptr inbounds float, float* %tmp11854, i64 1
+ %tmp11856 = getelementptr inbounds float, float* %tmp11855, i64 1
+ %tmp11857 = getelementptr inbounds float, float* %tmp11856, i64 1
+ %tmp11858 = getelementptr inbounds float, float* %tmp11857, i64 1
+ %tmp11859 = getelementptr inbounds float, float* %tmp11858, i64 1
+ %tmp11860 = getelementptr inbounds float, float* %tmp11859, i64 1
+ %tmp11861 = getelementptr inbounds float, float* %tmp11860, i64 1
+ %tmp11862 = getelementptr inbounds float, float* %tmp11861, i64 1
+ %tmp11863 = getelementptr inbounds float, float* %tmp11862, i64 1
+ %tmp11864 = getelementptr inbounds float, float* %tmp11863, i64 1
+ %tmp11865 = getelementptr inbounds float, float* %tmp11864, i64 1
+ %tmp11866 = getelementptr inbounds float, float* %tmp11865, i64 1
+ %tmp11867 = getelementptr inbounds float, float* %tmp11866, i64 1
+ %tmp11868 = getelementptr inbounds float, float* %tmp11867, i64 1
+ %tmp11869 = getelementptr inbounds float, float* %tmp11868, i64 1
+ %tmp11870 = getelementptr inbounds float, float* %tmp11869, i64 1
+ %tmp11871 = getelementptr inbounds float, float* %tmp11870, i64 1
+ %tmp11872 = getelementptr inbounds float, float* %tmp11871, i64 1
+ %tmp11873 = getelementptr inbounds float, float* %tmp11872, i64 1
+ %tmp11874 = getelementptr inbounds float, float* %tmp11873, i64 1
+ %tmp11875 = getelementptr inbounds float, float* %tmp11874, i64 1
+ %tmp11876 = getelementptr inbounds float, float* %tmp11875, i64 1
+ %tmp11877 = getelementptr inbounds float, float* %tmp11876, i64 1
+ %tmp11878 = getelementptr inbounds float, float* %tmp11877, i64 1
+ %tmp11879 = getelementptr inbounds float, float* %tmp11878, i64 1
+ %tmp11880 = getelementptr inbounds float, float* %tmp11879, i64 1
+ %tmp11881 = getelementptr inbounds float, float* %tmp11880, i64 1
+ %tmp11882 = getelementptr inbounds float, float* %tmp11881, i64 1
+ %tmp11883 = getelementptr inbounds float, float* %tmp11882, i64 1
+ %tmp11884 = getelementptr inbounds float, float* %tmp11883, i64 1
+ %tmp11885 = getelementptr inbounds float, float* %tmp11884, i64 1
+ %tmp11886 = getelementptr inbounds float, float* %tmp11885, i64 1
+ %tmp11887 = getelementptr inbounds float, float* %tmp11886, i64 1
+ %tmp11888 = getelementptr inbounds float, float* %tmp11887, i64 1
+ %tmp11889 = getelementptr inbounds float, float* %tmp11888, i64 1
+ %tmp11890 = getelementptr inbounds float, float* %tmp11889, i64 1
+ %tmp11891 = getelementptr inbounds float, float* %tmp11890, i64 1
+ %tmp11892 = getelementptr inbounds float, float* %tmp11891, i64 1
+ %tmp11893 = getelementptr inbounds float, float* %tmp11892, i64 1
+ %tmp11894 = getelementptr inbounds float, float* %tmp11893, i64 1
+ %tmp11895 = getelementptr inbounds float, float* %tmp11894, i64 1
+ %tmp11896 = getelementptr inbounds float, float* %tmp11895, i64 1
+ %tmp11897 = getelementptr inbounds float, float* %tmp11896, i64 1
+ %tmp11898 = getelementptr inbounds float, float* %tmp11897, i64 1
+ %tmp11899 = getelementptr inbounds float, float* %tmp11898, i64 1
+ %tmp11900 = getelementptr inbounds float, float* %tmp11899, i64 1
+ %tmp11901 = getelementptr inbounds float, float* %tmp11900, i64 1
+ %tmp11902 = getelementptr inbounds float, float* %tmp11901, i64 1
+ %tmp11903 = getelementptr inbounds float, float* %tmp11902, i64 1
+ %tmp11904 = getelementptr inbounds float, float* %tmp11903, i64 1
+ %tmp11905 = getelementptr inbounds float, float* %tmp11904, i64 1
+ %tmp11906 = getelementptr inbounds float, float* %tmp11905, i64 1
+ %tmp11907 = getelementptr inbounds float, float* %tmp11906, i64 1
+ %tmp11908 = getelementptr inbounds float, float* %tmp11907, i64 1
+ %tmp11909 = getelementptr inbounds float, float* %tmp11908, i64 1
+ %tmp11910 = getelementptr inbounds float, float* %tmp11909, i64 1
+ %tmp11911 = getelementptr inbounds float, float* %tmp11910, i64 1
+ %tmp11912 = getelementptr inbounds float, float* %tmp11911, i64 1
+ %tmp11913 = getelementptr inbounds float, float* %tmp11912, i64 1
+ %tmp11914 = getelementptr inbounds float, float* %tmp11913, i64 1
+ %tmp11915 = getelementptr inbounds float, float* %tmp11914, i64 1
+ %tmp11916 = getelementptr inbounds float, float* %tmp11915, i64 1
+ %tmp11917 = getelementptr inbounds float, float* %tmp11916, i64 1
+ %tmp11918 = getelementptr inbounds float, float* %tmp11917, i64 1
+ %tmp11919 = getelementptr inbounds float, float* %tmp11918, i64 1
+ %tmp11920 = getelementptr inbounds float, float* %tmp11919, i64 1
+ %tmp11921 = getelementptr inbounds float, float* %tmp11920, i64 1
+ %tmp11922 = getelementptr inbounds float, float* %tmp11921, i64 1
+ %tmp11923 = getelementptr inbounds float, float* %tmp11922, i64 1
+ %tmp11924 = getelementptr inbounds float, float* %tmp11923, i64 1
+ %tmp11925 = getelementptr inbounds float, float* %tmp11924, i64 1
+ %tmp11926 = getelementptr inbounds float, float* %tmp11925, i64 1
+ %tmp11927 = getelementptr inbounds float, float* %tmp11926, i64 1
+ %tmp11928 = getelementptr inbounds float, float* %tmp11927, i64 1
+ %tmp11929 = getelementptr inbounds float, float* %tmp11928, i64 1
+ %tmp11930 = getelementptr inbounds float, float* %tmp11929, i64 1
+ %tmp11931 = getelementptr inbounds float, float* %tmp11930, i64 1
+ %tmp11932 = getelementptr inbounds float, float* %tmp11931, i64 1
+ %tmp11933 = getelementptr inbounds float, float* %tmp11932, i64 1
+ %tmp11934 = getelementptr inbounds float, float* %tmp11933, i64 1
+ %tmp11935 = getelementptr inbounds float, float* %tmp11934, i64 1
+ %tmp11936 = getelementptr inbounds float, float* %tmp11935, i64 1
+ %tmp11937 = getelementptr inbounds float, float* %tmp11936, i64 1
+ %tmp11938 = getelementptr inbounds float, float* %tmp11937, i64 1
+ %tmp11939 = getelementptr inbounds float, float* %tmp11938, i64 1
+ %tmp11940 = getelementptr inbounds float, float* %tmp11939, i64 1
+ %tmp11941 = getelementptr inbounds float, float* %tmp11940, i64 1
+ %tmp11942 = getelementptr inbounds float, float* %tmp11941, i64 1
+ %tmp11943 = getelementptr inbounds float, float* %tmp11942, i64 1
+ %tmp11944 = getelementptr inbounds float, float* %tmp11943, i64 1
+ %tmp11945 = getelementptr inbounds float, float* %tmp11944, i64 1
+ %tmp11946 = getelementptr inbounds float, float* %tmp11945, i64 1
+ %tmp11947 = getelementptr inbounds float, float* %tmp11946, i64 1
+ %tmp11948 = getelementptr inbounds float, float* %tmp11947, i64 1
+ %tmp11949 = getelementptr inbounds float, float* %tmp11948, i64 1
+ %tmp11950 = getelementptr inbounds float, float* %tmp11949, i64 1
+ %tmp11951 = getelementptr inbounds float, float* %tmp11950, i64 1
+ %tmp11952 = getelementptr inbounds float, float* %tmp11951, i64 1
+ %tmp11953 = getelementptr inbounds float, float* %tmp11952, i64 1
+ %tmp11954 = getelementptr inbounds float, float* %tmp11953, i64 1
+ %tmp11955 = getelementptr inbounds float, float* %tmp11954, i64 1
+ %tmp11956 = getelementptr inbounds float, float* %tmp11955, i64 1
+ %tmp11957 = getelementptr inbounds float, float* %tmp11956, i64 1
+ %tmp11958 = getelementptr inbounds float, float* %tmp11957, i64 1
+ %tmp11959 = getelementptr inbounds float, float* %tmp11958, i64 1
+ %tmp11960 = getelementptr inbounds float, float* %tmp11959, i64 1
+ %tmp11961 = getelementptr inbounds float, float* %tmp11960, i64 1
+ %tmp11962 = getelementptr inbounds float, float* %tmp11961, i64 1
+ %tmp11963 = getelementptr inbounds float, float* %tmp11962, i64 1
+ %tmp11964 = getelementptr inbounds float, float* %tmp11963, i64 1
+ %tmp11965 = getelementptr inbounds float, float* %tmp11964, i64 1
+ %tmp11966 = getelementptr inbounds float, float* %tmp11965, i64 1
+ %tmp11967 = getelementptr inbounds float, float* %tmp11966, i64 1
+ %tmp11968 = getelementptr inbounds float, float* %tmp11967, i64 1
+ %tmp11969 = getelementptr inbounds float, float* %tmp11968, i64 1
+ %tmp11970 = getelementptr inbounds float, float* %tmp11969, i64 1
+ %tmp11971 = getelementptr inbounds float, float* %tmp11970, i64 1
+ %tmp11972 = getelementptr inbounds float, float* %tmp11971, i64 1
+ %tmp11973 = getelementptr inbounds float, float* %tmp11972, i64 1
+ %tmp11974 = getelementptr inbounds float, float* %tmp11973, i64 1
+ %tmp11975 = getelementptr inbounds float, float* %tmp11974, i64 1
+ %tmp11976 = getelementptr inbounds float, float* %tmp11975, i64 1
+ %tmp11977 = getelementptr inbounds float, float* %tmp11976, i64 1
+ %tmp11978 = getelementptr inbounds float, float* %tmp11977, i64 1
+ %tmp11979 = getelementptr inbounds float, float* %tmp11978, i64 1
+ %tmp11980 = getelementptr inbounds float, float* %tmp11979, i64 1
+ %tmp11981 = getelementptr inbounds float, float* %tmp11980, i64 1
+ %tmp11982 = getelementptr inbounds float, float* %tmp11981, i64 1
+ %tmp11983 = getelementptr inbounds float, float* %tmp11982, i64 1
+ %tmp11984 = getelementptr inbounds float, float* %tmp11983, i64 1
+ %tmp11985 = getelementptr inbounds float, float* %tmp11984, i64 1
+ %tmp11986 = getelementptr inbounds float, float* %tmp11985, i64 1
+ %tmp11987 = getelementptr inbounds float, float* %tmp11986, i64 1
+ %tmp11988 = getelementptr inbounds float, float* %tmp11987, i64 1
+ %tmp11989 = getelementptr inbounds float, float* %tmp11988, i64 1
+ %tmp11990 = getelementptr inbounds float, float* %tmp11989, i64 1
+ %tmp11991 = getelementptr inbounds float, float* %tmp11990, i64 1
+ %tmp11992 = getelementptr inbounds float, float* %tmp11991, i64 1
+ %tmp11993 = getelementptr inbounds float, float* %tmp11992, i64 1
+ %tmp11994 = getelementptr inbounds float, float* %tmp11993, i64 1
+ %tmp11995 = getelementptr inbounds float, float* %tmp11994, i64 1
+ %tmp11996 = getelementptr inbounds float, float* %tmp11995, i64 1
+ %tmp11997 = getelementptr inbounds float, float* %tmp11996, i64 1
+ %tmp11998 = getelementptr inbounds float, float* %tmp11997, i64 1
+ %tmp11999 = getelementptr inbounds float, float* %tmp11998, i64 1
+ %tmp12000 = getelementptr inbounds float, float* %tmp11999, i64 1
+ %tmp12001 = getelementptr inbounds float, float* %tmp12000, i64 1
+ %tmp12002 = getelementptr inbounds float, float* %tmp12001, i64 1
+ %tmp12003 = getelementptr inbounds float, float* %tmp12002, i64 1
+ %tmp12004 = getelementptr inbounds float, float* %tmp12003, i64 1
+ %tmp12005 = getelementptr inbounds float, float* %tmp12004, i64 1
+ %tmp12006 = getelementptr inbounds float, float* %tmp12005, i64 1
+ %tmp12007 = getelementptr inbounds float, float* %tmp12006, i64 1
+ %tmp12008 = getelementptr inbounds float, float* %tmp12007, i64 1
+ %tmp12009 = getelementptr inbounds float, float* %tmp12008, i64 1
+ %tmp12010 = getelementptr inbounds float, float* %tmp12009, i64 1
+ %tmp12011 = getelementptr inbounds float, float* %tmp12010, i64 1
+ %tmp12012 = getelementptr inbounds float, float* %tmp12011, i64 1
+ %tmp12013 = getelementptr inbounds float, float* %tmp12012, i64 1
+ %tmp12014 = getelementptr inbounds float, float* %tmp12013, i64 1
+ %tmp12015 = getelementptr inbounds float, float* %tmp12014, i64 1
+ %tmp12016 = getelementptr inbounds float, float* %tmp12015, i64 1
+ %tmp12017 = getelementptr inbounds float, float* %tmp12016, i64 1
+ %tmp12018 = getelementptr inbounds float, float* %tmp12017, i64 1
+ %tmp12019 = getelementptr inbounds float, float* %tmp12018, i64 1
+ %tmp12020 = getelementptr inbounds float, float* %tmp12019, i64 1
+ %tmp12021 = getelementptr inbounds float, float* %tmp12020, i64 1
+ %tmp12022 = getelementptr inbounds float, float* %tmp12021, i64 1
+ %tmp12023 = getelementptr inbounds float, float* %tmp12022, i64 1
+ %tmp12024 = getelementptr inbounds float, float* %tmp12023, i64 1
+ %tmp12025 = getelementptr inbounds float, float* %tmp12024, i64 1
+ %tmp12026 = getelementptr inbounds float, float* %tmp12025, i64 1
+ %tmp12027 = getelementptr inbounds float, float* %tmp12026, i64 1
+ %tmp12028 = getelementptr inbounds float, float* %tmp12027, i64 1
+ %tmp12029 = getelementptr inbounds float, float* %tmp12028, i64 1
+ %tmp12030 = getelementptr inbounds float, float* %tmp12029, i64 1
+ %tmp12031 = getelementptr inbounds float, float* %tmp12030, i64 1
+ %tmp12032 = getelementptr inbounds float, float* %tmp12031, i64 1
+ %tmp12033 = getelementptr inbounds float, float* %tmp12032, i64 1
+ %tmp12034 = getelementptr inbounds float, float* %tmp12033, i64 1
+ %tmp12035 = getelementptr inbounds float, float* %tmp12034, i64 1
+ %tmp12036 = getelementptr inbounds float, float* %tmp12035, i64 1
+ %tmp12037 = getelementptr inbounds float, float* %tmp12036, i64 1
+ %tmp12038 = getelementptr inbounds float, float* %tmp12037, i64 1
+ %tmp12039 = getelementptr inbounds float, float* %tmp12038, i64 1
+ %tmp12040 = getelementptr inbounds float, float* %tmp12039, i64 1
+ %tmp12041 = getelementptr inbounds float, float* %tmp12040, i64 1
+ %tmp12042 = getelementptr inbounds float, float* %tmp12041, i64 1
+ %tmp12043 = getelementptr inbounds float, float* %tmp12042, i64 1
+ %tmp12044 = getelementptr inbounds float, float* %tmp12043, i64 1
+ %tmp12045 = getelementptr inbounds float, float* %tmp12044, i64 1
+ %tmp12046 = getelementptr inbounds float, float* %tmp12045, i64 1
+ %tmp12047 = getelementptr inbounds float, float* %tmp12046, i64 1
+ %tmp12048 = getelementptr inbounds float, float* %tmp12047, i64 1
+ %tmp12049 = getelementptr inbounds float, float* %tmp12048, i64 1
+ %tmp12050 = getelementptr inbounds float, float* %tmp12049, i64 1
+ %tmp12051 = getelementptr inbounds float, float* %tmp12050, i64 1
+ %tmp12052 = getelementptr inbounds float, float* %tmp12051, i64 1
+ %tmp12053 = getelementptr inbounds float, float* %tmp12052, i64 1
+ %tmp12054 = getelementptr inbounds float, float* %tmp12053, i64 1
+ %tmp12055 = getelementptr inbounds float, float* %tmp12054, i64 1
+ %tmp12056 = getelementptr inbounds float, float* %tmp12055, i64 1
+ %tmp12057 = getelementptr inbounds float, float* %tmp12056, i64 1
+ %tmp12058 = getelementptr inbounds float, float* %tmp12057, i64 1
+ %tmp12059 = getelementptr inbounds float, float* %tmp12058, i64 1
+ %tmp12060 = getelementptr inbounds float, float* %tmp12059, i64 1
+ %tmp12061 = getelementptr inbounds float, float* %tmp12060, i64 1
+ %tmp12062 = getelementptr inbounds float, float* %tmp12061, i64 1
+ %tmp12063 = getelementptr inbounds float, float* %tmp12062, i64 1
+ %tmp12064 = getelementptr inbounds float, float* %tmp12063, i64 1
+ %tmp12065 = getelementptr inbounds float, float* %tmp12064, i64 1
+ %tmp12066 = getelementptr inbounds float, float* %tmp12065, i64 1
+ %tmp12067 = getelementptr inbounds float, float* %tmp12066, i64 1
+ %tmp12068 = getelementptr inbounds float, float* %tmp12067, i64 1
+ %tmp12069 = getelementptr inbounds float, float* %tmp12068, i64 1
+ %tmp12070 = getelementptr inbounds float, float* %tmp12069, i64 1
+ %tmp12071 = getelementptr inbounds float, float* %tmp12070, i64 1
+ %tmp12072 = getelementptr inbounds float, float* %tmp12071, i64 1
+ %tmp12073 = getelementptr inbounds float, float* %tmp12072, i64 1
+ %tmp12074 = getelementptr inbounds float, float* %tmp12073, i64 1
+ %tmp12075 = getelementptr inbounds float, float* %tmp12074, i64 1
+ %tmp12076 = getelementptr inbounds float, float* %tmp12075, i64 1
+ %tmp12077 = getelementptr inbounds float, float* %tmp12076, i64 1
+ %tmp12078 = getelementptr inbounds float, float* %tmp12077, i64 1
+ %tmp12079 = getelementptr inbounds float, float* %tmp12078, i64 1
+ %tmp12080 = getelementptr inbounds float, float* %tmp12079, i64 1
+ %tmp12081 = getelementptr inbounds float, float* %tmp12080, i64 1
+ %tmp12082 = getelementptr inbounds float, float* %tmp12081, i64 1
+ %tmp12083 = getelementptr inbounds float, float* %tmp12082, i64 1
+ %tmp12084 = getelementptr inbounds float, float* %tmp12083, i64 1
+ %tmp12085 = getelementptr inbounds float, float* %tmp12084, i64 1
+ %tmp12086 = getelementptr inbounds float, float* %tmp12085, i64 1
+ %tmp12087 = getelementptr inbounds float, float* %tmp12086, i64 1
+ %tmp12088 = getelementptr inbounds float, float* %tmp12087, i64 1
+ %tmp12089 = getelementptr inbounds float, float* %tmp12088, i64 1
+ %tmp12090 = getelementptr inbounds float, float* %tmp12089, i64 1
+ %tmp12091 = getelementptr inbounds float, float* %tmp12090, i64 1
+ %tmp12092 = getelementptr inbounds float, float* %tmp12091, i64 1
+ %tmp12093 = getelementptr inbounds float, float* %tmp12092, i64 1
+ %tmp12094 = getelementptr inbounds float, float* %tmp12093, i64 1
+ %tmp12095 = getelementptr inbounds float, float* %tmp12094, i64 1
+ %tmp12096 = getelementptr inbounds float, float* %tmp12095, i64 1
+ %tmp12097 = getelementptr inbounds float, float* %tmp12096, i64 1
+ %tmp12098 = getelementptr inbounds float, float* %tmp12097, i64 1
+ %tmp12099 = getelementptr inbounds float, float* %tmp12098, i64 1
+ %tmp12100 = getelementptr inbounds float, float* %tmp12099, i64 1
+ %tmp12101 = getelementptr inbounds float, float* %tmp12100, i64 1
+ %tmp12102 = getelementptr inbounds float, float* %tmp12101, i64 1
+ %tmp12103 = getelementptr inbounds float, float* %tmp12102, i64 1
+ %tmp12104 = getelementptr inbounds float, float* %tmp12103, i64 1
+ %tmp12105 = getelementptr inbounds float, float* %tmp12104, i64 1
+ %tmp12106 = getelementptr inbounds float, float* %tmp12105, i64 1
+ %tmp12107 = getelementptr inbounds float, float* %tmp12106, i64 1
+ %tmp12108 = getelementptr inbounds float, float* %tmp12107, i64 1
+ %tmp12109 = getelementptr inbounds float, float* %tmp12108, i64 1
+ %tmp12110 = getelementptr inbounds float, float* %tmp12109, i64 1
+ %tmp12111 = getelementptr inbounds float, float* %tmp12110, i64 1
+ %tmp12112 = getelementptr inbounds float, float* %tmp12111, i64 1
+ %tmp12113 = getelementptr inbounds float, float* %tmp12112, i64 1
+ %tmp12114 = getelementptr inbounds float, float* %tmp12113, i64 1
+ %tmp12115 = getelementptr inbounds float, float* %tmp12114, i64 1
+ %tmp12116 = getelementptr inbounds float, float* %tmp12115, i64 1
+ %tmp12117 = getelementptr inbounds float, float* %tmp12116, i64 1
+ %tmp12118 = getelementptr inbounds float, float* %tmp12117, i64 1
+ %tmp12119 = getelementptr inbounds float, float* %tmp12118, i64 1
+ %tmp12120 = getelementptr inbounds float, float* %tmp12119, i64 1
+ %tmp12121 = getelementptr inbounds float, float* %tmp12120, i64 1
+ %tmp12122 = getelementptr inbounds float, float* %tmp12121, i64 1
+ %tmp12123 = getelementptr inbounds float, float* %tmp12122, i64 1
+ %tmp12124 = getelementptr inbounds float, float* %tmp12123, i64 1
+ %tmp12125 = getelementptr inbounds float, float* %tmp12124, i64 1
+ %tmp12126 = getelementptr inbounds float, float* %tmp12125, i64 1
+ %tmp12127 = getelementptr inbounds float, float* %tmp12126, i64 1
+ %tmp12128 = getelementptr inbounds float, float* %tmp12127, i64 1
+ %tmp12129 = getelementptr inbounds float, float* %tmp12128, i64 1
+ %tmp12130 = getelementptr inbounds float, float* %tmp12129, i64 1
+ %tmp12131 = getelementptr inbounds float, float* %tmp12130, i64 1
+ %tmp12132 = getelementptr inbounds float, float* %tmp12131, i64 1
+ %tmp12133 = getelementptr inbounds float, float* %tmp12132, i64 1
+ %tmp12134 = getelementptr inbounds float, float* %tmp12133, i64 1
+ %tmp12135 = getelementptr inbounds float, float* %tmp12134, i64 1
+ %tmp12136 = getelementptr inbounds float, float* %tmp12135, i64 1
+ %tmp12137 = getelementptr inbounds float, float* %tmp12136, i64 1
+ %tmp12138 = getelementptr inbounds float, float* %tmp12137, i64 1
+ %tmp12139 = getelementptr inbounds float, float* %tmp12138, i64 1
+ %tmp12140 = getelementptr inbounds float, float* %tmp12139, i64 1
+ %tmp12141 = getelementptr inbounds float, float* %tmp12140, i64 1
+ %tmp12142 = getelementptr inbounds float, float* %tmp12141, i64 1
+ %tmp12143 = getelementptr inbounds float, float* %tmp12142, i64 1
+ %tmp12144 = getelementptr inbounds float, float* %tmp12143, i64 1
+ %tmp12145 = getelementptr inbounds float, float* %tmp12144, i64 1
+ %tmp12146 = getelementptr inbounds float, float* %tmp12145, i64 1
+ %tmp12147 = getelementptr inbounds float, float* %tmp12146, i64 1
+ %tmp12148 = getelementptr inbounds float, float* %tmp12147, i64 1
+ %tmp12149 = getelementptr inbounds float, float* %tmp12148, i64 1
+ %tmp12150 = getelementptr inbounds float, float* %tmp12149, i64 1
+ %tmp12151 = getelementptr inbounds float, float* %tmp12150, i64 1
+ %tmp12152 = getelementptr inbounds float, float* %tmp12151, i64 1
+ %tmp12153 = getelementptr inbounds float, float* %tmp12152, i64 1
+ %tmp12154 = getelementptr inbounds float, float* %tmp12153, i64 1
+ %tmp12155 = getelementptr inbounds float, float* %tmp12154, i64 1
+ %tmp12156 = getelementptr inbounds float, float* %tmp12155, i64 1
+ %tmp12157 = getelementptr inbounds float, float* %tmp12156, i64 1
+ %tmp12158 = getelementptr inbounds float, float* %tmp12157, i64 1
+ %tmp12159 = getelementptr inbounds float, float* %tmp12158, i64 1
+ %tmp12160 = getelementptr inbounds float, float* %tmp12159, i64 1
+ %tmp12161 = getelementptr inbounds float, float* %tmp12160, i64 1
+ %tmp12162 = getelementptr inbounds float, float* %tmp12161, i64 1
+ %tmp12163 = getelementptr inbounds float, float* %tmp12162, i64 1
+ %tmp12164 = getelementptr inbounds float, float* %tmp12163, i64 1
+ %tmp12165 = getelementptr inbounds float, float* %tmp12164, i64 1
+ %tmp12166 = getelementptr inbounds float, float* %tmp12165, i64 1
+ %tmp12167 = getelementptr inbounds float, float* %tmp12166, i64 1
+ %tmp12168 = getelementptr inbounds float, float* %tmp12167, i64 1
+ %tmp12169 = getelementptr inbounds float, float* %tmp12168, i64 1
+ %tmp12170 = getelementptr inbounds float, float* %tmp12169, i64 1
+ %tmp12171 = getelementptr inbounds float, float* %tmp12170, i64 1
+ %tmp12172 = getelementptr inbounds float, float* %tmp12171, i64 1
+ %tmp12173 = getelementptr inbounds float, float* %tmp12172, i64 1
+ %tmp12174 = getelementptr inbounds float, float* %tmp12173, i64 1
+ %tmp12175 = getelementptr inbounds float, float* %tmp12174, i64 1
+ %tmp12176 = getelementptr inbounds float, float* %tmp12175, i64 1
+ %tmp12177 = getelementptr inbounds float, float* %tmp12176, i64 1
+ %tmp12178 = getelementptr inbounds float, float* %tmp12177, i64 1
+ %tmp12179 = getelementptr inbounds float, float* %tmp12178, i64 1
+ %tmp12180 = getelementptr inbounds float, float* %tmp12179, i64 1
+ %tmp12181 = getelementptr inbounds float, float* %tmp12180, i64 1
+ %tmp12182 = getelementptr inbounds float, float* %tmp12181, i64 1
+ %tmp12183 = getelementptr inbounds float, float* %tmp12182, i64 1
+ %tmp12184 = getelementptr inbounds float, float* %tmp12183, i64 1
+ %tmp12185 = getelementptr inbounds float, float* %tmp12184, i64 1
+ %tmp12186 = getelementptr inbounds float, float* %tmp12185, i64 1
+ %tmp12187 = getelementptr inbounds float, float* %tmp12186, i64 1
+ %tmp12188 = getelementptr inbounds float, float* %tmp12187, i64 1
+ %tmp12189 = getelementptr inbounds float, float* %tmp12188, i64 1
+ %tmp12190 = getelementptr inbounds float, float* %tmp12189, i64 1
+ %tmp12191 = getelementptr inbounds float, float* %tmp12190, i64 1
+ %tmp12192 = getelementptr inbounds float, float* %tmp12191, i64 1
+ %tmp12193 = getelementptr inbounds float, float* %tmp12192, i64 1
+ %tmp12194 = getelementptr inbounds float, float* %tmp12193, i64 1
+ %tmp12195 = getelementptr inbounds float, float* %tmp12194, i64 1
+ %tmp12196 = getelementptr inbounds float, float* %tmp12195, i64 1
+ %tmp12197 = getelementptr inbounds float, float* %tmp12196, i64 1
+ %tmp12198 = getelementptr inbounds float, float* %tmp12197, i64 1
+ %tmp12199 = getelementptr inbounds float, float* %tmp12198, i64 1
+ %tmp12200 = getelementptr inbounds float, float* %tmp12199, i64 1
+ %tmp12201 = getelementptr inbounds float, float* %tmp12200, i64 1
+ %tmp12202 = getelementptr inbounds float, float* %tmp12201, i64 1
+ %tmp12203 = getelementptr inbounds float, float* %tmp12202, i64 1
+ %tmp12204 = getelementptr inbounds float, float* %tmp12203, i64 1
+ %tmp12205 = getelementptr inbounds float, float* %tmp12204, i64 1
+ %tmp12206 = getelementptr inbounds float, float* %tmp12205, i64 1
+ %tmp12207 = getelementptr inbounds float, float* %tmp12206, i64 1
+ %tmp12208 = getelementptr inbounds float, float* %tmp12207, i64 1
+ %tmp12209 = getelementptr inbounds float, float* %tmp12208, i64 1
+ %tmp12210 = getelementptr inbounds float, float* %tmp12209, i64 1
+ %tmp12211 = getelementptr inbounds float, float* %tmp12210, i64 1
+ %tmp12212 = getelementptr inbounds float, float* %tmp12211, i64 1
+ %tmp12213 = getelementptr inbounds float, float* %tmp12212, i64 1
+ %tmp12214 = getelementptr inbounds float, float* %tmp12213, i64 1
+ %tmp12215 = getelementptr inbounds float, float* %tmp12214, i64 1
+ %tmp12216 = getelementptr inbounds float, float* %tmp12215, i64 1
+ %tmp12217 = getelementptr inbounds float, float* %tmp12216, i64 1
+ %tmp12218 = getelementptr inbounds float, float* %tmp12217, i64 1
+ %tmp12219 = getelementptr inbounds float, float* %tmp12218, i64 1
+ %tmp12220 = getelementptr inbounds float, float* %tmp12219, i64 1
+ %tmp12221 = getelementptr inbounds float, float* %tmp12220, i64 1
+ %tmp12222 = getelementptr inbounds float, float* %tmp12221, i64 1
+ %tmp12223 = getelementptr inbounds float, float* %tmp12222, i64 1
+ %tmp12224 = getelementptr inbounds float, float* %tmp12223, i64 1
+ %tmp12225 = getelementptr inbounds float, float* %tmp12224, i64 1
+ %tmp12226 = getelementptr inbounds float, float* %tmp12225, i64 1
+ %tmp12227 = getelementptr inbounds float, float* %tmp12226, i64 1
+ %tmp12228 = getelementptr inbounds float, float* %tmp12227, i64 1
+ %tmp12229 = getelementptr inbounds float, float* %tmp12228, i64 1
+ %tmp12230 = getelementptr inbounds float, float* %tmp12229, i64 1
+ %tmp12231 = getelementptr inbounds float, float* %tmp12230, i64 1
+ %tmp12232 = getelementptr inbounds float, float* %tmp12231, i64 1
+ %tmp12233 = getelementptr inbounds float, float* %tmp12232, i64 1
+ %tmp12234 = getelementptr inbounds float, float* %tmp12233, i64 1
+ %tmp12235 = getelementptr inbounds float, float* %tmp12234, i64 1
+ %tmp12236 = getelementptr inbounds float, float* %tmp12235, i64 1
+ %tmp12237 = getelementptr inbounds float, float* %tmp12236, i64 1
+ %tmp12238 = getelementptr inbounds float, float* %tmp12237, i64 1
+ %tmp12239 = getelementptr inbounds float, float* %tmp12238, i64 1
+ %tmp12240 = getelementptr inbounds float, float* %tmp12239, i64 1
+ %tmp12241 = getelementptr inbounds float, float* %tmp12240, i64 1
+ %tmp12242 = getelementptr inbounds float, float* %tmp12241, i64 1
+ %tmp12243 = getelementptr inbounds float, float* %tmp12242, i64 1
+ %tmp12244 = getelementptr inbounds float, float* %tmp12243, i64 1
+ %tmp12245 = getelementptr inbounds float, float* %tmp12244, i64 1
+ %tmp12246 = getelementptr inbounds float, float* %tmp12245, i64 1
+ %tmp12247 = getelementptr inbounds float, float* %tmp12246, i64 1
+ %tmp12248 = getelementptr inbounds float, float* %tmp12247, i64 1
+ %tmp12249 = getelementptr inbounds float, float* %tmp12248, i64 1
+ %tmp12250 = getelementptr inbounds float, float* %tmp12249, i64 1
+ %tmp12251 = getelementptr inbounds float, float* %tmp12250, i64 1
+ %tmp12252 = getelementptr inbounds float, float* %tmp12251, i64 1
+ %tmp12253 = getelementptr inbounds float, float* %tmp12252, i64 1
+ %tmp12254 = getelementptr inbounds float, float* %tmp12253, i64 1
+ %tmp12255 = getelementptr inbounds float, float* %tmp12254, i64 1
+ %tmp12256 = getelementptr inbounds float, float* %tmp12255, i64 1
+ %tmp12257 = getelementptr inbounds float, float* %tmp12256, i64 1
+ %tmp12258 = getelementptr inbounds float, float* %tmp12257, i64 1
+ %tmp12259 = getelementptr inbounds float, float* %tmp12258, i64 1
+ %tmp12260 = getelementptr inbounds float, float* %tmp12259, i64 1
+ %tmp12261 = getelementptr inbounds float, float* %tmp12260, i64 1
+ %tmp12262 = getelementptr inbounds float, float* %tmp12261, i64 1
+ %tmp12263 = getelementptr inbounds float, float* %tmp12262, i64 1
+ %tmp12264 = getelementptr inbounds float, float* %tmp12263, i64 1
+ %tmp12265 = getelementptr inbounds float, float* %tmp12264, i64 1
+ %tmp12266 = getelementptr inbounds float, float* %tmp12265, i64 1
+ %tmp12267 = getelementptr inbounds float, float* %tmp12266, i64 1
+ %tmp12268 = getelementptr inbounds float, float* %tmp12267, i64 1
+ %tmp12269 = getelementptr inbounds float, float* %tmp12268, i64 1
+ %tmp12270 = getelementptr inbounds float, float* %tmp12269, i64 1
+ %tmp12271 = getelementptr inbounds float, float* %tmp12270, i64 1
+ %tmp12272 = getelementptr inbounds float, float* %tmp12271, i64 1
+ %tmp12273 = getelementptr inbounds float, float* %tmp12272, i64 1
+ %tmp12274 = getelementptr inbounds float, float* %tmp12273, i64 1
+ %tmp12275 = getelementptr inbounds float, float* %tmp12274, i64 1
+ %tmp12276 = getelementptr inbounds float, float* %tmp12275, i64 1
+ %tmp12277 = getelementptr inbounds float, float* %tmp12276, i64 1
+ %tmp12278 = getelementptr inbounds float, float* %tmp12277, i64 1
+ %tmp12279 = getelementptr inbounds float, float* %tmp12278, i64 1
+ %tmp12280 = getelementptr inbounds float, float* %tmp12279, i64 1
+ %tmp12281 = getelementptr inbounds float, float* %tmp12280, i64 1
+ %tmp12282 = getelementptr inbounds float, float* %tmp12281, i64 1
+ %tmp12283 = getelementptr inbounds float, float* %tmp12282, i64 1
+ %tmp12284 = getelementptr inbounds float, float* %tmp12283, i64 1
+ %tmp12285 = getelementptr inbounds float, float* %tmp12284, i64 1
+ %tmp12286 = getelementptr inbounds float, float* %tmp12285, i64 1
+ %tmp12287 = getelementptr inbounds float, float* %tmp12286, i64 1
+ %tmp12288 = getelementptr inbounds float, float* %tmp12287, i64 1
+ %tmp12289 = getelementptr inbounds float, float* %tmp12288, i64 1
+ %tmp12290 = getelementptr inbounds float, float* %tmp12289, i64 1
+ %tmp12291 = getelementptr inbounds float, float* %tmp12290, i64 1
+ %tmp12292 = getelementptr inbounds float, float* %tmp12291, i64 1
+ %tmp12293 = getelementptr inbounds float, float* %tmp12292, i64 1
+ %tmp12294 = getelementptr inbounds float, float* %tmp12293, i64 1
+ %tmp12295 = getelementptr inbounds float, float* %tmp12294, i64 1
+ %tmp12296 = getelementptr inbounds float, float* %tmp12295, i64 1
+ %tmp12297 = getelementptr inbounds float, float* %tmp12296, i64 1
+ %tmp12298 = getelementptr inbounds float, float* %tmp12297, i64 1
+ %tmp12299 = getelementptr inbounds float, float* %tmp12298, i64 1
+ %tmp12300 = getelementptr inbounds float, float* %tmp12299, i64 1
+ %tmp12301 = getelementptr inbounds float, float* %tmp12300, i64 1
+ %tmp12302 = getelementptr inbounds float, float* %tmp12301, i64 1
+ %tmp12303 = getelementptr inbounds float, float* %tmp12302, i64 1
+ %tmp12304 = getelementptr inbounds float, float* %tmp12303, i64 1
+ %tmp12305 = getelementptr inbounds float, float* %tmp12304, i64 1
+ %tmp12306 = getelementptr inbounds float, float* %tmp12305, i64 1
+ %tmp12307 = getelementptr inbounds float, float* %tmp12306, i64 1
+ %tmp12308 = getelementptr inbounds float, float* %tmp12307, i64 1
+ %tmp12309 = getelementptr inbounds float, float* %tmp12308, i64 1
+ %tmp12310 = getelementptr inbounds float, float* %tmp12309, i64 1
+ %tmp12311 = getelementptr inbounds float, float* %tmp12310, i64 1
+ %tmp12312 = getelementptr inbounds float, float* %tmp12311, i64 1
+ %tmp12313 = getelementptr inbounds float, float* %tmp12312, i64 1
+ %tmp12314 = getelementptr inbounds float, float* %tmp12313, i64 1
+ %tmp12315 = getelementptr inbounds float, float* %tmp12314, i64 1
+ %tmp12316 = getelementptr inbounds float, float* %tmp12315, i64 1
+ %tmp12317 = getelementptr inbounds float, float* %tmp12316, i64 1
+ %tmp12318 = getelementptr inbounds float, float* %tmp12317, i64 1
+ %tmp12319 = getelementptr inbounds float, float* %tmp12318, i64 1
+ %tmp12320 = getelementptr inbounds float, float* %tmp12319, i64 1
+ %tmp12321 = getelementptr inbounds float, float* %tmp12320, i64 1
+ %tmp12322 = getelementptr inbounds float, float* %tmp12321, i64 1
+ %tmp12323 = getelementptr inbounds float, float* %tmp12322, i64 1
+ %tmp12324 = getelementptr inbounds float, float* %tmp12323, i64 1
+ %tmp12325 = getelementptr inbounds float, float* %tmp12324, i64 1
+ %tmp12326 = getelementptr inbounds float, float* %tmp12325, i64 1
+ %tmp12327 = getelementptr inbounds float, float* %tmp12326, i64 1
+ %tmp12328 = getelementptr inbounds float, float* %tmp12327, i64 1
+ %tmp12329 = getelementptr inbounds float, float* %tmp12328, i64 1
+ %tmp12330 = getelementptr inbounds float, float* %tmp12329, i64 1
+ %tmp12331 = getelementptr inbounds float, float* %tmp12330, i64 1
+ %tmp12332 = getelementptr inbounds float, float* %tmp12331, i64 1
+ %tmp12333 = getelementptr inbounds float, float* %tmp12332, i64 1
+ %tmp12334 = getelementptr inbounds float, float* %tmp12333, i64 1
+ %tmp12335 = getelementptr inbounds float, float* %tmp12334, i64 1
+ %tmp12336 = getelementptr inbounds float, float* %tmp12335, i64 1
+ %tmp12337 = getelementptr inbounds float, float* %tmp12336, i64 1
+ %tmp12338 = getelementptr inbounds float, float* %tmp12337, i64 1
+ %tmp12339 = getelementptr inbounds float, float* %tmp12338, i64 1
+ %tmp12340 = getelementptr inbounds float, float* %tmp12339, i64 1
+ %tmp12341 = getelementptr inbounds float, float* %tmp12340, i64 1
+ %tmp12342 = getelementptr inbounds float, float* %tmp12341, i64 1
+ %tmp12343 = getelementptr inbounds float, float* %tmp12342, i64 1
+ %tmp12344 = getelementptr inbounds float, float* %tmp12343, i64 1
+ %tmp12345 = getelementptr inbounds float, float* %tmp12344, i64 1
+ %tmp12346 = getelementptr inbounds float, float* %tmp12345, i64 1
+ %tmp12347 = getelementptr inbounds float, float* %tmp12346, i64 1
+ %tmp12348 = getelementptr inbounds float, float* %tmp12347, i64 1
+ %tmp12349 = getelementptr inbounds float, float* %tmp12348, i64 1
+ %tmp12350 = getelementptr inbounds float, float* %tmp12349, i64 1
+ %tmp12351 = getelementptr inbounds float, float* %tmp12350, i64 1
+ %tmp12352 = getelementptr inbounds float, float* %tmp12351, i64 1
+ %tmp12353 = getelementptr inbounds float, float* %tmp12352, i64 1
+ %tmp12354 = getelementptr inbounds float, float* %tmp12353, i64 1
+ %tmp12355 = getelementptr inbounds float, float* %tmp12354, i64 1
+ %tmp12356 = getelementptr inbounds float, float* %tmp12355, i64 1
+ %tmp12357 = getelementptr inbounds float, float* %tmp12356, i64 1
+ %tmp12358 = getelementptr inbounds float, float* %tmp12357, i64 1
+ %tmp12359 = getelementptr inbounds float, float* %tmp12358, i64 1
+ %tmp12360 = getelementptr inbounds float, float* %tmp12359, i64 1
+ %tmp12361 = getelementptr inbounds float, float* %tmp12360, i64 1
+ %tmp12362 = getelementptr inbounds float, float* %tmp12361, i64 1
+ %tmp12363 = getelementptr inbounds float, float* %tmp12362, i64 1
+ %tmp12364 = getelementptr inbounds float, float* %tmp12363, i64 1
+ %tmp12365 = getelementptr inbounds float, float* %tmp12364, i64 1
+ %tmp12366 = getelementptr inbounds float, float* %tmp12365, i64 1
+ %tmp12367 = getelementptr inbounds float, float* %tmp12366, i64 1
+ %tmp12368 = getelementptr inbounds float, float* %tmp12367, i64 1
+ %tmp12369 = getelementptr inbounds float, float* %tmp12368, i64 1
+ %tmp12370 = getelementptr inbounds float, float* %tmp12369, i64 1
+ %tmp12371 = getelementptr inbounds float, float* %tmp12370, i64 1
+ %tmp12372 = getelementptr inbounds float, float* %tmp12371, i64 1
+ %tmp12373 = getelementptr inbounds float, float* %tmp12372, i64 1
+ %tmp12374 = getelementptr inbounds float, float* %tmp12373, i64 1
+ %tmp12375 = getelementptr inbounds float, float* %tmp12374, i64 1
+ %tmp12376 = getelementptr inbounds float, float* %tmp12375, i64 1
+ %tmp12377 = getelementptr inbounds float, float* %tmp12376, i64 1
+ %tmp12378 = getelementptr inbounds float, float* %tmp12377, i64 1
+ %tmp12379 = getelementptr inbounds float, float* %tmp12378, i64 1
+ %tmp12380 = getelementptr inbounds float, float* %tmp12379, i64 1
+ %tmp12381 = getelementptr inbounds float, float* %tmp12380, i64 1
+ %tmp12382 = getelementptr inbounds float, float* %tmp12381, i64 1
+ %tmp12383 = getelementptr inbounds float, float* %tmp12382, i64 1
+ %tmp12384 = getelementptr inbounds float, float* %tmp12383, i64 1
+ %tmp12385 = getelementptr inbounds float, float* %tmp12384, i64 1
+ %tmp12386 = getelementptr inbounds float, float* %tmp12385, i64 1
+ %tmp12387 = getelementptr inbounds float, float* %tmp12386, i64 1
+ %tmp12388 = getelementptr inbounds float, float* %tmp12387, i64 1
+ %tmp12389 = getelementptr inbounds float, float* %tmp12388, i64 1
+ %tmp12390 = getelementptr inbounds float, float* %tmp12389, i64 1
+ %tmp12391 = getelementptr inbounds float, float* %tmp12390, i64 1
+ %tmp12392 = getelementptr inbounds float, float* %tmp12391, i64 1
+ %tmp12393 = getelementptr inbounds float, float* %tmp12392, i64 1
+ %tmp12394 = getelementptr inbounds float, float* %tmp12393, i64 1
+ %tmp12395 = getelementptr inbounds float, float* %tmp12394, i64 1
+ %tmp12396 = getelementptr inbounds float, float* %tmp12395, i64 1
+ %tmp12397 = getelementptr inbounds float, float* %tmp12396, i64 1
+ %tmp12398 = getelementptr inbounds float, float* %tmp12397, i64 1
+ %tmp12399 = getelementptr inbounds float, float* %tmp12398, i64 1
+ %tmp12400 = getelementptr inbounds float, float* %tmp12399, i64 1
+ %tmp12401 = getelementptr inbounds float, float* %tmp12400, i64 1
+ %tmp12402 = getelementptr inbounds float, float* %tmp12401, i64 1
+ %tmp12403 = getelementptr inbounds float, float* %tmp12402, i64 1
+ %tmp12404 = getelementptr inbounds float, float* %tmp12403, i64 1
+ %tmp12405 = getelementptr inbounds float, float* %tmp12404, i64 1
+ %tmp12406 = getelementptr inbounds float, float* %tmp12405, i64 1
+ %tmp12407 = getelementptr inbounds float, float* %tmp12406, i64 1
+ %tmp12408 = getelementptr inbounds float, float* %tmp12407, i64 1
+ %tmp12409 = getelementptr inbounds float, float* %tmp12408, i64 1
+ %tmp12410 = getelementptr inbounds float, float* %tmp12409, i64 1
+ %tmp12411 = getelementptr inbounds float, float* %tmp12410, i64 1
+ %tmp12412 = getelementptr inbounds float, float* %tmp12411, i64 1
+ %tmp12413 = getelementptr inbounds float, float* %tmp12412, i64 1
+ %tmp12414 = getelementptr inbounds float, float* %tmp12413, i64 1
+ %tmp12415 = getelementptr inbounds float, float* %tmp12414, i64 1
+ %tmp12416 = getelementptr inbounds float, float* %tmp12415, i64 1
+ %tmp12417 = getelementptr inbounds float, float* %tmp12416, i64 1
+ %tmp12418 = getelementptr inbounds float, float* %tmp12417, i64 1
+ %tmp12419 = getelementptr inbounds float, float* %tmp12418, i64 1
+ %tmp12420 = getelementptr inbounds float, float* %tmp12419, i64 1
+ %tmp12421 = getelementptr inbounds float, float* %tmp12420, i64 1
+ %tmp12422 = getelementptr inbounds float, float* %tmp12421, i64 1
+ %tmp12423 = getelementptr inbounds float, float* %tmp12422, i64 1
+ %tmp12424 = getelementptr inbounds float, float* %tmp12423, i64 1
+ %tmp12425 = getelementptr inbounds float, float* %tmp12424, i64 1
+ %tmp12426 = getelementptr inbounds float, float* %tmp12425, i64 1
+ %tmp12427 = getelementptr inbounds float, float* %tmp12426, i64 1
+ %tmp12428 = getelementptr inbounds float, float* %tmp12427, i64 1
+ %tmp12429 = getelementptr inbounds float, float* %tmp12428, i64 1
+ %tmp12430 = getelementptr inbounds float, float* %tmp12429, i64 1
+ %tmp12431 = getelementptr inbounds float, float* %tmp12430, i64 1
+ %tmp12432 = getelementptr inbounds float, float* %tmp12431, i64 1
+ %tmp12433 = getelementptr inbounds float, float* %tmp12432, i64 1
+ %tmp12434 = getelementptr inbounds float, float* %tmp12433, i64 1
+ %tmp12435 = getelementptr inbounds float, float* %tmp12434, i64 1
+ %tmp12436 = getelementptr inbounds float, float* %tmp12435, i64 1
+ %tmp12437 = getelementptr inbounds float, float* %tmp12436, i64 1
+ %tmp12438 = getelementptr inbounds float, float* %tmp12437, i64 1
+ %tmp12439 = getelementptr inbounds float, float* %tmp12438, i64 1
+ %tmp12440 = getelementptr inbounds float, float* %tmp12439, i64 1
+ %tmp12441 = getelementptr inbounds float, float* %tmp12440, i64 1
+ %tmp12442 = getelementptr inbounds float, float* %tmp12441, i64 1
+ %tmp12443 = getelementptr inbounds float, float* %tmp12442, i64 1
+ %tmp12444 = getelementptr inbounds float, float* %tmp12443, i64 1
+ %tmp12445 = getelementptr inbounds float, float* %tmp12444, i64 1
+ %tmp12446 = getelementptr inbounds float, float* %tmp12445, i64 1
+ %tmp12447 = getelementptr inbounds float, float* %tmp12446, i64 1
+ %tmp12448 = getelementptr inbounds float, float* %tmp12447, i64 1
+ %tmp12449 = getelementptr inbounds float, float* %tmp12448, i64 1
+ %tmp12450 = getelementptr inbounds float, float* %tmp12449, i64 1
+ %tmp12451 = getelementptr inbounds float, float* %tmp12450, i64 1
+ %tmp12452 = getelementptr inbounds float, float* %tmp12451, i64 1
+ %tmp12453 = getelementptr inbounds float, float* %tmp12452, i64 1
+ %tmp12454 = getelementptr inbounds float, float* %tmp12453, i64 1
+ %tmp12455 = getelementptr inbounds float, float* %tmp12454, i64 1
+ %tmp12456 = getelementptr inbounds float, float* %tmp12455, i64 1
+ %tmp12457 = getelementptr inbounds float, float* %tmp12456, i64 1
+ %tmp12458 = getelementptr inbounds float, float* %tmp12457, i64 1
+ %tmp12459 = getelementptr inbounds float, float* %tmp12458, i64 1
+ %tmp12460 = getelementptr inbounds float, float* %tmp12459, i64 1
+ %tmp12461 = getelementptr inbounds float, float* %tmp12460, i64 1
+ %tmp12462 = getelementptr inbounds float, float* %tmp12461, i64 1
+ %tmp12463 = getelementptr inbounds float, float* %tmp12462, i64 1
+ %tmp12464 = getelementptr inbounds float, float* %tmp12463, i64 1
+ %tmp12465 = getelementptr inbounds float, float* %tmp12464, i64 1
+ %tmp12466 = getelementptr inbounds float, float* %tmp12465, i64 1
+ %tmp12467 = getelementptr inbounds float, float* %tmp12466, i64 1
+ %tmp12468 = getelementptr inbounds float, float* %tmp12467, i64 1
+ %tmp12469 = getelementptr inbounds float, float* %tmp12468, i64 1
+ %tmp12470 = getelementptr inbounds float, float* %tmp12469, i64 1
+ %tmp12471 = getelementptr inbounds float, float* %tmp12470, i64 1
+ %tmp12472 = getelementptr inbounds float, float* %tmp12471, i64 1
+ %tmp12473 = getelementptr inbounds float, float* %tmp12472, i64 1
+ %tmp12474 = getelementptr inbounds float, float* %tmp12473, i64 1
+ %tmp12475 = getelementptr inbounds float, float* %tmp12474, i64 1
+ %tmp12476 = getelementptr inbounds float, float* %tmp12475, i64 1
+ %tmp12477 = getelementptr inbounds float, float* %tmp12476, i64 1
+ %tmp12478 = getelementptr inbounds float, float* %tmp12477, i64 1
+ %tmp12479 = getelementptr inbounds float, float* %tmp12478, i64 1
+ %tmp12480 = getelementptr inbounds float, float* %tmp12479, i64 1
+ %tmp12481 = getelementptr inbounds float, float* %tmp12480, i64 1
+ %tmp12482 = getelementptr inbounds float, float* %tmp12481, i64 1
+ %tmp12483 = getelementptr inbounds float, float* %tmp12482, i64 1
+ %tmp12484 = getelementptr inbounds float, float* %tmp12483, i64 1
+ %tmp12485 = getelementptr inbounds float, float* %tmp12484, i64 1
+ %tmp12486 = getelementptr inbounds float, float* %tmp12485, i64 1
+ %tmp12487 = getelementptr inbounds float, float* %tmp12486, i64 1
+ %tmp12488 = getelementptr inbounds float, float* %tmp12487, i64 1
+ %tmp12489 = getelementptr inbounds float, float* %tmp12488, i64 1
+ %tmp12490 = getelementptr inbounds float, float* %tmp12489, i64 1
+ %tmp12491 = getelementptr inbounds float, float* %tmp12490, i64 1
+ %tmp12492 = getelementptr inbounds float, float* %tmp12491, i64 1
+ %tmp12493 = getelementptr inbounds float, float* %tmp12492, i64 1
+ %tmp12494 = getelementptr inbounds float, float* %tmp12493, i64 1
+ %tmp12495 = getelementptr inbounds float, float* %tmp12494, i64 1
+ %tmp12496 = getelementptr inbounds float, float* %tmp12495, i64 1
+ %tmp12497 = getelementptr inbounds float, float* %tmp12496, i64 1
+ %tmp12498 = getelementptr inbounds float, float* %tmp12497, i64 1
+ %tmp12499 = getelementptr inbounds float, float* %tmp12498, i64 1
+ %tmp12500 = getelementptr inbounds float, float* %tmp12499, i64 1
+ %tmp12501 = getelementptr inbounds float, float* %tmp12500, i64 1
+ %tmp12502 = getelementptr inbounds float, float* %tmp12501, i64 1
+ %tmp12503 = getelementptr inbounds float, float* %tmp12502, i64 1
+ %tmp12504 = getelementptr inbounds float, float* %tmp12503, i64 1
+ %tmp12505 = getelementptr inbounds float, float* %tmp12504, i64 1
+ %tmp12506 = getelementptr inbounds float, float* %tmp12505, i64 1
+ %tmp12507 = getelementptr inbounds float, float* %tmp12506, i64 1
+ %tmp12508 = getelementptr inbounds float, float* %tmp12507, i64 1
+ %tmp12509 = getelementptr inbounds float, float* %tmp12508, i64 1
+ %tmp12510 = getelementptr inbounds float, float* %tmp12509, i64 1
+ %tmp12511 = getelementptr inbounds float, float* %tmp12510, i64 1
+ %tmp12512 = getelementptr inbounds float, float* %tmp12511, i64 1
+ %tmp12513 = getelementptr inbounds float, float* %tmp12512, i64 1
+ %tmp12514 = getelementptr inbounds float, float* %tmp12513, i64 1
+ %tmp12515 = getelementptr inbounds float, float* %tmp12514, i64 1
+ %tmp12516 = getelementptr inbounds float, float* %tmp12515, i64 1
+ %tmp12517 = getelementptr inbounds float, float* %tmp12516, i64 1
+ %tmp12518 = getelementptr inbounds float, float* %tmp12517, i64 1
+ %tmp12519 = getelementptr inbounds float, float* %tmp12518, i64 1
+ %tmp12520 = getelementptr inbounds float, float* %tmp12519, i64 1
+ %tmp12521 = getelementptr inbounds float, float* %tmp12520, i64 1
+ %tmp12522 = getelementptr inbounds float, float* %tmp12521, i64 1
+ %tmp12523 = getelementptr inbounds float, float* %tmp12522, i64 1
+ %tmp12524 = getelementptr inbounds float, float* %tmp12523, i64 1
+ %tmp12525 = getelementptr inbounds float, float* %tmp12524, i64 1
+ %tmp12526 = getelementptr inbounds float, float* %tmp12525, i64 1
+ %tmp12527 = getelementptr inbounds float, float* %tmp12526, i64 1
+ %tmp12528 = getelementptr inbounds float, float* %tmp12527, i64 1
+ %tmp12529 = getelementptr inbounds float, float* %tmp12528, i64 1
+ %tmp12530 = getelementptr inbounds float, float* %tmp12529, i64 1
+ %tmp12531 = getelementptr inbounds float, float* %tmp12530, i64 1
+ %tmp12532 = getelementptr inbounds float, float* %tmp12531, i64 1
+ %tmp12533 = getelementptr inbounds float, float* %tmp12532, i64 1
+ %tmp12534 = getelementptr inbounds float, float* %tmp12533, i64 1
+ %tmp12535 = getelementptr inbounds float, float* %tmp12534, i64 1
+ %tmp12536 = getelementptr inbounds float, float* %tmp12535, i64 1
+ %tmp12537 = getelementptr inbounds float, float* %tmp12536, i64 1
+ %tmp12538 = getelementptr inbounds float, float* %tmp12537, i64 1
+ %tmp12539 = getelementptr inbounds float, float* %tmp12538, i64 1
+ %tmp12540 = getelementptr inbounds float, float* %tmp12539, i64 1
+ %tmp12541 = getelementptr inbounds float, float* %tmp12540, i64 1
+ %tmp12542 = getelementptr inbounds float, float* %tmp12541, i64 1
+ %tmp12543 = getelementptr inbounds float, float* %tmp12542, i64 1
+ %tmp12544 = getelementptr inbounds float, float* %tmp12543, i64 1
+ %tmp12545 = getelementptr inbounds float, float* %tmp12544, i64 1
+ %tmp12546 = getelementptr inbounds float, float* %tmp12545, i64 1
+ %tmp12547 = getelementptr inbounds float, float* %tmp12546, i64 1
+ %tmp12548 = getelementptr inbounds float, float* %tmp12547, i64 1
+ %tmp12549 = getelementptr inbounds float, float* %tmp12548, i64 1
+ %tmp12550 = getelementptr inbounds float, float* %tmp12549, i64 1
+ %tmp12551 = getelementptr inbounds float, float* %tmp12550, i64 1
+ %tmp12552 = getelementptr inbounds float, float* %tmp12551, i64 1
+ %tmp12553 = getelementptr inbounds float, float* %tmp12552, i64 1
+ %tmp12554 = getelementptr inbounds float, float* %tmp12553, i64 1
+ %tmp12555 = getelementptr inbounds float, float* %tmp12554, i64 1
+ %tmp12556 = getelementptr inbounds float, float* %tmp12555, i64 1
+ %tmp12557 = getelementptr inbounds float, float* %tmp12556, i64 1
+ %tmp12558 = getelementptr inbounds float, float* %tmp12557, i64 1
+ %tmp12559 = getelementptr inbounds float, float* %tmp12558, i64 1
+ %tmp12560 = getelementptr inbounds float, float* %tmp12559, i64 1
+ %tmp12561 = getelementptr inbounds float, float* %tmp12560, i64 1
+ %tmp12562 = getelementptr inbounds float, float* %tmp12561, i64 1
+ %tmp12563 = getelementptr inbounds float, float* %tmp12562, i64 1
+ %tmp12564 = getelementptr inbounds float, float* %tmp12563, i64 1
+ %tmp12565 = getelementptr inbounds float, float* %tmp12564, i64 1
+ %tmp12566 = getelementptr inbounds float, float* %tmp12565, i64 1
+ %tmp12567 = getelementptr inbounds float, float* %tmp12566, i64 1
+ %tmp12568 = getelementptr inbounds float, float* %tmp12567, i64 1
+ %tmp12569 = getelementptr inbounds float, float* %tmp12568, i64 1
+ %tmp12570 = getelementptr inbounds float, float* %tmp12569, i64 1
+ %tmp12571 = getelementptr inbounds float, float* %tmp12570, i64 1
+ %tmp12572 = getelementptr inbounds float, float* %tmp12571, i64 1
+ %tmp12573 = getelementptr inbounds float, float* %tmp12572, i64 1
+ %tmp12574 = getelementptr inbounds float, float* %tmp12573, i64 1
+ %tmp12575 = getelementptr inbounds float, float* %tmp12574, i64 1
+ %tmp12576 = getelementptr inbounds float, float* %tmp12575, i64 1
+ %tmp12577 = getelementptr inbounds float, float* %tmp12576, i64 1
+ %tmp12578 = getelementptr inbounds float, float* %tmp12577, i64 1
+ %tmp12579 = getelementptr inbounds float, float* %tmp12578, i64 1
+ %tmp12580 = getelementptr inbounds float, float* %tmp12579, i64 1
+ %tmp12581 = getelementptr inbounds float, float* %tmp12580, i64 1
+ %tmp12582 = getelementptr inbounds float, float* %tmp12581, i64 1
+ %tmp12583 = getelementptr inbounds float, float* %tmp12582, i64 1
+ %tmp12584 = getelementptr inbounds float, float* %tmp12583, i64 1
+ %tmp12585 = getelementptr inbounds float, float* %tmp12584, i64 1
+ %tmp12586 = getelementptr inbounds float, float* %tmp12585, i64 1
+ %tmp12587 = getelementptr inbounds float, float* %tmp12586, i64 1
+ %tmp12588 = getelementptr inbounds float, float* %tmp12587, i64 1
+ %tmp12589 = getelementptr inbounds float, float* %tmp12588, i64 1
+ %tmp12590 = getelementptr inbounds float, float* %tmp12589, i64 1
+ %tmp12591 = getelementptr inbounds float, float* %tmp12590, i64 1
+ %tmp12592 = getelementptr inbounds float, float* %tmp12591, i64 1
+ %tmp12593 = getelementptr inbounds float, float* %tmp12592, i64 1
+ %tmp12594 = getelementptr inbounds float, float* %tmp12593, i64 1
+ %tmp12595 = getelementptr inbounds float, float* %tmp12594, i64 1
+ %tmp12596 = getelementptr inbounds float, float* %tmp12595, i64 1
+ %tmp12597 = getelementptr inbounds float, float* %tmp12596, i64 1
+ %tmp12598 = getelementptr inbounds float, float* %tmp12597, i64 1
+ %tmp12599 = getelementptr inbounds float, float* %tmp12598, i64 1
+ %tmp12600 = getelementptr inbounds float, float* %tmp12599, i64 1
+ %tmp12601 = getelementptr inbounds float, float* %tmp12600, i64 1
+ %tmp12602 = getelementptr inbounds float, float* %tmp12601, i64 1
+ %tmp12603 = getelementptr inbounds float, float* %tmp12602, i64 1
+ %tmp12604 = getelementptr inbounds float, float* %tmp12603, i64 1
+ %tmp12605 = getelementptr inbounds float, float* %tmp12604, i64 1
+ %tmp12606 = getelementptr inbounds float, float* %tmp12605, i64 1
+ %tmp12607 = getelementptr inbounds float, float* %tmp12606, i64 1
+ %tmp12608 = getelementptr inbounds float, float* %tmp12607, i64 1
+ %tmp12609 = getelementptr inbounds float, float* %tmp12608, i64 1
+ %tmp12610 = getelementptr inbounds float, float* %tmp12609, i64 1
+ %tmp12611 = getelementptr inbounds float, float* %tmp12610, i64 1
+ %tmp12612 = getelementptr inbounds float, float* %tmp12611, i64 1
+ %tmp12613 = getelementptr inbounds float, float* %tmp12612, i64 1
+ %tmp12614 = getelementptr inbounds float, float* %tmp12613, i64 1
+ %tmp12615 = getelementptr inbounds float, float* %tmp12614, i64 1
+ %tmp12616 = getelementptr inbounds float, float* %tmp12615, i64 1
+ %tmp12617 = getelementptr inbounds float, float* %tmp12616, i64 1
+ %tmp12618 = getelementptr inbounds float, float* %tmp12617, i64 1
+ %tmp12619 = getelementptr inbounds float, float* %tmp12618, i64 1
+ %tmp12620 = getelementptr inbounds float, float* %tmp12619, i64 1
+ %tmp12621 = getelementptr inbounds float, float* %tmp12620, i64 1
+ %tmp12622 = getelementptr inbounds float, float* %tmp12621, i64 1
+ %tmp12623 = getelementptr inbounds float, float* %tmp12622, i64 1
+ %tmp12624 = getelementptr inbounds float, float* %tmp12623, i64 1
+ %tmp12625 = getelementptr inbounds float, float* %tmp12624, i64 1
+ %tmp12626 = getelementptr inbounds float, float* %tmp12625, i64 1
+ %tmp12627 = getelementptr inbounds float, float* %tmp12626, i64 1
+ %tmp12628 = getelementptr inbounds float, float* %tmp12627, i64 1
+ %tmp12629 = getelementptr inbounds float, float* %tmp12628, i64 1
+ %tmp12630 = getelementptr inbounds float, float* %tmp12629, i64 1
+ %tmp12631 = getelementptr inbounds float, float* %tmp12630, i64 1
+ %tmp12632 = getelementptr inbounds float, float* %tmp12631, i64 1
+ %tmp12633 = getelementptr inbounds float, float* %tmp12632, i64 1
+ %tmp12634 = getelementptr inbounds float, float* %tmp12633, i64 1
+ %tmp12635 = getelementptr inbounds float, float* %tmp12634, i64 1
+ %tmp12636 = getelementptr inbounds float, float* %tmp12635, i64 1
+ %tmp12637 = getelementptr inbounds float, float* %tmp12636, i64 1
+ %tmp12638 = getelementptr inbounds float, float* %tmp12637, i64 1
+ %tmp12639 = getelementptr inbounds float, float* %tmp12638, i64 1
+ %tmp12640 = getelementptr inbounds float, float* %tmp12639, i64 1
+ %tmp12641 = getelementptr inbounds float, float* %tmp12640, i64 1
+ %tmp12642 = getelementptr inbounds float, float* %tmp12641, i64 1
+ %tmp12643 = getelementptr inbounds float, float* %tmp12642, i64 1
+ %tmp12644 = getelementptr inbounds float, float* %tmp12643, i64 1
+ %tmp12645 = getelementptr inbounds float, float* %tmp12644, i64 1
+ %tmp12646 = getelementptr inbounds float, float* %tmp12645, i64 1
+ %tmp12647 = getelementptr inbounds float, float* %tmp12646, i64 1
+ %tmp12648 = getelementptr inbounds float, float* %tmp12647, i64 1
+ %tmp12649 = getelementptr inbounds float, float* %tmp12648, i64 1
+ %tmp12650 = getelementptr inbounds float, float* %tmp12649, i64 1
+ %tmp12651 = getelementptr inbounds float, float* %tmp12650, i64 1
+ %tmp12652 = getelementptr inbounds float, float* %tmp12651, i64 1
+ %tmp12653 = getelementptr inbounds float, float* %tmp12652, i64 1
+ %tmp12654 = getelementptr inbounds float, float* %tmp12653, i64 1
+ %tmp12655 = getelementptr inbounds float, float* %tmp12654, i64 1
+ %tmp12656 = getelementptr inbounds float, float* %tmp12655, i64 1
+ %tmp12657 = getelementptr inbounds float, float* %tmp12656, i64 1
+ %tmp12658 = getelementptr inbounds float, float* %tmp12657, i64 1
+ %tmp12659 = getelementptr inbounds float, float* %tmp12658, i64 1
+ %tmp12660 = getelementptr inbounds float, float* %tmp12659, i64 1
+ %tmp12661 = getelementptr inbounds float, float* %tmp12660, i64 1
+ %tmp12662 = getelementptr inbounds float, float* %tmp12661, i64 1
+ %tmp12663 = getelementptr inbounds float, float* %tmp12662, i64 1
+ %tmp12664 = getelementptr inbounds float, float* %tmp12663, i64 1
+ %tmp12665 = getelementptr inbounds float, float* %tmp12664, i64 1
+ %tmp12666 = getelementptr inbounds float, float* %tmp12665, i64 1
+ %tmp12667 = getelementptr inbounds float, float* %tmp12666, i64 1
+ %tmp12668 = getelementptr inbounds float, float* %tmp12667, i64 1
+ %tmp12669 = getelementptr inbounds float, float* %tmp12668, i64 1
+ %tmp12670 = getelementptr inbounds float, float* %tmp12669, i64 1
+ %tmp12671 = getelementptr inbounds float, float* %tmp12670, i64 1
+ %tmp12672 = getelementptr inbounds float, float* %tmp12671, i64 1
+ %tmp12673 = getelementptr inbounds float, float* %tmp12672, i64 1
+ %tmp12674 = getelementptr inbounds float, float* %tmp12673, i64 1
+ %tmp12675 = getelementptr inbounds float, float* %tmp12674, i64 1
+ %tmp12676 = getelementptr inbounds float, float* %tmp12675, i64 1
+ %tmp12677 = getelementptr inbounds float, float* %tmp12676, i64 1
+ %tmp12678 = getelementptr inbounds float, float* %tmp12677, i64 1
+ %tmp12679 = getelementptr inbounds float, float* %tmp12678, i64 1
+ %tmp12680 = getelementptr inbounds float, float* %tmp12679, i64 1
+ %tmp12681 = getelementptr inbounds float, float* %tmp12680, i64 1
+ %tmp12682 = getelementptr inbounds float, float* %tmp12681, i64 1
+ %tmp12683 = getelementptr inbounds float, float* %tmp12682, i64 1
+ %tmp12684 = getelementptr inbounds float, float* %tmp12683, i64 1
+ %tmp12685 = getelementptr inbounds float, float* %tmp12684, i64 1
+ %tmp12686 = getelementptr inbounds float, float* %tmp12685, i64 1
+ %tmp12687 = getelementptr inbounds float, float* %tmp12686, i64 1
+ %tmp12688 = getelementptr inbounds float, float* %tmp12687, i64 1
+ %tmp12689 = getelementptr inbounds float, float* %tmp12688, i64 1
+ %tmp12690 = getelementptr inbounds float, float* %tmp12689, i64 1
+ %tmp12691 = getelementptr inbounds float, float* %tmp12690, i64 1
+ %tmp12692 = getelementptr inbounds float, float* %tmp12691, i64 1
+ %tmp12693 = getelementptr inbounds float, float* %tmp12692, i64 1
+ %tmp12694 = getelementptr inbounds float, float* %tmp12693, i64 1
+ %tmp12695 = getelementptr inbounds float, float* %tmp12694, i64 1
+ %tmp12696 = getelementptr inbounds float, float* %tmp12695, i64 1
+ %tmp12697 = getelementptr inbounds float, float* %tmp12696, i64 1
+ %tmp12698 = getelementptr inbounds float, float* %tmp12697, i64 1
+ %tmp12699 = getelementptr inbounds float, float* %tmp12698, i64 1
+ %tmp12700 = getelementptr inbounds float, float* %tmp12699, i64 1
+ %tmp12701 = getelementptr inbounds float, float* %tmp12700, i64 1
+ %tmp12702 = getelementptr inbounds float, float* %tmp12701, i64 1
+ %tmp12703 = getelementptr inbounds float, float* %tmp12702, i64 1
+ %tmp12704 = getelementptr inbounds float, float* %tmp12703, i64 1
+ %tmp12705 = getelementptr inbounds float, float* %tmp12704, i64 1
+ %tmp12706 = getelementptr inbounds float, float* %tmp12705, i64 1
+ %tmp12707 = getelementptr inbounds float, float* %tmp12706, i64 1
+ %tmp12708 = getelementptr inbounds float, float* %tmp12707, i64 1
+ %tmp12709 = getelementptr inbounds float, float* %tmp12708, i64 1
+ %tmp12710 = getelementptr inbounds float, float* %tmp12709, i64 1
+ %tmp12711 = getelementptr inbounds float, float* %tmp12710, i64 1
+ %tmp12712 = getelementptr inbounds float, float* %tmp12711, i64 1
+ %tmp12713 = getelementptr inbounds float, float* %tmp12712, i64 1
+ %tmp12714 = getelementptr inbounds float, float* %tmp12713, i64 1
+ %tmp12715 = getelementptr inbounds float, float* %tmp12714, i64 1
+ %tmp12716 = getelementptr inbounds float, float* %tmp12715, i64 1
+ %tmp12717 = getelementptr inbounds float, float* %tmp12716, i64 1
+ %tmp12718 = getelementptr inbounds float, float* %tmp12717, i64 1
+ %tmp12719 = getelementptr inbounds float, float* %tmp12718, i64 1
+ %tmp12720 = getelementptr inbounds float, float* %tmp12719, i64 1
+ %tmp12721 = getelementptr inbounds float, float* %tmp12720, i64 1
+ %tmp12722 = getelementptr inbounds float, float* %tmp12721, i64 1
+ %tmp12723 = getelementptr inbounds float, float* %tmp12722, i64 1
+ %tmp12724 = getelementptr inbounds float, float* %tmp12723, i64 1
+ %tmp12725 = getelementptr inbounds float, float* %tmp12724, i64 1
+ %tmp12726 = getelementptr inbounds float, float* %tmp12725, i64 1
+ %tmp12727 = getelementptr inbounds float, float* %tmp12726, i64 1
+ %tmp12728 = getelementptr inbounds float, float* %tmp12727, i64 1
+ %tmp12729 = getelementptr inbounds float, float* %tmp12728, i64 1
+ %tmp12730 = getelementptr inbounds float, float* %tmp12729, i64 1
+ %tmp12731 = getelementptr inbounds float, float* %tmp12730, i64 1
+ %tmp12732 = getelementptr inbounds float, float* %tmp12731, i64 1
+ %tmp12733 = getelementptr inbounds float, float* %tmp12732, i64 1
+ %tmp12734 = getelementptr inbounds float, float* %tmp12733, i64 1
+ %tmp12735 = getelementptr inbounds float, float* %tmp12734, i64 1
+ %tmp12736 = getelementptr inbounds float, float* %tmp12735, i64 1
+ %tmp12737 = getelementptr inbounds float, float* %tmp12736, i64 1
+ %tmp12738 = getelementptr inbounds float, float* %tmp12737, i64 1
+ %tmp12739 = getelementptr inbounds float, float* %tmp12738, i64 1
+ %tmp12740 = getelementptr inbounds float, float* %tmp12739, i64 1
+ %tmp12741 = getelementptr inbounds float, float* %tmp12740, i64 1
+ %tmp12742 = getelementptr inbounds float, float* %tmp12741, i64 1
+ %tmp12743 = getelementptr inbounds float, float* %tmp12742, i64 1
+ %tmp12744 = getelementptr inbounds float, float* %tmp12743, i64 1
+ %tmp12745 = getelementptr inbounds float, float* %tmp12744, i64 1
+ %tmp12746 = getelementptr inbounds float, float* %tmp12745, i64 1
+ %tmp12747 = getelementptr inbounds float, float* %tmp12746, i64 1
+ %tmp12748 = getelementptr inbounds float, float* %tmp12747, i64 1
+ %tmp12749 = getelementptr inbounds float, float* %tmp12748, i64 1
+ %tmp12750 = getelementptr inbounds float, float* %tmp12749, i64 1
+ %tmp12751 = getelementptr inbounds float, float* %tmp12750, i64 1
+ %tmp12752 = getelementptr inbounds float, float* %tmp12751, i64 1
+ %tmp12753 = getelementptr inbounds float, float* %tmp12752, i64 1
+ %tmp12754 = getelementptr inbounds float, float* %tmp12753, i64 1
+ %tmp12755 = getelementptr inbounds float, float* %tmp12754, i64 1
+ %tmp12756 = getelementptr inbounds float, float* %tmp12755, i64 1
+ %tmp12757 = getelementptr inbounds float, float* %tmp12756, i64 1
+ %tmp12758 = getelementptr inbounds float, float* %tmp12757, i64 1
+ %tmp12759 = getelementptr inbounds float, float* %tmp12758, i64 1
+ %tmp12760 = getelementptr inbounds float, float* %tmp12759, i64 1
+ %tmp12761 = getelementptr inbounds float, float* %tmp12760, i64 1
+ %tmp12762 = getelementptr inbounds float, float* %tmp12761, i64 1
+ %tmp12763 = getelementptr inbounds float, float* %tmp12762, i64 1
+ %tmp12764 = getelementptr inbounds float, float* %tmp12763, i64 1
+ %tmp12765 = getelementptr inbounds float, float* %tmp12764, i64 1
+ %tmp12766 = getelementptr inbounds float, float* %tmp12765, i64 1
+ %tmp12767 = getelementptr inbounds float, float* %tmp12766, i64 1
+ %tmp12768 = getelementptr inbounds float, float* %tmp12767, i64 1
+ %tmp12769 = getelementptr inbounds float, float* %tmp12768, i64 1
+ %tmp12770 = getelementptr inbounds float, float* %tmp12769, i64 1
+ %tmp12771 = getelementptr inbounds float, float* %tmp12770, i64 1
+ %tmp12772 = getelementptr inbounds float, float* %tmp12771, i64 1
+ %tmp12773 = getelementptr inbounds float, float* %tmp12772, i64 1
+ %tmp12774 = getelementptr inbounds float, float* %tmp12773, i64 1
+ %tmp12775 = getelementptr inbounds float, float* %tmp12774, i64 1
+ %tmp12776 = getelementptr inbounds float, float* %tmp12775, i64 1
+ %tmp12777 = getelementptr inbounds float, float* %tmp12776, i64 1
+ %tmp12778 = getelementptr inbounds float, float* %tmp12777, i64 1
+ %tmp12779 = getelementptr inbounds float, float* %tmp12778, i64 1
+ %tmp12780 = getelementptr inbounds float, float* %tmp12779, i64 1
+ %tmp12781 = getelementptr inbounds float, float* %tmp12780, i64 1
+ %tmp12782 = getelementptr inbounds float, float* %tmp12781, i64 1
+ %tmp12783 = getelementptr inbounds float, float* %tmp12782, i64 1
+ %tmp12784 = getelementptr inbounds float, float* %tmp12783, i64 1
+ %tmp12785 = getelementptr inbounds float, float* %tmp12784, i64 1
+ %tmp12786 = getelementptr inbounds float, float* %tmp12785, i64 1
+ %tmp12787 = getelementptr inbounds float, float* %tmp12786, i64 1
+ %tmp12788 = getelementptr inbounds float, float* %tmp12787, i64 1
+ %tmp12789 = getelementptr inbounds float, float* %tmp12788, i64 1
+ %tmp12790 = getelementptr inbounds float, float* %tmp12789, i64 1
+ %tmp12791 = getelementptr inbounds float, float* %tmp12790, i64 1
+ %tmp12792 = getelementptr inbounds float, float* %tmp12791, i64 1
+ %tmp12793 = getelementptr inbounds float, float* %tmp12792, i64 1
+ %tmp12794 = getelementptr inbounds float, float* %tmp12793, i64 1
+ %tmp12795 = getelementptr inbounds float, float* %tmp12794, i64 1
+ %tmp12796 = getelementptr inbounds float, float* %tmp12795, i64 1
+ %tmp12797 = getelementptr inbounds float, float* %tmp12796, i64 1
+ %tmp12798 = getelementptr inbounds float, float* %tmp12797, i64 1
+ %tmp12799 = getelementptr inbounds float, float* %tmp12798, i64 1
+ %tmp12800 = getelementptr inbounds float, float* %tmp12799, i64 1
+ %tmp12801 = getelementptr inbounds float, float* %tmp12800, i64 1
+ %tmp12802 = getelementptr inbounds float, float* %tmp12801, i64 1
+ %tmp12803 = getelementptr inbounds float, float* %tmp12802, i64 1
+ %tmp12804 = getelementptr inbounds float, float* %tmp12803, i64 1
+ %tmp12805 = getelementptr inbounds float, float* %tmp12804, i64 1
+ %tmp12806 = getelementptr inbounds float, float* %tmp12805, i64 1
+ %tmp12807 = getelementptr inbounds float, float* %tmp12806, i64 1
+ %tmp12808 = getelementptr inbounds float, float* %tmp12807, i64 1
+ %tmp12809 = getelementptr inbounds float, float* %tmp12808, i64 1
+ %tmp12810 = getelementptr inbounds float, float* %tmp12809, i64 1
+ %tmp12811 = getelementptr inbounds float, float* %tmp12810, i64 1
+ %tmp12812 = getelementptr inbounds float, float* %tmp12811, i64 1
+ %tmp12813 = getelementptr inbounds float, float* %tmp12812, i64 1
+ %tmp12814 = getelementptr inbounds float, float* %tmp12813, i64 1
+ %tmp12815 = getelementptr inbounds float, float* %tmp12814, i64 1
+ %tmp12816 = getelementptr inbounds float, float* %tmp12815, i64 1
+ %tmp12817 = getelementptr inbounds float, float* %tmp12816, i64 1
+ %tmp12818 = getelementptr inbounds float, float* %tmp12817, i64 1
+ %tmp12819 = getelementptr inbounds float, float* %tmp12818, i64 1
+ %tmp12820 = getelementptr inbounds float, float* %tmp12819, i64 1
+ %tmp12821 = getelementptr inbounds float, float* %tmp12820, i64 1
+ %tmp12822 = getelementptr inbounds float, float* %tmp12821, i64 1
+ %tmp12823 = getelementptr inbounds float, float* %tmp12822, i64 1
+ %tmp12824 = getelementptr inbounds float, float* %tmp12823, i64 1
+ %tmp12825 = getelementptr inbounds float, float* %tmp12824, i64 1
+ %tmp12826 = getelementptr inbounds float, float* %tmp12825, i64 1
+ %tmp12827 = getelementptr inbounds float, float* %tmp12826, i64 1
+ %tmp12828 = getelementptr inbounds float, float* %tmp12827, i64 1
+ %tmp12829 = getelementptr inbounds float, float* %tmp12828, i64 1
+ %tmp12830 = getelementptr inbounds float, float* %tmp12829, i64 1
+ %tmp12831 = getelementptr inbounds float, float* %tmp12830, i64 1
+ %tmp12832 = getelementptr inbounds float, float* %tmp12831, i64 1
+ %tmp12833 = getelementptr inbounds float, float* %tmp12832, i64 1
+ %tmp12834 = getelementptr inbounds float, float* %tmp12833, i64 1
+ %tmp12835 = getelementptr inbounds float, float* %tmp12834, i64 1
+ %tmp12836 = getelementptr inbounds float, float* %tmp12835, i64 1
+ %tmp12837 = getelementptr inbounds float, float* %tmp12836, i64 1
+ %tmp12838 = getelementptr inbounds float, float* %tmp12837, i64 1
+ %tmp12839 = getelementptr inbounds float, float* %tmp12838, i64 1
+ %tmp12840 = getelementptr inbounds float, float* %tmp12839, i64 1
+ %tmp12841 = getelementptr inbounds float, float* %tmp12840, i64 1
+ %tmp12842 = getelementptr inbounds float, float* %tmp12841, i64 1
+ %tmp12843 = getelementptr inbounds float, float* %tmp12842, i64 1
+ %tmp12844 = getelementptr inbounds float, float* %tmp12843, i64 1
+ %tmp12845 = getelementptr inbounds float, float* %tmp12844, i64 1
+ %tmp12846 = getelementptr inbounds float, float* %tmp12845, i64 1
+ %tmp12847 = getelementptr inbounds float, float* %tmp12846, i64 1
+ %tmp12848 = getelementptr inbounds float, float* %tmp12847, i64 1
+ %tmp12849 = getelementptr inbounds float, float* %tmp12848, i64 1
+ %tmp12850 = getelementptr inbounds float, float* %tmp12849, i64 1
+ %tmp12851 = getelementptr inbounds float, float* %tmp12850, i64 1
+ %tmp12852 = getelementptr inbounds float, float* %tmp12851, i64 1
+ %tmp12853 = getelementptr inbounds float, float* %tmp12852, i64 1
+ %tmp12854 = getelementptr inbounds float, float* %tmp12853, i64 1
+ %tmp12855 = getelementptr inbounds float, float* %tmp12854, i64 1
+ %tmp12856 = getelementptr inbounds float, float* %tmp12855, i64 1
+ %tmp12857 = getelementptr inbounds float, float* %tmp12856, i64 1
+ %tmp12858 = getelementptr inbounds float, float* %tmp12857, i64 1
+ %tmp12859 = getelementptr inbounds float, float* %tmp12858, i64 1
+ %tmp12860 = getelementptr inbounds float, float* %tmp12859, i64 1
+ %tmp12861 = getelementptr inbounds float, float* %tmp12860, i64 1
+ %tmp12862 = getelementptr inbounds float, float* %tmp12861, i64 1
+ %tmp12863 = getelementptr inbounds float, float* %tmp12862, i64 1
+ %tmp12864 = getelementptr inbounds float, float* %tmp12863, i64 1
+ %tmp12865 = getelementptr inbounds float, float* %tmp12864, i64 1
+ %tmp12866 = getelementptr inbounds float, float* %tmp12865, i64 1
+ %tmp12867 = getelementptr inbounds float, float* %tmp12866, i64 1
+ %tmp12868 = getelementptr inbounds float, float* %tmp12867, i64 1
+ %tmp12869 = getelementptr inbounds float, float* %tmp12868, i64 1
+ %tmp12870 = getelementptr inbounds float, float* %tmp12869, i64 1
+ %tmp12871 = getelementptr inbounds float, float* %tmp12870, i64 1
+ %tmp12872 = getelementptr inbounds float, float* %tmp12871, i64 1
+ %tmp12873 = getelementptr inbounds float, float* %tmp12872, i64 1
+ %tmp12874 = getelementptr inbounds float, float* %tmp12873, i64 1
+ %tmp12875 = getelementptr inbounds float, float* %tmp12874, i64 1
+ %tmp12876 = getelementptr inbounds float, float* %tmp12875, i64 1
+ %tmp12877 = getelementptr inbounds float, float* %tmp12876, i64 1
+ %tmp12878 = getelementptr inbounds float, float* %tmp12877, i64 1
+ %tmp12879 = getelementptr inbounds float, float* %tmp12878, i64 1
+ %tmp12880 = getelementptr inbounds float, float* %tmp12879, i64 1
+ %tmp12881 = getelementptr inbounds float, float* %tmp12880, i64 1
+ %tmp12882 = getelementptr inbounds float, float* %tmp12881, i64 1
+ %tmp12883 = getelementptr inbounds float, float* %tmp12882, i64 1
+ %tmp12884 = getelementptr inbounds float, float* %tmp12883, i64 1
+ %tmp12885 = getelementptr inbounds float, float* %tmp12884, i64 1
+ %tmp12886 = getelementptr inbounds float, float* %tmp12885, i64 1
+ %tmp12887 = getelementptr inbounds float, float* %tmp12886, i64 1
+ %tmp12888 = getelementptr inbounds float, float* %tmp12887, i64 1
+ %tmp12889 = getelementptr inbounds float, float* %tmp12888, i64 1
+ %tmp12890 = getelementptr inbounds float, float* %tmp12889, i64 1
+ %tmp12891 = getelementptr inbounds float, float* %tmp12890, i64 1
+ %tmp12892 = getelementptr inbounds float, float* %tmp12891, i64 1
+ %tmp12893 = getelementptr inbounds float, float* %tmp12892, i64 1
+ %tmp12894 = getelementptr inbounds float, float* %tmp12893, i64 1
+ %tmp12895 = getelementptr inbounds float, float* %tmp12894, i64 1
+ %tmp12896 = getelementptr inbounds float, float* %tmp12895, i64 1
+ %tmp12897 = getelementptr inbounds float, float* %tmp12896, i64 1
+ %tmp12898 = getelementptr inbounds float, float* %tmp12897, i64 1
+ %tmp12899 = getelementptr inbounds float, float* %tmp12898, i64 1
+ %tmp12900 = getelementptr inbounds float, float* %tmp12899, i64 1
+ %tmp12901 = getelementptr inbounds float, float* %tmp12900, i64 1
+ %tmp12902 = getelementptr inbounds float, float* %tmp12901, i64 1
+ %tmp12903 = getelementptr inbounds float, float* %tmp12902, i64 1
+ %tmp12904 = getelementptr inbounds float, float* %tmp12903, i64 1
+ %tmp12905 = getelementptr inbounds float, float* %tmp12904, i64 1
+ %tmp12906 = getelementptr inbounds float, float* %tmp12905, i64 1
+ %tmp12907 = getelementptr inbounds float, float* %tmp12906, i64 1
+ %tmp12908 = getelementptr inbounds float, float* %tmp12907, i64 1
+ %tmp12909 = getelementptr inbounds float, float* %tmp12908, i64 1
+ %tmp12910 = getelementptr inbounds float, float* %tmp12909, i64 1
+ %tmp12911 = getelementptr inbounds float, float* %tmp12910, i64 1
+ %tmp12912 = getelementptr inbounds float, float* %tmp12911, i64 1
+ %tmp12913 = getelementptr inbounds float, float* %tmp12912, i64 1
+ %tmp12914 = getelementptr inbounds float, float* %tmp12913, i64 1
+ %tmp12915 = getelementptr inbounds float, float* %tmp12914, i64 1
+ %tmp12916 = getelementptr inbounds float, float* %tmp12915, i64 1
+ %tmp12917 = getelementptr inbounds float, float* %tmp12916, i64 1
+ %tmp12918 = getelementptr inbounds float, float* %tmp12917, i64 1
+ %tmp12919 = getelementptr inbounds float, float* %tmp12918, i64 1
+ %tmp12920 = getelementptr inbounds float, float* %tmp12919, i64 1
+ %tmp12921 = getelementptr inbounds float, float* %tmp12920, i64 1
+ %tmp12922 = getelementptr inbounds float, float* %tmp12921, i64 1
+ %tmp12923 = getelementptr inbounds float, float* %tmp12922, i64 1
+ %tmp12924 = getelementptr inbounds float, float* %tmp12923, i64 1
+ %tmp12925 = getelementptr inbounds float, float* %tmp12924, i64 1
+ %tmp12926 = getelementptr inbounds float, float* %tmp12925, i64 1
+ %tmp12927 = getelementptr inbounds float, float* %tmp12926, i64 1
+ %tmp12928 = getelementptr inbounds float, float* %tmp12927, i64 1
+ %tmp12929 = getelementptr inbounds float, float* %tmp12928, i64 1
+ %tmp12930 = getelementptr inbounds float, float* %tmp12929, i64 1
+ %tmp12931 = getelementptr inbounds float, float* %tmp12930, i64 1
+ %tmp12932 = getelementptr inbounds float, float* %tmp12931, i64 1
+ %tmp12933 = getelementptr inbounds float, float* %tmp12932, i64 1
+ %tmp12934 = getelementptr inbounds float, float* %tmp12933, i64 1
+ %tmp12935 = getelementptr inbounds float, float* %tmp12934, i64 1
+ %tmp12936 = getelementptr inbounds float, float* %tmp12935, i64 1
+ %tmp12937 = getelementptr inbounds float, float* %tmp12936, i64 1
+ %tmp12938 = getelementptr inbounds float, float* %tmp12937, i64 1
+ %tmp12939 = getelementptr inbounds float, float* %tmp12938, i64 1
+ %tmp12940 = getelementptr inbounds float, float* %tmp12939, i64 1
+ %tmp12941 = getelementptr inbounds float, float* %tmp12940, i64 1
+ %tmp12942 = getelementptr inbounds float, float* %tmp12941, i64 1
+ %tmp12943 = getelementptr inbounds float, float* %tmp12942, i64 1
+ %tmp12944 = getelementptr inbounds float, float* %tmp12943, i64 1
+ %tmp12945 = getelementptr inbounds float, float* %tmp12944, i64 1
+ %tmp12946 = getelementptr inbounds float, float* %tmp12945, i64 1
+ %tmp12947 = getelementptr inbounds float, float* %tmp12946, i64 1
+ %tmp12948 = getelementptr inbounds float, float* %tmp12947, i64 1
+ %tmp12949 = getelementptr inbounds float, float* %tmp12948, i64 1
+ %tmp12950 = getelementptr inbounds float, float* %tmp12949, i64 1
+ %tmp12951 = getelementptr inbounds float, float* %tmp12950, i64 1
+ %tmp12952 = getelementptr inbounds float, float* %tmp12951, i64 1
+ %tmp12953 = getelementptr inbounds float, float* %tmp12952, i64 1
+ %tmp12954 = getelementptr inbounds float, float* %tmp12953, i64 1
+ %tmp12955 = getelementptr inbounds float, float* %tmp12954, i64 1
+ %tmp12956 = getelementptr inbounds float, float* %tmp12955, i64 1
+ %tmp12957 = getelementptr inbounds float, float* %tmp12956, i64 1
+ %tmp12958 = getelementptr inbounds float, float* %tmp12957, i64 1
+ %tmp12959 = getelementptr inbounds float, float* %tmp12958, i64 1
+ %tmp12960 = getelementptr inbounds float, float* %tmp12959, i64 1
+ %tmp12961 = getelementptr inbounds float, float* %tmp12960, i64 1
+ %tmp12962 = getelementptr inbounds float, float* %tmp12961, i64 1
+ %tmp12963 = getelementptr inbounds float, float* %tmp12962, i64 1
+ %tmp12964 = getelementptr inbounds float, float* %tmp12963, i64 1
+ %tmp12965 = getelementptr inbounds float, float* %tmp12964, i64 1
+ %tmp12966 = getelementptr inbounds float, float* %tmp12965, i64 1
+ %tmp12967 = getelementptr inbounds float, float* %tmp12966, i64 1
+ %tmp12968 = getelementptr inbounds float, float* %tmp12967, i64 1
+ %tmp12969 = getelementptr inbounds float, float* %tmp12968, i64 1
+ %tmp12970 = getelementptr inbounds float, float* %tmp12969, i64 1
+ %tmp12971 = getelementptr inbounds float, float* %tmp12970, i64 1
+ %tmp12972 = getelementptr inbounds float, float* %tmp12971, i64 1
+ %tmp12973 = getelementptr inbounds float, float* %tmp12972, i64 1
+ %tmp12974 = getelementptr inbounds float, float* %tmp12973, i64 1
+ %tmp12975 = getelementptr inbounds float, float* %tmp12974, i64 1
+ %tmp12976 = getelementptr inbounds float, float* %tmp12975, i64 1
+ %tmp12977 = getelementptr inbounds float, float* %tmp12976, i64 1
+ %tmp12978 = getelementptr inbounds float, float* %tmp12977, i64 1
+ %tmp12979 = getelementptr inbounds float, float* %tmp12978, i64 1
+ %tmp12980 = getelementptr inbounds float, float* %tmp12979, i64 1
+ %tmp12981 = getelementptr inbounds float, float* %tmp12980, i64 1
+ %tmp12982 = getelementptr inbounds float, float* %tmp12981, i64 1
+ %tmp12983 = getelementptr inbounds float, float* %tmp12982, i64 1
+ %tmp12984 = getelementptr inbounds float, float* %tmp12983, i64 1
+ %tmp12985 = getelementptr inbounds float, float* %tmp12984, i64 1
+ %tmp12986 = getelementptr inbounds float, float* %tmp12985, i64 1
+ %tmp12987 = getelementptr inbounds float, float* %tmp12986, i64 1
+ %tmp12988 = getelementptr inbounds float, float* %tmp12987, i64 1
+ %tmp12989 = getelementptr inbounds float, float* %tmp12988, i64 1
+ %tmp12990 = getelementptr inbounds float, float* %tmp12989, i64 1
+ %tmp12991 = getelementptr inbounds float, float* %tmp12990, i64 1
+ %tmp12992 = getelementptr inbounds float, float* %tmp12991, i64 1
+ %tmp12993 = getelementptr inbounds float, float* %tmp12992, i64 1
+ %tmp12994 = getelementptr inbounds float, float* %tmp12993, i64 1
+ %tmp12995 = getelementptr inbounds float, float* %tmp12994, i64 1
+ %tmp12996 = getelementptr inbounds float, float* %tmp12995, i64 1
+ %tmp12997 = getelementptr inbounds float, float* %tmp12996, i64 1
+ %tmp12998 = getelementptr inbounds float, float* %tmp12997, i64 1
+ %tmp12999 = getelementptr inbounds float, float* %tmp12998, i64 1
+ %tmp13000 = getelementptr inbounds float, float* %tmp12999, i64 1
+ %tmp13001 = getelementptr inbounds float, float* %tmp13000, i64 1
+ %tmp13002 = getelementptr inbounds float, float* %tmp13001, i64 1
+ %tmp13003 = getelementptr inbounds float, float* %tmp13002, i64 1
+ %tmp13004 = getelementptr inbounds float, float* %tmp13003, i64 1
+ %tmp13005 = getelementptr inbounds float, float* %tmp13004, i64 1
+ %tmp13006 = getelementptr inbounds float, float* %tmp13005, i64 1
+ %tmp13007 = getelementptr inbounds float, float* %tmp13006, i64 1
+ %tmp13008 = getelementptr inbounds float, float* %tmp13007, i64 1
+ %tmp13009 = getelementptr inbounds float, float* %tmp13008, i64 1
+ %tmp13010 = getelementptr inbounds float, float* %tmp13009, i64 1
+ %tmp13011 = getelementptr inbounds float, float* %tmp13010, i64 1
+ %tmp13012 = getelementptr inbounds float, float* %tmp13011, i64 1
+ %tmp13013 = getelementptr inbounds float, float* %tmp13012, i64 1
+ %tmp13014 = getelementptr inbounds float, float* %tmp13013, i64 1
+ %tmp13015 = getelementptr inbounds float, float* %tmp13014, i64 1
+ %tmp13016 = getelementptr inbounds float, float* %tmp13015, i64 1
+ %tmp13017 = getelementptr inbounds float, float* %tmp13016, i64 1
+ %tmp13018 = getelementptr inbounds float, float* %tmp13017, i64 1
+ %tmp13019 = getelementptr inbounds float, float* %tmp13018, i64 1
+ %tmp13020 = getelementptr inbounds float, float* %tmp13019, i64 1
+ %tmp13021 = getelementptr inbounds float, float* %tmp13020, i64 1
+ %tmp13022 = getelementptr inbounds float, float* %tmp13021, i64 1
+ %tmp13023 = getelementptr inbounds float, float* %tmp13022, i64 1
+ %tmp13024 = getelementptr inbounds float, float* %tmp13023, i64 1
+ %tmp13025 = getelementptr inbounds float, float* %tmp13024, i64 1
+ %tmp13026 = getelementptr inbounds float, float* %tmp13025, i64 1
+ %tmp13027 = getelementptr inbounds float, float* %tmp13026, i64 1
+ %tmp13028 = getelementptr inbounds float, float* %tmp13027, i64 1
+ %tmp13029 = getelementptr inbounds float, float* %tmp13028, i64 1
+ %tmp13030 = getelementptr inbounds float, float* %tmp13029, i64 1
+ %tmp13031 = getelementptr inbounds float, float* %tmp13030, i64 1
+ %tmp13032 = getelementptr inbounds float, float* %tmp13031, i64 1
+ %tmp13033 = getelementptr inbounds float, float* %tmp13032, i64 1
+ %tmp13034 = getelementptr inbounds float, float* %tmp13033, i64 1
+ %tmp13035 = getelementptr inbounds float, float* %tmp13034, i64 1
+ %tmp13036 = getelementptr inbounds float, float* %tmp13035, i64 1
+ %tmp13037 = getelementptr inbounds float, float* %tmp13036, i64 1
+ %tmp13038 = getelementptr inbounds float, float* %tmp13037, i64 1
+ %tmp13039 = getelementptr inbounds float, float* %tmp13038, i64 1
+ %tmp13040 = getelementptr inbounds float, float* %tmp13039, i64 1
+ %tmp13041 = getelementptr inbounds float, float* %tmp13040, i64 1
+ %tmp13042 = getelementptr inbounds float, float* %tmp13041, i64 1
+ %tmp13043 = getelementptr inbounds float, float* %tmp13042, i64 1
+ %tmp13044 = getelementptr inbounds float, float* %tmp13043, i64 1
+ %tmp13045 = getelementptr inbounds float, float* %tmp13044, i64 1
+ %tmp13046 = getelementptr inbounds float, float* %tmp13045, i64 1
+ %tmp13047 = getelementptr inbounds float, float* %tmp13046, i64 1
+ %tmp13048 = getelementptr inbounds float, float* %tmp13047, i64 1
+ %tmp13049 = getelementptr inbounds float, float* %tmp13048, i64 1
+ %tmp13050 = getelementptr inbounds float, float* %tmp13049, i64 1
+ %tmp13051 = getelementptr inbounds float, float* %tmp13050, i64 1
+ %tmp13052 = getelementptr inbounds float, float* %tmp13051, i64 1
+ %tmp13053 = getelementptr inbounds float, float* %tmp13052, i64 1
+ %tmp13054 = getelementptr inbounds float, float* %tmp13053, i64 1
+ %tmp13055 = getelementptr inbounds float, float* %tmp13054, i64 1
+ %tmp13056 = getelementptr inbounds float, float* %tmp13055, i64 1
+ %tmp13057 = getelementptr inbounds float, float* %tmp13056, i64 1
+ %tmp13058 = getelementptr inbounds float, float* %tmp13057, i64 1
+ %tmp13059 = getelementptr inbounds float, float* %tmp13058, i64 1
+ %tmp13060 = getelementptr inbounds float, float* %tmp13059, i64 1
+ %tmp13061 = getelementptr inbounds float, float* %tmp13060, i64 1
+ %tmp13062 = getelementptr inbounds float, float* %tmp13061, i64 1
+ %tmp13063 = getelementptr inbounds float, float* %tmp13062, i64 1
+ %tmp13064 = getelementptr inbounds float, float* %tmp13063, i64 1
+ %tmp13065 = getelementptr inbounds float, float* %tmp13064, i64 1
+ %tmp13066 = getelementptr inbounds float, float* %tmp13065, i64 1
+ %tmp13067 = getelementptr inbounds float, float* %tmp13066, i64 1
+ %tmp13068 = getelementptr inbounds float, float* %tmp13067, i64 1
+ %tmp13069 = getelementptr inbounds float, float* %tmp13068, i64 1
+ %tmp13070 = getelementptr inbounds float, float* %tmp13069, i64 1
+ %tmp13071 = getelementptr inbounds float, float* %tmp13070, i64 1
+ %tmp13072 = getelementptr inbounds float, float* %tmp13071, i64 1
+ %tmp13073 = getelementptr inbounds float, float* %tmp13072, i64 1
+ %tmp13074 = getelementptr inbounds float, float* %tmp13073, i64 1
+ %tmp13075 = getelementptr inbounds float, float* %tmp13074, i64 1
+ %tmp13076 = getelementptr inbounds float, float* %tmp13075, i64 1
+ %tmp13077 = getelementptr inbounds float, float* %tmp13076, i64 1
+ %tmp13078 = getelementptr inbounds float, float* %tmp13077, i64 1
+ %tmp13079 = getelementptr inbounds float, float* %tmp13078, i64 1
+ %tmp13080 = getelementptr inbounds float, float* %tmp13079, i64 1
+ %tmp13081 = getelementptr inbounds float, float* %tmp13080, i64 1
+ %tmp13082 = getelementptr inbounds float, float* %tmp13081, i64 1
+ %tmp13083 = getelementptr inbounds float, float* %tmp13082, i64 1
+ %tmp13084 = getelementptr inbounds float, float* %tmp13083, i64 1
+ %tmp13085 = getelementptr inbounds float, float* %tmp13084, i64 1
+ %tmp13086 = getelementptr inbounds float, float* %tmp13085, i64 1
+ %tmp13087 = getelementptr inbounds float, float* %tmp13086, i64 1
+ %tmp13088 = getelementptr inbounds float, float* %tmp13087, i64 1
+ %tmp13089 = getelementptr inbounds float, float* %tmp13088, i64 1
+ %tmp13090 = getelementptr inbounds float, float* %tmp13089, i64 1
+ %tmp13091 = getelementptr inbounds float, float* %tmp13090, i64 1
+ %tmp13092 = getelementptr inbounds float, float* %tmp13091, i64 1
+ %tmp13093 = getelementptr inbounds float, float* %tmp13092, i64 1
+ %tmp13094 = getelementptr inbounds float, float* %tmp13093, i64 1
+ %tmp13095 = getelementptr inbounds float, float* %tmp13094, i64 1
+ %tmp13096 = getelementptr inbounds float, float* %tmp13095, i64 1
+ %tmp13097 = getelementptr inbounds float, float* %tmp13096, i64 1
+ %tmp13098 = getelementptr inbounds float, float* %tmp13097, i64 1
+ %tmp13099 = getelementptr inbounds float, float* %tmp13098, i64 1
+ %tmp13100 = getelementptr inbounds float, float* %tmp13099, i64 1
+ %tmp13101 = getelementptr inbounds float, float* %tmp13100, i64 1
+ %tmp13102 = getelementptr inbounds float, float* %tmp13101, i64 1
+ %tmp13103 = getelementptr inbounds float, float* %tmp13102, i64 1
+ %tmp13104 = getelementptr inbounds float, float* %tmp13103, i64 1
+ %tmp13105 = getelementptr inbounds float, float* %tmp13104, i64 1
+ %tmp13106 = getelementptr inbounds float, float* %tmp13105, i64 1
+ %tmp13107 = getelementptr inbounds float, float* %tmp13106, i64 1
+ %tmp13108 = getelementptr inbounds float, float* %tmp13107, i64 1
+ %tmp13109 = getelementptr inbounds float, float* %tmp13108, i64 1
+ %tmp13110 = getelementptr inbounds float, float* %tmp13109, i64 1
+ %tmp13111 = getelementptr inbounds float, float* %tmp13110, i64 1
+ %tmp13112 = getelementptr inbounds float, float* %tmp13111, i64 1
+ %tmp13113 = getelementptr inbounds float, float* %tmp13112, i64 1
+ %tmp13114 = getelementptr inbounds float, float* %tmp13113, i64 1
+ %tmp13115 = getelementptr inbounds float, float* %tmp13114, i64 1
+ %tmp13116 = getelementptr inbounds float, float* %tmp13115, i64 1
+ %tmp13117 = getelementptr inbounds float, float* %tmp13116, i64 1
+ %tmp13118 = getelementptr inbounds float, float* %tmp13117, i64 1
+ %tmp13119 = getelementptr inbounds float, float* %tmp13118, i64 1
+ %tmp13120 = getelementptr inbounds float, float* %tmp13119, i64 1
+ %tmp13121 = getelementptr inbounds float, float* %tmp13120, i64 1
+ %tmp13122 = getelementptr inbounds float, float* %tmp13121, i64 1
+ %tmp13123 = getelementptr inbounds float, float* %tmp13122, i64 1
+ %tmp13124 = getelementptr inbounds float, float* %tmp13123, i64 1
+ %tmp13125 = getelementptr inbounds float, float* %tmp13124, i64 1
+ %tmp13126 = getelementptr inbounds float, float* %tmp13125, i64 1
+ %tmp13127 = getelementptr inbounds float, float* %tmp13126, i64 1
+ %tmp13128 = getelementptr inbounds float, float* %tmp13127, i64 1
+ %tmp13129 = getelementptr inbounds float, float* %tmp13128, i64 1
+ %tmp13130 = getelementptr inbounds float, float* %tmp13129, i64 1
+ %tmp13131 = getelementptr inbounds float, float* %tmp13130, i64 1
+ %tmp13132 = getelementptr inbounds float, float* %tmp13131, i64 1
+ %tmp13133 = getelementptr inbounds float, float* %tmp13132, i64 1
+ %tmp13134 = getelementptr inbounds float, float* %tmp13133, i64 1
+ %tmp13135 = getelementptr inbounds float, float* %tmp13134, i64 1
+ %tmp13136 = getelementptr inbounds float, float* %tmp13135, i64 1
+ %tmp13137 = getelementptr inbounds float, float* %tmp13136, i64 1
+ %tmp13138 = getelementptr inbounds float, float* %tmp13137, i64 1
+ %tmp13139 = getelementptr inbounds float, float* %tmp13138, i64 1
+ %tmp13140 = getelementptr inbounds float, float* %tmp13139, i64 1
+ %tmp13141 = getelementptr inbounds float, float* %tmp13140, i64 1
+ %tmp13142 = getelementptr inbounds float, float* %tmp13141, i64 1
+ %tmp13143 = getelementptr inbounds float, float* %tmp13142, i64 1
+ %tmp13144 = getelementptr inbounds float, float* %tmp13143, i64 1
+ %tmp13145 = getelementptr inbounds float, float* %tmp13144, i64 1
+ %tmp13146 = getelementptr inbounds float, float* %tmp13145, i64 1
+ %tmp13147 = getelementptr inbounds float, float* %tmp13146, i64 1
+ %tmp13148 = getelementptr inbounds float, float* %tmp13147, i64 1
+ %tmp13149 = getelementptr inbounds float, float* %tmp13148, i64 1
+ %tmp13150 = getelementptr inbounds float, float* %tmp13149, i64 1
+ %tmp13151 = getelementptr inbounds float, float* %tmp13150, i64 1
+ %tmp13152 = getelementptr inbounds float, float* %tmp13151, i64 1
+ %tmp13153 = getelementptr inbounds float, float* %tmp13152, i64 1
+ %tmp13154 = getelementptr inbounds float, float* %tmp13153, i64 1
+ %tmp13155 = getelementptr inbounds float, float* %tmp13154, i64 1
+ %tmp13156 = getelementptr inbounds float, float* %tmp13155, i64 1
+ %tmp13157 = getelementptr inbounds float, float* %tmp13156, i64 1
+ %tmp13158 = getelementptr inbounds float, float* %tmp13157, i64 1
+ %tmp13159 = getelementptr inbounds float, float* %tmp13158, i64 1
+ %tmp13160 = getelementptr inbounds float, float* %tmp13159, i64 1
+ %tmp13161 = getelementptr inbounds float, float* %tmp13160, i64 1
+ %tmp13162 = getelementptr inbounds float, float* %tmp13161, i64 1
+ %tmp13163 = getelementptr inbounds float, float* %tmp13162, i64 1
+ %tmp13164 = getelementptr inbounds float, float* %tmp13163, i64 1
+ %tmp13165 = getelementptr inbounds float, float* %tmp13164, i64 1
+ %tmp13166 = getelementptr inbounds float, float* %tmp13165, i64 1
+ %tmp13167 = getelementptr inbounds float, float* %tmp13166, i64 1
+ %tmp13168 = getelementptr inbounds float, float* %tmp13167, i64 1
+ %tmp13169 = getelementptr inbounds float, float* %tmp13168, i64 1
+ %tmp13170 = getelementptr inbounds float, float* %tmp13169, i64 1
+ %tmp13171 = getelementptr inbounds float, float* %tmp13170, i64 1
+ %tmp13172 = getelementptr inbounds float, float* %tmp13171, i64 1
+ %tmp13173 = getelementptr inbounds float, float* %tmp13172, i64 1
+ %tmp13174 = getelementptr inbounds float, float* %tmp13173, i64 1
+ %tmp13175 = getelementptr inbounds float, float* %tmp13174, i64 1
+ %tmp13176 = getelementptr inbounds float, float* %tmp13175, i64 1
+ %tmp13177 = getelementptr inbounds float, float* %tmp13176, i64 1
+ %tmp13178 = getelementptr inbounds float, float* %tmp13177, i64 1
+ %tmp13179 = getelementptr inbounds float, float* %tmp13178, i64 1
+ %tmp13180 = getelementptr inbounds float, float* %tmp13179, i64 1
+ %tmp13181 = getelementptr inbounds float, float* %tmp13180, i64 1
+ %tmp13182 = getelementptr inbounds float, float* %tmp13181, i64 1
+ %tmp13183 = getelementptr inbounds float, float* %tmp13182, i64 1
+ %tmp13184 = getelementptr inbounds float, float* %tmp13183, i64 1
+ %tmp13185 = getelementptr inbounds float, float* %tmp13184, i64 1
+ %tmp13186 = getelementptr inbounds float, float* %tmp13185, i64 1
+ %tmp13187 = getelementptr inbounds float, float* %tmp13186, i64 1
+ %tmp13188 = getelementptr inbounds float, float* %tmp13187, i64 1
+ %tmp13189 = getelementptr inbounds float, float* %tmp13188, i64 1
+ %tmp13190 = getelementptr inbounds float, float* %tmp13189, i64 1
+ %tmp13191 = getelementptr inbounds float, float* %tmp13190, i64 1
+ %tmp13192 = getelementptr inbounds float, float* %tmp13191, i64 1
+ %tmp13193 = getelementptr inbounds float, float* %tmp13192, i64 1
+ %tmp13194 = getelementptr inbounds float, float* %tmp13193, i64 1
+ %tmp13195 = getelementptr inbounds float, float* %tmp13194, i64 1
+ %tmp13196 = getelementptr inbounds float, float* %tmp13195, i64 1
+ %tmp13197 = getelementptr inbounds float, float* %tmp13196, i64 1
+ %tmp13198 = getelementptr inbounds float, float* %tmp13197, i64 1
+ %tmp13199 = getelementptr inbounds float, float* %tmp13198, i64 1
+ %tmp13200 = getelementptr inbounds float, float* %tmp13199, i64 1
+ %tmp13201 = getelementptr inbounds float, float* %tmp13200, i64 1
+ %tmp13202 = getelementptr inbounds float, float* %tmp13201, i64 1
+ %tmp13203 = getelementptr inbounds float, float* %tmp13202, i64 1
+ %tmp13204 = getelementptr inbounds float, float* %tmp13203, i64 1
+ %tmp13205 = getelementptr inbounds float, float* %tmp13204, i64 1
+ %tmp13206 = getelementptr inbounds float, float* %tmp13205, i64 1
+ %tmp13207 = getelementptr inbounds float, float* %tmp13206, i64 1
+ %tmp13208 = getelementptr inbounds float, float* %tmp13207, i64 1
+ %tmp13209 = getelementptr inbounds float, float* %tmp13208, i64 1
+ %tmp13210 = getelementptr inbounds float, float* %tmp13209, i64 1
+ %tmp13211 = getelementptr inbounds float, float* %tmp13210, i64 1
+ %tmp13212 = getelementptr inbounds float, float* %tmp13211, i64 1
+ %tmp13213 = getelementptr inbounds float, float* %tmp13212, i64 1
+ %tmp13214 = getelementptr inbounds float, float* %tmp13213, i64 1
+ %tmp13215 = getelementptr inbounds float, float* %tmp13214, i64 1
+ %tmp13216 = getelementptr inbounds float, float* %tmp13215, i64 1
+ %tmp13217 = getelementptr inbounds float, float* %tmp13216, i64 1
+ %tmp13218 = getelementptr inbounds float, float* %tmp13217, i64 1
+ %tmp13219 = getelementptr inbounds float, float* %tmp13218, i64 1
+ %tmp13220 = getelementptr inbounds float, float* %tmp13219, i64 1
+ %tmp13221 = getelementptr inbounds float, float* %tmp13220, i64 1
+ %tmp13222 = getelementptr inbounds float, float* %tmp13221, i64 1
+ %tmp13223 = getelementptr inbounds float, float* %tmp13222, i64 1
+ %tmp13224 = getelementptr inbounds float, float* %tmp13223, i64 1
+ %tmp13225 = getelementptr inbounds float, float* %tmp13224, i64 1
+ %tmp13226 = getelementptr inbounds float, float* %tmp13225, i64 1
+ %tmp13227 = getelementptr inbounds float, float* %tmp13226, i64 1
+ %tmp13228 = getelementptr inbounds float, float* %tmp13227, i64 1
+ %tmp13229 = getelementptr inbounds float, float* %tmp13228, i64 1
+ %tmp13230 = getelementptr inbounds float, float* %tmp13229, i64 1
+ %tmp13231 = getelementptr inbounds float, float* %tmp13230, i64 1
+ %tmp13232 = getelementptr inbounds float, float* %tmp13231, i64 1
+ %tmp13233 = getelementptr inbounds float, float* %tmp13232, i64 1
+ %tmp13234 = getelementptr inbounds float, float* %tmp13233, i64 1
+ %tmp13235 = getelementptr inbounds float, float* %tmp13234, i64 1
+ %tmp13236 = getelementptr inbounds float, float* %tmp13235, i64 1
+ %tmp13237 = getelementptr inbounds float, float* %tmp13236, i64 1
+ %tmp13238 = getelementptr inbounds float, float* %tmp13237, i64 1
+ %tmp13239 = getelementptr inbounds float, float* %tmp13238, i64 1
+ %tmp13240 = getelementptr inbounds float, float* %tmp13239, i64 1
+ %tmp13241 = getelementptr inbounds float, float* %tmp13240, i64 1
+ %tmp13242 = getelementptr inbounds float, float* %tmp13241, i64 1
+ %tmp13243 = getelementptr inbounds float, float* %tmp13242, i64 1
+ %tmp13244 = getelementptr inbounds float, float* %tmp13243, i64 1
+ %tmp13245 = getelementptr inbounds float, float* %tmp13244, i64 1
+ %tmp13246 = getelementptr inbounds float, float* %tmp13245, i64 1
+ %tmp13247 = getelementptr inbounds float, float* %tmp13246, i64 1
+ %tmp13248 = getelementptr inbounds float, float* %tmp13247, i64 1
+ %tmp13249 = getelementptr inbounds float, float* %tmp13248, i64 1
+ %tmp13250 = getelementptr inbounds float, float* %tmp13249, i64 1
+ %tmp13251 = getelementptr inbounds float, float* %tmp13250, i64 1
+ %tmp13252 = getelementptr inbounds float, float* %tmp13251, i64 1
+ %tmp13253 = getelementptr inbounds float, float* %tmp13252, i64 1
+ %tmp13254 = getelementptr inbounds float, float* %tmp13253, i64 1
+ %tmp13255 = getelementptr inbounds float, float* %tmp13254, i64 1
+ %tmp13256 = getelementptr inbounds float, float* %tmp13255, i64 1
+ %tmp13257 = getelementptr inbounds float, float* %tmp13256, i64 1
+ %tmp13258 = getelementptr inbounds float, float* %tmp13257, i64 1
+ %tmp13259 = getelementptr inbounds float, float* %tmp13258, i64 1
+ %tmp13260 = getelementptr inbounds float, float* %tmp13259, i64 1
+ %tmp13261 = getelementptr inbounds float, float* %tmp13260, i64 1
+ %tmp13262 = getelementptr inbounds float, float* %tmp13261, i64 1
+ %tmp13263 = getelementptr inbounds float, float* %tmp13262, i64 1
+ %tmp13264 = getelementptr inbounds float, float* %tmp13263, i64 1
+ %tmp13265 = getelementptr inbounds float, float* %tmp13264, i64 1
+ %tmp13266 = getelementptr inbounds float, float* %tmp13265, i64 1
+ %tmp13267 = getelementptr inbounds float, float* %tmp13266, i64 1
+ %tmp13268 = getelementptr inbounds float, float* %tmp13267, i64 1
+ %tmp13269 = getelementptr inbounds float, float* %tmp13268, i64 1
+ %tmp13270 = getelementptr inbounds float, float* %tmp13269, i64 1
+ %tmp13271 = getelementptr inbounds float, float* %tmp13270, i64 1
+ %tmp13272 = getelementptr inbounds float, float* %tmp13271, i64 1
+ %tmp13273 = getelementptr inbounds float, float* %tmp13272, i64 1
+ %tmp13274 = getelementptr inbounds float, float* %tmp13273, i64 1
+ %tmp13275 = getelementptr inbounds float, float* %tmp13274, i64 1
+ %tmp13276 = getelementptr inbounds float, float* %tmp13275, i64 1
+ %tmp13277 = getelementptr inbounds float, float* %tmp13276, i64 1
+ %tmp13278 = getelementptr inbounds float, float* %tmp13277, i64 1
+ %tmp13279 = getelementptr inbounds float, float* %tmp13278, i64 1
+ %tmp13280 = getelementptr inbounds float, float* %tmp13279, i64 1
+ %tmp13281 = getelementptr inbounds float, float* %tmp13280, i64 1
+ %tmp13282 = getelementptr inbounds float, float* %tmp13281, i64 1
+ %tmp13283 = getelementptr inbounds float, float* %tmp13282, i64 1
+ %tmp13284 = getelementptr inbounds float, float* %tmp13283, i64 1
+ %tmp13285 = getelementptr inbounds float, float* %tmp13284, i64 1
+ %tmp13286 = getelementptr inbounds float, float* %tmp13285, i64 1
+ %tmp13287 = getelementptr inbounds float, float* %tmp13286, i64 1
+ %tmp13288 = getelementptr inbounds float, float* %tmp13287, i64 1
+ %tmp13289 = getelementptr inbounds float, float* %tmp13288, i64 1
+ %tmp13290 = getelementptr inbounds float, float* %tmp13289, i64 1
+ %tmp13291 = getelementptr inbounds float, float* %tmp13290, i64 1
+ %tmp13292 = getelementptr inbounds float, float* %tmp13291, i64 1
+ %tmp13293 = getelementptr inbounds float, float* %tmp13292, i64 1
+ %tmp13294 = getelementptr inbounds float, float* %tmp13293, i64 1
+ %tmp13295 = getelementptr inbounds float, float* %tmp13294, i64 1
+ %tmp13296 = getelementptr inbounds float, float* %tmp13295, i64 1
+ %tmp13297 = getelementptr inbounds float, float* %tmp13296, i64 1
+ %tmp13298 = getelementptr inbounds float, float* %tmp13297, i64 1
+ %tmp13299 = getelementptr inbounds float, float* %tmp13298, i64 1
+ %tmp13300 = getelementptr inbounds float, float* %tmp13299, i64 1
+ %tmp13301 = getelementptr inbounds float, float* %tmp13300, i64 1
+ %tmp13302 = getelementptr inbounds float, float* %tmp13301, i64 1
+ %tmp13303 = getelementptr inbounds float, float* %tmp13302, i64 1
+ %tmp13304 = getelementptr inbounds float, float* %tmp13303, i64 1
+ %tmp13305 = getelementptr inbounds float, float* %tmp13304, i64 1
+ %tmp13306 = getelementptr inbounds float, float* %tmp13305, i64 1
+ %tmp13307 = getelementptr inbounds float, float* %tmp13306, i64 1
+ %tmp13308 = getelementptr inbounds float, float* %tmp13307, i64 1
+ %tmp13309 = getelementptr inbounds float, float* %tmp13308, i64 1
+ %tmp13310 = getelementptr inbounds float, float* %tmp13309, i64 1
+ %tmp13311 = getelementptr inbounds float, float* %tmp13310, i64 1
+ %tmp13312 = getelementptr inbounds float, float* %tmp13311, i64 1
+ %tmp13313 = getelementptr inbounds float, float* %tmp13312, i64 1
+ %tmp13314 = getelementptr inbounds float, float* %tmp13313, i64 1
+ %tmp13315 = getelementptr inbounds float, float* %tmp13314, i64 1
+ %tmp13316 = getelementptr inbounds float, float* %tmp13315, i64 1
+ %tmp13317 = getelementptr inbounds float, float* %tmp13316, i64 1
+ %tmp13318 = getelementptr inbounds float, float* %tmp13317, i64 1
+ %tmp13319 = getelementptr inbounds float, float* %tmp13318, i64 1
+ %tmp13320 = getelementptr inbounds float, float* %tmp13319, i64 1
+ %tmp13321 = getelementptr inbounds float, float* %tmp13320, i64 1
+ %tmp13322 = getelementptr inbounds float, float* %tmp13321, i64 1
+ %tmp13323 = getelementptr inbounds float, float* %tmp13322, i64 1
+ %tmp13324 = getelementptr inbounds float, float* %tmp13323, i64 1
+ %tmp13325 = getelementptr inbounds float, float* %tmp13324, i64 1
+ %tmp13326 = getelementptr inbounds float, float* %tmp13325, i64 1
+ %tmp13327 = getelementptr inbounds float, float* %tmp13326, i64 1
+ %tmp13328 = getelementptr inbounds float, float* %tmp13327, i64 1
+ %tmp13329 = getelementptr inbounds float, float* %tmp13328, i64 1
+ %tmp13330 = getelementptr inbounds float, float* %tmp13329, i64 1
+ %tmp13331 = getelementptr inbounds float, float* %tmp13330, i64 1
+ %tmp13332 = getelementptr inbounds float, float* %tmp13331, i64 1
+ %tmp13333 = getelementptr inbounds float, float* %tmp13332, i64 1
+ %tmp13334 = getelementptr inbounds float, float* %tmp13333, i64 1
+ %tmp13335 = getelementptr inbounds float, float* %tmp13334, i64 1
+ %tmp13336 = getelementptr inbounds float, float* %tmp13335, i64 1
+ %tmp13337 = getelementptr inbounds float, float* %tmp13336, i64 1
+ %tmp13338 = getelementptr inbounds float, float* %tmp13337, i64 1
+ %tmp13339 = getelementptr inbounds float, float* %tmp13338, i64 1
+ %tmp13340 = getelementptr inbounds float, float* %tmp13339, i64 1
+ %tmp13341 = getelementptr inbounds float, float* %tmp13340, i64 1
+ %tmp13342 = getelementptr inbounds float, float* %tmp13341, i64 1
+ %tmp13343 = getelementptr inbounds float, float* %tmp13342, i64 1
+ %tmp13344 = getelementptr inbounds float, float* %tmp13343, i64 1
+ %tmp13345 = getelementptr inbounds float, float* %tmp13344, i64 1
+ %tmp13346 = getelementptr inbounds float, float* %tmp13345, i64 1
+ %tmp13347 = getelementptr inbounds float, float* %tmp13346, i64 1
+ %tmp13348 = getelementptr inbounds float, float* %tmp13347, i64 1
+ %tmp13349 = getelementptr inbounds float, float* %tmp13348, i64 1
+ %tmp13350 = getelementptr inbounds float, float* %tmp13349, i64 1
+ %tmp13351 = getelementptr inbounds float, float* %tmp13350, i64 1
+ %tmp13352 = getelementptr inbounds float, float* %tmp13351, i64 1
+ %tmp13353 = getelementptr inbounds float, float* %tmp13352, i64 1
+ %tmp13354 = getelementptr inbounds float, float* %tmp13353, i64 1
+ %tmp13355 = getelementptr inbounds float, float* %tmp13354, i64 1
+ %tmp13356 = getelementptr inbounds float, float* %tmp13355, i64 1
+ %tmp13357 = getelementptr inbounds float, float* %tmp13356, i64 1
+ %tmp13358 = getelementptr inbounds float, float* %tmp13357, i64 1
+ %tmp13359 = getelementptr inbounds float, float* %tmp13358, i64 1
+ %tmp13360 = getelementptr inbounds float, float* %tmp13359, i64 1
+ %tmp13361 = getelementptr inbounds float, float* %tmp13360, i64 1
+ %tmp13362 = getelementptr inbounds float, float* %tmp13361, i64 1
+ %tmp13363 = getelementptr inbounds float, float* %tmp13362, i64 1
+ %tmp13364 = getelementptr inbounds float, float* %tmp13363, i64 1
+ %tmp13365 = getelementptr inbounds float, float* %tmp13364, i64 1
+ %tmp13366 = getelementptr inbounds float, float* %tmp13365, i64 1
+ %tmp13367 = getelementptr inbounds float, float* %tmp13366, i64 1
+ %tmp13368 = getelementptr inbounds float, float* %tmp13367, i64 1
+ %tmp13369 = getelementptr inbounds float, float* %tmp13368, i64 1
+ %tmp13370 = getelementptr inbounds float, float* %tmp13369, i64 1
+ %tmp13371 = getelementptr inbounds float, float* %tmp13370, i64 1
+ %tmp13372 = getelementptr inbounds float, float* %tmp13371, i64 1
+ %tmp13373 = getelementptr inbounds float, float* %tmp13372, i64 1
+ %tmp13374 = getelementptr inbounds float, float* %tmp13373, i64 1
+ %tmp13375 = getelementptr inbounds float, float* %tmp13374, i64 1
+ %tmp13376 = getelementptr inbounds float, float* %tmp13375, i64 1
+ %tmp13377 = getelementptr inbounds float, float* %tmp13376, i64 1
+ %tmp13378 = getelementptr inbounds float, float* %tmp13377, i64 1
+ %tmp13379 = getelementptr inbounds float, float* %tmp13378, i64 1
+ %tmp13380 = getelementptr inbounds float, float* %tmp13379, i64 1
+ %tmp13381 = getelementptr inbounds float, float* %tmp13380, i64 1
+ %tmp13382 = getelementptr inbounds float, float* %tmp13381, i64 1
+ %tmp13383 = getelementptr inbounds float, float* %tmp13382, i64 1
+ %tmp13384 = getelementptr inbounds float, float* %tmp13383, i64 1
+ %tmp13385 = getelementptr inbounds float, float* %tmp13384, i64 1
+ %tmp13386 = getelementptr inbounds float, float* %tmp13385, i64 1
+ %tmp13387 = getelementptr inbounds float, float* %tmp13386, i64 1
+ %tmp13388 = getelementptr inbounds float, float* %tmp13387, i64 1
+ %tmp13389 = getelementptr inbounds float, float* %tmp13388, i64 1
+ %tmp13390 = getelementptr inbounds float, float* %tmp13389, i64 1
+ %tmp13391 = getelementptr inbounds float, float* %tmp13390, i64 1
+ %tmp13392 = getelementptr inbounds float, float* %tmp13391, i64 1
+ %tmp13393 = getelementptr inbounds float, float* %tmp13392, i64 1
+ %tmp13394 = getelementptr inbounds float, float* %tmp13393, i64 1
+ %tmp13395 = getelementptr inbounds float, float* %tmp13394, i64 1
+ %tmp13396 = getelementptr inbounds float, float* %tmp13395, i64 1
+ %tmp13397 = getelementptr inbounds float, float* %tmp13396, i64 1
+ %tmp13398 = getelementptr inbounds float, float* %tmp13397, i64 1
+ %tmp13399 = getelementptr inbounds float, float* %tmp13398, i64 1
+ %tmp13400 = getelementptr inbounds float, float* %tmp13399, i64 1
+ %tmp13401 = getelementptr inbounds float, float* %tmp13400, i64 1
+ %tmp13402 = getelementptr inbounds float, float* %tmp13401, i64 1
+ %tmp13403 = getelementptr inbounds float, float* %tmp13402, i64 1
+ %tmp13404 = getelementptr inbounds float, float* %tmp13403, i64 1
+ %tmp13405 = getelementptr inbounds float, float* %tmp13404, i64 1
+ %tmp13406 = getelementptr inbounds float, float* %tmp13405, i64 1
+ %tmp13407 = getelementptr inbounds float, float* %tmp13406, i64 1
+ %tmp13408 = getelementptr inbounds float, float* %tmp13407, i64 1
+ %tmp13409 = getelementptr inbounds float, float* %tmp13408, i64 1
+ %tmp13410 = getelementptr inbounds float, float* %tmp13409, i64 1
+ %tmp13411 = getelementptr inbounds float, float* %tmp13410, i64 1
+ %tmp13412 = getelementptr inbounds float, float* %tmp13411, i64 1
+ %tmp13413 = getelementptr inbounds float, float* %tmp13412, i64 1
+ %tmp13414 = getelementptr inbounds float, float* %tmp13413, i64 1
+ %tmp13415 = getelementptr inbounds float, float* %tmp13414, i64 1
+ %tmp13416 = getelementptr inbounds float, float* %tmp13415, i64 1
+ %tmp13417 = getelementptr inbounds float, float* %tmp13416, i64 1
+ %tmp13418 = getelementptr inbounds float, float* %tmp13417, i64 1
+ %tmp13419 = getelementptr inbounds float, float* %tmp13418, i64 1
+ %tmp13420 = getelementptr inbounds float, float* %tmp13419, i64 1
+ %tmp13421 = getelementptr inbounds float, float* %tmp13420, i64 1
+ %tmp13422 = getelementptr inbounds float, float* %tmp13421, i64 1
+ %tmp13423 = getelementptr inbounds float, float* %tmp13422, i64 1
+ %tmp13424 = getelementptr inbounds float, float* %tmp13423, i64 1
+ %tmp13425 = getelementptr inbounds float, float* %tmp13424, i64 1
+ %tmp13426 = getelementptr inbounds float, float* %tmp13425, i64 1
+ %tmp13427 = getelementptr inbounds float, float* %tmp13426, i64 1
+ %tmp13428 = getelementptr inbounds float, float* %tmp13427, i64 1
+ %tmp13429 = getelementptr inbounds float, float* %tmp13428, i64 1
+ %tmp13430 = getelementptr inbounds float, float* %tmp13429, i64 1
+ %tmp13431 = getelementptr inbounds float, float* %tmp13430, i64 1
+ %tmp13432 = getelementptr inbounds float, float* %tmp13431, i64 1
+ %tmp13433 = getelementptr inbounds float, float* %tmp13432, i64 1
+ %tmp13434 = getelementptr inbounds float, float* %tmp13433, i64 1
+ %tmp13435 = getelementptr inbounds float, float* %tmp13434, i64 1
+ %tmp13436 = getelementptr inbounds float, float* %tmp13435, i64 1
+ %tmp13437 = getelementptr inbounds float, float* %tmp13436, i64 1
+ %tmp13438 = getelementptr inbounds float, float* %tmp13437, i64 1
+ %tmp13439 = getelementptr inbounds float, float* %tmp13438, i64 1
+ %tmp13440 = getelementptr inbounds float, float* %tmp13439, i64 1
+ %tmp13441 = getelementptr inbounds float, float* %tmp13440, i64 1
+ %tmp13442 = getelementptr inbounds float, float* %tmp13441, i64 1
+ %tmp13443 = getelementptr inbounds float, float* %tmp13442, i64 1
+ %tmp13444 = getelementptr inbounds float, float* %tmp13443, i64 1
+ %tmp13445 = getelementptr inbounds float, float* %tmp13444, i64 1
+ %tmp13446 = getelementptr inbounds float, float* %tmp13445, i64 1
+ %tmp13447 = getelementptr inbounds float, float* %tmp13446, i64 1
+ %tmp13448 = getelementptr inbounds float, float* %tmp13447, i64 1
+ %tmp13449 = getelementptr inbounds float, float* %tmp13448, i64 1
+ %tmp13450 = getelementptr inbounds float, float* %tmp13449, i64 1
+ %tmp13451 = getelementptr inbounds float, float* %tmp13450, i64 1
+ %tmp13452 = getelementptr inbounds float, float* %tmp13451, i64 1
+ %tmp13453 = getelementptr inbounds float, float* %tmp13452, i64 1
+ %tmp13454 = getelementptr inbounds float, float* %tmp13453, i64 1
+ %tmp13455 = getelementptr inbounds float, float* %tmp13454, i64 1
+ %tmp13456 = getelementptr inbounds float, float* %tmp13455, i64 1
+ %tmp13457 = getelementptr inbounds float, float* %tmp13456, i64 1
+ %tmp13458 = getelementptr inbounds float, float* %tmp13457, i64 1
+ %tmp13459 = getelementptr inbounds float, float* %tmp13458, i64 1
+ %tmp13460 = getelementptr inbounds float, float* %tmp13459, i64 1
+ %tmp13461 = getelementptr inbounds float, float* %tmp13460, i64 1
+ %tmp13462 = getelementptr inbounds float, float* %tmp13461, i64 1
+ %tmp13463 = getelementptr inbounds float, float* %tmp13462, i64 1
+ %tmp13464 = getelementptr inbounds float, float* %tmp13463, i64 1
+ %tmp13465 = getelementptr inbounds float, float* %tmp13464, i64 1
+ %tmp13466 = getelementptr inbounds float, float* %tmp13465, i64 1
+ %tmp13467 = getelementptr inbounds float, float* %tmp13466, i64 1
+ %tmp13468 = getelementptr inbounds float, float* %tmp13467, i64 1
+ %tmp13469 = getelementptr inbounds float, float* %tmp13468, i64 1
+ %tmp13470 = getelementptr inbounds float, float* %tmp13469, i64 1
+ %tmp13471 = getelementptr inbounds float, float* %tmp13470, i64 1
+ %tmp13472 = getelementptr inbounds float, float* %tmp13471, i64 1
+ %tmp13473 = getelementptr inbounds float, float* %tmp13472, i64 1
+ %tmp13474 = getelementptr inbounds float, float* %tmp13473, i64 1
+ %tmp13475 = getelementptr inbounds float, float* %tmp13474, i64 1
+ %tmp13476 = getelementptr inbounds float, float* %tmp13475, i64 1
+ %tmp13477 = getelementptr inbounds float, float* %tmp13476, i64 1
+ %tmp13478 = getelementptr inbounds float, float* %tmp13477, i64 1
+ %tmp13479 = getelementptr inbounds float, float* %tmp13478, i64 1
+ %tmp13480 = getelementptr inbounds float, float* %tmp13479, i64 1
+ %tmp13481 = getelementptr inbounds float, float* %tmp13480, i64 1
+ %tmp13482 = getelementptr inbounds float, float* %tmp13481, i64 1
+ %tmp13483 = getelementptr inbounds float, float* %tmp13482, i64 1
+ %tmp13484 = getelementptr inbounds float, float* %tmp13483, i64 1
+ %tmp13485 = getelementptr inbounds float, float* %tmp13484, i64 1
+ %tmp13486 = getelementptr inbounds float, float* %tmp13485, i64 1
+ %tmp13487 = getelementptr inbounds float, float* %tmp13486, i64 1
+ %tmp13488 = getelementptr inbounds float, float* %tmp13487, i64 1
+ %tmp13489 = getelementptr inbounds float, float* %tmp13488, i64 1
+ %tmp13490 = getelementptr inbounds float, float* %tmp13489, i64 1
+ %tmp13491 = getelementptr inbounds float, float* %tmp13490, i64 1
+ %tmp13492 = getelementptr inbounds float, float* %tmp13491, i64 1
+ %tmp13493 = getelementptr inbounds float, float* %tmp13492, i64 1
+ %tmp13494 = getelementptr inbounds float, float* %tmp13493, i64 1
+ %tmp13495 = getelementptr inbounds float, float* %tmp13494, i64 1
+ %tmp13496 = getelementptr inbounds float, float* %tmp13495, i64 1
+ %tmp13497 = getelementptr inbounds float, float* %tmp13496, i64 1
+ %tmp13498 = getelementptr inbounds float, float* %tmp13497, i64 1
+ %tmp13499 = getelementptr inbounds float, float* %tmp13498, i64 1
+ %tmp13500 = getelementptr inbounds float, float* %tmp13499, i64 1
+ %tmp13501 = getelementptr inbounds float, float* %tmp13500, i64 1
+ %tmp13502 = getelementptr inbounds float, float* %tmp13501, i64 1
+ %tmp13503 = getelementptr inbounds float, float* %tmp13502, i64 1
+ %tmp13504 = getelementptr inbounds float, float* %tmp13503, i64 1
+ %tmp13505 = getelementptr inbounds float, float* %tmp13504, i64 1
+ %tmp13506 = getelementptr inbounds float, float* %tmp13505, i64 1
+ %tmp13507 = getelementptr inbounds float, float* %tmp13506, i64 1
+ %tmp13508 = getelementptr inbounds float, float* %tmp13507, i64 1
+ %tmp13509 = getelementptr inbounds float, float* %tmp13508, i64 1
+ %tmp13510 = getelementptr inbounds float, float* %tmp13509, i64 1
+ %tmp13511 = getelementptr inbounds float, float* %tmp13510, i64 1
+ %tmp13512 = getelementptr inbounds float, float* %tmp13511, i64 1
+ %tmp13513 = getelementptr inbounds float, float* %tmp13512, i64 1
+ %tmp13514 = getelementptr inbounds float, float* %tmp13513, i64 1
+ %tmp13515 = getelementptr inbounds float, float* %tmp13514, i64 1
+ %tmp13516 = getelementptr inbounds float, float* %tmp13515, i64 1
+ %tmp13517 = getelementptr inbounds float, float* %tmp13516, i64 1
+ %tmp13518 = getelementptr inbounds float, float* %tmp13517, i64 1
+ %tmp13519 = getelementptr inbounds float, float* %tmp13518, i64 1
+ %tmp13520 = getelementptr inbounds float, float* %tmp13519, i64 1
+ %tmp13521 = getelementptr inbounds float, float* %tmp13520, i64 1
+ %tmp13522 = getelementptr inbounds float, float* %tmp13521, i64 1
+ %tmp13523 = getelementptr inbounds float, float* %tmp13522, i64 1
+ %tmp13524 = getelementptr inbounds float, float* %tmp13523, i64 1
+ %tmp13525 = getelementptr inbounds float, float* %tmp13524, i64 1
+ %tmp13526 = getelementptr inbounds float, float* %tmp13525, i64 1
+ %tmp13527 = getelementptr inbounds float, float* %tmp13526, i64 1
+ %tmp13528 = getelementptr inbounds float, float* %tmp13527, i64 1
+ %tmp13529 = getelementptr inbounds float, float* %tmp13528, i64 1
+ %tmp13530 = getelementptr inbounds float, float* %tmp13529, i64 1
+ %tmp13531 = getelementptr inbounds float, float* %tmp13530, i64 1
+ %tmp13532 = getelementptr inbounds float, float* %tmp13531, i64 1
+ %tmp13533 = getelementptr inbounds float, float* %tmp13532, i64 1
+ %tmp13534 = getelementptr inbounds float, float* %tmp13533, i64 1
+ %tmp13535 = getelementptr inbounds float, float* %tmp13534, i64 1
+ %tmp13536 = getelementptr inbounds float, float* %tmp13535, i64 1
+ %tmp13537 = getelementptr inbounds float, float* %tmp13536, i64 1
+ %tmp13538 = getelementptr inbounds float, float* %tmp13537, i64 1
+ %tmp13539 = getelementptr inbounds float, float* %tmp13538, i64 1
+ %tmp13540 = getelementptr inbounds float, float* %tmp13539, i64 1
+ %tmp13541 = getelementptr inbounds float, float* %tmp13540, i64 1
+ %tmp13542 = getelementptr inbounds float, float* %tmp13541, i64 1
+ %tmp13543 = getelementptr inbounds float, float* %tmp13542, i64 1
+ %tmp13544 = getelementptr inbounds float, float* %tmp13543, i64 1
+ %tmp13545 = getelementptr inbounds float, float* %tmp13544, i64 1
+ %tmp13546 = getelementptr inbounds float, float* %tmp13545, i64 1
+ %tmp13547 = getelementptr inbounds float, float* %tmp13546, i64 1
+ %tmp13548 = getelementptr inbounds float, float* %tmp13547, i64 1
+ %tmp13549 = getelementptr inbounds float, float* %tmp13548, i64 1
+ %tmp13550 = getelementptr inbounds float, float* %tmp13549, i64 1
+ %tmp13551 = getelementptr inbounds float, float* %tmp13550, i64 1
+ %tmp13552 = getelementptr inbounds float, float* %tmp13551, i64 1
+ %tmp13553 = getelementptr inbounds float, float* %tmp13552, i64 1
+ %tmp13554 = getelementptr inbounds float, float* %tmp13553, i64 1
+ %tmp13555 = getelementptr inbounds float, float* %tmp13554, i64 1
+ %tmp13556 = getelementptr inbounds float, float* %tmp13555, i64 1
+ %tmp13557 = getelementptr inbounds float, float* %tmp13556, i64 1
+ %tmp13558 = getelementptr inbounds float, float* %tmp13557, i64 1
+ %tmp13559 = getelementptr inbounds float, float* %tmp13558, i64 1
+ %tmp13560 = getelementptr inbounds float, float* %tmp13559, i64 1
+ %tmp13561 = getelementptr inbounds float, float* %tmp13560, i64 1
+ %tmp13562 = getelementptr inbounds float, float* %tmp13561, i64 1
+ %tmp13563 = getelementptr inbounds float, float* %tmp13562, i64 1
+ %tmp13564 = getelementptr inbounds float, float* %tmp13563, i64 1
+ %tmp13565 = getelementptr inbounds float, float* %tmp13564, i64 1
+ %tmp13566 = getelementptr inbounds float, float* %tmp13565, i64 1
+ %tmp13567 = getelementptr inbounds float, float* %tmp13566, i64 1
+ %tmp13568 = getelementptr inbounds float, float* %tmp13567, i64 1
+ %tmp13569 = getelementptr inbounds float, float* %tmp13568, i64 1
+ %tmp13570 = getelementptr inbounds float, float* %tmp13569, i64 1
+ %tmp13571 = getelementptr inbounds float, float* %tmp13570, i64 1
+ %tmp13572 = getelementptr inbounds float, float* %tmp13571, i64 1
+ %tmp13573 = getelementptr inbounds float, float* %tmp13572, i64 1
+ %tmp13574 = getelementptr inbounds float, float* %tmp13573, i64 1
+ %tmp13575 = getelementptr inbounds float, float* %tmp13574, i64 1
+ %tmp13576 = getelementptr inbounds float, float* %tmp13575, i64 1
+ %tmp13577 = getelementptr inbounds float, float* %tmp13576, i64 1
+ %tmp13578 = getelementptr inbounds float, float* %tmp13577, i64 1
+ %tmp13579 = getelementptr inbounds float, float* %tmp13578, i64 1
+ %tmp13580 = getelementptr inbounds float, float* %tmp13579, i64 1
+ %tmp13581 = getelementptr inbounds float, float* %tmp13580, i64 1
+ %tmp13582 = getelementptr inbounds float, float* %tmp13581, i64 1
+ %tmp13583 = getelementptr inbounds float, float* %tmp13582, i64 1
+ %tmp13584 = getelementptr inbounds float, float* %tmp13583, i64 1
+ %tmp13585 = getelementptr inbounds float, float* %tmp13584, i64 1
+ %tmp13586 = getelementptr inbounds float, float* %tmp13585, i64 1
+ %tmp13587 = getelementptr inbounds float, float* %tmp13586, i64 1
+ %tmp13588 = getelementptr inbounds float, float* %tmp13587, i64 1
+ %tmp13589 = getelementptr inbounds float, float* %tmp13588, i64 1
+ %tmp13590 = getelementptr inbounds float, float* %tmp13589, i64 1
+ %tmp13591 = getelementptr inbounds float, float* %tmp13590, i64 1
+ %tmp13592 = getelementptr inbounds float, float* %tmp13591, i64 1
+ %tmp13593 = getelementptr inbounds float, float* %tmp13592, i64 1
+ %tmp13594 = getelementptr inbounds float, float* %tmp13593, i64 1
+ %tmp13595 = getelementptr inbounds float, float* %tmp13594, i64 1
+ %tmp13596 = getelementptr inbounds float, float* %tmp13595, i64 1
+ %tmp13597 = getelementptr inbounds float, float* %tmp13596, i64 1
+ %tmp13598 = getelementptr inbounds float, float* %tmp13597, i64 1
+ %tmp13599 = getelementptr inbounds float, float* %tmp13598, i64 1
+ %tmp13600 = getelementptr inbounds float, float* %tmp13599, i64 1
+ %tmp13601 = getelementptr inbounds float, float* %tmp13600, i64 1
+ %tmp13602 = getelementptr inbounds float, float* %tmp13601, i64 1
+ %tmp13603 = getelementptr inbounds float, float* %tmp13602, i64 1
+ %tmp13604 = getelementptr inbounds float, float* %tmp13603, i64 1
+ %tmp13605 = getelementptr inbounds float, float* %tmp13604, i64 1
+ %tmp13606 = getelementptr inbounds float, float* %tmp13605, i64 1
+ %tmp13607 = getelementptr inbounds float, float* %tmp13606, i64 1
+ %tmp13608 = getelementptr inbounds float, float* %tmp13607, i64 1
+ %tmp13609 = getelementptr inbounds float, float* %tmp13608, i64 1
+ %tmp13610 = getelementptr inbounds float, float* %tmp13609, i64 1
+ %tmp13611 = getelementptr inbounds float, float* %tmp13610, i64 1
+ %tmp13612 = getelementptr inbounds float, float* %tmp13611, i64 1
+ %tmp13613 = getelementptr inbounds float, float* %tmp13612, i64 1
+ %tmp13614 = getelementptr inbounds float, float* %tmp13613, i64 1
+ %tmp13615 = getelementptr inbounds float, float* %tmp13614, i64 1
+ %tmp13616 = getelementptr inbounds float, float* %tmp13615, i64 1
+ %tmp13617 = getelementptr inbounds float, float* %tmp13616, i64 1
+ %tmp13618 = getelementptr inbounds float, float* %tmp13617, i64 1
+ %tmp13619 = getelementptr inbounds float, float* %tmp13618, i64 1
+ %tmp13620 = getelementptr inbounds float, float* %tmp13619, i64 1
+ %tmp13621 = getelementptr inbounds float, float* %tmp13620, i64 1
+ %tmp13622 = getelementptr inbounds float, float* %tmp13621, i64 1
+ %tmp13623 = getelementptr inbounds float, float* %tmp13622, i64 1
+ %tmp13624 = getelementptr inbounds float, float* %tmp13623, i64 1
+ %tmp13625 = getelementptr inbounds float, float* %tmp13624, i64 1
+ %tmp13626 = getelementptr inbounds float, float* %tmp13625, i64 1
+ %tmp13627 = getelementptr inbounds float, float* %tmp13626, i64 1
+ %tmp13628 = getelementptr inbounds float, float* %tmp13627, i64 1
+ %tmp13629 = getelementptr inbounds float, float* %tmp13628, i64 1
+ %tmp13630 = getelementptr inbounds float, float* %tmp13629, i64 1
+ %tmp13631 = getelementptr inbounds float, float* %tmp13630, i64 1
+ %tmp13632 = getelementptr inbounds float, float* %tmp13631, i64 1
+ %tmp13633 = getelementptr inbounds float, float* %tmp13632, i64 1
+ %tmp13634 = getelementptr inbounds float, float* %tmp13633, i64 1
+ %tmp13635 = getelementptr inbounds float, float* %tmp13634, i64 1
+ %tmp13636 = getelementptr inbounds float, float* %tmp13635, i64 1
+ %tmp13637 = getelementptr inbounds float, float* %tmp13636, i64 1
+ %tmp13638 = getelementptr inbounds float, float* %tmp13637, i64 1
+ %tmp13639 = getelementptr inbounds float, float* %tmp13638, i64 1
+ %tmp13640 = getelementptr inbounds float, float* %tmp13639, i64 1
+ %tmp13641 = getelementptr inbounds float, float* %tmp13640, i64 1
+ %tmp13642 = getelementptr inbounds float, float* %tmp13641, i64 1
+ %tmp13643 = getelementptr inbounds float, float* %tmp13642, i64 1
+ %tmp13644 = getelementptr inbounds float, float* %tmp13643, i64 1
+ %tmp13645 = getelementptr inbounds float, float* %tmp13644, i64 1
+ %tmp13646 = getelementptr inbounds float, float* %tmp13645, i64 1
+ %tmp13647 = getelementptr inbounds float, float* %tmp13646, i64 1
+ %tmp13648 = getelementptr inbounds float, float* %tmp13647, i64 1
+ %tmp13649 = getelementptr inbounds float, float* %tmp13648, i64 1
+ %tmp13650 = getelementptr inbounds float, float* %tmp13649, i64 1
+ %tmp13651 = getelementptr inbounds float, float* %tmp13650, i64 1
+ %tmp13652 = getelementptr inbounds float, float* %tmp13651, i64 1
+ %tmp13653 = getelementptr inbounds float, float* %tmp13652, i64 1
+ %tmp13654 = getelementptr inbounds float, float* %tmp13653, i64 1
+ %tmp13655 = getelementptr inbounds float, float* %tmp13654, i64 1
+ %tmp13656 = getelementptr inbounds float, float* %tmp13655, i64 1
+ %tmp13657 = getelementptr inbounds float, float* %tmp13656, i64 1
+ %tmp13658 = getelementptr inbounds float, float* %tmp13657, i64 1
+ %tmp13659 = getelementptr inbounds float, float* %tmp13658, i64 1
+ %tmp13660 = getelementptr inbounds float, float* %tmp13659, i64 1
+ %tmp13661 = getelementptr inbounds float, float* %tmp13660, i64 1
+ %tmp13662 = getelementptr inbounds float, float* %tmp13661, i64 1
+ %tmp13663 = getelementptr inbounds float, float* %tmp13662, i64 1
+ %tmp13664 = getelementptr inbounds float, float* %tmp13663, i64 1
+ %tmp13665 = getelementptr inbounds float, float* %tmp13664, i64 1
+ %tmp13666 = getelementptr inbounds float, float* %tmp13665, i64 1
+ %tmp13667 = getelementptr inbounds float, float* %tmp13666, i64 1
+ %tmp13668 = getelementptr inbounds float, float* %tmp13667, i64 1
+ %tmp13669 = getelementptr inbounds float, float* %tmp13668, i64 1
+ %tmp13670 = getelementptr inbounds float, float* %tmp13669, i64 1
+ %tmp13671 = getelementptr inbounds float, float* %tmp13670, i64 1
+ %tmp13672 = getelementptr inbounds float, float* %tmp13671, i64 1
+ %tmp13673 = getelementptr inbounds float, float* %tmp13672, i64 1
+ %tmp13674 = getelementptr inbounds float, float* %tmp13673, i64 1
+ %tmp13675 = getelementptr inbounds float, float* %tmp13674, i64 1
+ %tmp13676 = getelementptr inbounds float, float* %tmp13675, i64 1
+ %tmp13677 = getelementptr inbounds float, float* %tmp13676, i64 1
+ %tmp13678 = getelementptr inbounds float, float* %tmp13677, i64 1
+ %tmp13679 = getelementptr inbounds float, float* %tmp13678, i64 1
+ %tmp13680 = getelementptr inbounds float, float* %tmp13679, i64 1
+ %tmp13681 = getelementptr inbounds float, float* %tmp13680, i64 1
+ %tmp13682 = getelementptr inbounds float, float* %tmp13681, i64 1
+ %tmp13683 = getelementptr inbounds float, float* %tmp13682, i64 1
+ %tmp13684 = getelementptr inbounds float, float* %tmp13683, i64 1
+ %tmp13685 = getelementptr inbounds float, float* %tmp13684, i64 1
+ %tmp13686 = getelementptr inbounds float, float* %tmp13685, i64 1
+ %tmp13687 = getelementptr inbounds float, float* %tmp13686, i64 1
+ %tmp13688 = getelementptr inbounds float, float* %tmp13687, i64 1
+ %tmp13689 = getelementptr inbounds float, float* %tmp13688, i64 1
+ %tmp13690 = getelementptr inbounds float, float* %tmp13689, i64 1
+ %tmp13691 = getelementptr inbounds float, float* %tmp13690, i64 1
+ %tmp13692 = getelementptr inbounds float, float* %tmp13691, i64 1
+ %tmp13693 = getelementptr inbounds float, float* %tmp13692, i64 1
+ %tmp13694 = getelementptr inbounds float, float* %tmp13693, i64 1
+ %tmp13695 = getelementptr inbounds float, float* %tmp13694, i64 1
+ %tmp13696 = getelementptr inbounds float, float* %tmp13695, i64 1
+ %tmp13697 = getelementptr inbounds float, float* %tmp13696, i64 1
+ %tmp13698 = getelementptr inbounds float, float* %tmp13697, i64 1
+ %tmp13699 = getelementptr inbounds float, float* %tmp13698, i64 1
+ %tmp13700 = getelementptr inbounds float, float* %tmp13699, i64 1
+ %tmp13701 = getelementptr inbounds float, float* %tmp13700, i64 1
+ %tmp13702 = getelementptr inbounds float, float* %tmp13701, i64 1
+ %tmp13703 = getelementptr inbounds float, float* %tmp13702, i64 1
+ %tmp13704 = getelementptr inbounds float, float* %tmp13703, i64 1
+ %tmp13705 = getelementptr inbounds float, float* %tmp13704, i64 1
+ %tmp13706 = getelementptr inbounds float, float* %tmp13705, i64 1
+ %tmp13707 = getelementptr inbounds float, float* %tmp13706, i64 1
+ %tmp13708 = getelementptr inbounds float, float* %tmp13707, i64 1
+ %tmp13709 = getelementptr inbounds float, float* %tmp13708, i64 1
+ %tmp13710 = getelementptr inbounds float, float* %tmp13709, i64 1
+ %tmp13711 = getelementptr inbounds float, float* %tmp13710, i64 1
+ %tmp13712 = getelementptr inbounds float, float* %tmp13711, i64 1
+ %tmp13713 = getelementptr inbounds float, float* %tmp13712, i64 1
+ %tmp13714 = getelementptr inbounds float, float* %tmp13713, i64 1
+ %tmp13715 = getelementptr inbounds float, float* %tmp13714, i64 1
+ %tmp13716 = getelementptr inbounds float, float* %tmp13715, i64 1
+ %tmp13717 = getelementptr inbounds float, float* %tmp13716, i64 1
+ %tmp13718 = getelementptr inbounds float, float* %tmp13717, i64 1
+ %tmp13719 = getelementptr inbounds float, float* %tmp13718, i64 1
+ %tmp13720 = getelementptr inbounds float, float* %tmp13719, i64 1
+ %tmp13721 = getelementptr inbounds float, float* %tmp13720, i64 1
+ %tmp13722 = getelementptr inbounds float, float* %tmp13721, i64 1
+ %tmp13723 = getelementptr inbounds float, float* %tmp13722, i64 1
+ %tmp13724 = getelementptr inbounds float, float* %tmp13723, i64 1
+ %tmp13725 = getelementptr inbounds float, float* %tmp13724, i64 1
+ %tmp13726 = getelementptr inbounds float, float* %tmp13725, i64 1
+ %tmp13727 = getelementptr inbounds float, float* %tmp13726, i64 1
+ %tmp13728 = getelementptr inbounds float, float* %tmp13727, i64 1
+ %tmp13729 = getelementptr inbounds float, float* %tmp13728, i64 1
+ %tmp13730 = getelementptr inbounds float, float* %tmp13729, i64 1
+ %tmp13731 = getelementptr inbounds float, float* %tmp13730, i64 1
+ %tmp13732 = getelementptr inbounds float, float* %tmp13731, i64 1
+ %tmp13733 = getelementptr inbounds float, float* %tmp13732, i64 1
+ %tmp13734 = getelementptr inbounds float, float* %tmp13733, i64 1
+ %tmp13735 = getelementptr inbounds float, float* %tmp13734, i64 1
+ %tmp13736 = getelementptr inbounds float, float* %tmp13735, i64 1
+ %tmp13737 = getelementptr inbounds float, float* %tmp13736, i64 1
+ %tmp13738 = getelementptr inbounds float, float* %tmp13737, i64 1
+ %tmp13739 = getelementptr inbounds float, float* %tmp13738, i64 1
+ %tmp13740 = getelementptr inbounds float, float* %tmp13739, i64 1
+ %tmp13741 = getelementptr inbounds float, float* %tmp13740, i64 1
+ %tmp13742 = getelementptr inbounds float, float* %tmp13741, i64 1
+ %tmp13743 = getelementptr inbounds float, float* %tmp13742, i64 1
+ %tmp13744 = getelementptr inbounds float, float* %tmp13743, i64 1
+ %tmp13745 = getelementptr inbounds float, float* %tmp13744, i64 1
+ %tmp13746 = getelementptr inbounds float, float* %tmp13745, i64 1
+ %tmp13747 = getelementptr inbounds float, float* %tmp13746, i64 1
+ %tmp13748 = getelementptr inbounds float, float* %tmp13747, i64 1
+ %tmp13749 = getelementptr inbounds float, float* %tmp13748, i64 1
+ %tmp13750 = getelementptr inbounds float, float* %tmp13749, i64 1
+ %tmp13751 = getelementptr inbounds float, float* %tmp13750, i64 1
+ %tmp13752 = getelementptr inbounds float, float* %tmp13751, i64 1
+ %tmp13753 = getelementptr inbounds float, float* %tmp13752, i64 1
+ %tmp13754 = getelementptr inbounds float, float* %tmp13753, i64 1
+ %tmp13755 = getelementptr inbounds float, float* %tmp13754, i64 1
+ %tmp13756 = getelementptr inbounds float, float* %tmp13755, i64 1
+ %tmp13757 = getelementptr inbounds float, float* %tmp13756, i64 1
+ %tmp13758 = getelementptr inbounds float, float* %tmp13757, i64 1
+ %tmp13759 = getelementptr inbounds float, float* %tmp13758, i64 1
+ %tmp13760 = getelementptr inbounds float, float* %tmp13759, i64 1
+ %tmp13761 = getelementptr inbounds float, float* %tmp13760, i64 1
+ %tmp13762 = getelementptr inbounds float, float* %tmp13761, i64 1
+ %tmp13763 = getelementptr inbounds float, float* %tmp13762, i64 1
+ %tmp13764 = getelementptr inbounds float, float* %tmp13763, i64 1
+ %tmp13765 = getelementptr inbounds float, float* %tmp13764, i64 1
+ %tmp13766 = getelementptr inbounds float, float* %tmp13765, i64 1
+ %tmp13767 = getelementptr inbounds float, float* %tmp13766, i64 1
+ %tmp13768 = getelementptr inbounds float, float* %tmp13767, i64 1
+ %tmp13769 = getelementptr inbounds float, float* %tmp13768, i64 1
+ %tmp13770 = getelementptr inbounds float, float* %tmp13769, i64 1
+ %tmp13771 = getelementptr inbounds float, float* %tmp13770, i64 1
+ %tmp13772 = getelementptr inbounds float, float* %tmp13771, i64 1
+ %tmp13773 = getelementptr inbounds float, float* %tmp13772, i64 1
+ %tmp13774 = getelementptr inbounds float, float* %tmp13773, i64 1
+ %tmp13775 = getelementptr inbounds float, float* %tmp13774, i64 1
+ %tmp13776 = getelementptr inbounds float, float* %tmp13775, i64 1
+ %tmp13777 = getelementptr inbounds float, float* %tmp13776, i64 1
+ %tmp13778 = getelementptr inbounds float, float* %tmp13777, i64 1
+ %tmp13779 = getelementptr inbounds float, float* %tmp13778, i64 1
+ %tmp13780 = getelementptr inbounds float, float* %tmp13779, i64 1
+ %tmp13781 = getelementptr inbounds float, float* %tmp13780, i64 1
+ %tmp13782 = getelementptr inbounds float, float* %tmp13781, i64 1
+ %tmp13783 = getelementptr inbounds float, float* %tmp13782, i64 1
+ %tmp13784 = getelementptr inbounds float, float* %tmp13783, i64 1
+ %tmp13785 = getelementptr inbounds float, float* %tmp13784, i64 1
+ %tmp13786 = getelementptr inbounds float, float* %tmp13785, i64 1
+ %tmp13787 = getelementptr inbounds float, float* %tmp13786, i64 1
+ %tmp13788 = getelementptr inbounds float, float* %tmp13787, i64 1
+ %tmp13789 = getelementptr inbounds float, float* %tmp13788, i64 1
+ %tmp13790 = getelementptr inbounds float, float* %tmp13789, i64 1
+ %tmp13791 = getelementptr inbounds float, float* %tmp13790, i64 1
+ %tmp13792 = getelementptr inbounds float, float* %tmp13791, i64 1
+ %tmp13793 = getelementptr inbounds float, float* %tmp13792, i64 1
+ %tmp13794 = getelementptr inbounds float, float* %tmp13793, i64 1
+ %tmp13795 = getelementptr inbounds float, float* %tmp13794, i64 1
+ %tmp13796 = getelementptr inbounds float, float* %tmp13795, i64 1
+ %tmp13797 = getelementptr inbounds float, float* %tmp13796, i64 1
+ %tmp13798 = getelementptr inbounds float, float* %tmp13797, i64 1
+ %tmp13799 = getelementptr inbounds float, float* %tmp13798, i64 1
+ %tmp13800 = getelementptr inbounds float, float* %tmp13799, i64 1
+ %tmp13801 = getelementptr inbounds float, float* %tmp13800, i64 1
+ %tmp13802 = getelementptr inbounds float, float* %tmp13801, i64 1
+ %tmp13803 = getelementptr inbounds float, float* %tmp13802, i64 1
+ %tmp13804 = getelementptr inbounds float, float* %tmp13803, i64 1
+ %tmp13805 = getelementptr inbounds float, float* %tmp13804, i64 1
+ %tmp13806 = getelementptr inbounds float, float* %tmp13805, i64 1
+ %tmp13807 = getelementptr inbounds float, float* %tmp13806, i64 1
+ %tmp13808 = getelementptr inbounds float, float* %tmp13807, i64 1
+ %tmp13809 = getelementptr inbounds float, float* %tmp13808, i64 1
+ %tmp13810 = getelementptr inbounds float, float* %tmp13809, i64 1
+ %tmp13811 = getelementptr inbounds float, float* %tmp13810, i64 1
+ %tmp13812 = getelementptr inbounds float, float* %tmp13811, i64 1
+ %tmp13813 = getelementptr inbounds float, float* %tmp13812, i64 1
+ %tmp13814 = getelementptr inbounds float, float* %tmp13813, i64 1
+ %tmp13815 = getelementptr inbounds float, float* %tmp13814, i64 1
+ %tmp13816 = getelementptr inbounds float, float* %tmp13815, i64 1
+ %tmp13817 = getelementptr inbounds float, float* %tmp13816, i64 1
+ %tmp13818 = getelementptr inbounds float, float* %tmp13817, i64 1
+ %tmp13819 = getelementptr inbounds float, float* %tmp13818, i64 1
+ %tmp13820 = getelementptr inbounds float, float* %tmp13819, i64 1
+ %tmp13821 = getelementptr inbounds float, float* %tmp13820, i64 1
+ %tmp13822 = getelementptr inbounds float, float* %tmp13821, i64 1
+ %tmp13823 = getelementptr inbounds float, float* %tmp13822, i64 1
+ %tmp13824 = getelementptr inbounds float, float* %tmp13823, i64 1
+ %tmp13825 = getelementptr inbounds float, float* %tmp13824, i64 1
+ %tmp13826 = getelementptr inbounds float, float* %tmp13825, i64 1
+ %tmp13827 = getelementptr inbounds float, float* %tmp13826, i64 1
+ %tmp13828 = getelementptr inbounds float, float* %tmp13827, i64 1
+ %tmp13829 = getelementptr inbounds float, float* %tmp13828, i64 1
+ %tmp13830 = getelementptr inbounds float, float* %tmp13829, i64 1
+ %tmp13831 = getelementptr inbounds float, float* %tmp13830, i64 1
+ %tmp13832 = getelementptr inbounds float, float* %tmp13831, i64 1
+ %tmp13833 = getelementptr inbounds float, float* %tmp13832, i64 1
+ %tmp13834 = getelementptr inbounds float, float* %tmp13833, i64 1
+ %tmp13835 = getelementptr inbounds float, float* %tmp13834, i64 1
+ %tmp13836 = getelementptr inbounds float, float* %tmp13835, i64 1
+ %tmp13837 = getelementptr inbounds float, float* %tmp13836, i64 1
+ %tmp13838 = getelementptr inbounds float, float* %tmp13837, i64 1
+ %tmp13839 = getelementptr inbounds float, float* %tmp13838, i64 1
+ %tmp13840 = getelementptr inbounds float, float* %tmp13839, i64 1
+ %tmp13841 = getelementptr inbounds float, float* %tmp13840, i64 1
+ %tmp13842 = getelementptr inbounds float, float* %tmp13841, i64 1
+ %tmp13843 = getelementptr inbounds float, float* %tmp13842, i64 1
+ %tmp13844 = getelementptr inbounds float, float* %tmp13843, i64 1
+ %tmp13845 = getelementptr inbounds float, float* %tmp13844, i64 1
+ %tmp13846 = getelementptr inbounds float, float* %tmp13845, i64 1
+ %tmp13847 = getelementptr inbounds float, float* %tmp13846, i64 1
+ %tmp13848 = getelementptr inbounds float, float* %tmp13847, i64 1
+ %tmp13849 = getelementptr inbounds float, float* %tmp13848, i64 1
+ %tmp13850 = getelementptr inbounds float, float* %tmp13849, i64 1
+ %tmp13851 = getelementptr inbounds float, float* %tmp13850, i64 1
+ %tmp13852 = getelementptr inbounds float, float* %tmp13851, i64 1
+ %tmp13853 = getelementptr inbounds float, float* %tmp13852, i64 1
+ %tmp13854 = getelementptr inbounds float, float* %tmp13853, i64 1
+ %tmp13855 = getelementptr inbounds float, float* %tmp13854, i64 1
+ %tmp13856 = getelementptr inbounds float, float* %tmp13855, i64 1
+ %tmp13857 = getelementptr inbounds float, float* %tmp13856, i64 1
+ %tmp13858 = getelementptr inbounds float, float* %tmp13857, i64 1
+ %tmp13859 = getelementptr inbounds float, float* %tmp13858, i64 1
+ %tmp13860 = getelementptr inbounds float, float* %tmp13859, i64 1
+ %tmp13861 = getelementptr inbounds float, float* %tmp13860, i64 1
+ %tmp13862 = getelementptr inbounds float, float* %tmp13861, i64 1
+ %tmp13863 = getelementptr inbounds float, float* %tmp13862, i64 1
+ %tmp13864 = getelementptr inbounds float, float* %tmp13863, i64 1
+ %tmp13865 = getelementptr inbounds float, float* %tmp13864, i64 1
+ %tmp13866 = getelementptr inbounds float, float* %tmp13865, i64 1
+ %tmp13867 = getelementptr inbounds float, float* %tmp13866, i64 1
+ %tmp13868 = getelementptr inbounds float, float* %tmp13867, i64 1
+ %tmp13869 = getelementptr inbounds float, float* %tmp13868, i64 1
+ %tmp13870 = getelementptr inbounds float, float* %tmp13869, i64 1
+ %tmp13871 = getelementptr inbounds float, float* %tmp13870, i64 1
+ %tmp13872 = getelementptr inbounds float, float* %tmp13871, i64 1
+ %tmp13873 = getelementptr inbounds float, float* %tmp13872, i64 1
+ %tmp13874 = getelementptr inbounds float, float* %tmp13873, i64 1
+ %tmp13875 = getelementptr inbounds float, float* %tmp13874, i64 1
+ %tmp13876 = getelementptr inbounds float, float* %tmp13875, i64 1
+ %tmp13877 = getelementptr inbounds float, float* %tmp13876, i64 1
+ %tmp13878 = getelementptr inbounds float, float* %tmp13877, i64 1
+ %tmp13879 = getelementptr inbounds float, float* %tmp13878, i64 1
+ %tmp13880 = getelementptr inbounds float, float* %tmp13879, i64 1
+ %tmp13881 = getelementptr inbounds float, float* %tmp13880, i64 1
+ %tmp13882 = getelementptr inbounds float, float* %tmp13881, i64 1
+ %tmp13883 = getelementptr inbounds float, float* %tmp13882, i64 1
+ %tmp13884 = getelementptr inbounds float, float* %tmp13883, i64 1
+ %tmp13885 = getelementptr inbounds float, float* %tmp13884, i64 1
+ %tmp13886 = getelementptr inbounds float, float* %tmp13885, i64 1
+ %tmp13887 = getelementptr inbounds float, float* %tmp13886, i64 1
+ %tmp13888 = getelementptr inbounds float, float* %tmp13887, i64 1
+ %tmp13889 = getelementptr inbounds float, float* %tmp13888, i64 1
+ %tmp13890 = getelementptr inbounds float, float* %tmp13889, i64 1
+ %tmp13891 = getelementptr inbounds float, float* %tmp13890, i64 1
+ %tmp13892 = getelementptr inbounds float, float* %tmp13891, i64 1
+ %tmp13893 = getelementptr inbounds float, float* %tmp13892, i64 1
+ %tmp13894 = getelementptr inbounds float, float* %tmp13893, i64 1
+ %tmp13895 = getelementptr inbounds float, float* %tmp13894, i64 1
+ %tmp13896 = getelementptr inbounds float, float* %tmp13895, i64 1
+ %tmp13897 = getelementptr inbounds float, float* %tmp13896, i64 1
+ %tmp13898 = getelementptr inbounds float, float* %tmp13897, i64 1
+ %tmp13899 = getelementptr inbounds float, float* %tmp13898, i64 1
+ %tmp13900 = getelementptr inbounds float, float* %tmp13899, i64 1
+ %tmp13901 = getelementptr inbounds float, float* %tmp13900, i64 1
+ %tmp13902 = getelementptr inbounds float, float* %tmp13901, i64 1
+ %tmp13903 = getelementptr inbounds float, float* %tmp13902, i64 1
+ %tmp13904 = getelementptr inbounds float, float* %tmp13903, i64 1
+ %tmp13905 = getelementptr inbounds float, float* %tmp13904, i64 1
+ %tmp13906 = getelementptr inbounds float, float* %tmp13905, i64 1
+ %tmp13907 = getelementptr inbounds float, float* %tmp13906, i64 1
+ %tmp13908 = getelementptr inbounds float, float* %tmp13907, i64 1
+ %tmp13909 = getelementptr inbounds float, float* %tmp13908, i64 1
+ %tmp13910 = getelementptr inbounds float, float* %tmp13909, i64 1
+ %tmp13911 = getelementptr inbounds float, float* %tmp13910, i64 1
+ %tmp13912 = getelementptr inbounds float, float* %tmp13911, i64 1
+ %tmp13913 = getelementptr inbounds float, float* %tmp13912, i64 1
+ %tmp13914 = getelementptr inbounds float, float* %tmp13913, i64 1
+ %tmp13915 = getelementptr inbounds float, float* %tmp13914, i64 1
+ %tmp13916 = getelementptr inbounds float, float* %tmp13915, i64 1
+ %tmp13917 = getelementptr inbounds float, float* %tmp13916, i64 1
+ %tmp13918 = getelementptr inbounds float, float* %tmp13917, i64 1
+ %tmp13919 = getelementptr inbounds float, float* %tmp13918, i64 1
+ %tmp13920 = getelementptr inbounds float, float* %tmp13919, i64 1
+ %tmp13921 = getelementptr inbounds float, float* %tmp13920, i64 1
+ %tmp13922 = getelementptr inbounds float, float* %tmp13921, i64 1
+ %tmp13923 = getelementptr inbounds float, float* %tmp13922, i64 1
+ %tmp13924 = getelementptr inbounds float, float* %tmp13923, i64 1
+ %tmp13925 = getelementptr inbounds float, float* %tmp13924, i64 1
+ %tmp13926 = getelementptr inbounds float, float* %tmp13925, i64 1
+ %tmp13927 = getelementptr inbounds float, float* %tmp13926, i64 1
+ %tmp13928 = getelementptr inbounds float, float* %tmp13927, i64 1
+ %tmp13929 = getelementptr inbounds float, float* %tmp13928, i64 1
+ %tmp13930 = getelementptr inbounds float, float* %tmp13929, i64 1
+ %tmp13931 = getelementptr inbounds float, float* %tmp13930, i64 1
+ %tmp13932 = getelementptr inbounds float, float* %tmp13931, i64 1
+ %tmp13933 = getelementptr inbounds float, float* %tmp13932, i64 1
+ %tmp13934 = getelementptr inbounds float, float* %tmp13933, i64 1
+ %tmp13935 = getelementptr inbounds float, float* %tmp13934, i64 1
+ %tmp13936 = getelementptr inbounds float, float* %tmp13935, i64 1
+ %tmp13937 = getelementptr inbounds float, float* %tmp13936, i64 1
+ %tmp13938 = getelementptr inbounds float, float* %tmp13937, i64 1
+ %tmp13939 = getelementptr inbounds float, float* %tmp13938, i64 1
+ %tmp13940 = getelementptr inbounds float, float* %tmp13939, i64 1
+ %tmp13941 = getelementptr inbounds float, float* %tmp13940, i64 1
+ %tmp13942 = getelementptr inbounds float, float* %tmp13941, i64 1
+ %tmp13943 = getelementptr inbounds float, float* %tmp13942, i64 1
+ %tmp13944 = getelementptr inbounds float, float* %tmp13943, i64 1
+ %tmp13945 = getelementptr inbounds float, float* %tmp13944, i64 1
+ %tmp13946 = getelementptr inbounds float, float* %tmp13945, i64 1
+ %tmp13947 = getelementptr inbounds float, float* %tmp13946, i64 1
+ %tmp13948 = getelementptr inbounds float, float* %tmp13947, i64 1
+ %tmp13949 = getelementptr inbounds float, float* %tmp13948, i64 1
+ %tmp13950 = getelementptr inbounds float, float* %tmp13949, i64 1
+ %tmp13951 = getelementptr inbounds float, float* %tmp13950, i64 1
+ %tmp13952 = getelementptr inbounds float, float* %tmp13951, i64 1
+ %tmp13953 = getelementptr inbounds float, float* %tmp13952, i64 1
+ %tmp13954 = getelementptr inbounds float, float* %tmp13953, i64 1
+ %tmp13955 = getelementptr inbounds float, float* %tmp13954, i64 1
+ %tmp13956 = getelementptr inbounds float, float* %tmp13955, i64 1
+ %tmp13957 = getelementptr inbounds float, float* %tmp13956, i64 1
+ %tmp13958 = getelementptr inbounds float, float* %tmp13957, i64 1
+ %tmp13959 = getelementptr inbounds float, float* %tmp13958, i64 1
+ %tmp13960 = getelementptr inbounds float, float* %tmp13959, i64 1
+ %tmp13961 = getelementptr inbounds float, float* %tmp13960, i64 1
+ %tmp13962 = getelementptr inbounds float, float* %tmp13961, i64 1
+ %tmp13963 = getelementptr inbounds float, float* %tmp13962, i64 1
+ %tmp13964 = getelementptr inbounds float, float* %tmp13963, i64 1
+ %tmp13965 = getelementptr inbounds float, float* %tmp13964, i64 1
+ %tmp13966 = getelementptr inbounds float, float* %tmp13965, i64 1
+ %tmp13967 = getelementptr inbounds float, float* %tmp13966, i64 1
+ %tmp13968 = getelementptr inbounds float, float* %tmp13967, i64 1
+ %tmp13969 = getelementptr inbounds float, float* %tmp13968, i64 1
+ %tmp13970 = getelementptr inbounds float, float* %tmp13969, i64 1
+ %tmp13971 = getelementptr inbounds float, float* %tmp13970, i64 1
+ %tmp13972 = getelementptr inbounds float, float* %tmp13971, i64 1
+ %tmp13973 = getelementptr inbounds float, float* %tmp13972, i64 1
+ %tmp13974 = getelementptr inbounds float, float* %tmp13973, i64 1
+ %tmp13975 = getelementptr inbounds float, float* %tmp13974, i64 1
+ %tmp13976 = getelementptr inbounds float, float* %tmp13975, i64 1
+ %tmp13977 = getelementptr inbounds float, float* %tmp13976, i64 1
+ %tmp13978 = getelementptr inbounds float, float* %tmp13977, i64 1
+ %tmp13979 = getelementptr inbounds float, float* %tmp13978, i64 1
+ %tmp13980 = getelementptr inbounds float, float* %tmp13979, i64 1
+ %tmp13981 = getelementptr inbounds float, float* %tmp13980, i64 1
+ %tmp13982 = getelementptr inbounds float, float* %tmp13981, i64 1
+ %tmp13983 = getelementptr inbounds float, float* %tmp13982, i64 1
+ %tmp13984 = getelementptr inbounds float, float* %tmp13983, i64 1
+ %tmp13985 = getelementptr inbounds float, float* %tmp13984, i64 1
+ %tmp13986 = getelementptr inbounds float, float* %tmp13985, i64 1
+ %tmp13987 = getelementptr inbounds float, float* %tmp13986, i64 1
+ %tmp13988 = getelementptr inbounds float, float* %tmp13987, i64 1
+ %tmp13989 = getelementptr inbounds float, float* %tmp13988, i64 1
+ %tmp13990 = getelementptr inbounds float, float* %tmp13989, i64 1
+ %tmp13991 = getelementptr inbounds float, float* %tmp13990, i64 1
+ %tmp13992 = getelementptr inbounds float, float* %tmp13991, i64 1
+ %tmp13993 = getelementptr inbounds float, float* %tmp13992, i64 1
+ %tmp13994 = getelementptr inbounds float, float* %tmp13993, i64 1
+ %tmp13995 = getelementptr inbounds float, float* %tmp13994, i64 1
+ %tmp13996 = getelementptr inbounds float, float* %tmp13995, i64 1
+ %tmp13997 = getelementptr inbounds float, float* %tmp13996, i64 1
+ %tmp13998 = getelementptr inbounds float, float* %tmp13997, i64 1
+ %tmp13999 = getelementptr inbounds float, float* %tmp13998, i64 1
+ %tmp14000 = getelementptr inbounds float, float* %tmp13999, i64 1
+ %tmp14001 = getelementptr inbounds float, float* %tmp14000, i64 1
+ %tmp14002 = getelementptr inbounds float, float* %tmp14001, i64 1
+ %tmp14003 = getelementptr inbounds float, float* %tmp14002, i64 1
+ %tmp14004 = getelementptr inbounds float, float* %tmp14003, i64 1
+ %tmp14005 = getelementptr inbounds float, float* %tmp14004, i64 1
+ %tmp14006 = getelementptr inbounds float, float* %tmp14005, i64 1
+ %tmp14007 = getelementptr inbounds float, float* %tmp14006, i64 1
+ %tmp14008 = getelementptr inbounds float, float* %tmp14007, i64 1
+ %tmp14009 = getelementptr inbounds float, float* %tmp14008, i64 1
+ %tmp14010 = getelementptr inbounds float, float* %tmp14009, i64 1
+ %tmp14011 = getelementptr inbounds float, float* %tmp14010, i64 1
+ %tmp14012 = getelementptr inbounds float, float* %tmp14011, i64 1
+ %tmp14013 = getelementptr inbounds float, float* %tmp14012, i64 1
+ %tmp14014 = getelementptr inbounds float, float* %tmp14013, i64 1
+ %tmp14015 = getelementptr inbounds float, float* %tmp14014, i64 1
+ %tmp14016 = getelementptr inbounds float, float* %tmp14015, i64 1
+ %tmp14017 = getelementptr inbounds float, float* %tmp14016, i64 1
+ %tmp14018 = getelementptr inbounds float, float* %tmp14017, i64 1
+ %tmp14019 = getelementptr inbounds float, float* %tmp14018, i64 1
+ %tmp14020 = getelementptr inbounds float, float* %tmp14019, i64 1
+ %tmp14021 = getelementptr inbounds float, float* %tmp14020, i64 1
+ %tmp14022 = getelementptr inbounds float, float* %tmp14021, i64 1
+ %tmp14023 = getelementptr inbounds float, float* %tmp14022, i64 1
+ %tmp14024 = getelementptr inbounds float, float* %tmp14023, i64 1
+ %tmp14025 = getelementptr inbounds float, float* %tmp14024, i64 1
+ %tmp14026 = getelementptr inbounds float, float* %tmp14025, i64 1
+ %tmp14027 = getelementptr inbounds float, float* %tmp14026, i64 1
+ %tmp14028 = getelementptr inbounds float, float* %tmp14027, i64 1
+ %tmp14029 = getelementptr inbounds float, float* %tmp14028, i64 1
+ %tmp14030 = getelementptr inbounds float, float* %tmp14029, i64 1
+ %tmp14031 = getelementptr inbounds float, float* %tmp14030, i64 1
+ %tmp14032 = getelementptr inbounds float, float* %tmp14031, i64 1
+ %tmp14033 = getelementptr inbounds float, float* %tmp14032, i64 1
+ %tmp14034 = getelementptr inbounds float, float* %tmp14033, i64 1
+ %tmp14035 = getelementptr inbounds float, float* %tmp14034, i64 1
+ %tmp14036 = getelementptr inbounds float, float* %tmp14035, i64 1
+ %tmp14037 = getelementptr inbounds float, float* %tmp14036, i64 1
+ %tmp14038 = getelementptr inbounds float, float* %tmp14037, i64 1
+ %tmp14039 = getelementptr inbounds float, float* %tmp14038, i64 1
+ %tmp14040 = getelementptr inbounds float, float* %tmp14039, i64 1
+ %tmp14041 = getelementptr inbounds float, float* %tmp14040, i64 1
+ %tmp14042 = getelementptr inbounds float, float* %tmp14041, i64 1
+ %tmp14043 = getelementptr inbounds float, float* %tmp14042, i64 1
+ %tmp14044 = getelementptr inbounds float, float* %tmp14043, i64 1
+ %tmp14045 = getelementptr inbounds float, float* %tmp14044, i64 1
+ %tmp14046 = getelementptr inbounds float, float* %tmp14045, i64 1
+ %tmp14047 = getelementptr inbounds float, float* %tmp14046, i64 1
+ %tmp14048 = getelementptr inbounds float, float* %tmp14047, i64 1
+ %tmp14049 = getelementptr inbounds float, float* %tmp14048, i64 1
+ %tmp14050 = getelementptr inbounds float, float* %tmp14049, i64 1
+ %tmp14051 = getelementptr inbounds float, float* %tmp14050, i64 1
+ %tmp14052 = getelementptr inbounds float, float* %tmp14051, i64 1
+ %tmp14053 = getelementptr inbounds float, float* %tmp14052, i64 1
+ %tmp14054 = getelementptr inbounds float, float* %tmp14053, i64 1
+ %tmp14055 = getelementptr inbounds float, float* %tmp14054, i64 1
+ %tmp14056 = getelementptr inbounds float, float* %tmp14055, i64 1
+ %tmp14057 = getelementptr inbounds float, float* %tmp14056, i64 1
+ %tmp14058 = getelementptr inbounds float, float* %tmp14057, i64 1
+ %tmp14059 = getelementptr inbounds float, float* %tmp14058, i64 1
+ %tmp14060 = getelementptr inbounds float, float* %tmp14059, i64 1
+ %tmp14061 = getelementptr inbounds float, float* %tmp14060, i64 1
+ %tmp14062 = getelementptr inbounds float, float* %tmp14061, i64 1
+ %tmp14063 = getelementptr inbounds float, float* %tmp14062, i64 1
+ %tmp14064 = getelementptr inbounds float, float* %tmp14063, i64 1
+ %tmp14065 = getelementptr inbounds float, float* %tmp14064, i64 1
+ %tmp14066 = getelementptr inbounds float, float* %tmp14065, i64 1
+ %tmp14067 = getelementptr inbounds float, float* %tmp14066, i64 1
+ %tmp14068 = getelementptr inbounds float, float* %tmp14067, i64 1
+ %tmp14069 = getelementptr inbounds float, float* %tmp14068, i64 1
+ %tmp14070 = getelementptr inbounds float, float* %tmp14069, i64 1
+ %tmp14071 = getelementptr inbounds float, float* %tmp14070, i64 1
+ %tmp14072 = getelementptr inbounds float, float* %tmp14071, i64 1
+ %tmp14073 = getelementptr inbounds float, float* %tmp14072, i64 1
+ %tmp14074 = getelementptr inbounds float, float* %tmp14073, i64 1
+ %tmp14075 = getelementptr inbounds float, float* %tmp14074, i64 1
+ %tmp14076 = getelementptr inbounds float, float* %tmp14075, i64 1
+ %tmp14077 = getelementptr inbounds float, float* %tmp14076, i64 1
+ %tmp14078 = getelementptr inbounds float, float* %tmp14077, i64 1
+ %tmp14079 = getelementptr inbounds float, float* %tmp14078, i64 1
+ %tmp14080 = getelementptr inbounds float, float* %tmp14079, i64 1
+ %tmp14081 = getelementptr inbounds float, float* %tmp14080, i64 1
+ %tmp14082 = getelementptr inbounds float, float* %tmp14081, i64 1
+ %tmp14083 = getelementptr inbounds float, float* %tmp14082, i64 1
+ %tmp14084 = getelementptr inbounds float, float* %tmp14083, i64 1
+ %tmp14085 = getelementptr inbounds float, float* %tmp14084, i64 1
+ %tmp14086 = getelementptr inbounds float, float* %tmp14085, i64 1
+ %tmp14087 = getelementptr inbounds float, float* %tmp14086, i64 1
+ %tmp14088 = getelementptr inbounds float, float* %tmp14087, i64 1
+ %tmp14089 = getelementptr inbounds float, float* %tmp14088, i64 1
+ %tmp14090 = getelementptr inbounds float, float* %tmp14089, i64 1
+ %tmp14091 = getelementptr inbounds float, float* %tmp14090, i64 1
+ %tmp14092 = getelementptr inbounds float, float* %tmp14091, i64 1
+ %tmp14093 = getelementptr inbounds float, float* %tmp14092, i64 1
+ %tmp14094 = getelementptr inbounds float, float* %tmp14093, i64 1
+ %tmp14095 = getelementptr inbounds float, float* %tmp14094, i64 1
+ %tmp14096 = getelementptr inbounds float, float* %tmp14095, i64 1
+ %tmp14097 = getelementptr inbounds float, float* %tmp14096, i64 1
+ %tmp14098 = getelementptr inbounds float, float* %tmp14097, i64 1
+ %tmp14099 = getelementptr inbounds float, float* %tmp14098, i64 1
+ %tmp14100 = getelementptr inbounds float, float* %tmp14099, i64 1
+ %tmp14101 = getelementptr inbounds float, float* %tmp14100, i64 1
+ %tmp14102 = getelementptr inbounds float, float* %tmp14101, i64 1
+ %tmp14103 = getelementptr inbounds float, float* %tmp14102, i64 1
+ %tmp14104 = getelementptr inbounds float, float* %tmp14103, i64 1
+ %tmp14105 = getelementptr inbounds float, float* %tmp14104, i64 1
+ %tmp14106 = getelementptr inbounds float, float* %tmp14105, i64 1
+ %tmp14107 = getelementptr inbounds float, float* %tmp14106, i64 1
+ %tmp14108 = getelementptr inbounds float, float* %tmp14107, i64 1
+ %tmp14109 = getelementptr inbounds float, float* %tmp14108, i64 1
+ %tmp14110 = getelementptr inbounds float, float* %tmp14109, i64 1
+ %tmp14111 = getelementptr inbounds float, float* %tmp14110, i64 1
+ %tmp14112 = getelementptr inbounds float, float* %tmp14111, i64 1
+ %tmp14113 = getelementptr inbounds float, float* %tmp14112, i64 1
+ %tmp14114 = getelementptr inbounds float, float* %tmp14113, i64 1
+ %tmp14115 = getelementptr inbounds float, float* %tmp14114, i64 1
+ %tmp14116 = getelementptr inbounds float, float* %tmp14115, i64 1
+ %tmp14117 = getelementptr inbounds float, float* %tmp14116, i64 1
+ %tmp14118 = getelementptr inbounds float, float* %tmp14117, i64 1
+ %tmp14119 = getelementptr inbounds float, float* %tmp14118, i64 1
+ %tmp14120 = getelementptr inbounds float, float* %tmp14119, i64 1
+ %tmp14121 = getelementptr inbounds float, float* %tmp14120, i64 1
+ %tmp14122 = getelementptr inbounds float, float* %tmp14121, i64 1
+ %tmp14123 = getelementptr inbounds float, float* %tmp14122, i64 1
+ %tmp14124 = getelementptr inbounds float, float* %tmp14123, i64 1
+ %tmp14125 = getelementptr inbounds float, float* %tmp14124, i64 1
+ %tmp14126 = getelementptr inbounds float, float* %tmp14125, i64 1
+ %tmp14127 = getelementptr inbounds float, float* %tmp14126, i64 1
+ %tmp14128 = getelementptr inbounds float, float* %tmp14127, i64 1
+ %tmp14129 = getelementptr inbounds float, float* %tmp14128, i64 1
+ %tmp14130 = getelementptr inbounds float, float* %tmp14129, i64 1
+ %tmp14131 = getelementptr inbounds float, float* %tmp14130, i64 1
+ %tmp14132 = getelementptr inbounds float, float* %tmp14131, i64 1
+ %tmp14133 = getelementptr inbounds float, float* %tmp14132, i64 1
+ %tmp14134 = getelementptr inbounds float, float* %tmp14133, i64 1
+ %tmp14135 = getelementptr inbounds float, float* %tmp14134, i64 1
+ %tmp14136 = getelementptr inbounds float, float* %tmp14135, i64 1
+ %tmp14137 = getelementptr inbounds float, float* %tmp14136, i64 1
+ %tmp14138 = getelementptr inbounds float, float* %tmp14137, i64 1
+ %tmp14139 = getelementptr inbounds float, float* %tmp14138, i64 1
+ %tmp14140 = getelementptr inbounds float, float* %tmp14139, i64 1
+ %tmp14141 = getelementptr inbounds float, float* %tmp14140, i64 1
+ %tmp14142 = getelementptr inbounds float, float* %tmp14141, i64 1
+ %tmp14143 = getelementptr inbounds float, float* %tmp14142, i64 1
+ %tmp14144 = getelementptr inbounds float, float* %tmp14143, i64 1
+ %tmp14145 = getelementptr inbounds float, float* %tmp14144, i64 1
+ %tmp14146 = getelementptr inbounds float, float* %tmp14145, i64 1
+ %tmp14147 = getelementptr inbounds float, float* %tmp14146, i64 1
+ %tmp14148 = getelementptr inbounds float, float* %tmp14147, i64 1
+ %tmp14149 = getelementptr inbounds float, float* %tmp14148, i64 1
+ %tmp14150 = getelementptr inbounds float, float* %tmp14149, i64 1
+ %tmp14151 = getelementptr inbounds float, float* %tmp14150, i64 1
+ %tmp14152 = getelementptr inbounds float, float* %tmp14151, i64 1
+ %tmp14153 = getelementptr inbounds float, float* %tmp14152, i64 1
+ %tmp14154 = getelementptr inbounds float, float* %tmp14153, i64 1
+ %tmp14155 = getelementptr inbounds float, float* %tmp14154, i64 1
+ %tmp14156 = getelementptr inbounds float, float* %tmp14155, i64 1
+ %tmp14157 = getelementptr inbounds float, float* %tmp14156, i64 1
+ %tmp14158 = getelementptr inbounds float, float* %tmp14157, i64 1
+ %tmp14159 = getelementptr inbounds float, float* %tmp14158, i64 1
+ %tmp14160 = getelementptr inbounds float, float* %tmp14159, i64 1
+ %tmp14161 = getelementptr inbounds float, float* %tmp14160, i64 1
+ %tmp14162 = getelementptr inbounds float, float* %tmp14161, i64 1
+ %tmp14163 = getelementptr inbounds float, float* %tmp14162, i64 1
+ %tmp14164 = getelementptr inbounds float, float* %tmp14163, i64 1
+ %tmp14165 = getelementptr inbounds float, float* %tmp14164, i64 1
+ %tmp14166 = getelementptr inbounds float, float* %tmp14165, i64 1
+ %tmp14167 = getelementptr inbounds float, float* %tmp14166, i64 1
+ %tmp14168 = getelementptr inbounds float, float* %tmp14167, i64 1
+ %tmp14169 = getelementptr inbounds float, float* %tmp14168, i64 1
+ %tmp14170 = getelementptr inbounds float, float* %tmp14169, i64 1
+ %tmp14171 = getelementptr inbounds float, float* %tmp14170, i64 1
+ %tmp14172 = getelementptr inbounds float, float* %tmp14171, i64 1
+ %tmp14173 = getelementptr inbounds float, float* %tmp14172, i64 1
+ %tmp14174 = getelementptr inbounds float, float* %tmp14173, i64 1
+ %tmp14175 = getelementptr inbounds float, float* %tmp14174, i64 1
+ %tmp14176 = getelementptr inbounds float, float* %tmp14175, i64 1
+ %tmp14177 = getelementptr inbounds float, float* %tmp14176, i64 1
+ %tmp14178 = getelementptr inbounds float, float* %tmp14177, i64 1
+ %tmp14179 = getelementptr inbounds float, float* %tmp14178, i64 1
+ %tmp14180 = getelementptr inbounds float, float* %tmp14179, i64 1
+ %tmp14181 = getelementptr inbounds float, float* %tmp14180, i64 1
+ %tmp14182 = getelementptr inbounds float, float* %tmp14181, i64 1
+ %tmp14183 = getelementptr inbounds float, float* %tmp14182, i64 1
+ %tmp14184 = getelementptr inbounds float, float* %tmp14183, i64 1
+ %tmp14185 = getelementptr inbounds float, float* %tmp14184, i64 1
+ %tmp14186 = getelementptr inbounds float, float* %tmp14185, i64 1
+ %tmp14187 = getelementptr inbounds float, float* %tmp14186, i64 1
+ %tmp14188 = getelementptr inbounds float, float* %tmp14187, i64 1
+ %tmp14189 = getelementptr inbounds float, float* %tmp14188, i64 1
+ %tmp14190 = getelementptr inbounds float, float* %tmp14189, i64 1
+ %tmp14191 = getelementptr inbounds float, float* %tmp14190, i64 1
+ %tmp14192 = getelementptr inbounds float, float* %tmp14191, i64 1
+ %tmp14193 = getelementptr inbounds float, float* %tmp14192, i64 1
+ %tmp14194 = getelementptr inbounds float, float* %tmp14193, i64 1
+ %tmp14195 = getelementptr inbounds float, float* %tmp14194, i64 1
+ %tmp14196 = getelementptr inbounds float, float* %tmp14195, i64 1
+ %tmp14197 = getelementptr inbounds float, float* %tmp14196, i64 1
+ %tmp14198 = getelementptr inbounds float, float* %tmp14197, i64 1
+ %tmp14199 = getelementptr inbounds float, float* %tmp14198, i64 1
+ %tmp14200 = getelementptr inbounds float, float* %tmp14199, i64 1
+ %tmp14201 = getelementptr inbounds float, float* %tmp14200, i64 1
+ %tmp14202 = getelementptr inbounds float, float* %tmp14201, i64 1
+ %tmp14203 = getelementptr inbounds float, float* %tmp14202, i64 1
+ %tmp14204 = getelementptr inbounds float, float* %tmp14203, i64 1
+ %tmp14205 = getelementptr inbounds float, float* %tmp14204, i64 1
+ %tmp14206 = getelementptr inbounds float, float* %tmp14205, i64 1
+ %tmp14207 = getelementptr inbounds float, float* %tmp14206, i64 1
+ %tmp14208 = getelementptr inbounds float, float* %tmp14207, i64 1
+ %tmp14209 = getelementptr inbounds float, float* %tmp14208, i64 1
+ %tmp14210 = getelementptr inbounds float, float* %tmp14209, i64 1
+ %tmp14211 = getelementptr inbounds float, float* %tmp14210, i64 1
+ %tmp14212 = getelementptr inbounds float, float* %tmp14211, i64 1
+ %tmp14213 = getelementptr inbounds float, float* %tmp14212, i64 1
+ %tmp14214 = getelementptr inbounds float, float* %tmp14213, i64 1
+ %tmp14215 = getelementptr inbounds float, float* %tmp14214, i64 1
+ %tmp14216 = getelementptr inbounds float, float* %tmp14215, i64 1
+ %tmp14217 = getelementptr inbounds float, float* %tmp14216, i64 1
+ %tmp14218 = getelementptr inbounds float, float* %tmp14217, i64 1
+ %tmp14219 = getelementptr inbounds float, float* %tmp14218, i64 1
+ %tmp14220 = getelementptr inbounds float, float* %tmp14219, i64 1
+ %tmp14221 = getelementptr inbounds float, float* %tmp14220, i64 1
+ %tmp14222 = getelementptr inbounds float, float* %tmp14221, i64 1
+ %tmp14223 = getelementptr inbounds float, float* %tmp14222, i64 1
+ %tmp14224 = getelementptr inbounds float, float* %tmp14223, i64 1
+ %tmp14225 = getelementptr inbounds float, float* %tmp14224, i64 1
+ %tmp14226 = getelementptr inbounds float, float* %tmp14225, i64 1
+ %tmp14227 = getelementptr inbounds float, float* %tmp14226, i64 1
+ %tmp14228 = getelementptr inbounds float, float* %tmp14227, i64 1
+ %tmp14229 = getelementptr inbounds float, float* %tmp14228, i64 1
+ %tmp14230 = getelementptr inbounds float, float* %tmp14229, i64 1
+ %tmp14231 = getelementptr inbounds float, float* %tmp14230, i64 1
+ %tmp14232 = getelementptr inbounds float, float* %tmp14231, i64 1
+ %tmp14233 = getelementptr inbounds float, float* %tmp14232, i64 1
+ %tmp14234 = getelementptr inbounds float, float* %tmp14233, i64 1
+ %tmp14235 = getelementptr inbounds float, float* %tmp14234, i64 1
+ %tmp14236 = getelementptr inbounds float, float* %tmp14235, i64 1
+ %tmp14237 = getelementptr inbounds float, float* %tmp14236, i64 1
+ %tmp14238 = getelementptr inbounds float, float* %tmp14237, i64 1
+ %tmp14239 = getelementptr inbounds float, float* %tmp14238, i64 1
+ %tmp14240 = getelementptr inbounds float, float* %tmp14239, i64 1
+ %tmp14241 = getelementptr inbounds float, float* %tmp14240, i64 1
+ %tmp14242 = getelementptr inbounds float, float* %tmp14241, i64 1
+ %tmp14243 = getelementptr inbounds float, float* %tmp14242, i64 1
+ %tmp14244 = getelementptr inbounds float, float* %tmp14243, i64 1
+ %tmp14245 = getelementptr inbounds float, float* %tmp14244, i64 1
+ %tmp14246 = getelementptr inbounds float, float* %tmp14245, i64 1
+ %tmp14247 = getelementptr inbounds float, float* %tmp14246, i64 1
+ %tmp14248 = getelementptr inbounds float, float* %tmp14247, i64 1
+ %tmp14249 = getelementptr inbounds float, float* %tmp14248, i64 1
+ %tmp14250 = getelementptr inbounds float, float* %tmp14249, i64 1
+ %tmp14251 = getelementptr inbounds float, float* %tmp14250, i64 1
+ %tmp14252 = getelementptr inbounds float, float* %tmp14251, i64 1
+ %tmp14253 = getelementptr inbounds float, float* %tmp14252, i64 1
+ %tmp14254 = getelementptr inbounds float, float* %tmp14253, i64 1
+ %tmp14255 = getelementptr inbounds float, float* %tmp14254, i64 1
+ %tmp14256 = getelementptr inbounds float, float* %tmp14255, i64 1
+ %tmp14257 = getelementptr inbounds float, float* %tmp14256, i64 1
+ %tmp14258 = getelementptr inbounds float, float* %tmp14257, i64 1
+ %tmp14259 = getelementptr inbounds float, float* %tmp14258, i64 1
+ %tmp14260 = getelementptr inbounds float, float* %tmp14259, i64 1
+ %tmp14261 = getelementptr inbounds float, float* %tmp14260, i64 1
+ %tmp14262 = getelementptr inbounds float, float* %tmp14261, i64 1
+ %tmp14263 = getelementptr inbounds float, float* %tmp14262, i64 1
+ %tmp14264 = getelementptr inbounds float, float* %tmp14263, i64 1
+ %tmp14265 = getelementptr inbounds float, float* %tmp14264, i64 1
+ %tmp14266 = getelementptr inbounds float, float* %tmp14265, i64 1
+ %tmp14267 = getelementptr inbounds float, float* %tmp14266, i64 1
+ %tmp14268 = getelementptr inbounds float, float* %tmp14267, i64 1
+ %tmp14269 = getelementptr inbounds float, float* %tmp14268, i64 1
+ %tmp14270 = getelementptr inbounds float, float* %tmp14269, i64 1
+ %tmp14271 = getelementptr inbounds float, float* %tmp14270, i64 1
+ %tmp14272 = getelementptr inbounds float, float* %tmp14271, i64 1
+ %tmp14273 = getelementptr inbounds float, float* %tmp14272, i64 1
+ %tmp14274 = getelementptr inbounds float, float* %tmp14273, i64 1
+ %tmp14275 = getelementptr inbounds float, float* %tmp14274, i64 1
+ %tmp14276 = getelementptr inbounds float, float* %tmp14275, i64 1
+ %tmp14277 = getelementptr inbounds float, float* %tmp14276, i64 1
+ %tmp14278 = getelementptr inbounds float, float* %tmp14277, i64 1
+ %tmp14279 = getelementptr inbounds float, float* %tmp14278, i64 1
+ %tmp14280 = getelementptr inbounds float, float* %tmp14279, i64 1
+ %tmp14281 = getelementptr inbounds float, float* %tmp14280, i64 1
+ %tmp14282 = getelementptr inbounds float, float* %tmp14281, i64 1
+ %tmp14283 = getelementptr inbounds float, float* %tmp14282, i64 1
+ %tmp14284 = getelementptr inbounds float, float* %tmp14283, i64 1
+ %tmp14285 = getelementptr inbounds float, float* %tmp14284, i64 1
+ %tmp14286 = getelementptr inbounds float, float* %tmp14285, i64 1
+ %tmp14287 = getelementptr inbounds float, float* %tmp14286, i64 1
+ %tmp14288 = getelementptr inbounds float, float* %tmp14287, i64 1
+ %tmp14289 = getelementptr inbounds float, float* %tmp14288, i64 1
+ %tmp14290 = getelementptr inbounds float, float* %tmp14289, i64 1
+ %tmp14291 = getelementptr inbounds float, float* %tmp14290, i64 1
+ %tmp14292 = getelementptr inbounds float, float* %tmp14291, i64 1
+ %tmp14293 = getelementptr inbounds float, float* %tmp14292, i64 1
+ %tmp14294 = getelementptr inbounds float, float* %tmp14293, i64 1
+ %tmp14295 = getelementptr inbounds float, float* %tmp14294, i64 1
+ %tmp14296 = getelementptr inbounds float, float* %tmp14295, i64 1
+ %tmp14297 = getelementptr inbounds float, float* %tmp14296, i64 1
+ %tmp14298 = getelementptr inbounds float, float* %tmp14297, i64 1
+ %tmp14299 = getelementptr inbounds float, float* %tmp14298, i64 1
+ %tmp14300 = getelementptr inbounds float, float* %tmp14299, i64 1
+ %tmp14301 = getelementptr inbounds float, float* %tmp14300, i64 1
+ %tmp14302 = getelementptr inbounds float, float* %tmp14301, i64 1
+ %tmp14303 = getelementptr inbounds float, float* %tmp14302, i64 1
+ %tmp14304 = getelementptr inbounds float, float* %tmp14303, i64 1
+ %tmp14305 = getelementptr inbounds float, float* %tmp14304, i64 1
+ %tmp14306 = getelementptr inbounds float, float* %tmp14305, i64 1
+ %tmp14307 = getelementptr inbounds float, float* %tmp14306, i64 1
+ %tmp14308 = getelementptr inbounds float, float* %tmp14307, i64 1
+ %tmp14309 = getelementptr inbounds float, float* %tmp14308, i64 1
+ %tmp14310 = getelementptr inbounds float, float* %tmp14309, i64 1
+ %tmp14311 = getelementptr inbounds float, float* %tmp14310, i64 1
+ %tmp14312 = getelementptr inbounds float, float* %tmp14311, i64 1
+ %tmp14313 = getelementptr inbounds float, float* %tmp14312, i64 1
+ %tmp14314 = getelementptr inbounds float, float* %tmp14313, i64 1
+ %tmp14315 = getelementptr inbounds float, float* %tmp14314, i64 1
+ %tmp14316 = getelementptr inbounds float, float* %tmp14315, i64 1
+ %tmp14317 = getelementptr inbounds float, float* %tmp14316, i64 1
+ %tmp14318 = getelementptr inbounds float, float* %tmp14317, i64 1
+ %tmp14319 = getelementptr inbounds float, float* %tmp14318, i64 1
+ %tmp14320 = getelementptr inbounds float, float* %tmp14319, i64 1
+ %tmp14321 = getelementptr inbounds float, float* %tmp14320, i64 1
+ %tmp14322 = getelementptr inbounds float, float* %tmp14321, i64 1
+ %tmp14323 = getelementptr inbounds float, float* %tmp14322, i64 1
+ %tmp14324 = getelementptr inbounds float, float* %tmp14323, i64 1
+ %tmp14325 = getelementptr inbounds float, float* %tmp14324, i64 1
+ %tmp14326 = getelementptr inbounds float, float* %tmp14325, i64 1
+ %tmp14327 = getelementptr inbounds float, float* %tmp14326, i64 1
+ %tmp14328 = getelementptr inbounds float, float* %tmp14327, i64 1
+ %tmp14329 = getelementptr inbounds float, float* %tmp14328, i64 1
+ %tmp14330 = getelementptr inbounds float, float* %tmp14329, i64 1
+ %tmp14331 = getelementptr inbounds float, float* %tmp14330, i64 1
+ %tmp14332 = getelementptr inbounds float, float* %tmp14331, i64 1
+ %tmp14333 = getelementptr inbounds float, float* %tmp14332, i64 1
+ %tmp14334 = getelementptr inbounds float, float* %tmp14333, i64 1
+ %tmp14335 = getelementptr inbounds float, float* %tmp14334, i64 1
+ %tmp14336 = getelementptr inbounds float, float* %tmp14335, i64 1
+ %tmp14337 = getelementptr inbounds float, float* %tmp14336, i64 1
+ %tmp14338 = getelementptr inbounds float, float* %tmp14337, i64 1
+ %tmp14339 = getelementptr inbounds float, float* %tmp14338, i64 1
+ %tmp14340 = getelementptr inbounds float, float* %tmp14339, i64 1
+ %tmp14341 = getelementptr inbounds float, float* %tmp14340, i64 1
+ %tmp14342 = getelementptr inbounds float, float* %tmp14341, i64 1
+ %tmp14343 = getelementptr inbounds float, float* %tmp14342, i64 1
+ %tmp14344 = getelementptr inbounds float, float* %tmp14343, i64 1
+ %tmp14345 = getelementptr inbounds float, float* %tmp14344, i64 1
+ %tmp14346 = getelementptr inbounds float, float* %tmp14345, i64 1
+ %tmp14347 = getelementptr inbounds float, float* %tmp14346, i64 1
+ %tmp14348 = getelementptr inbounds float, float* %tmp14347, i64 1
+ %tmp14349 = getelementptr inbounds float, float* %tmp14348, i64 1
+ %tmp14350 = getelementptr inbounds float, float* %tmp14349, i64 1
+ %tmp14351 = getelementptr inbounds float, float* %tmp14350, i64 1
+ %tmp14352 = getelementptr inbounds float, float* %tmp14351, i64 1
+ %tmp14353 = getelementptr inbounds float, float* %tmp14352, i64 1
+ %tmp14354 = getelementptr inbounds float, float* %tmp14353, i64 1
+ %tmp14355 = getelementptr inbounds float, float* %tmp14354, i64 1
+ %tmp14356 = getelementptr inbounds float, float* %tmp14355, i64 1
+ %tmp14357 = getelementptr inbounds float, float* %tmp14356, i64 1
+ %tmp14358 = getelementptr inbounds float, float* %tmp14357, i64 1
+ %tmp14359 = getelementptr inbounds float, float* %tmp14358, i64 1
+ %tmp14360 = getelementptr inbounds float, float* %tmp14359, i64 1
+ %tmp14361 = getelementptr inbounds float, float* %tmp14360, i64 1
+ %tmp14362 = getelementptr inbounds float, float* %tmp14361, i64 1
+ %tmp14363 = getelementptr inbounds float, float* %tmp14362, i64 1
+ %tmp14364 = getelementptr inbounds float, float* %tmp14363, i64 1
+ %tmp14365 = getelementptr inbounds float, float* %tmp14364, i64 1
+ %tmp14366 = getelementptr inbounds float, float* %tmp14365, i64 1
+ %tmp14367 = getelementptr inbounds float, float* %tmp14366, i64 1
+ %tmp14368 = getelementptr inbounds float, float* %tmp14367, i64 1
+ %tmp14369 = getelementptr inbounds float, float* %tmp14368, i64 1
+ %tmp14370 = getelementptr inbounds float, float* %tmp14369, i64 1
+ %tmp14371 = getelementptr inbounds float, float* %tmp14370, i64 1
+ %tmp14372 = getelementptr inbounds float, float* %tmp14371, i64 1
+ %tmp14373 = getelementptr inbounds float, float* %tmp14372, i64 1
+ %tmp14374 = getelementptr inbounds float, float* %tmp14373, i64 1
+ %tmp14375 = getelementptr inbounds float, float* %tmp14374, i64 1
+ %tmp14376 = getelementptr inbounds float, float* %tmp14375, i64 1
+ %tmp14377 = getelementptr inbounds float, float* %tmp14376, i64 1
+ %tmp14378 = getelementptr inbounds float, float* %tmp14377, i64 1
+ %tmp14379 = getelementptr inbounds float, float* %tmp14378, i64 1
+ %tmp14380 = getelementptr inbounds float, float* %tmp14379, i64 1
+ %tmp14381 = getelementptr inbounds float, float* %tmp14380, i64 1
+ %tmp14382 = getelementptr inbounds float, float* %tmp14381, i64 1
+ %tmp14383 = getelementptr inbounds float, float* %tmp14382, i64 1
+ %tmp14384 = getelementptr inbounds float, float* %tmp14383, i64 1
+ %tmp14385 = getelementptr inbounds float, float* %tmp14384, i64 1
+ %tmp14386 = getelementptr inbounds float, float* %tmp14385, i64 1
+ %tmp14387 = getelementptr inbounds float, float* %tmp14386, i64 1
+ %tmp14388 = getelementptr inbounds float, float* %tmp14387, i64 1
+ %tmp14389 = getelementptr inbounds float, float* %tmp14388, i64 1
+ %tmp14390 = getelementptr inbounds float, float* %tmp14389, i64 1
+ %tmp14391 = getelementptr inbounds float, float* %tmp14390, i64 1
+ %tmp14392 = getelementptr inbounds float, float* %tmp14391, i64 1
+ %tmp14393 = getelementptr inbounds float, float* %tmp14392, i64 1
+ %tmp14394 = getelementptr inbounds float, float* %tmp14393, i64 1
+ %tmp14395 = getelementptr inbounds float, float* %tmp14394, i64 1
+ %tmp14396 = getelementptr inbounds float, float* %tmp14395, i64 1
+ %tmp14397 = getelementptr inbounds float, float* %tmp14396, i64 1
+ %tmp14398 = getelementptr inbounds float, float* %tmp14397, i64 1
+ %tmp14399 = getelementptr inbounds float, float* %tmp14398, i64 1
+ %tmp14400 = getelementptr inbounds float, float* %tmp14399, i64 1
+ %tmp14401 = getelementptr inbounds float, float* %tmp14400, i64 1
+ %tmp14402 = getelementptr inbounds float, float* %tmp14401, i64 1
+ %tmp14403 = getelementptr inbounds float, float* %tmp14402, i64 1
+ %tmp14404 = getelementptr inbounds float, float* %tmp14403, i64 1
+ %tmp14405 = getelementptr inbounds float, float* %tmp14404, i64 1
+ %tmp14406 = getelementptr inbounds float, float* %tmp14405, i64 1
+ %tmp14407 = getelementptr inbounds float, float* %tmp14406, i64 1
+ %tmp14408 = getelementptr inbounds float, float* %tmp14407, i64 1
+ %tmp14409 = getelementptr inbounds float, float* %tmp14408, i64 1
+ %tmp14410 = getelementptr inbounds float, float* %tmp14409, i64 1
+ %tmp14411 = getelementptr inbounds float, float* %tmp14410, i64 1
+ %tmp14412 = getelementptr inbounds float, float* %tmp14411, i64 1
+ %tmp14413 = getelementptr inbounds float, float* %tmp14412, i64 1
+ %tmp14414 = getelementptr inbounds float, float* %tmp14413, i64 1
+ %tmp14415 = getelementptr inbounds float, float* %tmp14414, i64 1
+ %tmp14416 = getelementptr inbounds float, float* %tmp14415, i64 1
+ %tmp14417 = getelementptr inbounds float, float* %tmp14416, i64 1
+ %tmp14418 = getelementptr inbounds float, float* %tmp14417, i64 1
+ %tmp14419 = getelementptr inbounds float, float* %tmp14418, i64 1
+ %tmp14420 = getelementptr inbounds float, float* %tmp14419, i64 1
+ %tmp14421 = getelementptr inbounds float, float* %tmp14420, i64 1
+ %tmp14422 = getelementptr inbounds float, float* %tmp14421, i64 1
+ %tmp14423 = getelementptr inbounds float, float* %tmp14422, i64 1
+ %tmp14424 = getelementptr inbounds float, float* %tmp14423, i64 1
+ %tmp14425 = getelementptr inbounds float, float* %tmp14424, i64 1
+ %tmp14426 = getelementptr inbounds float, float* %tmp14425, i64 1
+ %tmp14427 = getelementptr inbounds float, float* %tmp14426, i64 1
+ %tmp14428 = getelementptr inbounds float, float* %tmp14427, i64 1
+ %tmp14429 = getelementptr inbounds float, float* %tmp14428, i64 1
+ %tmp14430 = getelementptr inbounds float, float* %tmp14429, i64 1
+ %tmp14431 = getelementptr inbounds float, float* %tmp14430, i64 1
+ %tmp14432 = getelementptr inbounds float, float* %tmp14431, i64 1
+ %tmp14433 = getelementptr inbounds float, float* %tmp14432, i64 1
+ %tmp14434 = getelementptr inbounds float, float* %tmp14433, i64 1
+ %tmp14435 = getelementptr inbounds float, float* %tmp14434, i64 1
+ %tmp14436 = getelementptr inbounds float, float* %tmp14435, i64 1
+ %tmp14437 = getelementptr inbounds float, float* %tmp14436, i64 1
+ %tmp14438 = getelementptr inbounds float, float* %tmp14437, i64 1
+ %tmp14439 = getelementptr inbounds float, float* %tmp14438, i64 1
+ %tmp14440 = getelementptr inbounds float, float* %tmp14439, i64 1
+ %tmp14441 = getelementptr inbounds float, float* %tmp14440, i64 1
+ %tmp14442 = getelementptr inbounds float, float* %tmp14441, i64 1
+ %tmp14443 = getelementptr inbounds float, float* %tmp14442, i64 1
+ %tmp14444 = getelementptr inbounds float, float* %tmp14443, i64 1
+ %tmp14445 = getelementptr inbounds float, float* %tmp14444, i64 1
+ %tmp14446 = getelementptr inbounds float, float* %tmp14445, i64 1
+ %tmp14447 = getelementptr inbounds float, float* %tmp14446, i64 1
+ %tmp14448 = getelementptr inbounds float, float* %tmp14447, i64 1
+ %tmp14449 = getelementptr inbounds float, float* %tmp14448, i64 1
+ %tmp14450 = getelementptr inbounds float, float* %tmp14449, i64 1
+ %tmp14451 = getelementptr inbounds float, float* %tmp14450, i64 1
+ %tmp14452 = getelementptr inbounds float, float* %tmp14451, i64 1
+ %tmp14453 = getelementptr inbounds float, float* %tmp14452, i64 1
+ %tmp14454 = getelementptr inbounds float, float* %tmp14453, i64 1
+ %tmp14455 = getelementptr inbounds float, float* %tmp14454, i64 1
+ %tmp14456 = getelementptr inbounds float, float* %tmp14455, i64 1
+ %tmp14457 = getelementptr inbounds float, float* %tmp14456, i64 1
+ %tmp14458 = getelementptr inbounds float, float* %tmp14457, i64 1
+ %tmp14459 = getelementptr inbounds float, float* %tmp14458, i64 1
+ %tmp14460 = getelementptr inbounds float, float* %tmp14459, i64 1
+ %tmp14461 = getelementptr inbounds float, float* %tmp14460, i64 1
+ %tmp14462 = getelementptr inbounds float, float* %tmp14461, i64 1
+ %tmp14463 = getelementptr inbounds float, float* %tmp14462, i64 1
+ %tmp14464 = getelementptr inbounds float, float* %tmp14463, i64 1
+ %tmp14465 = getelementptr inbounds float, float* %tmp14464, i64 1
+ %tmp14466 = getelementptr inbounds float, float* %tmp14465, i64 1
+ %tmp14467 = getelementptr inbounds float, float* %tmp14466, i64 1
+ %tmp14468 = getelementptr inbounds float, float* %tmp14467, i64 1
+ %tmp14469 = getelementptr inbounds float, float* %tmp14468, i64 1
+ %tmp14470 = getelementptr inbounds float, float* %tmp14469, i64 1
+ %tmp14471 = getelementptr inbounds float, float* %tmp14470, i64 1
+ %tmp14472 = getelementptr inbounds float, float* %tmp14471, i64 1
+ %tmp14473 = getelementptr inbounds float, float* %tmp14472, i64 1
+ %tmp14474 = getelementptr inbounds float, float* %tmp14473, i64 1
+ %tmp14475 = getelementptr inbounds float, float* %tmp14474, i64 1
+ %tmp14476 = getelementptr inbounds float, float* %tmp14475, i64 1
+ %tmp14477 = getelementptr inbounds float, float* %tmp14476, i64 1
+ %tmp14478 = getelementptr inbounds float, float* %tmp14477, i64 1
+ %tmp14479 = getelementptr inbounds float, float* %tmp14478, i64 1
+ %tmp14480 = getelementptr inbounds float, float* %tmp14479, i64 1
+ %tmp14481 = getelementptr inbounds float, float* %tmp14480, i64 1
+ %tmp14482 = getelementptr inbounds float, float* %tmp14481, i64 1
+ %tmp14483 = getelementptr inbounds float, float* %tmp14482, i64 1
+ %tmp14484 = getelementptr inbounds float, float* %tmp14483, i64 1
+ %tmp14485 = getelementptr inbounds float, float* %tmp14484, i64 1
+ %tmp14486 = getelementptr inbounds float, float* %tmp14485, i64 1
+ %tmp14487 = getelementptr inbounds float, float* %tmp14486, i64 1
+ %tmp14488 = getelementptr inbounds float, float* %tmp14487, i64 1
+ %tmp14489 = getelementptr inbounds float, float* %tmp14488, i64 1
+ %tmp14490 = getelementptr inbounds float, float* %tmp14489, i64 1
+ %tmp14491 = getelementptr inbounds float, float* %tmp14490, i64 1
+ %tmp14492 = getelementptr inbounds float, float* %tmp14491, i64 1
+ %tmp14493 = getelementptr inbounds float, float* %tmp14492, i64 1
+ %tmp14494 = getelementptr inbounds float, float* %tmp14493, i64 1
+ %tmp14495 = getelementptr inbounds float, float* %tmp14494, i64 1
+ %tmp14496 = getelementptr inbounds float, float* %tmp14495, i64 1
+ %tmp14497 = getelementptr inbounds float, float* %tmp14496, i64 1
+ %tmp14498 = getelementptr inbounds float, float* %tmp14497, i64 1
+ %tmp14499 = getelementptr inbounds float, float* %tmp14498, i64 1
+ %tmp14500 = getelementptr inbounds float, float* %tmp14499, i64 1
+ %tmp14501 = getelementptr inbounds float, float* %tmp14500, i64 1
+ %tmp14502 = getelementptr inbounds float, float* %tmp14501, i64 1
+ %tmp14503 = getelementptr inbounds float, float* %tmp14502, i64 1
+ %tmp14504 = getelementptr inbounds float, float* %tmp14503, i64 1
+ %tmp14505 = getelementptr inbounds float, float* %tmp14504, i64 1
+ %tmp14506 = getelementptr inbounds float, float* %tmp14505, i64 1
+ %tmp14507 = getelementptr inbounds float, float* %tmp14506, i64 1
+ %tmp14508 = getelementptr inbounds float, float* %tmp14507, i64 1
+ %tmp14509 = getelementptr inbounds float, float* %tmp14508, i64 1
+ %tmp14510 = getelementptr inbounds float, float* %tmp14509, i64 1
+ %tmp14511 = getelementptr inbounds float, float* %tmp14510, i64 1
+ %tmp14512 = getelementptr inbounds float, float* %tmp14511, i64 1
+ %tmp14513 = getelementptr inbounds float, float* %tmp14512, i64 1
+ %tmp14514 = getelementptr inbounds float, float* %tmp14513, i64 1
+ %tmp14515 = getelementptr inbounds float, float* %tmp14514, i64 1
+ %tmp14516 = getelementptr inbounds float, float* %tmp14515, i64 1
+ %tmp14517 = getelementptr inbounds float, float* %tmp14516, i64 1
+ %tmp14518 = getelementptr inbounds float, float* %tmp14517, i64 1
+ %tmp14519 = getelementptr inbounds float, float* %tmp14518, i64 1
+ %tmp14520 = getelementptr inbounds float, float* %tmp14519, i64 1
+ %tmp14521 = getelementptr inbounds float, float* %tmp14520, i64 1
+ %tmp14522 = getelementptr inbounds float, float* %tmp14521, i64 1
+ %tmp14523 = getelementptr inbounds float, float* %tmp14522, i64 1
+ %tmp14524 = getelementptr inbounds float, float* %tmp14523, i64 1
+ %tmp14525 = getelementptr inbounds float, float* %tmp14524, i64 1
+ %tmp14526 = getelementptr inbounds float, float* %tmp14525, i64 1
+ %tmp14527 = getelementptr inbounds float, float* %tmp14526, i64 1
+ %tmp14528 = getelementptr inbounds float, float* %tmp14527, i64 1
+ %tmp14529 = getelementptr inbounds float, float* %tmp14528, i64 1
+ %tmp14530 = getelementptr inbounds float, float* %tmp14529, i64 1
+ %tmp14531 = getelementptr inbounds float, float* %tmp14530, i64 1
+ %tmp14532 = getelementptr inbounds float, float* %tmp14531, i64 1
+ %tmp14533 = getelementptr inbounds float, float* %tmp14532, i64 1
+ %tmp14534 = getelementptr inbounds float, float* %tmp14533, i64 1
+ %tmp14535 = getelementptr inbounds float, float* %tmp14534, i64 1
+ %tmp14536 = getelementptr inbounds float, float* %tmp14535, i64 1
+ %tmp14537 = getelementptr inbounds float, float* %tmp14536, i64 1
+ %tmp14538 = getelementptr inbounds float, float* %tmp14537, i64 1
+ %tmp14539 = getelementptr inbounds float, float* %tmp14538, i64 1
+ %tmp14540 = getelementptr inbounds float, float* %tmp14539, i64 1
+ %tmp14541 = getelementptr inbounds float, float* %tmp14540, i64 1
+ %tmp14542 = getelementptr inbounds float, float* %tmp14541, i64 1
+ %tmp14543 = getelementptr inbounds float, float* %tmp14542, i64 1
+ %tmp14544 = getelementptr inbounds float, float* %tmp14543, i64 1
+ %tmp14545 = getelementptr inbounds float, float* %tmp14544, i64 1
+ %tmp14546 = getelementptr inbounds float, float* %tmp14545, i64 1
+ %tmp14547 = getelementptr inbounds float, float* %tmp14546, i64 1
+ %tmp14548 = getelementptr inbounds float, float* %tmp14547, i64 1
+ %tmp14549 = getelementptr inbounds float, float* %tmp14548, i64 1
+ %tmp14550 = getelementptr inbounds float, float* %tmp14549, i64 1
+ %tmp14551 = getelementptr inbounds float, float* %tmp14550, i64 1
+ %tmp14552 = getelementptr inbounds float, float* %tmp14551, i64 1
+ %tmp14553 = getelementptr inbounds float, float* %tmp14552, i64 1
+ %tmp14554 = getelementptr inbounds float, float* %tmp14553, i64 1
+ %tmp14555 = getelementptr inbounds float, float* %tmp14554, i64 1
+ %tmp14556 = getelementptr inbounds float, float* %tmp14555, i64 1
+ %tmp14557 = getelementptr inbounds float, float* %tmp14556, i64 1
+ %tmp14558 = getelementptr inbounds float, float* %tmp14557, i64 1
+ %tmp14559 = getelementptr inbounds float, float* %tmp14558, i64 1
+ %tmp14560 = getelementptr inbounds float, float* %tmp14559, i64 1
+ %tmp14561 = getelementptr inbounds float, float* %tmp14560, i64 1
+ %tmp14562 = getelementptr inbounds float, float* %tmp14561, i64 1
+ %tmp14563 = getelementptr inbounds float, float* %tmp14562, i64 1
+ %tmp14564 = getelementptr inbounds float, float* %tmp14563, i64 1
+ %tmp14565 = getelementptr inbounds float, float* %tmp14564, i64 1
+ %tmp14566 = getelementptr inbounds float, float* %tmp14565, i64 1
+ %tmp14567 = getelementptr inbounds float, float* %tmp14566, i64 1
+ %tmp14568 = getelementptr inbounds float, float* %tmp14567, i64 1
+ %tmp14569 = getelementptr inbounds float, float* %tmp14568, i64 1
+ %tmp14570 = getelementptr inbounds float, float* %tmp14569, i64 1
+ %tmp14571 = getelementptr inbounds float, float* %tmp14570, i64 1
+ %tmp14572 = getelementptr inbounds float, float* %tmp14571, i64 1
+ %tmp14573 = getelementptr inbounds float, float* %tmp14572, i64 1
+ %tmp14574 = getelementptr inbounds float, float* %tmp14573, i64 1
+ %tmp14575 = getelementptr inbounds float, float* %tmp14574, i64 1
+ %tmp14576 = getelementptr inbounds float, float* %tmp14575, i64 1
+ %tmp14577 = getelementptr inbounds float, float* %tmp14576, i64 1
+ %tmp14578 = getelementptr inbounds float, float* %tmp14577, i64 1
+ %tmp14579 = getelementptr inbounds float, float* %tmp14578, i64 1
+ %tmp14580 = getelementptr inbounds float, float* %tmp14579, i64 1
+ %tmp14581 = getelementptr inbounds float, float* %tmp14580, i64 1
+ %tmp14582 = getelementptr inbounds float, float* %tmp14581, i64 1
+ %tmp14583 = getelementptr inbounds float, float* %tmp14582, i64 1
+ %tmp14584 = getelementptr inbounds float, float* %tmp14583, i64 1
+ %tmp14585 = getelementptr inbounds float, float* %tmp14584, i64 1
+ %tmp14586 = getelementptr inbounds float, float* %tmp14585, i64 1
+ %tmp14587 = getelementptr inbounds float, float* %tmp14586, i64 1
+ %tmp14588 = getelementptr inbounds float, float* %tmp14587, i64 1
+ %tmp14589 = getelementptr inbounds float, float* %tmp14588, i64 1
+ %tmp14590 = getelementptr inbounds float, float* %tmp14589, i64 1
+ %tmp14591 = getelementptr inbounds float, float* %tmp14590, i64 1
+ %tmp14592 = getelementptr inbounds float, float* %tmp14591, i64 1
+ %tmp14593 = getelementptr inbounds float, float* %tmp14592, i64 1
+ %tmp14594 = getelementptr inbounds float, float* %tmp14593, i64 1
+ %tmp14595 = getelementptr inbounds float, float* %tmp14594, i64 1
+ %tmp14596 = getelementptr inbounds float, float* %tmp14595, i64 1
+ %tmp14597 = getelementptr inbounds float, float* %tmp14596, i64 1
+ %tmp14598 = getelementptr inbounds float, float* %tmp14597, i64 1
+ %tmp14599 = getelementptr inbounds float, float* %tmp14598, i64 1
+ %tmp14600 = getelementptr inbounds float, float* %tmp14599, i64 1
+ %tmp14601 = getelementptr inbounds float, float* %tmp14600, i64 1
+ %tmp14602 = getelementptr inbounds float, float* %tmp14601, i64 1
+ %tmp14603 = getelementptr inbounds float, float* %tmp14602, i64 1
+ %tmp14604 = getelementptr inbounds float, float* %tmp14603, i64 1
+ %tmp14605 = getelementptr inbounds float, float* %tmp14604, i64 1
+ %tmp14606 = getelementptr inbounds float, float* %tmp14605, i64 1
+ %tmp14607 = getelementptr inbounds float, float* %tmp14606, i64 1
+ %tmp14608 = getelementptr inbounds float, float* %tmp14607, i64 1
+ %tmp14609 = getelementptr inbounds float, float* %tmp14608, i64 1
+ %tmp14610 = getelementptr inbounds float, float* %tmp14609, i64 1
+ %tmp14611 = getelementptr inbounds float, float* %tmp14610, i64 1
+ %tmp14612 = getelementptr inbounds float, float* %tmp14611, i64 1
+ %tmp14613 = getelementptr inbounds float, float* %tmp14612, i64 1
+ %tmp14614 = getelementptr inbounds float, float* %tmp14613, i64 1
+ %tmp14615 = getelementptr inbounds float, float* %tmp14614, i64 1
+ %tmp14616 = getelementptr inbounds float, float* %tmp14615, i64 1
+ %tmp14617 = getelementptr inbounds float, float* %tmp14616, i64 1
+ %tmp14618 = getelementptr inbounds float, float* %tmp14617, i64 1
+ %tmp14619 = getelementptr inbounds float, float* %tmp14618, i64 1
+ %tmp14620 = getelementptr inbounds float, float* %tmp14619, i64 1
+ %tmp14621 = getelementptr inbounds float, float* %tmp14620, i64 1
+ %tmp14622 = getelementptr inbounds float, float* %tmp14621, i64 1
+ %tmp14623 = getelementptr inbounds float, float* %tmp14622, i64 1
+ %tmp14624 = getelementptr inbounds float, float* %tmp14623, i64 1
+ %tmp14625 = getelementptr inbounds float, float* %tmp14624, i64 1
+ %tmp14626 = getelementptr inbounds float, float* %tmp14625, i64 1
+ %tmp14627 = getelementptr inbounds float, float* %tmp14626, i64 1
+ %tmp14628 = getelementptr inbounds float, float* %tmp14627, i64 1
+ %tmp14629 = getelementptr inbounds float, float* %tmp14628, i64 1
+ %tmp14630 = getelementptr inbounds float, float* %tmp14629, i64 1
+ %tmp14631 = getelementptr inbounds float, float* %tmp14630, i64 1
+ %tmp14632 = getelementptr inbounds float, float* %tmp14631, i64 1
+ %tmp14633 = getelementptr inbounds float, float* %tmp14632, i64 1
+ %tmp14634 = getelementptr inbounds float, float* %tmp14633, i64 1
+ %tmp14635 = getelementptr inbounds float, float* %tmp14634, i64 1
+ %tmp14636 = getelementptr inbounds float, float* %tmp14635, i64 1
+ %tmp14637 = getelementptr inbounds float, float* %tmp14636, i64 1
+ %tmp14638 = getelementptr inbounds float, float* %tmp14637, i64 1
+ %tmp14639 = getelementptr inbounds float, float* %tmp14638, i64 1
+ %tmp14640 = getelementptr inbounds float, float* %tmp14639, i64 1
+ %tmp14641 = getelementptr inbounds float, float* %tmp14640, i64 1
+ %tmp14642 = getelementptr inbounds float, float* %tmp14641, i64 1
+ %tmp14643 = getelementptr inbounds float, float* %tmp14642, i64 1
+ %tmp14644 = getelementptr inbounds float, float* %tmp14643, i64 1
+ %tmp14645 = getelementptr inbounds float, float* %tmp14644, i64 1
+ %tmp14646 = getelementptr inbounds float, float* %tmp14645, i64 1
+ %tmp14647 = getelementptr inbounds float, float* %tmp14646, i64 1
+ %tmp14648 = getelementptr inbounds float, float* %tmp14647, i64 1
+ %tmp14649 = getelementptr inbounds float, float* %tmp14648, i64 1
+ %tmp14650 = getelementptr inbounds float, float* %tmp14649, i64 1
+ %tmp14651 = getelementptr inbounds float, float* %tmp14650, i64 1
+ %tmp14652 = getelementptr inbounds float, float* %tmp14651, i64 1
+ %tmp14653 = getelementptr inbounds float, float* %tmp14652, i64 1
+ %tmp14654 = getelementptr inbounds float, float* %tmp14653, i64 1
+ %tmp14655 = getelementptr inbounds float, float* %tmp14654, i64 1
+ %tmp14656 = getelementptr inbounds float, float* %tmp14655, i64 1
+ %tmp14657 = getelementptr inbounds float, float* %tmp14656, i64 1
+ %tmp14658 = getelementptr inbounds float, float* %tmp14657, i64 1
+ %tmp14659 = getelementptr inbounds float, float* %tmp14658, i64 1
+ %tmp14660 = getelementptr inbounds float, float* %tmp14659, i64 1
+ %tmp14661 = getelementptr inbounds float, float* %tmp14660, i64 1
+ %tmp14662 = getelementptr inbounds float, float* %tmp14661, i64 1
+ %tmp14663 = getelementptr inbounds float, float* %tmp14662, i64 1
+ %tmp14664 = getelementptr inbounds float, float* %tmp14663, i64 1
+ %tmp14665 = getelementptr inbounds float, float* %tmp14664, i64 1
+ %tmp14666 = getelementptr inbounds float, float* %tmp14665, i64 1
+ %tmp14667 = getelementptr inbounds float, float* %tmp14666, i64 1
+ %tmp14668 = getelementptr inbounds float, float* %tmp14667, i64 1
+ %tmp14669 = getelementptr inbounds float, float* %tmp14668, i64 1
+ %tmp14670 = getelementptr inbounds float, float* %tmp14669, i64 1
+ %tmp14671 = getelementptr inbounds float, float* %tmp14670, i64 1
+ %tmp14672 = getelementptr inbounds float, float* %tmp14671, i64 1
+ %tmp14673 = getelementptr inbounds float, float* %tmp14672, i64 1
+ %tmp14674 = getelementptr inbounds float, float* %tmp14673, i64 1
+ %tmp14675 = getelementptr inbounds float, float* %tmp14674, i64 1
+ %tmp14676 = getelementptr inbounds float, float* %tmp14675, i64 1
+ %tmp14677 = getelementptr inbounds float, float* %tmp14676, i64 1
+ %tmp14678 = getelementptr inbounds float, float* %tmp14677, i64 1
+ %tmp14679 = getelementptr inbounds float, float* %tmp14678, i64 1
+ %tmp14680 = getelementptr inbounds float, float* %tmp14679, i64 1
+ %tmp14681 = getelementptr inbounds float, float* %tmp14680, i64 1
+ %tmp14682 = getelementptr inbounds float, float* %tmp14681, i64 1
+ %tmp14683 = getelementptr inbounds float, float* %tmp14682, i64 1
+ %tmp14684 = getelementptr inbounds float, float* %tmp14683, i64 1
+ %tmp14685 = getelementptr inbounds float, float* %tmp14684, i64 1
+ %tmp14686 = getelementptr inbounds float, float* %tmp14685, i64 1
+ %tmp14687 = getelementptr inbounds float, float* %tmp14686, i64 1
+ %tmp14688 = getelementptr inbounds float, float* %tmp14687, i64 1
+ %tmp14689 = getelementptr inbounds float, float* %tmp14688, i64 1
+ %tmp14690 = getelementptr inbounds float, float* %tmp14689, i64 1
+ %tmp14691 = getelementptr inbounds float, float* %tmp14690, i64 1
+ %tmp14692 = getelementptr inbounds float, float* %tmp14691, i64 1
+ %tmp14693 = getelementptr inbounds float, float* %tmp14692, i64 1
+ %tmp14694 = getelementptr inbounds float, float* %tmp14693, i64 1
+ %tmp14695 = getelementptr inbounds float, float* %tmp14694, i64 1
+ %tmp14696 = getelementptr inbounds float, float* %tmp14695, i64 1
+ %tmp14697 = getelementptr inbounds float, float* %tmp14696, i64 1
+ %tmp14698 = getelementptr inbounds float, float* %tmp14697, i64 1
+ %tmp14699 = getelementptr inbounds float, float* %tmp14698, i64 1
+ %tmp14700 = getelementptr inbounds float, float* %tmp14699, i64 1
+ %tmp14701 = getelementptr inbounds float, float* %tmp14700, i64 1
+ %tmp14702 = getelementptr inbounds float, float* %tmp14701, i64 1
+ %tmp14703 = getelementptr inbounds float, float* %tmp14702, i64 1
+ %tmp14704 = getelementptr inbounds float, float* %tmp14703, i64 1
+ %tmp14705 = getelementptr inbounds float, float* %tmp14704, i64 1
+ %tmp14706 = getelementptr inbounds float, float* %tmp14705, i64 1
+ %tmp14707 = getelementptr inbounds float, float* %tmp14706, i64 1
+ %tmp14708 = getelementptr inbounds float, float* %tmp14707, i64 1
+ %tmp14709 = getelementptr inbounds float, float* %tmp14708, i64 1
+ %tmp14710 = getelementptr inbounds float, float* %tmp14709, i64 1
+ %tmp14711 = getelementptr inbounds float, float* %tmp14710, i64 1
+ %tmp14712 = getelementptr inbounds float, float* %tmp14711, i64 1
+ %tmp14713 = getelementptr inbounds float, float* %tmp14712, i64 1
+ %tmp14714 = getelementptr inbounds float, float* %tmp14713, i64 1
+ %tmp14715 = getelementptr inbounds float, float* %tmp14714, i64 1
+ %tmp14716 = getelementptr inbounds float, float* %tmp14715, i64 1
+ %tmp14717 = getelementptr inbounds float, float* %tmp14716, i64 1
+ %tmp14718 = getelementptr inbounds float, float* %tmp14717, i64 1
+ %tmp14719 = getelementptr inbounds float, float* %tmp14718, i64 1
+ %tmp14720 = getelementptr inbounds float, float* %tmp14719, i64 1
+ %tmp14721 = getelementptr inbounds float, float* %tmp14720, i64 1
+ %tmp14722 = getelementptr inbounds float, float* %tmp14721, i64 1
+ %tmp14723 = getelementptr inbounds float, float* %tmp14722, i64 1
+ %tmp14724 = getelementptr inbounds float, float* %tmp14723, i64 1
+ %tmp14725 = getelementptr inbounds float, float* %tmp14724, i64 1
+ %tmp14726 = getelementptr inbounds float, float* %tmp14725, i64 1
+ %tmp14727 = getelementptr inbounds float, float* %tmp14726, i64 1
+ %tmp14728 = getelementptr inbounds float, float* %tmp14727, i64 1
+ %tmp14729 = getelementptr inbounds float, float* %tmp14728, i64 1
+ %tmp14730 = getelementptr inbounds float, float* %tmp14729, i64 1
+ %tmp14731 = getelementptr inbounds float, float* %tmp14730, i64 1
+ %tmp14732 = getelementptr inbounds float, float* %tmp14731, i64 1
+ %tmp14733 = getelementptr inbounds float, float* %tmp14732, i64 1
+ %tmp14734 = getelementptr inbounds float, float* %tmp14733, i64 1
+ %tmp14735 = getelementptr inbounds float, float* %tmp14734, i64 1
+ %tmp14736 = getelementptr inbounds float, float* %tmp14735, i64 1
+ %tmp14737 = getelementptr inbounds float, float* %tmp14736, i64 1
+ %tmp14738 = getelementptr inbounds float, float* %tmp14737, i64 1
+ %tmp14739 = getelementptr inbounds float, float* %tmp14738, i64 1
+ %tmp14740 = getelementptr inbounds float, float* %tmp14739, i64 1
+ %tmp14741 = getelementptr inbounds float, float* %tmp14740, i64 1
+ %tmp14742 = getelementptr inbounds float, float* %tmp14741, i64 1
+ %tmp14743 = getelementptr inbounds float, float* %tmp14742, i64 1
+ %tmp14744 = getelementptr inbounds float, float* %tmp14743, i64 1
+ %tmp14745 = getelementptr inbounds float, float* %tmp14744, i64 1
+ %tmp14746 = getelementptr inbounds float, float* %tmp14745, i64 1
+ %tmp14747 = getelementptr inbounds float, float* %tmp14746, i64 1
+ %tmp14748 = getelementptr inbounds float, float* %tmp14747, i64 1
+ %tmp14749 = getelementptr inbounds float, float* %tmp14748, i64 1
+ %tmp14750 = getelementptr inbounds float, float* %tmp14749, i64 1
+ %tmp14751 = getelementptr inbounds float, float* %tmp14750, i64 1
+ %tmp14752 = getelementptr inbounds float, float* %tmp14751, i64 1
+ %tmp14753 = getelementptr inbounds float, float* %tmp14752, i64 1
+ %tmp14754 = getelementptr inbounds float, float* %tmp14753, i64 1
+ %tmp14755 = getelementptr inbounds float, float* %tmp14754, i64 1
+ %tmp14756 = getelementptr inbounds float, float* %tmp14755, i64 1
+ %tmp14757 = getelementptr inbounds float, float* %tmp14756, i64 1
+ %tmp14758 = getelementptr inbounds float, float* %tmp14757, i64 1
+ %tmp14759 = getelementptr inbounds float, float* %tmp14758, i64 1
+ %tmp14760 = getelementptr inbounds float, float* %tmp14759, i64 1
+ %tmp14761 = getelementptr inbounds float, float* %tmp14760, i64 1
+ %tmp14762 = getelementptr inbounds float, float* %tmp14761, i64 1
+ %tmp14763 = getelementptr inbounds float, float* %tmp14762, i64 1
+ %tmp14764 = getelementptr inbounds float, float* %tmp14763, i64 1
+ %tmp14765 = getelementptr inbounds float, float* %tmp14764, i64 1
+ %tmp14766 = getelementptr inbounds float, float* %tmp14765, i64 1
+ %tmp14767 = getelementptr inbounds float, float* %tmp14766, i64 1
+ %tmp14768 = getelementptr inbounds float, float* %tmp14767, i64 1
+ %tmp14769 = getelementptr inbounds float, float* %tmp14768, i64 1
+ %tmp14770 = getelementptr inbounds float, float* %tmp14769, i64 1
+ %tmp14771 = getelementptr inbounds float, float* %tmp14770, i64 1
+ %tmp14772 = getelementptr inbounds float, float* %tmp14771, i64 1
+ %tmp14773 = getelementptr inbounds float, float* %tmp14772, i64 1
+ %tmp14774 = getelementptr inbounds float, float* %tmp14773, i64 1
+ %tmp14775 = getelementptr inbounds float, float* %tmp14774, i64 1
+ %tmp14776 = getelementptr inbounds float, float* %tmp14775, i64 1
+ %tmp14777 = getelementptr inbounds float, float* %tmp14776, i64 1
+ %tmp14778 = getelementptr inbounds float, float* %tmp14777, i64 1
+ %tmp14779 = getelementptr inbounds float, float* %tmp14778, i64 1
+ %tmp14780 = getelementptr inbounds float, float* %tmp14779, i64 1
+ %tmp14781 = getelementptr inbounds float, float* %tmp14780, i64 1
+ %tmp14782 = getelementptr inbounds float, float* %tmp14781, i64 1
+ %tmp14783 = getelementptr inbounds float, float* %tmp14782, i64 1
+ %tmp14784 = getelementptr inbounds float, float* %tmp14783, i64 1
+ %tmp14785 = getelementptr inbounds float, float* %tmp14784, i64 1
+ %tmp14786 = getelementptr inbounds float, float* %tmp14785, i64 1
+ %tmp14787 = getelementptr inbounds float, float* %tmp14786, i64 1
+ %tmp14788 = getelementptr inbounds float, float* %tmp14787, i64 1
+ %tmp14789 = getelementptr inbounds float, float* %tmp14788, i64 1
+ %tmp14790 = getelementptr inbounds float, float* %tmp14789, i64 1
+ %tmp14791 = getelementptr inbounds float, float* %tmp14790, i64 1
+ %tmp14792 = getelementptr inbounds float, float* %tmp14791, i64 1
+ %tmp14793 = getelementptr inbounds float, float* %tmp14792, i64 1
+ %tmp14794 = getelementptr inbounds float, float* %tmp14793, i64 1
+ %tmp14795 = getelementptr inbounds float, float* %tmp14794, i64 1
+ %tmp14796 = getelementptr inbounds float, float* %tmp14795, i64 1
+ %tmp14797 = getelementptr inbounds float, float* %tmp14796, i64 1
+ %tmp14798 = getelementptr inbounds float, float* %tmp14797, i64 1
+ %tmp14799 = getelementptr inbounds float, float* %tmp14798, i64 1
+ %tmp14800 = getelementptr inbounds float, float* %tmp14799, i64 1
+ %tmp14801 = getelementptr inbounds float, float* %tmp14800, i64 1
+ %tmp14802 = getelementptr inbounds float, float* %tmp14801, i64 1
+ %tmp14803 = getelementptr inbounds float, float* %tmp14802, i64 1
+ %tmp14804 = getelementptr inbounds float, float* %tmp14803, i64 1
+ %tmp14805 = getelementptr inbounds float, float* %tmp14804, i64 1
+ %tmp14806 = getelementptr inbounds float, float* %tmp14805, i64 1
+ %tmp14807 = getelementptr inbounds float, float* %tmp14806, i64 1
+ %tmp14808 = getelementptr inbounds float, float* %tmp14807, i64 1
+ %tmp14809 = getelementptr inbounds float, float* %tmp14808, i64 1
+ %tmp14810 = getelementptr inbounds float, float* %tmp14809, i64 1
+ %tmp14811 = getelementptr inbounds float, float* %tmp14810, i64 1
+ %tmp14812 = getelementptr inbounds float, float* %tmp14811, i64 1
+ %tmp14813 = getelementptr inbounds float, float* %tmp14812, i64 1
+ %tmp14814 = getelementptr inbounds float, float* %tmp14813, i64 1
+ %tmp14815 = getelementptr inbounds float, float* %tmp14814, i64 1
+ %tmp14816 = getelementptr inbounds float, float* %tmp14815, i64 1
+ %tmp14817 = getelementptr inbounds float, float* %tmp14816, i64 1
+ %tmp14818 = getelementptr inbounds float, float* %tmp14817, i64 1
+ %tmp14819 = getelementptr inbounds float, float* %tmp14818, i64 1
+ %tmp14820 = getelementptr inbounds float, float* %tmp14819, i64 1
+ %tmp14821 = getelementptr inbounds float, float* %tmp14820, i64 1
+ %tmp14822 = getelementptr inbounds float, float* %tmp14821, i64 1
+ %tmp14823 = getelementptr inbounds float, float* %tmp14822, i64 1
+ %tmp14824 = getelementptr inbounds float, float* %tmp14823, i64 1
+ %tmp14825 = getelementptr inbounds float, float* %tmp14824, i64 1
+ %tmp14826 = getelementptr inbounds float, float* %tmp14825, i64 1
+ %tmp14827 = getelementptr inbounds float, float* %tmp14826, i64 1
+ %tmp14828 = getelementptr inbounds float, float* %tmp14827, i64 1
+ %tmp14829 = getelementptr inbounds float, float* %tmp14828, i64 1
+ %tmp14830 = getelementptr inbounds float, float* %tmp14829, i64 1
+ %tmp14831 = getelementptr inbounds float, float* %tmp14830, i64 1
+ %tmp14832 = getelementptr inbounds float, float* %tmp14831, i64 1
+ %tmp14833 = getelementptr inbounds float, float* %tmp14832, i64 1
+ %tmp14834 = getelementptr inbounds float, float* %tmp14833, i64 1
+ %tmp14835 = getelementptr inbounds float, float* %tmp14834, i64 1
+ %tmp14836 = getelementptr inbounds float, float* %tmp14835, i64 1
+ %tmp14837 = getelementptr inbounds float, float* %tmp14836, i64 1
+ %tmp14838 = getelementptr inbounds float, float* %tmp14837, i64 1
+ %tmp14839 = getelementptr inbounds float, float* %tmp14838, i64 1
+ %tmp14840 = getelementptr inbounds float, float* %tmp14839, i64 1
+ %tmp14841 = getelementptr inbounds float, float* %tmp14840, i64 1
+ %tmp14842 = getelementptr inbounds float, float* %tmp14841, i64 1
+ %tmp14843 = getelementptr inbounds float, float* %tmp14842, i64 1
+ %tmp14844 = getelementptr inbounds float, float* %tmp14843, i64 1
+ %tmp14845 = getelementptr inbounds float, float* %tmp14844, i64 1
+ %tmp14846 = getelementptr inbounds float, float* %tmp14845, i64 1
+ %tmp14847 = getelementptr inbounds float, float* %tmp14846, i64 1
+ %tmp14848 = getelementptr inbounds float, float* %tmp14847, i64 1
+ %tmp14849 = getelementptr inbounds float, float* %tmp14848, i64 1
+ %tmp14850 = getelementptr inbounds float, float* %tmp14849, i64 1
+ %tmp14851 = getelementptr inbounds float, float* %tmp14850, i64 1
+ %tmp14852 = getelementptr inbounds float, float* %tmp14851, i64 1
+ %tmp14853 = getelementptr inbounds float, float* %tmp14852, i64 1
+ %tmp14854 = getelementptr inbounds float, float* %tmp14853, i64 1
+ %tmp14855 = getelementptr inbounds float, float* %tmp14854, i64 1
+ %tmp14856 = getelementptr inbounds float, float* %tmp14855, i64 1
+ %tmp14857 = getelementptr inbounds float, float* %tmp14856, i64 1
+ %tmp14858 = getelementptr inbounds float, float* %tmp14857, i64 1
+ %tmp14859 = getelementptr inbounds float, float* %tmp14858, i64 1
+ %tmp14860 = getelementptr inbounds float, float* %tmp14859, i64 1
+ %tmp14861 = getelementptr inbounds float, float* %tmp14860, i64 1
+ %tmp14862 = getelementptr inbounds float, float* %tmp14861, i64 1
+ %tmp14863 = getelementptr inbounds float, float* %tmp14862, i64 1
+ %tmp14864 = getelementptr inbounds float, float* %tmp14863, i64 1
+ %tmp14865 = getelementptr inbounds float, float* %tmp14864, i64 1
+ %tmp14866 = getelementptr inbounds float, float* %tmp14865, i64 1
+ %tmp14867 = getelementptr inbounds float, float* %tmp14866, i64 1
+ %tmp14868 = getelementptr inbounds float, float* %tmp14867, i64 1
+ %tmp14869 = getelementptr inbounds float, float* %tmp14868, i64 1
+ %tmp14870 = getelementptr inbounds float, float* %tmp14869, i64 1
+ %tmp14871 = getelementptr inbounds float, float* %tmp14870, i64 1
+ %tmp14872 = getelementptr inbounds float, float* %tmp14871, i64 1
+ %tmp14873 = getelementptr inbounds float, float* %tmp14872, i64 1
+ %tmp14874 = getelementptr inbounds float, float* %tmp14873, i64 1
+ %tmp14875 = getelementptr inbounds float, float* %tmp14874, i64 1
+ %tmp14876 = getelementptr inbounds float, float* %tmp14875, i64 1
+ %tmp14877 = getelementptr inbounds float, float* %tmp14876, i64 1
+ %tmp14878 = getelementptr inbounds float, float* %tmp14877, i64 1
+ %tmp14879 = getelementptr inbounds float, float* %tmp14878, i64 1
+ %tmp14880 = getelementptr inbounds float, float* %tmp14879, i64 1
+ %tmp14881 = getelementptr inbounds float, float* %tmp14880, i64 1
+ %tmp14882 = getelementptr inbounds float, float* %tmp14881, i64 1
+ %tmp14883 = getelementptr inbounds float, float* %tmp14882, i64 1
+ %tmp14884 = getelementptr inbounds float, float* %tmp14883, i64 1
+ %tmp14885 = getelementptr inbounds float, float* %tmp14884, i64 1
+ %tmp14886 = getelementptr inbounds float, float* %tmp14885, i64 1
+ %tmp14887 = getelementptr inbounds float, float* %tmp14886, i64 1
+ %tmp14888 = getelementptr inbounds float, float* %tmp14887, i64 1
+ %tmp14889 = getelementptr inbounds float, float* %tmp14888, i64 1
+ %tmp14890 = getelementptr inbounds float, float* %tmp14889, i64 1
+ %tmp14891 = getelementptr inbounds float, float* %tmp14890, i64 1
+ %tmp14892 = getelementptr inbounds float, float* %tmp14891, i64 1
+ %tmp14893 = getelementptr inbounds float, float* %tmp14892, i64 1
+ %tmp14894 = getelementptr inbounds float, float* %tmp14893, i64 1
+ %tmp14895 = getelementptr inbounds float, float* %tmp14894, i64 1
+ %tmp14896 = getelementptr inbounds float, float* %tmp14895, i64 1
+ %tmp14897 = getelementptr inbounds float, float* %tmp14896, i64 1
+ %tmp14898 = getelementptr inbounds float, float* %tmp14897, i64 1
+ %tmp14899 = getelementptr inbounds float, float* %tmp14898, i64 1
+ %tmp14900 = getelementptr inbounds float, float* %tmp14899, i64 1
+ %tmp14901 = getelementptr inbounds float, float* %tmp14900, i64 1
+ %tmp14902 = getelementptr inbounds float, float* %tmp14901, i64 1
+ %tmp14903 = getelementptr inbounds float, float* %tmp14902, i64 1
+ %tmp14904 = getelementptr inbounds float, float* %tmp14903, i64 1
+ %tmp14905 = getelementptr inbounds float, float* %tmp14904, i64 1
+ %tmp14906 = getelementptr inbounds float, float* %tmp14905, i64 1
+ %tmp14907 = getelementptr inbounds float, float* %tmp14906, i64 1
+ %tmp14908 = getelementptr inbounds float, float* %tmp14907, i64 1
+ %tmp14909 = getelementptr inbounds float, float* %tmp14908, i64 1
+ %tmp14910 = getelementptr inbounds float, float* %tmp14909, i64 1
+ %tmp14911 = getelementptr inbounds float, float* %tmp14910, i64 1
+ %tmp14912 = getelementptr inbounds float, float* %tmp14911, i64 1
+ %tmp14913 = getelementptr inbounds float, float* %tmp14912, i64 1
+ %tmp14914 = getelementptr inbounds float, float* %tmp14913, i64 1
+ %tmp14915 = getelementptr inbounds float, float* %tmp14914, i64 1
+ %tmp14916 = getelementptr inbounds float, float* %tmp14915, i64 1
+ %tmp14917 = getelementptr inbounds float, float* %tmp14916, i64 1
+ %tmp14918 = getelementptr inbounds float, float* %tmp14917, i64 1
+ %tmp14919 = getelementptr inbounds float, float* %tmp14918, i64 1
+ %tmp14920 = getelementptr inbounds float, float* %tmp14919, i64 1
+ %tmp14921 = getelementptr inbounds float, float* %tmp14920, i64 1
+ %tmp14922 = getelementptr inbounds float, float* %tmp14921, i64 1
+ %tmp14923 = getelementptr inbounds float, float* %tmp14922, i64 1
+ %tmp14924 = getelementptr inbounds float, float* %tmp14923, i64 1
+ %tmp14925 = getelementptr inbounds float, float* %tmp14924, i64 1
+ %tmp14926 = getelementptr inbounds float, float* %tmp14925, i64 1
+ %tmp14927 = getelementptr inbounds float, float* %tmp14926, i64 1
+ %tmp14928 = getelementptr inbounds float, float* %tmp14927, i64 1
+ %tmp14929 = getelementptr inbounds float, float* %tmp14928, i64 1
+ %tmp14930 = getelementptr inbounds float, float* %tmp14929, i64 1
+ %tmp14931 = getelementptr inbounds float, float* %tmp14930, i64 1
+ %tmp14932 = getelementptr inbounds float, float* %tmp14931, i64 1
+ %tmp14933 = getelementptr inbounds float, float* %tmp14932, i64 1
+ %tmp14934 = getelementptr inbounds float, float* %tmp14933, i64 1
+ %tmp14935 = getelementptr inbounds float, float* %tmp14934, i64 1
+ %tmp14936 = getelementptr inbounds float, float* %tmp14935, i64 1
+ %tmp14937 = getelementptr inbounds float, float* %tmp14936, i64 1
+ %tmp14938 = getelementptr inbounds float, float* %tmp14937, i64 1
+ %tmp14939 = getelementptr inbounds float, float* %tmp14938, i64 1
+ %tmp14940 = getelementptr inbounds float, float* %tmp14939, i64 1
+ %tmp14941 = getelementptr inbounds float, float* %tmp14940, i64 1
+ %tmp14942 = getelementptr inbounds float, float* %tmp14941, i64 1
+ %tmp14943 = getelementptr inbounds float, float* %tmp14942, i64 1
+ %tmp14944 = getelementptr inbounds float, float* %tmp14943, i64 1
+ %tmp14945 = getelementptr inbounds float, float* %tmp14944, i64 1
+ %tmp14946 = getelementptr inbounds float, float* %tmp14945, i64 1
+ %tmp14947 = getelementptr inbounds float, float* %tmp14946, i64 1
+ %tmp14948 = getelementptr inbounds float, float* %tmp14947, i64 1
+ %tmp14949 = getelementptr inbounds float, float* %tmp14948, i64 1
+ %tmp14950 = getelementptr inbounds float, float* %tmp14949, i64 1
+ %tmp14951 = getelementptr inbounds float, float* %tmp14950, i64 1
+ %tmp14952 = getelementptr inbounds float, float* %tmp14951, i64 1
+ %tmp14953 = getelementptr inbounds float, float* %tmp14952, i64 1
+ %tmp14954 = getelementptr inbounds float, float* %tmp14953, i64 1
+ %tmp14955 = getelementptr inbounds float, float* %tmp14954, i64 1
+ %tmp14956 = getelementptr inbounds float, float* %tmp14955, i64 1
+ %tmp14957 = getelementptr inbounds float, float* %tmp14956, i64 1
+ %tmp14958 = getelementptr inbounds float, float* %tmp14957, i64 1
+ %tmp14959 = getelementptr inbounds float, float* %tmp14958, i64 1
+ %tmp14960 = getelementptr inbounds float, float* %tmp14959, i64 1
+ %tmp14961 = getelementptr inbounds float, float* %tmp14960, i64 1
+ %tmp14962 = getelementptr inbounds float, float* %tmp14961, i64 1
+ %tmp14963 = getelementptr inbounds float, float* %tmp14962, i64 1
+ %tmp14964 = getelementptr inbounds float, float* %tmp14963, i64 1
+ %tmp14965 = getelementptr inbounds float, float* %tmp14964, i64 1
+ %tmp14966 = getelementptr inbounds float, float* %tmp14965, i64 1
+ %tmp14967 = getelementptr inbounds float, float* %tmp14966, i64 1
+ %tmp14968 = getelementptr inbounds float, float* %tmp14967, i64 1
+ %tmp14969 = getelementptr inbounds float, float* %tmp14968, i64 1
+ %tmp14970 = getelementptr inbounds float, float* %tmp14969, i64 1
+ %tmp14971 = getelementptr inbounds float, float* %tmp14970, i64 1
+ %tmp14972 = getelementptr inbounds float, float* %tmp14971, i64 1
+ %tmp14973 = getelementptr inbounds float, float* %tmp14972, i64 1
+ %tmp14974 = getelementptr inbounds float, float* %tmp14973, i64 1
+ %tmp14975 = getelementptr inbounds float, float* %tmp14974, i64 1
+ %tmp14976 = getelementptr inbounds float, float* %tmp14975, i64 1
+ %tmp14977 = getelementptr inbounds float, float* %tmp14976, i64 1
+ %tmp14978 = getelementptr inbounds float, float* %tmp14977, i64 1
+ %tmp14979 = getelementptr inbounds float, float* %tmp14978, i64 1
+ %tmp14980 = getelementptr inbounds float, float* %tmp14979, i64 1
+ %tmp14981 = getelementptr inbounds float, float* %tmp14980, i64 1
+ %tmp14982 = getelementptr inbounds float, float* %tmp14981, i64 1
+ %tmp14983 = getelementptr inbounds float, float* %tmp14982, i64 1
+ %tmp14984 = getelementptr inbounds float, float* %tmp14983, i64 1
+ %tmp14985 = getelementptr inbounds float, float* %tmp14984, i64 1
+ %tmp14986 = getelementptr inbounds float, float* %tmp14985, i64 1
+ %tmp14987 = getelementptr inbounds float, float* %tmp14986, i64 1
+ %tmp14988 = getelementptr inbounds float, float* %tmp14987, i64 1
+ %tmp14989 = getelementptr inbounds float, float* %tmp14988, i64 1
+ %tmp14990 = getelementptr inbounds float, float* %tmp14989, i64 1
+ %tmp14991 = getelementptr inbounds float, float* %tmp14990, i64 1
+ %tmp14992 = getelementptr inbounds float, float* %tmp14991, i64 1
+ %tmp14993 = getelementptr inbounds float, float* %tmp14992, i64 1
+ %tmp14994 = getelementptr inbounds float, float* %tmp14993, i64 1
+ %tmp14995 = getelementptr inbounds float, float* %tmp14994, i64 1
+ %tmp14996 = getelementptr inbounds float, float* %tmp14995, i64 1
+ %tmp14997 = getelementptr inbounds float, float* %tmp14996, i64 1
+ %tmp14998 = getelementptr inbounds float, float* %tmp14997, i64 1
+ %tmp14999 = getelementptr inbounds float, float* %tmp14998, i64 1
+ %tmp15000 = getelementptr inbounds float, float* %tmp14999, i64 1
+ %tmp15001 = getelementptr inbounds float, float* %tmp15000, i64 1
+ %tmp15002 = getelementptr inbounds float, float* %tmp15001, i64 1
+ %tmp15003 = getelementptr inbounds float, float* %tmp15002, i64 1
+ %tmp15004 = getelementptr inbounds float, float* %tmp15003, i64 1
+ %tmp15005 = getelementptr inbounds float, float* %tmp15004, i64 1
+ %tmp15006 = getelementptr inbounds float, float* %tmp15005, i64 1
+ %tmp15007 = getelementptr inbounds float, float* %tmp15006, i64 1
+ %tmp15008 = getelementptr inbounds float, float* %tmp15007, i64 1
+ %tmp15009 = getelementptr inbounds float, float* %tmp15008, i64 1
+ %tmp15010 = getelementptr inbounds float, float* %tmp15009, i64 1
+ %tmp15011 = getelementptr inbounds float, float* %tmp15010, i64 1
+ %tmp15012 = getelementptr inbounds float, float* %tmp15011, i64 1
+ %tmp15013 = getelementptr inbounds float, float* %tmp15012, i64 1
+ %tmp15014 = getelementptr inbounds float, float* %tmp15013, i64 1
+ %tmp15015 = getelementptr inbounds float, float* %tmp15014, i64 1
+ %tmp15016 = getelementptr inbounds float, float* %tmp15015, i64 1
+ %tmp15017 = getelementptr inbounds float, float* %tmp15016, i64 1
+ %tmp15018 = getelementptr inbounds float, float* %tmp15017, i64 1
+ %tmp15019 = getelementptr inbounds float, float* %tmp15018, i64 1
+ %tmp15020 = getelementptr inbounds float, float* %tmp15019, i64 1
+ %tmp15021 = getelementptr inbounds float, float* %tmp15020, i64 1
+ %tmp15022 = getelementptr inbounds float, float* %tmp15021, i64 1
+ %tmp15023 = getelementptr inbounds float, float* %tmp15022, i64 1
+ %tmp15024 = getelementptr inbounds float, float* %tmp15023, i64 1
+ %tmp15025 = getelementptr inbounds float, float* %tmp15024, i64 1
+ %tmp15026 = getelementptr inbounds float, float* %tmp15025, i64 1
+ %tmp15027 = getelementptr inbounds float, float* %tmp15026, i64 1
+ %tmp15028 = getelementptr inbounds float, float* %tmp15027, i64 1
+ %tmp15029 = getelementptr inbounds float, float* %tmp15028, i64 1
+ %tmp15030 = getelementptr inbounds float, float* %tmp15029, i64 1
+ %tmp15031 = getelementptr inbounds float, float* %tmp15030, i64 1
+ %tmp15032 = getelementptr inbounds float, float* %tmp15031, i64 1
+ %tmp15033 = getelementptr inbounds float, float* %tmp15032, i64 1
+ %tmp15034 = getelementptr inbounds float, float* %tmp15033, i64 1
+ %tmp15035 = getelementptr inbounds float, float* %tmp15034, i64 1
+ %tmp15036 = getelementptr inbounds float, float* %tmp15035, i64 1
+ %tmp15037 = getelementptr inbounds float, float* %tmp15036, i64 1
+ %tmp15038 = getelementptr inbounds float, float* %tmp15037, i64 1
+ %tmp15039 = getelementptr inbounds float, float* %tmp15038, i64 1
+ %tmp15040 = getelementptr inbounds float, float* %tmp15039, i64 1
+ %tmp15041 = getelementptr inbounds float, float* %tmp15040, i64 1
+ %tmp15042 = getelementptr inbounds float, float* %tmp15041, i64 1
+ %tmp15043 = getelementptr inbounds float, float* %tmp15042, i64 1
+ %tmp15044 = getelementptr inbounds float, float* %tmp15043, i64 1
+ %tmp15045 = getelementptr inbounds float, float* %tmp15044, i64 1
+ %tmp15046 = getelementptr inbounds float, float* %tmp15045, i64 1
+ %tmp15047 = getelementptr inbounds float, float* %tmp15046, i64 1
+ %tmp15048 = getelementptr inbounds float, float* %tmp15047, i64 1
+ %tmp15049 = getelementptr inbounds float, float* %tmp15048, i64 1
+ %tmp15050 = getelementptr inbounds float, float* %tmp15049, i64 1
+ %tmp15051 = getelementptr inbounds float, float* %tmp15050, i64 1
+ %tmp15052 = getelementptr inbounds float, float* %tmp15051, i64 1
+ %tmp15053 = getelementptr inbounds float, float* %tmp15052, i64 1
+ %tmp15054 = getelementptr inbounds float, float* %tmp15053, i64 1
+ %tmp15055 = getelementptr inbounds float, float* %tmp15054, i64 1
+ %tmp15056 = getelementptr inbounds float, float* %tmp15055, i64 1
+ %tmp15057 = getelementptr inbounds float, float* %tmp15056, i64 1
+ %tmp15058 = getelementptr inbounds float, float* %tmp15057, i64 1
+ %tmp15059 = getelementptr inbounds float, float* %tmp15058, i64 1
+ %tmp15060 = getelementptr inbounds float, float* %tmp15059, i64 1
+ %tmp15061 = getelementptr inbounds float, float* %tmp15060, i64 1
+ %tmp15062 = getelementptr inbounds float, float* %tmp15061, i64 1
+ %tmp15063 = getelementptr inbounds float, float* %tmp15062, i64 1
+ %tmp15064 = getelementptr inbounds float, float* %tmp15063, i64 1
+ %tmp15065 = getelementptr inbounds float, float* %tmp15064, i64 1
+ %tmp15066 = getelementptr inbounds float, float* %tmp15065, i64 1
+ %tmp15067 = getelementptr inbounds float, float* %tmp15066, i64 1
+ %tmp15068 = getelementptr inbounds float, float* %tmp15067, i64 1
+ %tmp15069 = getelementptr inbounds float, float* %tmp15068, i64 1
+ %tmp15070 = getelementptr inbounds float, float* %tmp15069, i64 1
+ %tmp15071 = getelementptr inbounds float, float* %tmp15070, i64 1
+ %tmp15072 = getelementptr inbounds float, float* %tmp15071, i64 1
+ %tmp15073 = getelementptr inbounds float, float* %tmp15072, i64 1
+ %tmp15074 = getelementptr inbounds float, float* %tmp15073, i64 1
+ %tmp15075 = getelementptr inbounds float, float* %tmp15074, i64 1
+ %tmp15076 = getelementptr inbounds float, float* %tmp15075, i64 1
+ %tmp15077 = getelementptr inbounds float, float* %tmp15076, i64 1
+ %tmp15078 = getelementptr inbounds float, float* %tmp15077, i64 1
+ %tmp15079 = getelementptr inbounds float, float* %tmp15078, i64 1
+ %tmp15080 = getelementptr inbounds float, float* %tmp15079, i64 1
+ %tmp15081 = getelementptr inbounds float, float* %tmp15080, i64 1
+ %tmp15082 = getelementptr inbounds float, float* %tmp15081, i64 1
+ %tmp15083 = getelementptr inbounds float, float* %tmp15082, i64 1
+ %tmp15084 = getelementptr inbounds float, float* %tmp15083, i64 1
+ %tmp15085 = getelementptr inbounds float, float* %tmp15084, i64 1
+ %tmp15086 = getelementptr inbounds float, float* %tmp15085, i64 1
+ %tmp15087 = getelementptr inbounds float, float* %tmp15086, i64 1
+ %tmp15088 = getelementptr inbounds float, float* %tmp15087, i64 1
+ %tmp15089 = getelementptr inbounds float, float* %tmp15088, i64 1
+ %tmp15090 = getelementptr inbounds float, float* %tmp15089, i64 1
+ %tmp15091 = getelementptr inbounds float, float* %tmp15090, i64 1
+ %tmp15092 = getelementptr inbounds float, float* %tmp15091, i64 1
+ %tmp15093 = getelementptr inbounds float, float* %tmp15092, i64 1
+ %tmp15094 = getelementptr inbounds float, float* %tmp15093, i64 1
+ %tmp15095 = getelementptr inbounds float, float* %tmp15094, i64 1
+ %tmp15096 = getelementptr inbounds float, float* %tmp15095, i64 1
+ %tmp15097 = getelementptr inbounds float, float* %tmp15096, i64 1
+ %tmp15098 = getelementptr inbounds float, float* %tmp15097, i64 1
+ %tmp15099 = getelementptr inbounds float, float* %tmp15098, i64 1
+ %tmp15100 = getelementptr inbounds float, float* %tmp15099, i64 1
+ %tmp15101 = getelementptr inbounds float, float* %tmp15100, i64 1
+ %tmp15102 = getelementptr inbounds float, float* %tmp15101, i64 1
+ %tmp15103 = getelementptr inbounds float, float* %tmp15102, i64 1
+ %tmp15104 = getelementptr inbounds float, float* %tmp15103, i64 1
+ %tmp15105 = getelementptr inbounds float, float* %tmp15104, i64 1
+ %tmp15106 = getelementptr inbounds float, float* %tmp15105, i64 1
+ %tmp15107 = getelementptr inbounds float, float* %tmp15106, i64 1
+ %tmp15108 = getelementptr inbounds float, float* %tmp15107, i64 1
+ %tmp15109 = getelementptr inbounds float, float* %tmp15108, i64 1
+ %tmp15110 = getelementptr inbounds float, float* %tmp15109, i64 1
+ %tmp15111 = getelementptr inbounds float, float* %tmp15110, i64 1
+ %tmp15112 = getelementptr inbounds float, float* %tmp15111, i64 1
+ %tmp15113 = getelementptr inbounds float, float* %tmp15112, i64 1
+ %tmp15114 = getelementptr inbounds float, float* %tmp15113, i64 1
+ %tmp15115 = getelementptr inbounds float, float* %tmp15114, i64 1
+ %tmp15116 = getelementptr inbounds float, float* %tmp15115, i64 1
+ %tmp15117 = getelementptr inbounds float, float* %tmp15116, i64 1
+ %tmp15118 = getelementptr inbounds float, float* %tmp15117, i64 1
+ %tmp15119 = getelementptr inbounds float, float* %tmp15118, i64 1
+ %tmp15120 = getelementptr inbounds float, float* %tmp15119, i64 1
+ %tmp15121 = getelementptr inbounds float, float* %tmp15120, i64 1
+ %tmp15122 = getelementptr inbounds float, float* %tmp15121, i64 1
+ %tmp15123 = getelementptr inbounds float, float* %tmp15122, i64 1
+ %tmp15124 = getelementptr inbounds float, float* %tmp15123, i64 1
+ %tmp15125 = getelementptr inbounds float, float* %tmp15124, i64 1
+ %tmp15126 = getelementptr inbounds float, float* %tmp15125, i64 1
+ %tmp15127 = getelementptr inbounds float, float* %tmp15126, i64 1
+ %tmp15128 = getelementptr inbounds float, float* %tmp15127, i64 1
+ %tmp15129 = getelementptr inbounds float, float* %tmp15128, i64 1
+ %tmp15130 = getelementptr inbounds float, float* %tmp15129, i64 1
+ %tmp15131 = getelementptr inbounds float, float* %tmp15130, i64 1
+ %tmp15132 = getelementptr inbounds float, float* %tmp15131, i64 1
+ %tmp15133 = getelementptr inbounds float, float* %tmp15132, i64 1
+ %tmp15134 = getelementptr inbounds float, float* %tmp15133, i64 1
+ %tmp15135 = getelementptr inbounds float, float* %tmp15134, i64 1
+ %tmp15136 = getelementptr inbounds float, float* %tmp15135, i64 1
+ %tmp15137 = getelementptr inbounds float, float* %tmp15136, i64 1
+ %tmp15138 = getelementptr inbounds float, float* %tmp15137, i64 1
+ %tmp15139 = getelementptr inbounds float, float* %tmp15138, i64 1
+ %tmp15140 = getelementptr inbounds float, float* %tmp15139, i64 1
+ %tmp15141 = getelementptr inbounds float, float* %tmp15140, i64 1
+ %tmp15142 = getelementptr inbounds float, float* %tmp15141, i64 1
+ %tmp15143 = getelementptr inbounds float, float* %tmp15142, i64 1
+ %tmp15144 = getelementptr inbounds float, float* %tmp15143, i64 1
+ %tmp15145 = getelementptr inbounds float, float* %tmp15144, i64 1
+ %tmp15146 = getelementptr inbounds float, float* %tmp15145, i64 1
+ %tmp15147 = getelementptr inbounds float, float* %tmp15146, i64 1
+ %tmp15148 = getelementptr inbounds float, float* %tmp15147, i64 1
+ %tmp15149 = getelementptr inbounds float, float* %tmp15148, i64 1
+ %tmp15150 = getelementptr inbounds float, float* %tmp15149, i64 1
+ %tmp15151 = getelementptr inbounds float, float* %tmp15150, i64 1
+ %tmp15152 = getelementptr inbounds float, float* %tmp15151, i64 1
+ %tmp15153 = getelementptr inbounds float, float* %tmp15152, i64 1
+ %tmp15154 = getelementptr inbounds float, float* %tmp15153, i64 1
+ %tmp15155 = getelementptr inbounds float, float* %tmp15154, i64 1
+ %tmp15156 = getelementptr inbounds float, float* %tmp15155, i64 1
+ %tmp15157 = getelementptr inbounds float, float* %tmp15156, i64 1
+ %tmp15158 = getelementptr inbounds float, float* %tmp15157, i64 1
+ %tmp15159 = getelementptr inbounds float, float* %tmp15158, i64 1
+ %tmp15160 = getelementptr inbounds float, float* %tmp15159, i64 1
+ %tmp15161 = getelementptr inbounds float, float* %tmp15160, i64 1
+ %tmp15162 = getelementptr inbounds float, float* %tmp15161, i64 1
+ %tmp15163 = getelementptr inbounds float, float* %tmp15162, i64 1
+ %tmp15164 = getelementptr inbounds float, float* %tmp15163, i64 1
+ %tmp15165 = getelementptr inbounds float, float* %tmp15164, i64 1
+ %tmp15166 = getelementptr inbounds float, float* %tmp15165, i64 1
+ %tmp15167 = getelementptr inbounds float, float* %tmp15166, i64 1
+ %tmp15168 = getelementptr inbounds float, float* %tmp15167, i64 1
+ %tmp15169 = getelementptr inbounds float, float* %tmp15168, i64 1
+ %tmp15170 = getelementptr inbounds float, float* %tmp15169, i64 1
+ %tmp15171 = getelementptr inbounds float, float* %tmp15170, i64 1
+ %tmp15172 = getelementptr inbounds float, float* %tmp15171, i64 1
+ %tmp15173 = getelementptr inbounds float, float* %tmp15172, i64 1
+ %tmp15174 = getelementptr inbounds float, float* %tmp15173, i64 1
+ %tmp15175 = getelementptr inbounds float, float* %tmp15174, i64 1
+ %tmp15176 = getelementptr inbounds float, float* %tmp15175, i64 1
+ %tmp15177 = getelementptr inbounds float, float* %tmp15176, i64 1
+ %tmp15178 = getelementptr inbounds float, float* %tmp15177, i64 1
+ %tmp15179 = getelementptr inbounds float, float* %tmp15178, i64 1
+ %tmp15180 = getelementptr inbounds float, float* %tmp15179, i64 1
+ %tmp15181 = getelementptr inbounds float, float* %tmp15180, i64 1
+ %tmp15182 = getelementptr inbounds float, float* %tmp15181, i64 1
+ %tmp15183 = getelementptr inbounds float, float* %tmp15182, i64 1
+ %tmp15184 = getelementptr inbounds float, float* %tmp15183, i64 1
+ %tmp15185 = getelementptr inbounds float, float* %tmp15184, i64 1
+ %tmp15186 = getelementptr inbounds float, float* %tmp15185, i64 1
+ %tmp15187 = getelementptr inbounds float, float* %tmp15186, i64 1
+ %tmp15188 = getelementptr inbounds float, float* %tmp15187, i64 1
+ %tmp15189 = getelementptr inbounds float, float* %tmp15188, i64 1
+ %tmp15190 = getelementptr inbounds float, float* %tmp15189, i64 1
+ %tmp15191 = getelementptr inbounds float, float* %tmp15190, i64 1
+ %tmp15192 = getelementptr inbounds float, float* %tmp15191, i64 1
+ %tmp15193 = getelementptr inbounds float, float* %tmp15192, i64 1
+ %tmp15194 = getelementptr inbounds float, float* %tmp15193, i64 1
+ %tmp15195 = getelementptr inbounds float, float* %tmp15194, i64 1
+ %tmp15196 = getelementptr inbounds float, float* %tmp15195, i64 1
+ %tmp15197 = getelementptr inbounds float, float* %tmp15196, i64 1
+ %tmp15198 = getelementptr inbounds float, float* %tmp15197, i64 1
+ %tmp15199 = getelementptr inbounds float, float* %tmp15198, i64 1
+ %tmp15200 = getelementptr inbounds float, float* %tmp15199, i64 1
+ %tmp15201 = getelementptr inbounds float, float* %tmp15200, i64 1
+ %tmp15202 = getelementptr inbounds float, float* %tmp15201, i64 1
+ %tmp15203 = getelementptr inbounds float, float* %tmp15202, i64 1
+ %tmp15204 = getelementptr inbounds float, float* %tmp15203, i64 1
+ %tmp15205 = getelementptr inbounds float, float* %tmp15204, i64 1
+ %tmp15206 = getelementptr inbounds float, float* %tmp15205, i64 1
+ %tmp15207 = getelementptr inbounds float, float* %tmp15206, i64 1
+ %tmp15208 = getelementptr inbounds float, float* %tmp15207, i64 1
+ %tmp15209 = getelementptr inbounds float, float* %tmp15208, i64 1
+ %tmp15210 = getelementptr inbounds float, float* %tmp15209, i64 1
+ %tmp15211 = getelementptr inbounds float, float* %tmp15210, i64 1
+ %tmp15212 = getelementptr inbounds float, float* %tmp15211, i64 1
+ %tmp15213 = getelementptr inbounds float, float* %tmp15212, i64 1
+ %tmp15214 = getelementptr inbounds float, float* %tmp15213, i64 1
+ %tmp15215 = getelementptr inbounds float, float* %tmp15214, i64 1
+ %tmp15216 = getelementptr inbounds float, float* %tmp15215, i64 1
+ %tmp15217 = getelementptr inbounds float, float* %tmp15216, i64 1
+ %tmp15218 = getelementptr inbounds float, float* %tmp15217, i64 1
+ %tmp15219 = getelementptr inbounds float, float* %tmp15218, i64 1
+ %tmp15220 = getelementptr inbounds float, float* %tmp15219, i64 1
+ %tmp15221 = getelementptr inbounds float, float* %tmp15220, i64 1
+ %tmp15222 = getelementptr inbounds float, float* %tmp15221, i64 1
+ %tmp15223 = getelementptr inbounds float, float* %tmp15222, i64 1
+ %tmp15224 = getelementptr inbounds float, float* %tmp15223, i64 1
+ %tmp15225 = getelementptr inbounds float, float* %tmp15224, i64 1
+ %tmp15226 = getelementptr inbounds float, float* %tmp15225, i64 1
+ %tmp15227 = getelementptr inbounds float, float* %tmp15226, i64 1
+ %tmp15228 = getelementptr inbounds float, float* %tmp15227, i64 1
+ %tmp15229 = getelementptr inbounds float, float* %tmp15228, i64 1
+ %tmp15230 = getelementptr inbounds float, float* %tmp15229, i64 1
+ %tmp15231 = getelementptr inbounds float, float* %tmp15230, i64 1
+ %tmp15232 = getelementptr inbounds float, float* %tmp15231, i64 1
+ %tmp15233 = getelementptr inbounds float, float* %tmp15232, i64 1
+ %tmp15234 = getelementptr inbounds float, float* %tmp15233, i64 1
+ %tmp15235 = getelementptr inbounds float, float* %tmp15234, i64 1
+ %tmp15236 = getelementptr inbounds float, float* %tmp15235, i64 1
+ %tmp15237 = getelementptr inbounds float, float* %tmp15236, i64 1
+ %tmp15238 = getelementptr inbounds float, float* %tmp15237, i64 1
+ %tmp15239 = getelementptr inbounds float, float* %tmp15238, i64 1
+ %tmp15240 = getelementptr inbounds float, float* %tmp15239, i64 1
+ %tmp15241 = getelementptr inbounds float, float* %tmp15240, i64 1
+ %tmp15242 = getelementptr inbounds float, float* %tmp15241, i64 1
+ %tmp15243 = getelementptr inbounds float, float* %tmp15242, i64 1
+ %tmp15244 = getelementptr inbounds float, float* %tmp15243, i64 1
+ %tmp15245 = getelementptr inbounds float, float* %tmp15244, i64 1
+ %tmp15246 = getelementptr inbounds float, float* %tmp15245, i64 1
+ %tmp15247 = getelementptr inbounds float, float* %tmp15246, i64 1
+ %tmp15248 = getelementptr inbounds float, float* %tmp15247, i64 1
+ %tmp15249 = getelementptr inbounds float, float* %tmp15248, i64 1
+ %tmp15250 = getelementptr inbounds float, float* %tmp15249, i64 1
+ %tmp15251 = getelementptr inbounds float, float* %tmp15250, i64 1
+ %tmp15252 = getelementptr inbounds float, float* %tmp15251, i64 1
+ %tmp15253 = getelementptr inbounds float, float* %tmp15252, i64 1
+ %tmp15254 = getelementptr inbounds float, float* %tmp15253, i64 1
+ %tmp15255 = getelementptr inbounds float, float* %tmp15254, i64 1
+ %tmp15256 = getelementptr inbounds float, float* %tmp15255, i64 1
+ %tmp15257 = getelementptr inbounds float, float* %tmp15256, i64 1
+ %tmp15258 = getelementptr inbounds float, float* %tmp15257, i64 1
+ %tmp15259 = getelementptr inbounds float, float* %tmp15258, i64 1
+ %tmp15260 = getelementptr inbounds float, float* %tmp15259, i64 1
+ %tmp15261 = getelementptr inbounds float, float* %tmp15260, i64 1
+ %tmp15262 = getelementptr inbounds float, float* %tmp15261, i64 1
+ %tmp15263 = getelementptr inbounds float, float* %tmp15262, i64 1
+ %tmp15264 = getelementptr inbounds float, float* %tmp15263, i64 1
+ %tmp15265 = getelementptr inbounds float, float* %tmp15264, i64 1
+ %tmp15266 = getelementptr inbounds float, float* %tmp15265, i64 1
+ %tmp15267 = getelementptr inbounds float, float* %tmp15266, i64 1
+ %tmp15268 = getelementptr inbounds float, float* %tmp15267, i64 1
+ %tmp15269 = getelementptr inbounds float, float* %tmp15268, i64 1
+ %tmp15270 = getelementptr inbounds float, float* %tmp15269, i64 1
+ %tmp15271 = getelementptr inbounds float, float* %tmp15270, i64 1
+ %tmp15272 = getelementptr inbounds float, float* %tmp15271, i64 1
+ %tmp15273 = getelementptr inbounds float, float* %tmp15272, i64 1
+ %tmp15274 = getelementptr inbounds float, float* %tmp15273, i64 1
+ %tmp15275 = getelementptr inbounds float, float* %tmp15274, i64 1
+ %tmp15276 = getelementptr inbounds float, float* %tmp15275, i64 1
+ %tmp15277 = getelementptr inbounds float, float* %tmp15276, i64 1
+ %tmp15278 = getelementptr inbounds float, float* %tmp15277, i64 1
+ %tmp15279 = getelementptr inbounds float, float* %tmp15278, i64 1
+ %tmp15280 = getelementptr inbounds float, float* %tmp15279, i64 1
+ %tmp15281 = getelementptr inbounds float, float* %tmp15280, i64 1
+ %tmp15282 = getelementptr inbounds float, float* %tmp15281, i64 1
+ %tmp15283 = getelementptr inbounds float, float* %tmp15282, i64 1
+ %tmp15284 = getelementptr inbounds float, float* %tmp15283, i64 1
+ %tmp15285 = getelementptr inbounds float, float* %tmp15284, i64 1
+ %tmp15286 = getelementptr inbounds float, float* %tmp15285, i64 1
+ %tmp15287 = getelementptr inbounds float, float* %tmp15286, i64 1
+ %tmp15288 = getelementptr inbounds float, float* %tmp15287, i64 1
+ %tmp15289 = getelementptr inbounds float, float* %tmp15288, i64 1
+ %tmp15290 = getelementptr inbounds float, float* %tmp15289, i64 1
+ %tmp15291 = getelementptr inbounds float, float* %tmp15290, i64 1
+ %tmp15292 = getelementptr inbounds float, float* %tmp15291, i64 1
+ %tmp15293 = getelementptr inbounds float, float* %tmp15292, i64 1
+ %tmp15294 = getelementptr inbounds float, float* %tmp15293, i64 1
+ %tmp15295 = getelementptr inbounds float, float* %tmp15294, i64 1
+ %tmp15296 = getelementptr inbounds float, float* %tmp15295, i64 1
+ %tmp15297 = getelementptr inbounds float, float* %tmp15296, i64 1
+ %tmp15298 = getelementptr inbounds float, float* %tmp15297, i64 1
+ %tmp15299 = getelementptr inbounds float, float* %tmp15298, i64 1
+ %tmp15300 = getelementptr inbounds float, float* %tmp15299, i64 1
+ %tmp15301 = getelementptr inbounds float, float* %tmp15300, i64 1
+ %tmp15302 = getelementptr inbounds float, float* %tmp15301, i64 1
+ %tmp15303 = getelementptr inbounds float, float* %tmp15302, i64 1
+ %tmp15304 = getelementptr inbounds float, float* %tmp15303, i64 1
+ %tmp15305 = getelementptr inbounds float, float* %tmp15304, i64 1
+ %tmp15306 = getelementptr inbounds float, float* %tmp15305, i64 1
+ %tmp15307 = getelementptr inbounds float, float* %tmp15306, i64 1
+ %tmp15308 = getelementptr inbounds float, float* %tmp15307, i64 1
+ %tmp15309 = getelementptr inbounds float, float* %tmp15308, i64 1
+ %tmp15310 = getelementptr inbounds float, float* %tmp15309, i64 1
+ %tmp15311 = getelementptr inbounds float, float* %tmp15310, i64 1
+ %tmp15312 = getelementptr inbounds float, float* %tmp15311, i64 1
+ %tmp15313 = getelementptr inbounds float, float* %tmp15312, i64 1
+ %tmp15314 = getelementptr inbounds float, float* %tmp15313, i64 1
+ %tmp15315 = getelementptr inbounds float, float* %tmp15314, i64 1
+ %tmp15316 = getelementptr inbounds float, float* %tmp15315, i64 1
+ %tmp15317 = getelementptr inbounds float, float* %tmp15316, i64 1
+ %tmp15318 = getelementptr inbounds float, float* %tmp15317, i64 1
+ %tmp15319 = getelementptr inbounds float, float* %tmp15318, i64 1
+ %tmp15320 = getelementptr inbounds float, float* %tmp15319, i64 1
+ %tmp15321 = getelementptr inbounds float, float* %tmp15320, i64 1
+ %tmp15322 = getelementptr inbounds float, float* %tmp15321, i64 1
+ %tmp15323 = getelementptr inbounds float, float* %tmp15322, i64 1
+ %tmp15324 = getelementptr inbounds float, float* %tmp15323, i64 1
+ %tmp15325 = getelementptr inbounds float, float* %tmp15324, i64 1
+ %tmp15326 = getelementptr inbounds float, float* %tmp15325, i64 1
+ %tmp15327 = getelementptr inbounds float, float* %tmp15326, i64 1
+ %tmp15328 = getelementptr inbounds float, float* %tmp15327, i64 1
+ %tmp15329 = getelementptr inbounds float, float* %tmp15328, i64 1
+ %tmp15330 = getelementptr inbounds float, float* %tmp15329, i64 1
+ %tmp15331 = getelementptr inbounds float, float* %tmp15330, i64 1
+ %tmp15332 = getelementptr inbounds float, float* %tmp15331, i64 1
+ %tmp15333 = getelementptr inbounds float, float* %tmp15332, i64 1
+ %tmp15334 = getelementptr inbounds float, float* %tmp15333, i64 1
+ %tmp15335 = getelementptr inbounds float, float* %tmp15334, i64 1
+ %tmp15336 = getelementptr inbounds float, float* %tmp15335, i64 1
+ %tmp15337 = getelementptr inbounds float, float* %tmp15336, i64 1
+ %tmp15338 = getelementptr inbounds float, float* %tmp15337, i64 1
+ %tmp15339 = getelementptr inbounds float, float* %tmp15338, i64 1
+ %tmp15340 = getelementptr inbounds float, float* %tmp15339, i64 1
+ %tmp15341 = getelementptr inbounds float, float* %tmp15340, i64 1
+ %tmp15342 = getelementptr inbounds float, float* %tmp15341, i64 1
+ %tmp15343 = getelementptr inbounds float, float* %tmp15342, i64 1
+ %tmp15344 = getelementptr inbounds float, float* %tmp15343, i64 1
+ %tmp15345 = getelementptr inbounds float, float* %tmp15344, i64 1
+ %tmp15346 = getelementptr inbounds float, float* %tmp15345, i64 1
+ %tmp15347 = getelementptr inbounds float, float* %tmp15346, i64 1
+ %tmp15348 = getelementptr inbounds float, float* %tmp15347, i64 1
+ %tmp15349 = getelementptr inbounds float, float* %tmp15348, i64 1
+ %tmp15350 = getelementptr inbounds float, float* %tmp15349, i64 1
+ %tmp15351 = getelementptr inbounds float, float* %tmp15350, i64 1
+ %tmp15352 = getelementptr inbounds float, float* %tmp15351, i64 1
+ %tmp15353 = getelementptr inbounds float, float* %tmp15352, i64 1
+ %tmp15354 = getelementptr inbounds float, float* %tmp15353, i64 1
+ %tmp15355 = getelementptr inbounds float, float* %tmp15354, i64 1
+ %tmp15356 = getelementptr inbounds float, float* %tmp15355, i64 1
+ %tmp15357 = getelementptr inbounds float, float* %tmp15356, i64 1
+ %tmp15358 = getelementptr inbounds float, float* %tmp15357, i64 1
+ %tmp15359 = getelementptr inbounds float, float* %tmp15358, i64 1
+ %tmp15360 = getelementptr inbounds float, float* %tmp15359, i64 1
+ %tmp15361 = getelementptr inbounds float, float* %tmp15360, i64 1
+ %tmp15362 = getelementptr inbounds float, float* %tmp15361, i64 1
+ %tmp15363 = getelementptr inbounds float, float* %tmp15362, i64 1
+ %tmp15364 = getelementptr inbounds float, float* %tmp15363, i64 1
+ %tmp15365 = getelementptr inbounds float, float* %tmp15364, i64 1
+ %tmp15366 = getelementptr inbounds float, float* %tmp15365, i64 1
+ %tmp15367 = getelementptr inbounds float, float* %tmp15366, i64 1
+ %tmp15368 = getelementptr inbounds float, float* %tmp15367, i64 1
+ %tmp15369 = getelementptr inbounds float, float* %tmp15368, i64 1
+ %tmp15370 = getelementptr inbounds float, float* %tmp15369, i64 1
+ %tmp15371 = getelementptr inbounds float, float* %tmp15370, i64 1
+ %tmp15372 = getelementptr inbounds float, float* %tmp15371, i64 1
+ %tmp15373 = getelementptr inbounds float, float* %tmp15372, i64 1
+ %tmp15374 = getelementptr inbounds float, float* %tmp15373, i64 1
+ %tmp15375 = getelementptr inbounds float, float* %tmp15374, i64 1
+ %tmp15376 = getelementptr inbounds float, float* %tmp15375, i64 1
+ %tmp15377 = getelementptr inbounds float, float* %tmp15376, i64 1
+ %tmp15378 = getelementptr inbounds float, float* %tmp15377, i64 1
+ %tmp15379 = getelementptr inbounds float, float* %tmp15378, i64 1
+ %tmp15380 = getelementptr inbounds float, float* %tmp15379, i64 1
+ %tmp15381 = getelementptr inbounds float, float* %tmp15380, i64 1
+ %tmp15382 = getelementptr inbounds float, float* %tmp15381, i64 1
+ %tmp15383 = getelementptr inbounds float, float* %tmp15382, i64 1
+ %tmp15384 = getelementptr inbounds float, float* %tmp15383, i64 1
+ %tmp15385 = getelementptr inbounds float, float* %tmp15384, i64 1
+ %tmp15386 = getelementptr inbounds float, float* %tmp15385, i64 1
+ %tmp15387 = getelementptr inbounds float, float* %tmp15386, i64 1
+ %tmp15388 = getelementptr inbounds float, float* %tmp15387, i64 1
+ %tmp15389 = getelementptr inbounds float, float* %tmp15388, i64 1
+ %tmp15390 = getelementptr inbounds float, float* %tmp15389, i64 1
+ %tmp15391 = getelementptr inbounds float, float* %tmp15390, i64 1
+ %tmp15392 = getelementptr inbounds float, float* %tmp15391, i64 1
+ %tmp15393 = getelementptr inbounds float, float* %tmp15392, i64 1
+ %tmp15394 = getelementptr inbounds float, float* %tmp15393, i64 1
+ %tmp15395 = getelementptr inbounds float, float* %tmp15394, i64 1
+ %tmp15396 = getelementptr inbounds float, float* %tmp15395, i64 1
+ %tmp15397 = getelementptr inbounds float, float* %tmp15396, i64 1
+ %tmp15398 = getelementptr inbounds float, float* %tmp15397, i64 1
+ %tmp15399 = getelementptr inbounds float, float* %tmp15398, i64 1
+ %tmp15400 = getelementptr inbounds float, float* %tmp15399, i64 1
+ %tmp15401 = getelementptr inbounds float, float* %tmp15400, i64 1
+ %tmp15402 = getelementptr inbounds float, float* %tmp15401, i64 1
+ %tmp15403 = getelementptr inbounds float, float* %tmp15402, i64 1
+ %tmp15404 = getelementptr inbounds float, float* %tmp15403, i64 1
+ %tmp15405 = getelementptr inbounds float, float* %tmp15404, i64 1
+ %tmp15406 = getelementptr inbounds float, float* %tmp15405, i64 1
+ %tmp15407 = getelementptr inbounds float, float* %tmp15406, i64 1
+ %tmp15408 = getelementptr inbounds float, float* %tmp15407, i64 1
+ %tmp15409 = getelementptr inbounds float, float* %tmp15408, i64 1
+ %tmp15410 = getelementptr inbounds float, float* %tmp15409, i64 1
+ %tmp15411 = getelementptr inbounds float, float* %tmp15410, i64 1
+ %tmp15412 = getelementptr inbounds float, float* %tmp15411, i64 1
+ %tmp15413 = getelementptr inbounds float, float* %tmp15412, i64 1
+ %tmp15414 = getelementptr inbounds float, float* %tmp15413, i64 1
+ %tmp15415 = getelementptr inbounds float, float* %tmp15414, i64 1
+ %tmp15416 = getelementptr inbounds float, float* %tmp15415, i64 1
+ %tmp15417 = getelementptr inbounds float, float* %tmp15416, i64 1
+ %tmp15418 = getelementptr inbounds float, float* %tmp15417, i64 1
+ %tmp15419 = getelementptr inbounds float, float* %tmp15418, i64 1
+ %tmp15420 = getelementptr inbounds float, float* %tmp15419, i64 1
+ %tmp15421 = getelementptr inbounds float, float* %tmp15420, i64 1
+ %tmp15422 = getelementptr inbounds float, float* %tmp15421, i64 1
+ %tmp15423 = getelementptr inbounds float, float* %tmp15422, i64 1
+ %tmp15424 = getelementptr inbounds float, float* %tmp15423, i64 1
+ %tmp15425 = getelementptr inbounds float, float* %tmp15424, i64 1
+ %tmp15426 = getelementptr inbounds float, float* %tmp15425, i64 1
+ %tmp15427 = getelementptr inbounds float, float* %tmp15426, i64 1
+ %tmp15428 = getelementptr inbounds float, float* %tmp15427, i64 1
+ %tmp15429 = getelementptr inbounds float, float* %tmp15428, i64 1
+ %tmp15430 = getelementptr inbounds float, float* %tmp15429, i64 1
+ %tmp15431 = getelementptr inbounds float, float* %tmp15430, i64 1
+ %tmp15432 = getelementptr inbounds float, float* %tmp15431, i64 1
+ %tmp15433 = getelementptr inbounds float, float* %tmp15432, i64 1
+ %tmp15434 = getelementptr inbounds float, float* %tmp15433, i64 1
+ %tmp15435 = getelementptr inbounds float, float* %tmp15434, i64 1
+ %tmp15436 = getelementptr inbounds float, float* %tmp15435, i64 1
+ %tmp15437 = getelementptr inbounds float, float* %tmp15436, i64 1
+ %tmp15438 = getelementptr inbounds float, float* %tmp15437, i64 1
+ %tmp15439 = getelementptr inbounds float, float* %tmp15438, i64 1
+ %tmp15440 = getelementptr inbounds float, float* %tmp15439, i64 1
+ %tmp15441 = getelementptr inbounds float, float* %tmp15440, i64 1
+ %tmp15442 = getelementptr inbounds float, float* %tmp15441, i64 1
+ %tmp15443 = getelementptr inbounds float, float* %tmp15442, i64 1
+ %tmp15444 = getelementptr inbounds float, float* %tmp15443, i64 1
+ %tmp15445 = getelementptr inbounds float, float* %tmp15444, i64 1
+ %tmp15446 = getelementptr inbounds float, float* %tmp15445, i64 1
+ %tmp15447 = getelementptr inbounds float, float* %tmp15446, i64 1
+ %tmp15448 = getelementptr inbounds float, float* %tmp15447, i64 1
+ %tmp15449 = getelementptr inbounds float, float* %tmp15448, i64 1
+ %tmp15450 = getelementptr inbounds float, float* %tmp15449, i64 1
+ %tmp15451 = getelementptr inbounds float, float* %tmp15450, i64 1
+ %tmp15452 = getelementptr inbounds float, float* %tmp15451, i64 1
+ %tmp15453 = getelementptr inbounds float, float* %tmp15452, i64 1
+ %tmp15454 = getelementptr inbounds float, float* %tmp15453, i64 1
+ %tmp15455 = getelementptr inbounds float, float* %tmp15454, i64 1
+ %tmp15456 = getelementptr inbounds float, float* %tmp15455, i64 1
+ %tmp15457 = getelementptr inbounds float, float* %tmp15456, i64 1
+ %tmp15458 = getelementptr inbounds float, float* %tmp15457, i64 1
+ %tmp15459 = getelementptr inbounds float, float* %tmp15458, i64 1
+ %tmp15460 = getelementptr inbounds float, float* %tmp15459, i64 1
+ %tmp15461 = getelementptr inbounds float, float* %tmp15460, i64 1
+ %tmp15462 = getelementptr inbounds float, float* %tmp15461, i64 1
+ %tmp15463 = getelementptr inbounds float, float* %tmp15462, i64 1
+ %tmp15464 = getelementptr inbounds float, float* %tmp15463, i64 1
+ %tmp15465 = getelementptr inbounds float, float* %tmp15464, i64 1
+ %tmp15466 = getelementptr inbounds float, float* %tmp15465, i64 1
+ %tmp15467 = getelementptr inbounds float, float* %tmp15466, i64 1
+ %tmp15468 = getelementptr inbounds float, float* %tmp15467, i64 1
+ %tmp15469 = getelementptr inbounds float, float* %tmp15468, i64 1
+ %tmp15470 = getelementptr inbounds float, float* %tmp15469, i64 1
+ %tmp15471 = getelementptr inbounds float, float* %tmp15470, i64 1
+ %tmp15472 = getelementptr inbounds float, float* %tmp15471, i64 1
+ %tmp15473 = getelementptr inbounds float, float* %tmp15472, i64 1
+ %tmp15474 = getelementptr inbounds float, float* %tmp15473, i64 1
+ %tmp15475 = getelementptr inbounds float, float* %tmp15474, i64 1
+ %tmp15476 = getelementptr inbounds float, float* %tmp15475, i64 1
+ %tmp15477 = getelementptr inbounds float, float* %tmp15476, i64 1
+ %tmp15478 = getelementptr inbounds float, float* %tmp15477, i64 1
+ %tmp15479 = getelementptr inbounds float, float* %tmp15478, i64 1
+ %tmp15480 = getelementptr inbounds float, float* %tmp15479, i64 1
+ %tmp15481 = getelementptr inbounds float, float* %tmp15480, i64 1
+ %tmp15482 = getelementptr inbounds float, float* %tmp15481, i64 1
+ %tmp15483 = getelementptr inbounds float, float* %tmp15482, i64 1
+ %tmp15484 = getelementptr inbounds float, float* %tmp15483, i64 1
+ %tmp15485 = getelementptr inbounds float, float* %tmp15484, i64 1
+ %tmp15486 = getelementptr inbounds float, float* %tmp15485, i64 1
+ %tmp15487 = getelementptr inbounds float, float* %tmp15486, i64 1
+ %tmp15488 = getelementptr inbounds float, float* %tmp15487, i64 1
+ %tmp15489 = getelementptr inbounds float, float* %tmp15488, i64 1
+ %tmp15490 = getelementptr inbounds float, float* %tmp15489, i64 1
+ %tmp15491 = getelementptr inbounds float, float* %tmp15490, i64 1
+ %tmp15492 = getelementptr inbounds float, float* %tmp15491, i64 1
+ %tmp15493 = getelementptr inbounds float, float* %tmp15492, i64 1
+ %tmp15494 = getelementptr inbounds float, float* %tmp15493, i64 1
+ %tmp15495 = getelementptr inbounds float, float* %tmp15494, i64 1
+ %tmp15496 = getelementptr inbounds float, float* %tmp15495, i64 1
+ %tmp15497 = getelementptr inbounds float, float* %tmp15496, i64 1
+ %tmp15498 = getelementptr inbounds float, float* %tmp15497, i64 1
+ %tmp15499 = getelementptr inbounds float, float* %tmp15498, i64 1
+ %tmp15500 = getelementptr inbounds float, float* %tmp15499, i64 1
+ %tmp15501 = getelementptr inbounds float, float* %tmp15500, i64 1
+ %tmp15502 = getelementptr inbounds float, float* %tmp15501, i64 1
+ %tmp15503 = getelementptr inbounds float, float* %tmp15502, i64 1
+ %tmp15504 = getelementptr inbounds float, float* %tmp15503, i64 1
+ %tmp15505 = getelementptr inbounds float, float* %tmp15504, i64 1
+ %tmp15506 = getelementptr inbounds float, float* %tmp15505, i64 1
+ %tmp15507 = getelementptr inbounds float, float* %tmp15506, i64 1
+ %tmp15508 = getelementptr inbounds float, float* %tmp15507, i64 1
+ %tmp15509 = getelementptr inbounds float, float* %tmp15508, i64 1
+ %tmp15510 = getelementptr inbounds float, float* %tmp15509, i64 1
+ %tmp15511 = getelementptr inbounds float, float* %tmp15510, i64 1
+ %tmp15512 = getelementptr inbounds float, float* %tmp15511, i64 1
+ %tmp15513 = getelementptr inbounds float, float* %tmp15512, i64 1
+ %tmp15514 = getelementptr inbounds float, float* %tmp15513, i64 1
+ %tmp15515 = getelementptr inbounds float, float* %tmp15514, i64 1
+ %tmp15516 = getelementptr inbounds float, float* %tmp15515, i64 1
+ %tmp15517 = getelementptr inbounds float, float* %tmp15516, i64 1
+ %tmp15518 = getelementptr inbounds float, float* %tmp15517, i64 1
+ %tmp15519 = getelementptr inbounds float, float* %tmp15518, i64 1
+ %tmp15520 = getelementptr inbounds float, float* %tmp15519, i64 1
+ %tmp15521 = getelementptr inbounds float, float* %tmp15520, i64 1
+ %tmp15522 = getelementptr inbounds float, float* %tmp15521, i64 1
+ %tmp15523 = getelementptr inbounds float, float* %tmp15522, i64 1
+ %tmp15524 = getelementptr inbounds float, float* %tmp15523, i64 1
+ %tmp15525 = getelementptr inbounds float, float* %tmp15524, i64 1
+ %tmp15526 = getelementptr inbounds float, float* %tmp15525, i64 1
+ %tmp15527 = getelementptr inbounds float, float* %tmp15526, i64 1
+ %tmp15528 = getelementptr inbounds float, float* %tmp15527, i64 1
+ %tmp15529 = getelementptr inbounds float, float* %tmp15528, i64 1
+ %tmp15530 = getelementptr inbounds float, float* %tmp15529, i64 1
+ %tmp15531 = getelementptr inbounds float, float* %tmp15530, i64 1
+ %tmp15532 = getelementptr inbounds float, float* %tmp15531, i64 1
+ %tmp15533 = getelementptr inbounds float, float* %tmp15532, i64 1
+ %tmp15534 = getelementptr inbounds float, float* %tmp15533, i64 1
+ %tmp15535 = getelementptr inbounds float, float* %tmp15534, i64 1
+ %tmp15536 = getelementptr inbounds float, float* %tmp15535, i64 1
+ %tmp15537 = getelementptr inbounds float, float* %tmp15536, i64 1
+ %tmp15538 = getelementptr inbounds float, float* %tmp15537, i64 1
+ %tmp15539 = getelementptr inbounds float, float* %tmp15538, i64 1
+ %tmp15540 = getelementptr inbounds float, float* %tmp15539, i64 1
+ %tmp15541 = getelementptr inbounds float, float* %tmp15540, i64 1
+ %tmp15542 = getelementptr inbounds float, float* %tmp15541, i64 1
+ %tmp15543 = getelementptr inbounds float, float* %tmp15542, i64 1
+ %tmp15544 = getelementptr inbounds float, float* %tmp15543, i64 1
+ %tmp15545 = getelementptr inbounds float, float* %tmp15544, i64 1
+ %tmp15546 = getelementptr inbounds float, float* %tmp15545, i64 1
+ %tmp15547 = getelementptr inbounds float, float* %tmp15546, i64 1
+ %tmp15548 = getelementptr inbounds float, float* %tmp15547, i64 1
+ %tmp15549 = getelementptr inbounds float, float* %tmp15548, i64 1
+ %tmp15550 = getelementptr inbounds float, float* %tmp15549, i64 1
+ %tmp15551 = getelementptr inbounds float, float* %tmp15550, i64 1
+ %tmp15552 = getelementptr inbounds float, float* %tmp15551, i64 1
+ %tmp15553 = getelementptr inbounds float, float* %tmp15552, i64 1
+ %tmp15554 = getelementptr inbounds float, float* %tmp15553, i64 1
+ %tmp15555 = getelementptr inbounds float, float* %tmp15554, i64 1
+ %tmp15556 = getelementptr inbounds float, float* %tmp15555, i64 1
+ %tmp15557 = getelementptr inbounds float, float* %tmp15556, i64 1
+ %tmp15558 = getelementptr inbounds float, float* %tmp15557, i64 1
+ %tmp15559 = getelementptr inbounds float, float* %tmp15558, i64 1
+ %tmp15560 = getelementptr inbounds float, float* %tmp15559, i64 1
+ %tmp15561 = getelementptr inbounds float, float* %tmp15560, i64 1
+ %tmp15562 = getelementptr inbounds float, float* %tmp15561, i64 1
+ %tmp15563 = getelementptr inbounds float, float* %tmp15562, i64 1
+ %tmp15564 = getelementptr inbounds float, float* %tmp15563, i64 1
+ %tmp15565 = getelementptr inbounds float, float* %tmp15564, i64 1
+ %tmp15566 = getelementptr inbounds float, float* %tmp15565, i64 1
+ %tmp15567 = getelementptr inbounds float, float* %tmp15566, i64 1
+ %tmp15568 = getelementptr inbounds float, float* %tmp15567, i64 1
+ %tmp15569 = getelementptr inbounds float, float* %tmp15568, i64 1
+ %tmp15570 = getelementptr inbounds float, float* %tmp15569, i64 1
+ %tmp15571 = getelementptr inbounds float, float* %tmp15570, i64 1
+ %tmp15572 = getelementptr inbounds float, float* %tmp15571, i64 1
+ %tmp15573 = getelementptr inbounds float, float* %tmp15572, i64 1
+ %tmp15574 = getelementptr inbounds float, float* %tmp15573, i64 1
+ %tmp15575 = getelementptr inbounds float, float* %tmp15574, i64 1
+ %tmp15576 = getelementptr inbounds float, float* %tmp15575, i64 1
+ %tmp15577 = getelementptr inbounds float, float* %tmp15576, i64 1
+ %tmp15578 = getelementptr inbounds float, float* %tmp15577, i64 1
+ %tmp15579 = getelementptr inbounds float, float* %tmp15578, i64 1
+ %tmp15580 = getelementptr inbounds float, float* %tmp15579, i64 1
+ %tmp15581 = getelementptr inbounds float, float* %tmp15580, i64 1
+ %tmp15582 = getelementptr inbounds float, float* %tmp15581, i64 1
+ %tmp15583 = getelementptr inbounds float, float* %tmp15582, i64 1
+ %tmp15584 = getelementptr inbounds float, float* %tmp15583, i64 1
+ %tmp15585 = getelementptr inbounds float, float* %tmp15584, i64 1
+ %tmp15586 = getelementptr inbounds float, float* %tmp15585, i64 1
+ %tmp15587 = getelementptr inbounds float, float* %tmp15586, i64 1
+ %tmp15588 = getelementptr inbounds float, float* %tmp15587, i64 1
+ %tmp15589 = getelementptr inbounds float, float* %tmp15588, i64 1
+ %tmp15590 = getelementptr inbounds float, float* %tmp15589, i64 1
+ %tmp15591 = getelementptr inbounds float, float* %tmp15590, i64 1
+ %tmp15592 = getelementptr inbounds float, float* %tmp15591, i64 1
+ %tmp15593 = getelementptr inbounds float, float* %tmp15592, i64 1
+ %tmp15594 = getelementptr inbounds float, float* %tmp15593, i64 1
+ %tmp15595 = getelementptr inbounds float, float* %tmp15594, i64 1
+ %tmp15596 = getelementptr inbounds float, float* %tmp15595, i64 1
+ %tmp15597 = getelementptr inbounds float, float* %tmp15596, i64 1
+ %tmp15598 = getelementptr inbounds float, float* %tmp15597, i64 1
+ %tmp15599 = getelementptr inbounds float, float* %tmp15598, i64 1
+ %tmp15600 = getelementptr inbounds float, float* %tmp15599, i64 1
+ %tmp15601 = getelementptr inbounds float, float* %tmp15600, i64 1
+ %tmp15602 = getelementptr inbounds float, float* %tmp15601, i64 1
+ %tmp15603 = getelementptr inbounds float, float* %tmp15602, i64 1
+ %tmp15604 = getelementptr inbounds float, float* %tmp15603, i64 1
+ %tmp15605 = getelementptr inbounds float, float* %tmp15604, i64 1
+ %tmp15606 = getelementptr inbounds float, float* %tmp15605, i64 1
+ %tmp15607 = getelementptr inbounds float, float* %tmp15606, i64 1
+ %tmp15608 = getelementptr inbounds float, float* %tmp15607, i64 1
+ %tmp15609 = getelementptr inbounds float, float* %tmp15608, i64 1
+ %tmp15610 = getelementptr inbounds float, float* %tmp15609, i64 1
+ %tmp15611 = getelementptr inbounds float, float* %tmp15610, i64 1
+ %tmp15612 = getelementptr inbounds float, float* %tmp15611, i64 1
+ %tmp15613 = getelementptr inbounds float, float* %tmp15612, i64 1
+ %tmp15614 = getelementptr inbounds float, float* %tmp15613, i64 1
+ %tmp15615 = getelementptr inbounds float, float* %tmp15614, i64 1
+ %tmp15616 = getelementptr inbounds float, float* %tmp15615, i64 1
+ %tmp15617 = getelementptr inbounds float, float* %tmp15616, i64 1
+ %tmp15618 = getelementptr inbounds float, float* %tmp15617, i64 1
+ %tmp15619 = getelementptr inbounds float, float* %tmp15618, i64 1
+ %tmp15620 = getelementptr inbounds float, float* %tmp15619, i64 1
+ %tmp15621 = getelementptr inbounds float, float* %tmp15620, i64 1
+ %tmp15622 = getelementptr inbounds float, float* %tmp15621, i64 1
+ %tmp15623 = getelementptr inbounds float, float* %tmp15622, i64 1
+ %tmp15624 = getelementptr inbounds float, float* %tmp15623, i64 1
+ %tmp15625 = getelementptr inbounds float, float* %tmp15624, i64 1
+ %tmp15626 = getelementptr inbounds float, float* %tmp15625, i64 1
+ %tmp15627 = getelementptr inbounds float, float* %tmp15626, i64 1
+ %tmp15628 = getelementptr inbounds float, float* %tmp15627, i64 1
+ %tmp15629 = getelementptr inbounds float, float* %tmp15628, i64 1
+ %tmp15630 = getelementptr inbounds float, float* %tmp15629, i64 1
+ %tmp15631 = getelementptr inbounds float, float* %tmp15630, i64 1
+ %tmp15632 = getelementptr inbounds float, float* %tmp15631, i64 1
+ %tmp15633 = getelementptr inbounds float, float* %tmp15632, i64 1
+ %tmp15634 = getelementptr inbounds float, float* %tmp15633, i64 1
+ %tmp15635 = getelementptr inbounds float, float* %tmp15634, i64 1
+ %tmp15636 = getelementptr inbounds float, float* %tmp15635, i64 1
+ %tmp15637 = getelementptr inbounds float, float* %tmp15636, i64 1
+ %tmp15638 = getelementptr inbounds float, float* %tmp15637, i64 1
+ %tmp15639 = getelementptr inbounds float, float* %tmp15638, i64 1
+ %tmp15640 = getelementptr inbounds float, float* %tmp15639, i64 1
+ %tmp15641 = getelementptr inbounds float, float* %tmp15640, i64 1
+ %tmp15642 = getelementptr inbounds float, float* %tmp15641, i64 1
+ %tmp15643 = getelementptr inbounds float, float* %tmp15642, i64 1
+ %tmp15644 = getelementptr inbounds float, float* %tmp15643, i64 1
+ %tmp15645 = getelementptr inbounds float, float* %tmp15644, i64 1
+ %tmp15646 = getelementptr inbounds float, float* %tmp15645, i64 1
+ %tmp15647 = getelementptr inbounds float, float* %tmp15646, i64 1
+ %tmp15648 = getelementptr inbounds float, float* %tmp15647, i64 1
+ %tmp15649 = getelementptr inbounds float, float* %tmp15648, i64 1
+ %tmp15650 = getelementptr inbounds float, float* %tmp15649, i64 1
+ %tmp15651 = getelementptr inbounds float, float* %tmp15650, i64 1
+ %tmp15652 = getelementptr inbounds float, float* %tmp15651, i64 1
+ %tmp15653 = getelementptr inbounds float, float* %tmp15652, i64 1
+ %tmp15654 = getelementptr inbounds float, float* %tmp15653, i64 1
+ %tmp15655 = getelementptr inbounds float, float* %tmp15654, i64 1
+ %tmp15656 = getelementptr inbounds float, float* %tmp15655, i64 1
+ %tmp15657 = getelementptr inbounds float, float* %tmp15656, i64 1
+ %tmp15658 = getelementptr inbounds float, float* %tmp15657, i64 1
+ %tmp15659 = getelementptr inbounds float, float* %tmp15658, i64 1
+ %tmp15660 = getelementptr inbounds float, float* %tmp15659, i64 1
+ %tmp15661 = getelementptr inbounds float, float* %tmp15660, i64 1
+ %tmp15662 = getelementptr inbounds float, float* %tmp15661, i64 1
+ %tmp15663 = getelementptr inbounds float, float* %tmp15662, i64 1
+ %tmp15664 = getelementptr inbounds float, float* %tmp15663, i64 1
+ %tmp15665 = getelementptr inbounds float, float* %tmp15664, i64 1
+ %tmp15666 = getelementptr inbounds float, float* %tmp15665, i64 1
+ %tmp15667 = getelementptr inbounds float, float* %tmp15666, i64 1
+ %tmp15668 = getelementptr inbounds float, float* %tmp15667, i64 1
+ %tmp15669 = getelementptr inbounds float, float* %tmp15668, i64 1
+ %tmp15670 = getelementptr inbounds float, float* %tmp15669, i64 1
+ %tmp15671 = getelementptr inbounds float, float* %tmp15670, i64 1
+ %tmp15672 = getelementptr inbounds float, float* %tmp15671, i64 1
+ %tmp15673 = getelementptr inbounds float, float* %tmp15672, i64 1
+ %tmp15674 = getelementptr inbounds float, float* %tmp15673, i64 1
+ %tmp15675 = getelementptr inbounds float, float* %tmp15674, i64 1
+ %tmp15676 = getelementptr inbounds float, float* %tmp15675, i64 1
+ %tmp15677 = getelementptr inbounds float, float* %tmp15676, i64 1
+ %tmp15678 = getelementptr inbounds float, float* %tmp15677, i64 1
+ %tmp15679 = getelementptr inbounds float, float* %tmp15678, i64 1
+ %tmp15680 = getelementptr inbounds float, float* %tmp15679, i64 1
+ %tmp15681 = getelementptr inbounds float, float* %tmp15680, i64 1
+ %tmp15682 = getelementptr inbounds float, float* %tmp15681, i64 1
+ %tmp15683 = getelementptr inbounds float, float* %tmp15682, i64 1
+ %tmp15684 = getelementptr inbounds float, float* %tmp15683, i64 1
+ %tmp15685 = getelementptr inbounds float, float* %tmp15684, i64 1
+ %tmp15686 = getelementptr inbounds float, float* %tmp15685, i64 1
+ %tmp15687 = getelementptr inbounds float, float* %tmp15686, i64 1
+ %tmp15688 = getelementptr inbounds float, float* %tmp15687, i64 1
+ %tmp15689 = getelementptr inbounds float, float* %tmp15688, i64 1
+ %tmp15690 = getelementptr inbounds float, float* %tmp15689, i64 1
+ %tmp15691 = getelementptr inbounds float, float* %tmp15690, i64 1
+ %tmp15692 = getelementptr inbounds float, float* %tmp15691, i64 1
+ %tmp15693 = getelementptr inbounds float, float* %tmp15692, i64 1
+ %tmp15694 = getelementptr inbounds float, float* %tmp15693, i64 1
+ %tmp15695 = getelementptr inbounds float, float* %tmp15694, i64 1
+ %tmp15696 = getelementptr inbounds float, float* %tmp15695, i64 1
+ %tmp15697 = getelementptr inbounds float, float* %tmp15696, i64 1
+ %tmp15698 = getelementptr inbounds float, float* %tmp15697, i64 1
+ %tmp15699 = getelementptr inbounds float, float* %tmp15698, i64 1
+ %tmp15700 = getelementptr inbounds float, float* %tmp15699, i64 1
+ %tmp15701 = getelementptr inbounds float, float* %tmp15700, i64 1
+ %tmp15702 = getelementptr inbounds float, float* %tmp15701, i64 1
+ %tmp15703 = getelementptr inbounds float, float* %tmp15702, i64 1
+ %tmp15704 = getelementptr inbounds float, float* %tmp15703, i64 1
+ %tmp15705 = getelementptr inbounds float, float* %tmp15704, i64 1
+ %tmp15706 = getelementptr inbounds float, float* %tmp15705, i64 1
+ %tmp15707 = getelementptr inbounds float, float* %tmp15706, i64 1
+ %tmp15708 = getelementptr inbounds float, float* %tmp15707, i64 1
+ %tmp15709 = getelementptr inbounds float, float* %tmp15708, i64 1
+ %tmp15710 = getelementptr inbounds float, float* %tmp15709, i64 1
+ %tmp15711 = getelementptr inbounds float, float* %tmp15710, i64 1
+ %tmp15712 = getelementptr inbounds float, float* %tmp15711, i64 1
+ %tmp15713 = getelementptr inbounds float, float* %tmp15712, i64 1
+ %tmp15714 = getelementptr inbounds float, float* %tmp15713, i64 1
+ %tmp15715 = getelementptr inbounds float, float* %tmp15714, i64 1
+ %tmp15716 = getelementptr inbounds float, float* %tmp15715, i64 1
+ %tmp15717 = getelementptr inbounds float, float* %tmp15716, i64 1
+ %tmp15718 = getelementptr inbounds float, float* %tmp15717, i64 1
+ %tmp15719 = getelementptr inbounds float, float* %tmp15718, i64 1
+ %tmp15720 = getelementptr inbounds float, float* %tmp15719, i64 1
+ %tmp15721 = getelementptr inbounds float, float* %tmp15720, i64 1
+ %tmp15722 = getelementptr inbounds float, float* %tmp15721, i64 1
+ %tmp15723 = getelementptr inbounds float, float* %tmp15722, i64 1
+ %tmp15724 = getelementptr inbounds float, float* %tmp15723, i64 1
+ %tmp15725 = getelementptr inbounds float, float* %tmp15724, i64 1
+ %tmp15726 = getelementptr inbounds float, float* %tmp15725, i64 1
+ %tmp15727 = getelementptr inbounds float, float* %tmp15726, i64 1
+ %tmp15728 = getelementptr inbounds float, float* %tmp15727, i64 1
+ %tmp15729 = getelementptr inbounds float, float* %tmp15728, i64 1
+ %tmp15730 = getelementptr inbounds float, float* %tmp15729, i64 1
+ %tmp15731 = getelementptr inbounds float, float* %tmp15730, i64 1
+ %tmp15732 = getelementptr inbounds float, float* %tmp15731, i64 1
+ %tmp15733 = getelementptr inbounds float, float* %tmp15732, i64 1
+ %tmp15734 = getelementptr inbounds float, float* %tmp15733, i64 1
+ %tmp15735 = getelementptr inbounds float, float* %tmp15734, i64 1
+ %tmp15736 = getelementptr inbounds float, float* %tmp15735, i64 1
+ %tmp15737 = getelementptr inbounds float, float* %tmp15736, i64 1
+ %tmp15738 = getelementptr inbounds float, float* %tmp15737, i64 1
+ %tmp15739 = getelementptr inbounds float, float* %tmp15738, i64 1
+ %tmp15740 = getelementptr inbounds float, float* %tmp15739, i64 1
+ %tmp15741 = getelementptr inbounds float, float* %tmp15740, i64 1
+ %tmp15742 = getelementptr inbounds float, float* %tmp15741, i64 1
+ %tmp15743 = getelementptr inbounds float, float* %tmp15742, i64 1
+ %tmp15744 = getelementptr inbounds float, float* %tmp15743, i64 1
+ %tmp15745 = getelementptr inbounds float, float* %tmp15744, i64 1
+ %tmp15746 = getelementptr inbounds float, float* %tmp15745, i64 1
+ %tmp15747 = getelementptr inbounds float, float* %tmp15746, i64 1
+ %tmp15748 = getelementptr inbounds float, float* %tmp15747, i64 1
+ %tmp15749 = getelementptr inbounds float, float* %tmp15748, i64 1
+ %tmp15750 = getelementptr inbounds float, float* %tmp15749, i64 1
+ %tmp15751 = getelementptr inbounds float, float* %tmp15750, i64 1
+ %tmp15752 = getelementptr inbounds float, float* %tmp15751, i64 1
+ %tmp15753 = getelementptr inbounds float, float* %tmp15752, i64 1
+ %tmp15754 = getelementptr inbounds float, float* %tmp15753, i64 1
+ %tmp15755 = getelementptr inbounds float, float* %tmp15754, i64 1
+ %tmp15756 = getelementptr inbounds float, float* %tmp15755, i64 1
+ %tmp15757 = getelementptr inbounds float, float* %tmp15756, i64 1
+ %tmp15758 = getelementptr inbounds float, float* %tmp15757, i64 1
+ %tmp15759 = getelementptr inbounds float, float* %tmp15758, i64 1
+ %tmp15760 = getelementptr inbounds float, float* %tmp15759, i64 1
+ %tmp15761 = getelementptr inbounds float, float* %tmp15760, i64 1
+ %tmp15762 = getelementptr inbounds float, float* %tmp15761, i64 1
+ %tmp15763 = getelementptr inbounds float, float* %tmp15762, i64 1
+ %tmp15764 = getelementptr inbounds float, float* %tmp15763, i64 1
+ %tmp15765 = getelementptr inbounds float, float* %tmp15764, i64 1
+ %tmp15766 = getelementptr inbounds float, float* %tmp15765, i64 1
+ %tmp15767 = getelementptr inbounds float, float* %tmp15766, i64 1
+ %tmp15768 = getelementptr inbounds float, float* %tmp15767, i64 1
+ %tmp15769 = getelementptr inbounds float, float* %tmp15768, i64 1
+ %tmp15770 = getelementptr inbounds float, float* %tmp15769, i64 1
+ %tmp15771 = getelementptr inbounds float, float* %tmp15770, i64 1
+ %tmp15772 = getelementptr inbounds float, float* %tmp15771, i64 1
+ %tmp15773 = getelementptr inbounds float, float* %tmp15772, i64 1
+ %tmp15774 = getelementptr inbounds float, float* %tmp15773, i64 1
+ %tmp15775 = getelementptr inbounds float, float* %tmp15774, i64 1
+ %tmp15776 = getelementptr inbounds float, float* %tmp15775, i64 1
+ %tmp15777 = getelementptr inbounds float, float* %tmp15776, i64 1
+ %tmp15778 = getelementptr inbounds float, float* %tmp15777, i64 1
+ %tmp15779 = getelementptr inbounds float, float* %tmp15778, i64 1
+ %tmp15780 = getelementptr inbounds float, float* %tmp15779, i64 1
+ %tmp15781 = getelementptr inbounds float, float* %tmp15780, i64 1
+ %tmp15782 = getelementptr inbounds float, float* %tmp15781, i64 1
+ %tmp15783 = getelementptr inbounds float, float* %tmp15782, i64 1
+ %tmp15784 = getelementptr inbounds float, float* %tmp15783, i64 1
+ %tmp15785 = getelementptr inbounds float, float* %tmp15784, i64 1
+ %tmp15786 = getelementptr inbounds float, float* %tmp15785, i64 1
+ %tmp15787 = getelementptr inbounds float, float* %tmp15786, i64 1
+ %tmp15788 = getelementptr inbounds float, float* %tmp15787, i64 1
+ %tmp15789 = getelementptr inbounds float, float* %tmp15788, i64 1
+ %tmp15790 = getelementptr inbounds float, float* %tmp15789, i64 1
+ %tmp15791 = getelementptr inbounds float, float* %tmp15790, i64 1
+ %tmp15792 = getelementptr inbounds float, float* %tmp15791, i64 1
+ %tmp15793 = getelementptr inbounds float, float* %tmp15792, i64 1
+ %tmp15794 = getelementptr inbounds float, float* %tmp15793, i64 1
+ %tmp15795 = getelementptr inbounds float, float* %tmp15794, i64 1
+ %tmp15796 = getelementptr inbounds float, float* %tmp15795, i64 1
+ %tmp15797 = getelementptr inbounds float, float* %tmp15796, i64 1
+ %tmp15798 = getelementptr inbounds float, float* %tmp15797, i64 1
+ %tmp15799 = getelementptr inbounds float, float* %tmp15798, i64 1
+ %tmp15800 = getelementptr inbounds float, float* %tmp15799, i64 1
+ %tmp15801 = getelementptr inbounds float, float* %tmp15800, i64 1
+ %tmp15802 = getelementptr inbounds float, float* %tmp15801, i64 1
+ %tmp15803 = getelementptr inbounds float, float* %tmp15802, i64 1
+ %tmp15804 = getelementptr inbounds float, float* %tmp15803, i64 1
+ %tmp15805 = getelementptr inbounds float, float* %tmp15804, i64 1
+ %tmp15806 = getelementptr inbounds float, float* %tmp15805, i64 1
+ %tmp15807 = getelementptr inbounds float, float* %tmp15806, i64 1
+ %tmp15808 = getelementptr inbounds float, float* %tmp15807, i64 1
+ %tmp15809 = getelementptr inbounds float, float* %tmp15808, i64 1
+ %tmp15810 = getelementptr inbounds float, float* %tmp15809, i64 1
+ %tmp15811 = getelementptr inbounds float, float* %tmp15810, i64 1
+ %tmp15812 = getelementptr inbounds float, float* %tmp15811, i64 1
+ %tmp15813 = getelementptr inbounds float, float* %tmp15812, i64 1
+ %tmp15814 = getelementptr inbounds float, float* %tmp15813, i64 1
+ %tmp15815 = getelementptr inbounds float, float* %tmp15814, i64 1
+ %tmp15816 = getelementptr inbounds float, float* %tmp15815, i64 1
+ %tmp15817 = getelementptr inbounds float, float* %tmp15816, i64 1
+ %tmp15818 = getelementptr inbounds float, float* %tmp15817, i64 1
+ %tmp15819 = getelementptr inbounds float, float* %tmp15818, i64 1
+ %tmp15820 = getelementptr inbounds float, float* %tmp15819, i64 1
+ %tmp15821 = getelementptr inbounds float, float* %tmp15820, i64 1
+ %tmp15822 = getelementptr inbounds float, float* %tmp15821, i64 1
+ %tmp15823 = getelementptr inbounds float, float* %tmp15822, i64 1
+ %tmp15824 = getelementptr inbounds float, float* %tmp15823, i64 1
+ %tmp15825 = getelementptr inbounds float, float* %tmp15824, i64 1
+ %tmp15826 = getelementptr inbounds float, float* %tmp15825, i64 1
+ %tmp15827 = getelementptr inbounds float, float* %tmp15826, i64 1
+ %tmp15828 = getelementptr inbounds float, float* %tmp15827, i64 1
+ %tmp15829 = getelementptr inbounds float, float* %tmp15828, i64 1
+ %tmp15830 = getelementptr inbounds float, float* %tmp15829, i64 1
+ %tmp15831 = getelementptr inbounds float, float* %tmp15830, i64 1
+ %tmp15832 = getelementptr inbounds float, float* %tmp15831, i64 1
+ %tmp15833 = getelementptr inbounds float, float* %tmp15832, i64 1
+ %tmp15834 = getelementptr inbounds float, float* %tmp15833, i64 1
+ %tmp15835 = getelementptr inbounds float, float* %tmp15834, i64 1
+ %tmp15836 = getelementptr inbounds float, float* %tmp15835, i64 1
+ %tmp15837 = getelementptr inbounds float, float* %tmp15836, i64 1
+ %tmp15838 = getelementptr inbounds float, float* %tmp15837, i64 1
+ %tmp15839 = getelementptr inbounds float, float* %tmp15838, i64 1
+ %tmp15840 = getelementptr inbounds float, float* %tmp15839, i64 1
+ %tmp15841 = getelementptr inbounds float, float* %tmp15840, i64 1
+ %tmp15842 = getelementptr inbounds float, float* %tmp15841, i64 1
+ %tmp15843 = getelementptr inbounds float, float* %tmp15842, i64 1
+ %tmp15844 = getelementptr inbounds float, float* %tmp15843, i64 1
+ %tmp15845 = getelementptr inbounds float, float* %tmp15844, i64 1
+ %tmp15846 = getelementptr inbounds float, float* %tmp15845, i64 1
+ %tmp15847 = getelementptr inbounds float, float* %tmp15846, i64 1
+ %tmp15848 = getelementptr inbounds float, float* %tmp15847, i64 1
+ %tmp15849 = getelementptr inbounds float, float* %tmp15848, i64 1
+ %tmp15850 = getelementptr inbounds float, float* %tmp15849, i64 1
+ %tmp15851 = getelementptr inbounds float, float* %tmp15850, i64 1
+ %tmp15852 = getelementptr inbounds float, float* %tmp15851, i64 1
+ %tmp15853 = getelementptr inbounds float, float* %tmp15852, i64 1
+ %tmp15854 = getelementptr inbounds float, float* %tmp15853, i64 1
+ %tmp15855 = getelementptr inbounds float, float* %tmp15854, i64 1
+ %tmp15856 = getelementptr inbounds float, float* %tmp15855, i64 1
+ %tmp15857 = getelementptr inbounds float, float* %tmp15856, i64 1
+ %tmp15858 = getelementptr inbounds float, float* %tmp15857, i64 1
+ %tmp15859 = getelementptr inbounds float, float* %tmp15858, i64 1
+ %tmp15860 = getelementptr inbounds float, float* %tmp15859, i64 1
+ %tmp15861 = getelementptr inbounds float, float* %tmp15860, i64 1
+ %tmp15862 = getelementptr inbounds float, float* %tmp15861, i64 1
+ %tmp15863 = getelementptr inbounds float, float* %tmp15862, i64 1
+ %tmp15864 = getelementptr inbounds float, float* %tmp15863, i64 1
+ %tmp15865 = getelementptr inbounds float, float* %tmp15864, i64 1
+ %tmp15866 = getelementptr inbounds float, float* %tmp15865, i64 1
+ %tmp15867 = getelementptr inbounds float, float* %tmp15866, i64 1
+ %tmp15868 = getelementptr inbounds float, float* %tmp15867, i64 1
+ %tmp15869 = getelementptr inbounds float, float* %tmp15868, i64 1
+ %tmp15870 = getelementptr inbounds float, float* %tmp15869, i64 1
+ %tmp15871 = getelementptr inbounds float, float* %tmp15870, i64 1
+ %tmp15872 = getelementptr inbounds float, float* %tmp15871, i64 1
+ %tmp15873 = getelementptr inbounds float, float* %tmp15872, i64 1
+ %tmp15874 = getelementptr inbounds float, float* %tmp15873, i64 1
+ %tmp15875 = getelementptr inbounds float, float* %tmp15874, i64 1
+ %tmp15876 = getelementptr inbounds float, float* %tmp15875, i64 1
+ %tmp15877 = getelementptr inbounds float, float* %tmp15876, i64 1
+ %tmp15878 = getelementptr inbounds float, float* %tmp15877, i64 1
+ %tmp15879 = getelementptr inbounds float, float* %tmp15878, i64 1
+ %tmp15880 = getelementptr inbounds float, float* %tmp15879, i64 1
+ %tmp15881 = getelementptr inbounds float, float* %tmp15880, i64 1
+ %tmp15882 = getelementptr inbounds float, float* %tmp15881, i64 1
+ %tmp15883 = getelementptr inbounds float, float* %tmp15882, i64 1
+ %tmp15884 = getelementptr inbounds float, float* %tmp15883, i64 1
+ %tmp15885 = getelementptr inbounds float, float* %tmp15884, i64 1
+ %tmp15886 = getelementptr inbounds float, float* %tmp15885, i64 1
+ %tmp15887 = getelementptr inbounds float, float* %tmp15886, i64 1
+ %tmp15888 = getelementptr inbounds float, float* %tmp15887, i64 1
+ %tmp15889 = getelementptr inbounds float, float* %tmp15888, i64 1
+ %tmp15890 = getelementptr inbounds float, float* %tmp15889, i64 1
+ %tmp15891 = getelementptr inbounds float, float* %tmp15890, i64 1
+ %tmp15892 = getelementptr inbounds float, float* %tmp15891, i64 1
+ %tmp15893 = getelementptr inbounds float, float* %tmp15892, i64 1
+ %tmp15894 = getelementptr inbounds float, float* %tmp15893, i64 1
+ %tmp15895 = getelementptr inbounds float, float* %tmp15894, i64 1
+ %tmp15896 = getelementptr inbounds float, float* %tmp15895, i64 1
+ %tmp15897 = getelementptr inbounds float, float* %tmp15896, i64 1
+ %tmp15898 = getelementptr inbounds float, float* %tmp15897, i64 1
+ %tmp15899 = getelementptr inbounds float, float* %tmp15898, i64 1
+ %tmp15900 = getelementptr inbounds float, float* %tmp15899, i64 1
+ %tmp15901 = getelementptr inbounds float, float* %tmp15900, i64 1
+ %tmp15902 = getelementptr inbounds float, float* %tmp15901, i64 1
+ %tmp15903 = getelementptr inbounds float, float* %tmp15902, i64 1
+ %tmp15904 = getelementptr inbounds float, float* %tmp15903, i64 1
+ %tmp15905 = getelementptr inbounds float, float* %tmp15904, i64 1
+ %tmp15906 = getelementptr inbounds float, float* %tmp15905, i64 1
+ %tmp15907 = getelementptr inbounds float, float* %tmp15906, i64 1
+ %tmp15908 = getelementptr inbounds float, float* %tmp15907, i64 1
+ %tmp15909 = getelementptr inbounds float, float* %tmp15908, i64 1
+ %tmp15910 = getelementptr inbounds float, float* %tmp15909, i64 1
+ %tmp15911 = getelementptr inbounds float, float* %tmp15910, i64 1
+ %tmp15912 = getelementptr inbounds float, float* %tmp15911, i64 1
+ %tmp15913 = getelementptr inbounds float, float* %tmp15912, i64 1
+ %tmp15914 = getelementptr inbounds float, float* %tmp15913, i64 1
+ %tmp15915 = getelementptr inbounds float, float* %tmp15914, i64 1
+ %tmp15916 = getelementptr inbounds float, float* %tmp15915, i64 1
+ %tmp15917 = getelementptr inbounds float, float* %tmp15916, i64 1
+ %tmp15918 = getelementptr inbounds float, float* %tmp15917, i64 1
+ %tmp15919 = getelementptr inbounds float, float* %tmp15918, i64 1
+ %tmp15920 = getelementptr inbounds float, float* %tmp15919, i64 1
+ %tmp15921 = getelementptr inbounds float, float* %tmp15920, i64 1
+ %tmp15922 = getelementptr inbounds float, float* %tmp15921, i64 1
+ %tmp15923 = getelementptr inbounds float, float* %tmp15922, i64 1
+ %tmp15924 = getelementptr inbounds float, float* %tmp15923, i64 1
+ %tmp15925 = getelementptr inbounds float, float* %tmp15924, i64 1
+ %tmp15926 = getelementptr inbounds float, float* %tmp15925, i64 1
+ %tmp15927 = getelementptr inbounds float, float* %tmp15926, i64 1
+ %tmp15928 = getelementptr inbounds float, float* %tmp15927, i64 1
+ %tmp15929 = getelementptr inbounds float, float* %tmp15928, i64 1
+ %tmp15930 = getelementptr inbounds float, float* %tmp15929, i64 1
+ %tmp15931 = getelementptr inbounds float, float* %tmp15930, i64 1
+ %tmp15932 = getelementptr inbounds float, float* %tmp15931, i64 1
+ %tmp15933 = getelementptr inbounds float, float* %tmp15932, i64 1
+ %tmp15934 = getelementptr inbounds float, float* %tmp15933, i64 1
+ %tmp15935 = getelementptr inbounds float, float* %tmp15934, i64 1
+ %tmp15936 = getelementptr inbounds float, float* %tmp15935, i64 1
+ %tmp15937 = getelementptr inbounds float, float* %tmp15936, i64 1
+ %tmp15938 = getelementptr inbounds float, float* %tmp15937, i64 1
+ %tmp15939 = getelementptr inbounds float, float* %tmp15938, i64 1
+ %tmp15940 = getelementptr inbounds float, float* %tmp15939, i64 1
+ %tmp15941 = getelementptr inbounds float, float* %tmp15940, i64 1
+ %tmp15942 = getelementptr inbounds float, float* %tmp15941, i64 1
+ %tmp15943 = getelementptr inbounds float, float* %tmp15942, i64 1
+ %tmp15944 = getelementptr inbounds float, float* %tmp15943, i64 1
+ %tmp15945 = getelementptr inbounds float, float* %tmp15944, i64 1
+ %tmp15946 = getelementptr inbounds float, float* %tmp15945, i64 1
+ %tmp15947 = getelementptr inbounds float, float* %tmp15946, i64 1
+ %tmp15948 = getelementptr inbounds float, float* %tmp15947, i64 1
+ %tmp15949 = getelementptr inbounds float, float* %tmp15948, i64 1
+ %tmp15950 = getelementptr inbounds float, float* %tmp15949, i64 1
+ %tmp15951 = getelementptr inbounds float, float* %tmp15950, i64 1
+ %tmp15952 = getelementptr inbounds float, float* %tmp15951, i64 1
+ %tmp15953 = getelementptr inbounds float, float* %tmp15952, i64 1
+ %tmp15954 = getelementptr inbounds float, float* %tmp15953, i64 1
+ %tmp15955 = getelementptr inbounds float, float* %tmp15954, i64 1
+ %tmp15956 = getelementptr inbounds float, float* %tmp15955, i64 1
+ %tmp15957 = getelementptr inbounds float, float* %tmp15956, i64 1
+ %tmp15958 = getelementptr inbounds float, float* %tmp15957, i64 1
+ %tmp15959 = getelementptr inbounds float, float* %tmp15958, i64 1
+ %tmp15960 = getelementptr inbounds float, float* %tmp15959, i64 1
+ %tmp15961 = getelementptr inbounds float, float* %tmp15960, i64 1
+ %tmp15962 = getelementptr inbounds float, float* %tmp15961, i64 1
+ %tmp15963 = getelementptr inbounds float, float* %tmp15962, i64 1
+ %tmp15964 = getelementptr inbounds float, float* %tmp15963, i64 1
+ %tmp15965 = getelementptr inbounds float, float* %tmp15964, i64 1
+ %tmp15966 = getelementptr inbounds float, float* %tmp15965, i64 1
+ %tmp15967 = getelementptr inbounds float, float* %tmp15966, i64 1
+ %tmp15968 = getelementptr inbounds float, float* %tmp15967, i64 1
+ %tmp15969 = getelementptr inbounds float, float* %tmp15968, i64 1
+ %tmp15970 = getelementptr inbounds float, float* %tmp15969, i64 1
+ %tmp15971 = getelementptr inbounds float, float* %tmp15970, i64 1
+ %tmp15972 = getelementptr inbounds float, float* %tmp15971, i64 1
+ %tmp15973 = getelementptr inbounds float, float* %tmp15972, i64 1
+ %tmp15974 = getelementptr inbounds float, float* %tmp15973, i64 1
+ %tmp15975 = getelementptr inbounds float, float* %tmp15974, i64 1
+ %tmp15976 = getelementptr inbounds float, float* %tmp15975, i64 1
+ %tmp15977 = getelementptr inbounds float, float* %tmp15976, i64 1
+ %tmp15978 = getelementptr inbounds float, float* %tmp15977, i64 1
+ %tmp15979 = getelementptr inbounds float, float* %tmp15978, i64 1
+ %tmp15980 = getelementptr inbounds float, float* %tmp15979, i64 1
+ %tmp15981 = getelementptr inbounds float, float* %tmp15980, i64 1
+ %tmp15982 = getelementptr inbounds float, float* %tmp15981, i64 1
+ %tmp15983 = getelementptr inbounds float, float* %tmp15982, i64 1
+ %tmp15984 = getelementptr inbounds float, float* %tmp15983, i64 1
+ %tmp15985 = getelementptr inbounds float, float* %tmp15984, i64 1
+ %tmp15986 = getelementptr inbounds float, float* %tmp15985, i64 1
+ %tmp15987 = getelementptr inbounds float, float* %tmp15986, i64 1
+ %tmp15988 = getelementptr inbounds float, float* %tmp15987, i64 1
+ %tmp15989 = getelementptr inbounds float, float* %tmp15988, i64 1
+ %tmp15990 = getelementptr inbounds float, float* %tmp15989, i64 1
+ %tmp15991 = getelementptr inbounds float, float* %tmp15990, i64 1
+ %tmp15992 = getelementptr inbounds float, float* %tmp15991, i64 1
+ %tmp15993 = getelementptr inbounds float, float* %tmp15992, i64 1
+ %tmp15994 = getelementptr inbounds float, float* %tmp15993, i64 1
+ %tmp15995 = getelementptr inbounds float, float* %tmp15994, i64 1
+ %tmp15996 = getelementptr inbounds float, float* %tmp15995, i64 1
+ %tmp15997 = getelementptr inbounds float, float* %tmp15996, i64 1
+ %tmp15998 = getelementptr inbounds float, float* %tmp15997, i64 1
+ %tmp15999 = getelementptr inbounds float, float* %tmp15998, i64 1
+ %tmp16000 = getelementptr inbounds float, float* %tmp15999, i64 1
+ %tmp16001 = getelementptr inbounds float, float* %tmp16000, i64 1
+ %tmp16002 = getelementptr inbounds float, float* %tmp16001, i64 1
+ %tmp16003 = getelementptr inbounds float, float* %tmp16002, i64 1
+ %tmp16004 = getelementptr inbounds float, float* %tmp16003, i64 1
+ %tmp16005 = getelementptr inbounds float, float* %tmp16004, i64 1
+ %tmp16006 = getelementptr inbounds float, float* %tmp16005, i64 1
+ %tmp16007 = getelementptr inbounds float, float* %tmp16006, i64 1
+ %tmp16008 = getelementptr inbounds float, float* %tmp16007, i64 1
+ %tmp16009 = getelementptr inbounds float, float* %tmp16008, i64 1
+ %tmp16010 = getelementptr inbounds float, float* %tmp16009, i64 1
+ %tmp16011 = getelementptr inbounds float, float* %tmp16010, i64 1
+ %tmp16012 = getelementptr inbounds float, float* %tmp16011, i64 1
+ %tmp16013 = getelementptr inbounds float, float* %tmp16012, i64 1
+ %tmp16014 = getelementptr inbounds float, float* %tmp16013, i64 1
+ %tmp16015 = getelementptr inbounds float, float* %tmp16014, i64 1
+ %tmp16016 = getelementptr inbounds float, float* %tmp16015, i64 1
+ %tmp16017 = getelementptr inbounds float, float* %tmp16016, i64 1
+ %tmp16018 = getelementptr inbounds float, float* %tmp16017, i64 1
+ %tmp16019 = getelementptr inbounds float, float* %tmp16018, i64 1
+ %tmp16020 = getelementptr inbounds float, float* %tmp16019, i64 1
+ %tmp16021 = getelementptr inbounds float, float* %tmp16020, i64 1
+ %tmp16022 = getelementptr inbounds float, float* %tmp16021, i64 1
+ %tmp16023 = getelementptr inbounds float, float* %tmp16022, i64 1
+ %tmp16024 = getelementptr inbounds float, float* %tmp16023, i64 1
+ %tmp16025 = getelementptr inbounds float, float* %tmp16024, i64 1
+ %tmp16026 = getelementptr inbounds float, float* %tmp16025, i64 1
+ %tmp16027 = getelementptr inbounds float, float* %tmp16026, i64 1
+ %tmp16028 = getelementptr inbounds float, float* %tmp16027, i64 1
+ %tmp16029 = getelementptr inbounds float, float* %tmp16028, i64 1
+ %tmp16030 = getelementptr inbounds float, float* %tmp16029, i64 1
+ %tmp16031 = getelementptr inbounds float, float* %tmp16030, i64 1
+ %tmp16032 = getelementptr inbounds float, float* %tmp16031, i64 1
+ %tmp16033 = getelementptr inbounds float, float* %tmp16032, i64 1
+ %tmp16034 = getelementptr inbounds float, float* %tmp16033, i64 1
+ %tmp16035 = getelementptr inbounds float, float* %tmp16034, i64 1
+ %tmp16036 = getelementptr inbounds float, float* %tmp16035, i64 1
+ %tmp16037 = getelementptr inbounds float, float* %tmp16036, i64 1
+ %tmp16038 = getelementptr inbounds float, float* %tmp16037, i64 1
+ %tmp16039 = getelementptr inbounds float, float* %tmp16038, i64 1
+ %tmp16040 = getelementptr inbounds float, float* %tmp16039, i64 1
+ %tmp16041 = getelementptr inbounds float, float* %tmp16040, i64 1
+ %tmp16042 = getelementptr inbounds float, float* %tmp16041, i64 1
+ %tmp16043 = getelementptr inbounds float, float* %tmp16042, i64 1
+ %tmp16044 = getelementptr inbounds float, float* %tmp16043, i64 1
+ %tmp16045 = getelementptr inbounds float, float* %tmp16044, i64 1
+ %tmp16046 = getelementptr inbounds float, float* %tmp16045, i64 1
+ %tmp16047 = getelementptr inbounds float, float* %tmp16046, i64 1
+ %tmp16048 = getelementptr inbounds float, float* %tmp16047, i64 1
+ %tmp16049 = getelementptr inbounds float, float* %tmp16048, i64 1
+ %tmp16050 = getelementptr inbounds float, float* %tmp16049, i64 1
+ %tmp16051 = getelementptr inbounds float, float* %tmp16050, i64 1
+ %tmp16052 = getelementptr inbounds float, float* %tmp16051, i64 1
+ %tmp16053 = getelementptr inbounds float, float* %tmp16052, i64 1
+ %tmp16054 = getelementptr inbounds float, float* %tmp16053, i64 1
+ %tmp16055 = getelementptr inbounds float, float* %tmp16054, i64 1
+ %tmp16056 = getelementptr inbounds float, float* %tmp16055, i64 1
+ %tmp16057 = getelementptr inbounds float, float* %tmp16056, i64 1
+ %tmp16058 = getelementptr inbounds float, float* %tmp16057, i64 1
+ %tmp16059 = getelementptr inbounds float, float* %tmp16058, i64 1
+ %tmp16060 = getelementptr inbounds float, float* %tmp16059, i64 1
+ %tmp16061 = getelementptr inbounds float, float* %tmp16060, i64 1
+ %tmp16062 = getelementptr inbounds float, float* %tmp16061, i64 1
+ %tmp16063 = getelementptr inbounds float, float* %tmp16062, i64 1
+ %tmp16064 = getelementptr inbounds float, float* %tmp16063, i64 1
+ %tmp16065 = getelementptr inbounds float, float* %tmp16064, i64 1
+ %tmp16066 = getelementptr inbounds float, float* %tmp16065, i64 1
+ %tmp16067 = getelementptr inbounds float, float* %tmp16066, i64 1
+ %tmp16068 = getelementptr inbounds float, float* %tmp16067, i64 1
+ %tmp16069 = getelementptr inbounds float, float* %tmp16068, i64 1
+ %tmp16070 = getelementptr inbounds float, float* %tmp16069, i64 1
+ %tmp16071 = getelementptr inbounds float, float* %tmp16070, i64 1
+ %tmp16072 = getelementptr inbounds float, float* %tmp16071, i64 1
+ %tmp16073 = getelementptr inbounds float, float* %tmp16072, i64 1
+ %tmp16074 = getelementptr inbounds float, float* %tmp16073, i64 1
+ %tmp16075 = getelementptr inbounds float, float* %tmp16074, i64 1
+ %tmp16076 = getelementptr inbounds float, float* %tmp16075, i64 1
+ %tmp16077 = getelementptr inbounds float, float* %tmp16076, i64 1
+ %tmp16078 = getelementptr inbounds float, float* %tmp16077, i64 1
+ %tmp16079 = getelementptr inbounds float, float* %tmp16078, i64 1
+ %tmp16080 = getelementptr inbounds float, float* %tmp16079, i64 1
+ %tmp16081 = getelementptr inbounds float, float* %tmp16080, i64 1
+ %tmp16082 = getelementptr inbounds float, float* %tmp16081, i64 1
+ %tmp16083 = getelementptr inbounds float, float* %tmp16082, i64 1
+ %tmp16084 = getelementptr inbounds float, float* %tmp16083, i64 1
+ %tmp16085 = getelementptr inbounds float, float* %tmp16084, i64 1
+ %tmp16086 = getelementptr inbounds float, float* %tmp16085, i64 1
+ %tmp16087 = getelementptr inbounds float, float* %tmp16086, i64 1
+ %tmp16088 = getelementptr inbounds float, float* %tmp16087, i64 1
+ %tmp16089 = getelementptr inbounds float, float* %tmp16088, i64 1
+ %tmp16090 = getelementptr inbounds float, float* %tmp16089, i64 1
+ %tmp16091 = getelementptr inbounds float, float* %tmp16090, i64 1
+ %tmp16092 = getelementptr inbounds float, float* %tmp16091, i64 1
+ %tmp16093 = getelementptr inbounds float, float* %tmp16092, i64 1
+ %tmp16094 = getelementptr inbounds float, float* %tmp16093, i64 1
+ %tmp16095 = getelementptr inbounds float, float* %tmp16094, i64 1
+ %tmp16096 = getelementptr inbounds float, float* %tmp16095, i64 1
+ %tmp16097 = getelementptr inbounds float, float* %tmp16096, i64 1
+ %tmp16098 = getelementptr inbounds float, float* %tmp16097, i64 1
+ %tmp16099 = getelementptr inbounds float, float* %tmp16098, i64 1
+ %tmp16100 = getelementptr inbounds float, float* %tmp16099, i64 1
+ %tmp16101 = getelementptr inbounds float, float* %tmp16100, i64 1
+ %tmp16102 = getelementptr inbounds float, float* %tmp16101, i64 1
+ %tmp16103 = getelementptr inbounds float, float* %tmp16102, i64 1
+ %tmp16104 = getelementptr inbounds float, float* %tmp16103, i64 1
+ %tmp16105 = getelementptr inbounds float, float* %tmp16104, i64 1
+ %tmp16106 = getelementptr inbounds float, float* %tmp16105, i64 1
+ %tmp16107 = getelementptr inbounds float, float* %tmp16106, i64 1
+ %tmp16108 = getelementptr inbounds float, float* %tmp16107, i64 1
+ %tmp16109 = getelementptr inbounds float, float* %tmp16108, i64 1
+ %tmp16110 = getelementptr inbounds float, float* %tmp16109, i64 1
+ %tmp16111 = getelementptr inbounds float, float* %tmp16110, i64 1
+ %tmp16112 = getelementptr inbounds float, float* %tmp16111, i64 1
+ %tmp16113 = getelementptr inbounds float, float* %tmp16112, i64 1
+ %tmp16114 = getelementptr inbounds float, float* %tmp16113, i64 1
+ %tmp16115 = getelementptr inbounds float, float* %tmp16114, i64 1
+ %tmp16116 = getelementptr inbounds float, float* %tmp16115, i64 1
+ %tmp16117 = getelementptr inbounds float, float* %tmp16116, i64 1
+ %tmp16118 = getelementptr inbounds float, float* %tmp16117, i64 1
+ %tmp16119 = getelementptr inbounds float, float* %tmp16118, i64 1
+ %tmp16120 = getelementptr inbounds float, float* %tmp16119, i64 1
+ %tmp16121 = getelementptr inbounds float, float* %tmp16120, i64 1
+ %tmp16122 = getelementptr inbounds float, float* %tmp16121, i64 1
+ %tmp16123 = getelementptr inbounds float, float* %tmp16122, i64 1
+ %tmp16124 = getelementptr inbounds float, float* %tmp16123, i64 1
+ %tmp16125 = getelementptr inbounds float, float* %tmp16124, i64 1
+ %tmp16126 = getelementptr inbounds float, float* %tmp16125, i64 1
+ %tmp16127 = getelementptr inbounds float, float* %tmp16126, i64 1
+ %tmp16128 = getelementptr inbounds float, float* %tmp16127, i64 1
+ %tmp16129 = getelementptr inbounds float, float* %tmp16128, i64 1
+ %tmp16130 = getelementptr inbounds float, float* %tmp16129, i64 1
+ %tmp16131 = getelementptr inbounds float, float* %tmp16130, i64 1
+ %tmp16132 = getelementptr inbounds float, float* %tmp16131, i64 1
+ %tmp16133 = getelementptr inbounds float, float* %tmp16132, i64 1
+ %tmp16134 = getelementptr inbounds float, float* %tmp16133, i64 1
+ %tmp16135 = getelementptr inbounds float, float* %tmp16134, i64 1
+ %tmp16136 = getelementptr inbounds float, float* %tmp16135, i64 1
+ %tmp16137 = getelementptr inbounds float, float* %tmp16136, i64 1
+ %tmp16138 = getelementptr inbounds float, float* %tmp16137, i64 1
+ %tmp16139 = getelementptr inbounds float, float* %tmp16138, i64 1
+ %tmp16140 = getelementptr inbounds float, float* %tmp16139, i64 1
+ %tmp16141 = getelementptr inbounds float, float* %tmp16140, i64 1
+ %tmp16142 = getelementptr inbounds float, float* %tmp16141, i64 1
+ %tmp16143 = getelementptr inbounds float, float* %tmp16142, i64 1
+ %tmp16144 = getelementptr inbounds float, float* %tmp16143, i64 1
+ %tmp16145 = getelementptr inbounds float, float* %tmp16144, i64 1
+ %tmp16146 = getelementptr inbounds float, float* %tmp16145, i64 1
+ %tmp16147 = getelementptr inbounds float, float* %tmp16146, i64 1
+ %tmp16148 = getelementptr inbounds float, float* %tmp16147, i64 1
+ %tmp16149 = getelementptr inbounds float, float* %tmp16148, i64 1
+ %tmp16150 = getelementptr inbounds float, float* %tmp16149, i64 1
+ %tmp16151 = getelementptr inbounds float, float* %tmp16150, i64 1
+ %tmp16152 = getelementptr inbounds float, float* %tmp16151, i64 1
+ %tmp16153 = getelementptr inbounds float, float* %tmp16152, i64 1
+ %tmp16154 = getelementptr inbounds float, float* %tmp16153, i64 1
+ %tmp16155 = getelementptr inbounds float, float* %tmp16154, i64 1
+ %tmp16156 = getelementptr inbounds float, float* %tmp16155, i64 1
+ %tmp16157 = getelementptr inbounds float, float* %tmp16156, i64 1
+ %tmp16158 = getelementptr inbounds float, float* %tmp16157, i64 1
+ %tmp16159 = getelementptr inbounds float, float* %tmp16158, i64 1
+ %tmp16160 = getelementptr inbounds float, float* %tmp16159, i64 1
+ %tmp16161 = getelementptr inbounds float, float* %tmp16160, i64 1
+ %tmp16162 = getelementptr inbounds float, float* %tmp16161, i64 1
+ %tmp16163 = getelementptr inbounds float, float* %tmp16162, i64 1
+ %tmp16164 = getelementptr inbounds float, float* %tmp16163, i64 1
+ %tmp16165 = getelementptr inbounds float, float* %tmp16164, i64 1
+ %tmp16166 = getelementptr inbounds float, float* %tmp16165, i64 1
+ %tmp16167 = getelementptr inbounds float, float* %tmp16166, i64 1
+ %tmp16168 = getelementptr inbounds float, float* %tmp16167, i64 1
+ %tmp16169 = getelementptr inbounds float, float* %tmp16168, i64 1
+ %tmp16170 = getelementptr inbounds float, float* %tmp16169, i64 1
+ %tmp16171 = getelementptr inbounds float, float* %tmp16170, i64 1
+ %tmp16172 = getelementptr inbounds float, float* %tmp16171, i64 1
+ %tmp16173 = getelementptr inbounds float, float* %tmp16172, i64 1
+ %tmp16174 = getelementptr inbounds float, float* %tmp16173, i64 1
+ %tmp16175 = getelementptr inbounds float, float* %tmp16174, i64 1
+ %tmp16176 = getelementptr inbounds float, float* %tmp16175, i64 1
+ %tmp16177 = getelementptr inbounds float, float* %tmp16176, i64 1
+ %tmp16178 = getelementptr inbounds float, float* %tmp16177, i64 1
+ %tmp16179 = getelementptr inbounds float, float* %tmp16178, i64 1
+ %tmp16180 = getelementptr inbounds float, float* %tmp16179, i64 1
+ %tmp16181 = getelementptr inbounds float, float* %tmp16180, i64 1
+ %tmp16182 = getelementptr inbounds float, float* %tmp16181, i64 1
+ %tmp16183 = getelementptr inbounds float, float* %tmp16182, i64 1
+ %tmp16184 = getelementptr inbounds float, float* %tmp16183, i64 1
+ %tmp16185 = getelementptr inbounds float, float* %tmp16184, i64 1
+ %tmp16186 = getelementptr inbounds float, float* %tmp16185, i64 1
+ %tmp16187 = getelementptr inbounds float, float* %tmp16186, i64 1
+ %tmp16188 = getelementptr inbounds float, float* %tmp16187, i64 1
+ %tmp16189 = getelementptr inbounds float, float* %tmp16188, i64 1
+ %tmp16190 = getelementptr inbounds float, float* %tmp16189, i64 1
+ %tmp16191 = getelementptr inbounds float, float* %tmp16190, i64 1
+ %tmp16192 = getelementptr inbounds float, float* %tmp16191, i64 1
+ %tmp16193 = getelementptr inbounds float, float* %tmp16192, i64 1
+ %tmp16194 = getelementptr inbounds float, float* %tmp16193, i64 1
+ %tmp16195 = getelementptr inbounds float, float* %tmp16194, i64 1
+ %tmp16196 = getelementptr inbounds float, float* %tmp16195, i64 1
+ %tmp16197 = getelementptr inbounds float, float* %tmp16196, i64 1
+ %tmp16198 = getelementptr inbounds float, float* %tmp16197, i64 1
+ %tmp16199 = getelementptr inbounds float, float* %tmp16198, i64 1
+ %tmp16200 = getelementptr inbounds float, float* %tmp16199, i64 1
+ %tmp16201 = getelementptr inbounds float, float* %tmp16200, i64 1
+ %tmp16202 = getelementptr inbounds float, float* %tmp16201, i64 1
+ %tmp16203 = getelementptr inbounds float, float* %tmp16202, i64 1
+ %tmp16204 = getelementptr inbounds float, float* %tmp16203, i64 1
+ %tmp16205 = getelementptr inbounds float, float* %tmp16204, i64 1
+ %tmp16206 = getelementptr inbounds float, float* %tmp16205, i64 1
+ %tmp16207 = getelementptr inbounds float, float* %tmp16206, i64 1
+ %tmp16208 = getelementptr inbounds float, float* %tmp16207, i64 1
+ %tmp16209 = getelementptr inbounds float, float* %tmp16208, i64 1
+ %tmp16210 = getelementptr inbounds float, float* %tmp16209, i64 1
+ %tmp16211 = getelementptr inbounds float, float* %tmp16210, i64 1
+ %tmp16212 = getelementptr inbounds float, float* %tmp16211, i64 1
+ %tmp16213 = getelementptr inbounds float, float* %tmp16212, i64 1
+ %tmp16214 = getelementptr inbounds float, float* %tmp16213, i64 1
+ %tmp16215 = getelementptr inbounds float, float* %tmp16214, i64 1
+ %tmp16216 = getelementptr inbounds float, float* %tmp16215, i64 1
+ %tmp16217 = getelementptr inbounds float, float* %tmp16216, i64 1
+ %tmp16218 = getelementptr inbounds float, float* %tmp16217, i64 1
+ %tmp16219 = getelementptr inbounds float, float* %tmp16218, i64 1
+ %tmp16220 = getelementptr inbounds float, float* %tmp16219, i64 1
+ %tmp16221 = getelementptr inbounds float, float* %tmp16220, i64 1
+ %tmp16222 = getelementptr inbounds float, float* %tmp16221, i64 1
+ %tmp16223 = getelementptr inbounds float, float* %tmp16222, i64 1
+ %tmp16224 = getelementptr inbounds float, float* %tmp16223, i64 1
+ %tmp16225 = getelementptr inbounds float, float* %tmp16224, i64 1
+ %tmp16226 = getelementptr inbounds float, float* %tmp16225, i64 1
+ %tmp16227 = getelementptr inbounds float, float* %tmp16226, i64 1
+ %tmp16228 = getelementptr inbounds float, float* %tmp16227, i64 1
+ %tmp16229 = getelementptr inbounds float, float* %tmp16228, i64 1
+ %tmp16230 = getelementptr inbounds float, float* %tmp16229, i64 1
+ %tmp16231 = getelementptr inbounds float, float* %tmp16230, i64 1
+ %tmp16232 = getelementptr inbounds float, float* %tmp16231, i64 1
+ %tmp16233 = getelementptr inbounds float, float* %tmp16232, i64 1
+ %tmp16234 = getelementptr inbounds float, float* %tmp16233, i64 1
+ %tmp16235 = getelementptr inbounds float, float* %tmp16234, i64 1
+ %tmp16236 = getelementptr inbounds float, float* %tmp16235, i64 1
+ %tmp16237 = getelementptr inbounds float, float* %tmp16236, i64 1
+ %tmp16238 = getelementptr inbounds float, float* %tmp16237, i64 1
+ %tmp16239 = getelementptr inbounds float, float* %tmp16238, i64 1
+ %tmp16240 = getelementptr inbounds float, float* %tmp16239, i64 1
+ %tmp16241 = getelementptr inbounds float, float* %tmp16240, i64 1
+ %tmp16242 = getelementptr inbounds float, float* %tmp16241, i64 1
+ %tmp16243 = getelementptr inbounds float, float* %tmp16242, i64 1
+ %tmp16244 = getelementptr inbounds float, float* %tmp16243, i64 1
+ %tmp16245 = getelementptr inbounds float, float* %tmp16244, i64 1
+ %tmp16246 = getelementptr inbounds float, float* %tmp16245, i64 1
+ %tmp16247 = getelementptr inbounds float, float* %tmp16246, i64 1
+ %tmp16248 = getelementptr inbounds float, float* %tmp16247, i64 1
+ %tmp16249 = getelementptr inbounds float, float* %tmp16248, i64 1
+ %tmp16250 = getelementptr inbounds float, float* %tmp16249, i64 1
+ %tmp16251 = getelementptr inbounds float, float* %tmp16250, i64 1
+ %tmp16252 = getelementptr inbounds float, float* %tmp16251, i64 1
+ %tmp16253 = getelementptr inbounds float, float* %tmp16252, i64 1
+ %tmp16254 = getelementptr inbounds float, float* %tmp16253, i64 1
+ %tmp16255 = getelementptr inbounds float, float* %tmp16254, i64 1
+ %tmp16256 = getelementptr inbounds float, float* %tmp16255, i64 1
+ %tmp16257 = getelementptr inbounds float, float* %tmp16256, i64 1
+ %tmp16258 = getelementptr inbounds float, float* %tmp16257, i64 1
+ %tmp16259 = getelementptr inbounds float, float* %tmp16258, i64 1
+ %tmp16260 = getelementptr inbounds float, float* %tmp16259, i64 1
+ %tmp16261 = getelementptr inbounds float, float* %tmp16260, i64 1
+ %tmp16262 = getelementptr inbounds float, float* %tmp16261, i64 1
+ %tmp16263 = getelementptr inbounds float, float* %tmp16262, i64 1
+ %tmp16264 = getelementptr inbounds float, float* %tmp16263, i64 1
+ %tmp16265 = getelementptr inbounds float, float* %tmp16264, i64 1
+ %tmp16266 = getelementptr inbounds float, float* %tmp16265, i64 1
+ %tmp16267 = getelementptr inbounds float, float* %tmp16266, i64 1
+ %tmp16268 = getelementptr inbounds float, float* %tmp16267, i64 1
+ %tmp16269 = getelementptr inbounds float, float* %tmp16268, i64 1
+ %tmp16270 = getelementptr inbounds float, float* %tmp16269, i64 1
+ %tmp16271 = getelementptr inbounds float, float* %tmp16270, i64 1
+ %tmp16272 = getelementptr inbounds float, float* %tmp16271, i64 1
+ %tmp16273 = getelementptr inbounds float, float* %tmp16272, i64 1
+ %tmp16274 = getelementptr inbounds float, float* %tmp16273, i64 1
+ %tmp16275 = getelementptr inbounds float, float* %tmp16274, i64 1
+ %tmp16276 = getelementptr inbounds float, float* %tmp16275, i64 1
+ %tmp16277 = getelementptr inbounds float, float* %tmp16276, i64 1
+ %tmp16278 = getelementptr inbounds float, float* %tmp16277, i64 1
+ %tmp16279 = getelementptr inbounds float, float* %tmp16278, i64 1
+ %tmp16280 = getelementptr inbounds float, float* %tmp16279, i64 1
+ %tmp16281 = getelementptr inbounds float, float* %tmp16280, i64 1
+ %tmp16282 = getelementptr inbounds float, float* %tmp16281, i64 1
+ %tmp16283 = getelementptr inbounds float, float* %tmp16282, i64 1
+ %tmp16284 = getelementptr inbounds float, float* %tmp16283, i64 1
+ %tmp16285 = getelementptr inbounds float, float* %tmp16284, i64 1
+ %tmp16286 = getelementptr inbounds float, float* %tmp16285, i64 1
+ %tmp16287 = getelementptr inbounds float, float* %tmp16286, i64 1
+ %tmp16288 = getelementptr inbounds float, float* %tmp16287, i64 1
+ %tmp16289 = getelementptr inbounds float, float* %tmp16288, i64 1
+ %tmp16290 = getelementptr inbounds float, float* %tmp16289, i64 1
+ %tmp16291 = getelementptr inbounds float, float* %tmp16290, i64 1
+ %tmp16292 = getelementptr inbounds float, float* %tmp16291, i64 1
+ %tmp16293 = getelementptr inbounds float, float* %tmp16292, i64 1
+ %tmp16294 = getelementptr inbounds float, float* %tmp16293, i64 1
+ %tmp16295 = getelementptr inbounds float, float* %tmp16294, i64 1
+ %tmp16296 = getelementptr inbounds float, float* %tmp16295, i64 1
+ %tmp16297 = getelementptr inbounds float, float* %tmp16296, i64 1
+ %tmp16298 = getelementptr inbounds float, float* %tmp16297, i64 1
+ %tmp16299 = getelementptr inbounds float, float* %tmp16298, i64 1
+ %tmp16300 = getelementptr inbounds float, float* %tmp16299, i64 1
+ %tmp16301 = getelementptr inbounds float, float* %tmp16300, i64 1
+ %tmp16302 = getelementptr inbounds float, float* %tmp16301, i64 1
+ %tmp16303 = getelementptr inbounds float, float* %tmp16302, i64 1
+ %tmp16304 = getelementptr inbounds float, float* %tmp16303, i64 1
+ %tmp16305 = getelementptr inbounds float, float* %tmp16304, i64 1
+ %tmp16306 = getelementptr inbounds float, float* %tmp16305, i64 1
+ %tmp16307 = getelementptr inbounds float, float* %tmp16306, i64 1
+ %tmp16308 = getelementptr inbounds float, float* %tmp16307, i64 1
+ %tmp16309 = getelementptr inbounds float, float* %tmp16308, i64 1
+ %tmp16310 = getelementptr inbounds float, float* %tmp16309, i64 1
+ %tmp16311 = getelementptr inbounds float, float* %tmp16310, i64 1
+ %tmp16312 = getelementptr inbounds float, float* %tmp16311, i64 1
+ %tmp16313 = getelementptr inbounds float, float* %tmp16312, i64 1
+ %tmp16314 = getelementptr inbounds float, float* %tmp16313, i64 1
+ %tmp16315 = getelementptr inbounds float, float* %tmp16314, i64 1
+ %tmp16316 = getelementptr inbounds float, float* %tmp16315, i64 1
+ %tmp16317 = getelementptr inbounds float, float* %tmp16316, i64 1
+ %tmp16318 = getelementptr inbounds float, float* %tmp16317, i64 1
+ %tmp16319 = getelementptr inbounds float, float* %tmp16318, i64 1
+ %tmp16320 = getelementptr inbounds float, float* %tmp16319, i64 1
+ %tmp16321 = getelementptr inbounds float, float* %tmp16320, i64 1
+ %tmp16322 = getelementptr inbounds float, float* %tmp16321, i64 1
+ %tmp16323 = getelementptr inbounds float, float* %tmp16322, i64 1
+ %tmp16324 = getelementptr inbounds float, float* %tmp16323, i64 1
+ %tmp16325 = getelementptr inbounds float, float* %tmp16324, i64 1
+ %tmp16326 = getelementptr inbounds float, float* %tmp16325, i64 1
+ %tmp16327 = getelementptr inbounds float, float* %tmp16326, i64 1
+ %tmp16328 = getelementptr inbounds float, float* %tmp16327, i64 1
+ %tmp16329 = getelementptr inbounds float, float* %tmp16328, i64 1
+ %tmp16330 = getelementptr inbounds float, float* %tmp16329, i64 1
+ %tmp16331 = getelementptr inbounds float, float* %tmp16330, i64 1
+ %tmp16332 = getelementptr inbounds float, float* %tmp16331, i64 1
+ %tmp16333 = getelementptr inbounds float, float* %tmp16332, i64 1
+ %tmp16334 = getelementptr inbounds float, float* %tmp16333, i64 1
+ %tmp16335 = getelementptr inbounds float, float* %tmp16334, i64 1
+ %tmp16336 = getelementptr inbounds float, float* %tmp16335, i64 1
+ %tmp16337 = getelementptr inbounds float, float* %tmp16336, i64 1
+ %tmp16338 = getelementptr inbounds float, float* %tmp16337, i64 1
+ %tmp16339 = getelementptr inbounds float, float* %tmp16338, i64 1
+ %tmp16340 = getelementptr inbounds float, float* %tmp16339, i64 1
+ %tmp16341 = getelementptr inbounds float, float* %tmp16340, i64 1
+ %tmp16342 = getelementptr inbounds float, float* %tmp16341, i64 1
+ %tmp16343 = getelementptr inbounds float, float* %tmp16342, i64 1
+ %tmp16344 = getelementptr inbounds float, float* %tmp16343, i64 1
+ %tmp16345 = getelementptr inbounds float, float* %tmp16344, i64 1
+ %tmp16346 = getelementptr inbounds float, float* %tmp16345, i64 1
+ %tmp16347 = getelementptr inbounds float, float* %tmp16346, i64 1
+ %tmp16348 = getelementptr inbounds float, float* %tmp16347, i64 1
+ %tmp16349 = getelementptr inbounds float, float* %tmp16348, i64 1
+ %tmp16350 = getelementptr inbounds float, float* %tmp16349, i64 1
+ %tmp16351 = getelementptr inbounds float, float* %tmp16350, i64 1
+ %tmp16352 = getelementptr inbounds float, float* %tmp16351, i64 1
+ %tmp16353 = getelementptr inbounds float, float* %tmp16352, i64 1
+ %tmp16354 = getelementptr inbounds float, float* %tmp16353, i64 1
+ %tmp16355 = getelementptr inbounds float, float* %tmp16354, i64 1
+ %tmp16356 = getelementptr inbounds float, float* %tmp16355, i64 1
+ %tmp16357 = getelementptr inbounds float, float* %tmp16356, i64 1
+ %tmp16358 = getelementptr inbounds float, float* %tmp16357, i64 1
+ %tmp16359 = getelementptr inbounds float, float* %tmp16358, i64 1
+ %tmp16360 = getelementptr inbounds float, float* %tmp16359, i64 1
+ %tmp16361 = getelementptr inbounds float, float* %tmp16360, i64 1
+ %tmp16362 = getelementptr inbounds float, float* %tmp16361, i64 1
+ %tmp16363 = getelementptr inbounds float, float* %tmp16362, i64 1
+ %tmp16364 = getelementptr inbounds float, float* %tmp16363, i64 1
+ %tmp16365 = getelementptr inbounds float, float* %tmp16364, i64 1
+ %tmp16366 = getelementptr inbounds float, float* %tmp16365, i64 1
+ %tmp16367 = getelementptr inbounds float, float* %tmp16366, i64 1
+ %tmp16368 = getelementptr inbounds float, float* %tmp16367, i64 1
+ %tmp16369 = getelementptr inbounds float, float* %tmp16368, i64 1
+ %tmp16370 = getelementptr inbounds float, float* %tmp16369, i64 1
+ %tmp16371 = getelementptr inbounds float, float* %tmp16370, i64 1
+ %tmp16372 = getelementptr inbounds float, float* %tmp16371, i64 1
+ %tmp16373 = getelementptr inbounds float, float* %tmp16372, i64 1
+ %tmp16374 = getelementptr inbounds float, float* %tmp16373, i64 1
+ %tmp16375 = getelementptr inbounds float, float* %tmp16374, i64 1
+ %tmp16376 = getelementptr inbounds float, float* %tmp16375, i64 1
+ %tmp16377 = getelementptr inbounds float, float* %tmp16376, i64 1
+ %tmp16378 = getelementptr inbounds float, float* %tmp16377, i64 1
+ %tmp16379 = getelementptr inbounds float, float* %tmp16378, i64 1
+ %tmp16380 = getelementptr inbounds float, float* %tmp16379, i64 1
+ %tmp16381 = getelementptr inbounds float, float* %tmp16380, i64 1
+ %tmp16382 = getelementptr inbounds float, float* %tmp16381, i64 1
+ %tmp16383 = getelementptr inbounds float, float* %tmp16382, i64 1
+ %tmp16384 = getelementptr inbounds float, float* %tmp16383, i64 1
+ %tmp16385 = getelementptr inbounds float, float* %tmp16384, i64 1
+ %tmp16386 = getelementptr inbounds float, float* %tmp16385, i64 1
+ %tmp16387 = getelementptr inbounds float, float* %tmp16386, i64 1
+ %tmp16388 = getelementptr inbounds float, float* %tmp16387, i64 1
+ %tmp16389 = getelementptr inbounds float, float* %tmp16388, i64 1
+ %tmp16390 = getelementptr inbounds float, float* %tmp16389, i64 1
+ %tmp16391 = getelementptr inbounds float, float* %tmp16390, i64 1
+ %tmp16392 = getelementptr inbounds float, float* %tmp16391, i64 1
+ %tmp16393 = getelementptr inbounds float, float* %tmp16392, i64 1
+ %tmp16394 = getelementptr inbounds float, float* %tmp16393, i64 1
+ %tmp16395 = getelementptr inbounds float, float* %tmp16394, i64 1
+ %tmp16396 = getelementptr inbounds float, float* %tmp16395, i64 1
+ %tmp16397 = getelementptr inbounds float, float* %tmp16396, i64 1
+ %tmp16398 = getelementptr inbounds float, float* %tmp16397, i64 1
+ %tmp16399 = getelementptr inbounds float, float* %tmp16398, i64 1
+ %tmp16400 = getelementptr inbounds float, float* %tmp16399, i64 1
+ %tmp16401 = getelementptr inbounds float, float* %tmp16400, i64 1
+ %tmp16402 = getelementptr inbounds float, float* %tmp16401, i64 1
+ %tmp16403 = getelementptr inbounds float, float* %tmp16402, i64 1
+ %tmp16404 = getelementptr inbounds float, float* %tmp16403, i64 1
+ %tmp16405 = getelementptr inbounds float, float* %tmp16404, i64 1
+ %tmp16406 = getelementptr inbounds float, float* %tmp16405, i64 1
+ %tmp16407 = getelementptr inbounds float, float* %tmp16406, i64 1
+ %tmp16408 = getelementptr inbounds float, float* %tmp16407, i64 1
+ %tmp16409 = getelementptr inbounds float, float* %tmp16408, i64 1
+ %tmp16410 = getelementptr inbounds float, float* %tmp16409, i64 1
+ %tmp16411 = getelementptr inbounds float, float* %tmp16410, i64 1
+ %tmp16412 = getelementptr inbounds float, float* %tmp16411, i64 1
+ %tmp16413 = getelementptr inbounds float, float* %tmp16412, i64 1
+ %tmp16414 = getelementptr inbounds float, float* %tmp16413, i64 1
+ %tmp16415 = getelementptr inbounds float, float* %tmp16414, i64 1
+ %tmp16416 = getelementptr inbounds float, float* %tmp16415, i64 1
+ %tmp16417 = getelementptr inbounds float, float* %tmp16416, i64 1
+ %tmp16418 = getelementptr inbounds float, float* %tmp16417, i64 1
+ %tmp16419 = getelementptr inbounds float, float* %tmp16418, i64 1
+ %tmp16420 = getelementptr inbounds float, float* %tmp16419, i64 1
+ %tmp16421 = getelementptr inbounds float, float* %tmp16420, i64 1
+ %tmp16422 = getelementptr inbounds float, float* %tmp16421, i64 1
+ %tmp16423 = getelementptr inbounds float, float* %tmp16422, i64 1
+ %tmp16424 = getelementptr inbounds float, float* %tmp16423, i64 1
+ %tmp16425 = getelementptr inbounds float, float* %tmp16424, i64 1
+ %tmp16426 = getelementptr inbounds float, float* %tmp16425, i64 1
+ %tmp16427 = getelementptr inbounds float, float* %tmp16426, i64 1
+ %tmp16428 = getelementptr inbounds float, float* %tmp16427, i64 1
+ %tmp16429 = getelementptr inbounds float, float* %tmp16428, i64 1
+ %tmp16430 = getelementptr inbounds float, float* %tmp16429, i64 1
+ %tmp16431 = getelementptr inbounds float, float* %tmp16430, i64 1
+ %tmp16432 = getelementptr inbounds float, float* %tmp16431, i64 1
+ %tmp16433 = getelementptr inbounds float, float* %tmp16432, i64 1
+ %tmp16434 = getelementptr inbounds float, float* %tmp16433, i64 1
+ %tmp16435 = getelementptr inbounds float, float* %tmp16434, i64 1
+ %tmp16436 = getelementptr inbounds float, float* %tmp16435, i64 1
+ %tmp16437 = getelementptr inbounds float, float* %tmp16436, i64 1
+ %tmp16438 = getelementptr inbounds float, float* %tmp16437, i64 1
+ %tmp16439 = getelementptr inbounds float, float* %tmp16438, i64 1
+ %tmp16440 = getelementptr inbounds float, float* %tmp16439, i64 1
+ %tmp16441 = getelementptr inbounds float, float* %tmp16440, i64 1
+ %tmp16442 = getelementptr inbounds float, float* %tmp16441, i64 1
+ %tmp16443 = getelementptr inbounds float, float* %tmp16442, i64 1
+ %tmp16444 = getelementptr inbounds float, float* %tmp16443, i64 1
+ %tmp16445 = getelementptr inbounds float, float* %tmp16444, i64 1
+ %tmp16446 = getelementptr inbounds float, float* %tmp16445, i64 1
+ %tmp16447 = getelementptr inbounds float, float* %tmp16446, i64 1
+ %tmp16448 = getelementptr inbounds float, float* %tmp16447, i64 1
+ %tmp16449 = getelementptr inbounds float, float* %tmp16448, i64 1
+ %tmp16450 = getelementptr inbounds float, float* %tmp16449, i64 1
+ %tmp16451 = getelementptr inbounds float, float* %tmp16450, i64 1
+ %tmp16452 = getelementptr inbounds float, float* %tmp16451, i64 1
+ %tmp16453 = getelementptr inbounds float, float* %tmp16452, i64 1
+ %tmp16454 = getelementptr inbounds float, float* %tmp16453, i64 1
+ %tmp16455 = getelementptr inbounds float, float* %tmp16454, i64 1
+ %tmp16456 = getelementptr inbounds float, float* %tmp16455, i64 1
+ %tmp16457 = getelementptr inbounds float, float* %tmp16456, i64 1
+ %tmp16458 = getelementptr inbounds float, float* %tmp16457, i64 1
+ %tmp16459 = getelementptr inbounds float, float* %tmp16458, i64 1
+ %tmp16460 = getelementptr inbounds float, float* %tmp16459, i64 1
+ %tmp16461 = getelementptr inbounds float, float* %tmp16460, i64 1
+ %tmp16462 = getelementptr inbounds float, float* %tmp16461, i64 1
+ %tmp16463 = getelementptr inbounds float, float* %tmp16462, i64 1
+ %tmp16464 = getelementptr inbounds float, float* %tmp16463, i64 1
+ %tmp16465 = getelementptr inbounds float, float* %tmp16464, i64 1
+ %tmp16466 = getelementptr inbounds float, float* %tmp16465, i64 1
+ %tmp16467 = getelementptr inbounds float, float* %tmp16466, i64 1
+ %tmp16468 = getelementptr inbounds float, float* %tmp16467, i64 1
+ %tmp16469 = getelementptr inbounds float, float* %tmp16468, i64 1
+ %tmp16470 = getelementptr inbounds float, float* %tmp16469, i64 1
+ %tmp16471 = getelementptr inbounds float, float* %tmp16470, i64 1
+ %tmp16472 = getelementptr inbounds float, float* %tmp16471, i64 1
+ %tmp16473 = getelementptr inbounds float, float* %tmp16472, i64 1
+ %tmp16474 = getelementptr inbounds float, float* %tmp16473, i64 1
+ %tmp16475 = getelementptr inbounds float, float* %tmp16474, i64 1
+ %tmp16476 = getelementptr inbounds float, float* %tmp16475, i64 1
+ %tmp16477 = getelementptr inbounds float, float* %tmp16476, i64 1
+ %tmp16478 = getelementptr inbounds float, float* %tmp16477, i64 1
+ %tmp16479 = getelementptr inbounds float, float* %tmp16478, i64 1
+ %tmp16480 = getelementptr inbounds float, float* %tmp16479, i64 1
+ %tmp16481 = getelementptr inbounds float, float* %tmp16480, i64 1
+ %tmp16482 = getelementptr inbounds float, float* %tmp16481, i64 1
+ %tmp16483 = getelementptr inbounds float, float* %tmp16482, i64 1
+ %tmp16484 = getelementptr inbounds float, float* %tmp16483, i64 1
+ %tmp16485 = getelementptr inbounds float, float* %tmp16484, i64 1
+ %tmp16486 = getelementptr inbounds float, float* %tmp16485, i64 1
+ %tmp16487 = getelementptr inbounds float, float* %tmp16486, i64 1
+ %tmp16488 = getelementptr inbounds float, float* %tmp16487, i64 1
+ %tmp16489 = getelementptr inbounds float, float* %tmp16488, i64 1
+ %tmp16490 = getelementptr inbounds float, float* %tmp16489, i64 1
+ %tmp16491 = getelementptr inbounds float, float* %tmp16490, i64 1
+ %tmp16492 = getelementptr inbounds float, float* %tmp16491, i64 1
+ %tmp16493 = getelementptr inbounds float, float* %tmp16492, i64 1
+ %tmp16494 = getelementptr inbounds float, float* %tmp16493, i64 1
+ %tmp16495 = getelementptr inbounds float, float* %tmp16494, i64 1
+ %tmp16496 = getelementptr inbounds float, float* %tmp16495, i64 1
+ %tmp16497 = getelementptr inbounds float, float* %tmp16496, i64 1
+ %tmp16498 = getelementptr inbounds float, float* %tmp16497, i64 1
+ %tmp16499 = getelementptr inbounds float, float* %tmp16498, i64 1
+ %tmp16500 = getelementptr inbounds float, float* %tmp16499, i64 1
+ %tmp16501 = getelementptr inbounds float, float* %tmp16500, i64 1
+ %tmp16502 = getelementptr inbounds float, float* %tmp16501, i64 1
+ %tmp16503 = getelementptr inbounds float, float* %tmp16502, i64 1
+ %tmp16504 = getelementptr inbounds float, float* %tmp16503, i64 1
+ %tmp16505 = getelementptr inbounds float, float* %tmp16504, i64 1
+ %tmp16506 = getelementptr inbounds float, float* %tmp16505, i64 1
+ %tmp16507 = getelementptr inbounds float, float* %tmp16506, i64 1
+ %tmp16508 = getelementptr inbounds float, float* %tmp16507, i64 1
+ %tmp16509 = getelementptr inbounds float, float* %tmp16508, i64 1
+ %tmp16510 = getelementptr inbounds float, float* %tmp16509, i64 1
+ %tmp16511 = getelementptr inbounds float, float* %tmp16510, i64 1
+ %tmp16512 = getelementptr inbounds float, float* %tmp16511, i64 1
+ %tmp16513 = getelementptr inbounds float, float* %tmp16512, i64 1
+ %tmp16514 = getelementptr inbounds float, float* %tmp16513, i64 1
+ %tmp16515 = getelementptr inbounds float, float* %tmp16514, i64 1
+ %tmp16516 = getelementptr inbounds float, float* %tmp16515, i64 1
+ %tmp16517 = getelementptr inbounds float, float* %tmp16516, i64 1
+ %tmp16518 = getelementptr inbounds float, float* %tmp16517, i64 1
+ %tmp16519 = getelementptr inbounds float, float* %tmp16518, i64 1
+ %tmp16520 = getelementptr inbounds float, float* %tmp16519, i64 1
+ %tmp16521 = getelementptr inbounds float, float* %tmp16520, i64 1
+ %tmp16522 = getelementptr inbounds float, float* %tmp16521, i64 1
+ %tmp16523 = getelementptr inbounds float, float* %tmp16522, i64 1
+ %tmp16524 = getelementptr inbounds float, float* %tmp16523, i64 1
+ %tmp16525 = getelementptr inbounds float, float* %tmp16524, i64 1
+ %tmp16526 = getelementptr inbounds float, float* %tmp16525, i64 1
+ %tmp16527 = getelementptr inbounds float, float* %tmp16526, i64 1
+ %tmp16528 = getelementptr inbounds float, float* %tmp16527, i64 1
+ %tmp16529 = getelementptr inbounds float, float* %tmp16528, i64 1
+ %tmp16530 = getelementptr inbounds float, float* %tmp16529, i64 1
+ %tmp16531 = getelementptr inbounds float, float* %tmp16530, i64 1
+ %tmp16532 = getelementptr inbounds float, float* %tmp16531, i64 1
+ %tmp16533 = getelementptr inbounds float, float* %tmp16532, i64 1
+ %tmp16534 = getelementptr inbounds float, float* %tmp16533, i64 1
+ %tmp16535 = getelementptr inbounds float, float* %tmp16534, i64 1
+ %tmp16536 = getelementptr inbounds float, float* %tmp16535, i64 1
+ %tmp16537 = getelementptr inbounds float, float* %tmp16536, i64 1
+ %tmp16538 = getelementptr inbounds float, float* %tmp16537, i64 1
+ %tmp16539 = getelementptr inbounds float, float* %tmp16538, i64 1
+ %tmp16540 = getelementptr inbounds float, float* %tmp16539, i64 1
+ %tmp16541 = getelementptr inbounds float, float* %tmp16540, i64 1
+ %tmp16542 = getelementptr inbounds float, float* %tmp16541, i64 1
+ %tmp16543 = getelementptr inbounds float, float* %tmp16542, i64 1
+ %tmp16544 = getelementptr inbounds float, float* %tmp16543, i64 1
+ %tmp16545 = getelementptr inbounds float, float* %tmp16544, i64 1
+ %tmp16546 = getelementptr inbounds float, float* %tmp16545, i64 1
+ %tmp16547 = getelementptr inbounds float, float* %tmp16546, i64 1
+ %tmp16548 = getelementptr inbounds float, float* %tmp16547, i64 1
+ %tmp16549 = getelementptr inbounds float, float* %tmp16548, i64 1
+ %tmp16550 = getelementptr inbounds float, float* %tmp16549, i64 1
+ %tmp16551 = getelementptr inbounds float, float* %tmp16550, i64 1
+ %tmp16552 = getelementptr inbounds float, float* %tmp16551, i64 1
+ %tmp16553 = getelementptr inbounds float, float* %tmp16552, i64 1
+ %tmp16554 = getelementptr inbounds float, float* %tmp16553, i64 1
+ %tmp16555 = getelementptr inbounds float, float* %tmp16554, i64 1
+ %tmp16556 = getelementptr inbounds float, float* %tmp16555, i64 1
+ %tmp16557 = getelementptr inbounds float, float* %tmp16556, i64 1
+ %tmp16558 = getelementptr inbounds float, float* %tmp16557, i64 1
+ %tmp16559 = getelementptr inbounds float, float* %tmp16558, i64 1
+ %tmp16560 = getelementptr inbounds float, float* %tmp16559, i64 1
+ %tmp16561 = getelementptr inbounds float, float* %tmp16560, i64 1
+ %tmp16562 = getelementptr inbounds float, float* %tmp16561, i64 1
+ %tmp16563 = getelementptr inbounds float, float* %tmp16562, i64 1
+ %tmp16564 = getelementptr inbounds float, float* %tmp16563, i64 1
+ %tmp16565 = getelementptr inbounds float, float* %tmp16564, i64 1
+ %tmp16566 = getelementptr inbounds float, float* %tmp16565, i64 1
+ %tmp16567 = getelementptr inbounds float, float* %tmp16566, i64 1
+ %tmp16568 = getelementptr inbounds float, float* %tmp16567, i64 1
+ %tmp16569 = getelementptr inbounds float, float* %tmp16568, i64 1
+ %tmp16570 = getelementptr inbounds float, float* %tmp16569, i64 1
+ %tmp16571 = getelementptr inbounds float, float* %tmp16570, i64 1
+ %tmp16572 = getelementptr inbounds float, float* %tmp16571, i64 1
+ %tmp16573 = getelementptr inbounds float, float* %tmp16572, i64 1
+ %tmp16574 = getelementptr inbounds float, float* %tmp16573, i64 1
+ %tmp16575 = getelementptr inbounds float, float* %tmp16574, i64 1
+ %tmp16576 = getelementptr inbounds float, float* %tmp16575, i64 1
+ %tmp16577 = getelementptr inbounds float, float* %tmp16576, i64 1
+ %tmp16578 = getelementptr inbounds float, float* %tmp16577, i64 1
+ %tmp16579 = getelementptr inbounds float, float* %tmp16578, i64 1
+ %tmp16580 = getelementptr inbounds float, float* %tmp16579, i64 1
+ %tmp16581 = getelementptr inbounds float, float* %tmp16580, i64 1
+ %tmp16582 = getelementptr inbounds float, float* %tmp16581, i64 1
+ %tmp16583 = getelementptr inbounds float, float* %tmp16582, i64 1
+ %tmp16584 = getelementptr inbounds float, float* %tmp16583, i64 1
+ %tmp16585 = getelementptr inbounds float, float* %tmp16584, i64 1
+ %tmp16586 = getelementptr inbounds float, float* %tmp16585, i64 1
+ %tmp16587 = getelementptr inbounds float, float* %tmp16586, i64 1
+ %tmp16588 = getelementptr inbounds float, float* %tmp16587, i64 1
+ %tmp16589 = getelementptr inbounds float, float* %tmp16588, i64 1
+ %tmp16590 = getelementptr inbounds float, float* %tmp16589, i64 1
+ %tmp16591 = getelementptr inbounds float, float* %tmp16590, i64 1
+ %tmp16592 = getelementptr inbounds float, float* %tmp16591, i64 1
+ %tmp16593 = getelementptr inbounds float, float* %tmp16592, i64 1
+ %tmp16594 = getelementptr inbounds float, float* %tmp16593, i64 1
+ %tmp16595 = getelementptr inbounds float, float* %tmp16594, i64 1
+ %tmp16596 = getelementptr inbounds float, float* %tmp16595, i64 1
+ %tmp16597 = getelementptr inbounds float, float* %tmp16596, i64 1
+ %tmp16598 = getelementptr inbounds float, float* %tmp16597, i64 1
+ %tmp16599 = getelementptr inbounds float, float* %tmp16598, i64 1
+ %tmp16600 = getelementptr inbounds float, float* %tmp16599, i64 1
+ %tmp16601 = getelementptr inbounds float, float* %tmp16600, i64 1
+ %tmp16602 = getelementptr inbounds float, float* %tmp16601, i64 1
+ %tmp16603 = getelementptr inbounds float, float* %tmp16602, i64 1
+ %tmp16604 = getelementptr inbounds float, float* %tmp16603, i64 1
+ %tmp16605 = getelementptr inbounds float, float* %tmp16604, i64 1
+ %tmp16606 = getelementptr inbounds float, float* %tmp16605, i64 1
+ %tmp16607 = getelementptr inbounds float, float* %tmp16606, i64 1
+ %tmp16608 = getelementptr inbounds float, float* %tmp16607, i64 1
+ %tmp16609 = getelementptr inbounds float, float* %tmp16608, i64 1
+ %tmp16610 = getelementptr inbounds float, float* %tmp16609, i64 1
+ %tmp16611 = getelementptr inbounds float, float* %tmp16610, i64 1
+ %tmp16612 = getelementptr inbounds float, float* %tmp16611, i64 1
+ %tmp16613 = getelementptr inbounds float, float* %tmp16612, i64 1
+ %tmp16614 = getelementptr inbounds float, float* %tmp16613, i64 1
+ %tmp16615 = getelementptr inbounds float, float* %tmp16614, i64 1
+ %tmp16616 = getelementptr inbounds float, float* %tmp16615, i64 1
+ %tmp16617 = getelementptr inbounds float, float* %tmp16616, i64 1
+ %tmp16618 = getelementptr inbounds float, float* %tmp16617, i64 1
+ %tmp16619 = getelementptr inbounds float, float* %tmp16618, i64 1
+ %tmp16620 = getelementptr inbounds float, float* %tmp16619, i64 1
+ %tmp16621 = getelementptr inbounds float, float* %tmp16620, i64 1
+ %tmp16622 = getelementptr inbounds float, float* %tmp16621, i64 1
+ %tmp16623 = getelementptr inbounds float, float* %tmp16622, i64 1
+ %tmp16624 = getelementptr inbounds float, float* %tmp16623, i64 1
+ %tmp16625 = getelementptr inbounds float, float* %tmp16624, i64 1
+ %tmp16626 = getelementptr inbounds float, float* %tmp16625, i64 1
+ %tmp16627 = getelementptr inbounds float, float* %tmp16626, i64 1
+ %tmp16628 = getelementptr inbounds float, float* %tmp16627, i64 1
+ %tmp16629 = getelementptr inbounds float, float* %tmp16628, i64 1
+ %tmp16630 = getelementptr inbounds float, float* %tmp16629, i64 1
+ %tmp16631 = getelementptr inbounds float, float* %tmp16630, i64 1
+ %tmp16632 = getelementptr inbounds float, float* %tmp16631, i64 1
+ %tmp16633 = getelementptr inbounds float, float* %tmp16632, i64 1
+ %tmp16634 = getelementptr inbounds float, float* %tmp16633, i64 1
+ %tmp16635 = getelementptr inbounds float, float* %tmp16634, i64 1
+ %tmp16636 = getelementptr inbounds float, float* %tmp16635, i64 1
+ %tmp16637 = getelementptr inbounds float, float* %tmp16636, i64 1
+ %tmp16638 = getelementptr inbounds float, float* %tmp16637, i64 1
+ %tmp16639 = getelementptr inbounds float, float* %tmp16638, i64 1
+ %tmp16640 = getelementptr inbounds float, float* %tmp16639, i64 1
+ %tmp16641 = getelementptr inbounds float, float* %tmp16640, i64 1
+ %tmp16642 = getelementptr inbounds float, float* %tmp16641, i64 1
+ %tmp16643 = getelementptr inbounds float, float* %tmp16642, i64 1
+ %tmp16644 = getelementptr inbounds float, float* %tmp16643, i64 1
+ %tmp16645 = getelementptr inbounds float, float* %tmp16644, i64 1
+ %tmp16646 = getelementptr inbounds float, float* %tmp16645, i64 1
+ %tmp16647 = getelementptr inbounds float, float* %tmp16646, i64 1
+ %tmp16648 = getelementptr inbounds float, float* %tmp16647, i64 1
+ %tmp16649 = getelementptr inbounds float, float* %tmp16648, i64 1
+ %tmp16650 = getelementptr inbounds float, float* %tmp16649, i64 1
+ %tmp16651 = getelementptr inbounds float, float* %tmp16650, i64 1
+ %tmp16652 = getelementptr inbounds float, float* %tmp16651, i64 1
+ %tmp16653 = getelementptr inbounds float, float* %tmp16652, i64 1
+ %tmp16654 = getelementptr inbounds float, float* %tmp16653, i64 1
+ %tmp16655 = getelementptr inbounds float, float* %tmp16654, i64 1
+ %tmp16656 = getelementptr inbounds float, float* %tmp16655, i64 1
+ %tmp16657 = getelementptr inbounds float, float* %tmp16656, i64 1
+ %tmp16658 = getelementptr inbounds float, float* %tmp16657, i64 1
+ %tmp16659 = getelementptr inbounds float, float* %tmp16658, i64 1
+ %tmp16660 = getelementptr inbounds float, float* %tmp16659, i64 1
+ %tmp16661 = getelementptr inbounds float, float* %tmp16660, i64 1
+ %tmp16662 = getelementptr inbounds float, float* %tmp16661, i64 1
+ %tmp16663 = getelementptr inbounds float, float* %tmp16662, i64 1
+ %tmp16664 = getelementptr inbounds float, float* %tmp16663, i64 1
+ %tmp16665 = getelementptr inbounds float, float* %tmp16664, i64 1
+ %tmp16666 = getelementptr inbounds float, float* %tmp16665, i64 1
+ %tmp16667 = getelementptr inbounds float, float* %tmp16666, i64 1
+ %tmp16668 = getelementptr inbounds float, float* %tmp16667, i64 1
+ %tmp16669 = getelementptr inbounds float, float* %tmp16668, i64 1
+ %tmp16670 = getelementptr inbounds float, float* %tmp16669, i64 1
+ %tmp16671 = getelementptr inbounds float, float* %tmp16670, i64 1
+ %tmp16672 = getelementptr inbounds float, float* %tmp16671, i64 1
+ %tmp16673 = getelementptr inbounds float, float* %tmp16672, i64 1
+ %tmp16674 = getelementptr inbounds float, float* %tmp16673, i64 1
+ %tmp16675 = getelementptr inbounds float, float* %tmp16674, i64 1
+ %tmp16676 = getelementptr inbounds float, float* %tmp16675, i64 1
+ %tmp16677 = getelementptr inbounds float, float* %tmp16676, i64 1
+ %tmp16678 = getelementptr inbounds float, float* %tmp16677, i64 1
+ %tmp16679 = getelementptr inbounds float, float* %tmp16678, i64 1
+ %tmp16680 = getelementptr inbounds float, float* %tmp16679, i64 1
+ %tmp16681 = getelementptr inbounds float, float* %tmp16680, i64 1
+ %tmp16682 = getelementptr inbounds float, float* %tmp16681, i64 1
+ %tmp16683 = getelementptr inbounds float, float* %tmp16682, i64 1
+ %tmp16684 = getelementptr inbounds float, float* %tmp16683, i64 1
+ %tmp16685 = getelementptr inbounds float, float* %tmp16684, i64 1
+ %tmp16686 = getelementptr inbounds float, float* %tmp16685, i64 1
+ %tmp16687 = getelementptr inbounds float, float* %tmp16686, i64 1
+ %tmp16688 = getelementptr inbounds float, float* %tmp16687, i64 1
+ %tmp16689 = getelementptr inbounds float, float* %tmp16688, i64 1
+ %tmp16690 = getelementptr inbounds float, float* %tmp16689, i64 1
+ %tmp16691 = getelementptr inbounds float, float* %tmp16690, i64 1
+ %tmp16692 = getelementptr inbounds float, float* %tmp16691, i64 1
+ %tmp16693 = getelementptr inbounds float, float* %tmp16692, i64 1
+ %tmp16694 = getelementptr inbounds float, float* %tmp16693, i64 1
+ %tmp16695 = getelementptr inbounds float, float* %tmp16694, i64 1
+ %tmp16696 = getelementptr inbounds float, float* %tmp16695, i64 1
+ %tmp16697 = getelementptr inbounds float, float* %tmp16696, i64 1
+ %tmp16698 = getelementptr inbounds float, float* %tmp16697, i64 1
+ %tmp16699 = getelementptr inbounds float, float* %tmp16698, i64 1
+ %tmp16700 = getelementptr inbounds float, float* %tmp16699, i64 1
+ %tmp16701 = getelementptr inbounds float, float* %tmp16700, i64 1
+ %tmp16702 = getelementptr inbounds float, float* %tmp16701, i64 1
+ %tmp16703 = getelementptr inbounds float, float* %tmp16702, i64 1
+ %tmp16704 = getelementptr inbounds float, float* %tmp16703, i64 1
+ %tmp16705 = getelementptr inbounds float, float* %tmp16704, i64 1
+ %tmp16706 = getelementptr inbounds float, float* %tmp16705, i64 1
+ %tmp16707 = getelementptr inbounds float, float* %tmp16706, i64 1
+ %tmp16708 = getelementptr inbounds float, float* %tmp16707, i64 1
+ %tmp16709 = getelementptr inbounds float, float* %tmp16708, i64 1
+ %tmp16710 = getelementptr inbounds float, float* %tmp16709, i64 1
+ %tmp16711 = getelementptr inbounds float, float* %tmp16710, i64 1
+ %tmp16712 = getelementptr inbounds float, float* %tmp16711, i64 1
+ %tmp16713 = getelementptr inbounds float, float* %tmp16712, i64 1
+ %tmp16714 = getelementptr inbounds float, float* %tmp16713, i64 1
+ %tmp16715 = getelementptr inbounds float, float* %tmp16714, i64 1
+ %tmp16716 = getelementptr inbounds float, float* %tmp16715, i64 1
+ %tmp16717 = getelementptr inbounds float, float* %tmp16716, i64 1
+ %tmp16718 = getelementptr inbounds float, float* %tmp16717, i64 1
+ %tmp16719 = getelementptr inbounds float, float* %tmp16718, i64 1
+ %tmp16720 = getelementptr inbounds float, float* %tmp16719, i64 1
+ %tmp16721 = getelementptr inbounds float, float* %tmp16720, i64 1
+ %tmp16722 = getelementptr inbounds float, float* %tmp16721, i64 1
+ %tmp16723 = getelementptr inbounds float, float* %tmp16722, i64 1
+ %tmp16724 = getelementptr inbounds float, float* %tmp16723, i64 1
+ %tmp16725 = getelementptr inbounds float, float* %tmp16724, i64 1
+ %tmp16726 = getelementptr inbounds float, float* %tmp16725, i64 1
+ %tmp16727 = getelementptr inbounds float, float* %tmp16726, i64 1
+ %tmp16728 = getelementptr inbounds float, float* %tmp16727, i64 1
+ %tmp16729 = getelementptr inbounds float, float* %tmp16728, i64 1
+ %tmp16730 = getelementptr inbounds float, float* %tmp16729, i64 1
+ %tmp16731 = getelementptr inbounds float, float* %tmp16730, i64 1
+ %tmp16732 = getelementptr inbounds float, float* %tmp16731, i64 1
+ %tmp16733 = getelementptr inbounds float, float* %tmp16732, i64 1
+ %tmp16734 = getelementptr inbounds float, float* %tmp16733, i64 1
+ %tmp16735 = getelementptr inbounds float, float* %tmp16734, i64 1
+ %tmp16736 = getelementptr inbounds float, float* %tmp16735, i64 1
+ %tmp16737 = getelementptr inbounds float, float* %tmp16736, i64 1
+ %tmp16738 = getelementptr inbounds float, float* %tmp16737, i64 1
+ %tmp16739 = getelementptr inbounds float, float* %tmp16738, i64 1
+ %tmp16740 = getelementptr inbounds float, float* %tmp16739, i64 1
+ %tmp16741 = getelementptr inbounds float, float* %tmp16740, i64 1
+ %tmp16742 = getelementptr inbounds float, float* %tmp16741, i64 1
+ %tmp16743 = getelementptr inbounds float, float* %tmp16742, i64 1
+ %tmp16744 = getelementptr inbounds float, float* %tmp16743, i64 1
+ %tmp16745 = getelementptr inbounds float, float* %tmp16744, i64 1
+ %tmp16746 = getelementptr inbounds float, float* %tmp16745, i64 1
+ %tmp16747 = getelementptr inbounds float, float* %tmp16746, i64 1
+ %tmp16748 = getelementptr inbounds float, float* %tmp16747, i64 1
+ %tmp16749 = getelementptr inbounds float, float* %tmp16748, i64 1
+ %tmp16750 = getelementptr inbounds float, float* %tmp16749, i64 1
+ %tmp16751 = getelementptr inbounds float, float* %tmp16750, i64 1
+ %tmp16752 = getelementptr inbounds float, float* %tmp16751, i64 1
+ %tmp16753 = getelementptr inbounds float, float* %tmp16752, i64 1
+ %tmp16754 = getelementptr inbounds float, float* %tmp16753, i64 1
+ %tmp16755 = getelementptr inbounds float, float* %tmp16754, i64 1
+ %tmp16756 = getelementptr inbounds float, float* %tmp16755, i64 1
+ %tmp16757 = getelementptr inbounds float, float* %tmp16756, i64 1
+ %tmp16758 = getelementptr inbounds float, float* %tmp16757, i64 1
+ %tmp16759 = getelementptr inbounds float, float* %tmp16758, i64 1
+ %tmp16760 = getelementptr inbounds float, float* %tmp16759, i64 1
+ %tmp16761 = getelementptr inbounds float, float* %tmp16760, i64 1
+ %tmp16762 = getelementptr inbounds float, float* %tmp16761, i64 1
+ %tmp16763 = getelementptr inbounds float, float* %tmp16762, i64 1
+ %tmp16764 = getelementptr inbounds float, float* %tmp16763, i64 1
+ %tmp16765 = getelementptr inbounds float, float* %tmp16764, i64 1
+ %tmp16766 = getelementptr inbounds float, float* %tmp16765, i64 1
+ %tmp16767 = getelementptr inbounds float, float* %tmp16766, i64 1
+ %tmp16768 = getelementptr inbounds float, float* %tmp16767, i64 1
+ %tmp16769 = getelementptr inbounds float, float* %tmp16768, i64 1
+ %tmp16770 = getelementptr inbounds float, float* %tmp16769, i64 1
+ %tmp16771 = getelementptr inbounds float, float* %tmp16770, i64 1
+ %tmp16772 = getelementptr inbounds float, float* %tmp16771, i64 1
+ %tmp16773 = getelementptr inbounds float, float* %tmp16772, i64 1
+ %tmp16774 = getelementptr inbounds float, float* %tmp16773, i64 1
+ %tmp16775 = getelementptr inbounds float, float* %tmp16774, i64 1
+ %tmp16776 = getelementptr inbounds float, float* %tmp16775, i64 1
+ %tmp16777 = getelementptr inbounds float, float* %tmp16776, i64 1
+ %tmp16778 = getelementptr inbounds float, float* %tmp16777, i64 1
+ %tmp16779 = getelementptr inbounds float, float* %tmp16778, i64 1
+ %tmp16780 = getelementptr inbounds float, float* %tmp16779, i64 1
+ %tmp16781 = getelementptr inbounds float, float* %tmp16780, i64 1
+ %tmp16782 = getelementptr inbounds float, float* %tmp16781, i64 1
+ %tmp16783 = getelementptr inbounds float, float* %tmp16782, i64 1
+ %tmp16784 = getelementptr inbounds float, float* %tmp16783, i64 1
+ %tmp16785 = getelementptr inbounds float, float* %tmp16784, i64 1
+ %tmp16786 = getelementptr inbounds float, float* %tmp16785, i64 1
+ %tmp16787 = getelementptr inbounds float, float* %tmp16786, i64 1
+ %tmp16788 = getelementptr inbounds float, float* %tmp16787, i64 1
+ %tmp16789 = getelementptr inbounds float, float* %tmp16788, i64 1
+ %tmp16790 = getelementptr inbounds float, float* %tmp16789, i64 1
+ %tmp16791 = getelementptr inbounds float, float* %tmp16790, i64 1
+ %tmp16792 = getelementptr inbounds float, float* %tmp16791, i64 1
+ %tmp16793 = getelementptr inbounds float, float* %tmp16792, i64 1
+ %tmp16794 = getelementptr inbounds float, float* %tmp16793, i64 1
+ %tmp16795 = getelementptr inbounds float, float* %tmp16794, i64 1
+ %tmp16796 = getelementptr inbounds float, float* %tmp16795, i64 1
+ %tmp16797 = getelementptr inbounds float, float* %tmp16796, i64 1
+ %tmp16798 = getelementptr inbounds float, float* %tmp16797, i64 1
+ %tmp16799 = getelementptr inbounds float, float* %tmp16798, i64 1
+ %tmp16800 = getelementptr inbounds float, float* %tmp16799, i64 1
+ %tmp16801 = getelementptr inbounds float, float* %tmp16800, i64 1
+ %tmp16802 = getelementptr inbounds float, float* %tmp16801, i64 1
+ %tmp16803 = getelementptr inbounds float, float* %tmp16802, i64 1
+ %tmp16804 = getelementptr inbounds float, float* %tmp16803, i64 1
+ %tmp16805 = getelementptr inbounds float, float* %tmp16804, i64 1
+ %tmp16806 = getelementptr inbounds float, float* %tmp16805, i64 1
+ %tmp16807 = getelementptr inbounds float, float* %tmp16806, i64 1
+ %tmp16808 = getelementptr inbounds float, float* %tmp16807, i64 1
+ %tmp16809 = getelementptr inbounds float, float* %tmp16808, i64 1
+ %tmp16810 = getelementptr inbounds float, float* %tmp16809, i64 1
+ %tmp16811 = getelementptr inbounds float, float* %tmp16810, i64 1
+ %tmp16812 = getelementptr inbounds float, float* %tmp16811, i64 1
+ %tmp16813 = getelementptr inbounds float, float* %tmp16812, i64 1
+ %tmp16814 = getelementptr inbounds float, float* %tmp16813, i64 1
+ %tmp16815 = getelementptr inbounds float, float* %tmp16814, i64 1
+ %tmp16816 = getelementptr inbounds float, float* %tmp16815, i64 1
+ %tmp16817 = getelementptr inbounds float, float* %tmp16816, i64 1
+ %tmp16818 = getelementptr inbounds float, float* %tmp16817, i64 1
+ %tmp16819 = getelementptr inbounds float, float* %tmp16818, i64 1
+ %tmp16820 = getelementptr inbounds float, float* %tmp16819, i64 1
+ %tmp16821 = getelementptr inbounds float, float* %tmp16820, i64 1
+ %tmp16822 = getelementptr inbounds float, float* %tmp16821, i64 1
+ %tmp16823 = getelementptr inbounds float, float* %tmp16822, i64 1
+ %tmp16824 = getelementptr inbounds float, float* %tmp16823, i64 1
+ %tmp16825 = getelementptr inbounds float, float* %tmp16824, i64 1
+ %tmp16826 = getelementptr inbounds float, float* %tmp16825, i64 1
+ %tmp16827 = getelementptr inbounds float, float* %tmp16826, i64 1
+ %tmp16828 = getelementptr inbounds float, float* %tmp16827, i64 1
+ %tmp16829 = getelementptr inbounds float, float* %tmp16828, i64 1
+ %tmp16830 = getelementptr inbounds float, float* %tmp16829, i64 1
+ %tmp16831 = getelementptr inbounds float, float* %tmp16830, i64 1
+ %tmp16832 = getelementptr inbounds float, float* %tmp16831, i64 1
+ %tmp16833 = getelementptr inbounds float, float* %tmp16832, i64 1
+ %tmp16834 = getelementptr inbounds float, float* %tmp16833, i64 1
+ %tmp16835 = getelementptr inbounds float, float* %tmp16834, i64 1
+ %tmp16836 = getelementptr inbounds float, float* %tmp16835, i64 1
+ %tmp16837 = getelementptr inbounds float, float* %tmp16836, i64 1
+ %tmp16838 = getelementptr inbounds float, float* %tmp16837, i64 1
+ %tmp16839 = getelementptr inbounds float, float* %tmp16838, i64 1
+ %tmp16840 = getelementptr inbounds float, float* %tmp16839, i64 1
+ %tmp16841 = getelementptr inbounds float, float* %tmp16840, i64 1
+ %tmp16842 = getelementptr inbounds float, float* %tmp16841, i64 1
+ %tmp16843 = getelementptr inbounds float, float* %tmp16842, i64 1
+ %tmp16844 = getelementptr inbounds float, float* %tmp16843, i64 1
+ %tmp16845 = getelementptr inbounds float, float* %tmp16844, i64 1
+ %tmp16846 = getelementptr inbounds float, float* %tmp16845, i64 1
+ %tmp16847 = getelementptr inbounds float, float* %tmp16846, i64 1
+ %tmp16848 = getelementptr inbounds float, float* %tmp16847, i64 1
+ %tmp16849 = getelementptr inbounds float, float* %tmp16848, i64 1
+ %tmp16850 = getelementptr inbounds float, float* %tmp16849, i64 1
+ %tmp16851 = getelementptr inbounds float, float* %tmp16850, i64 1
+ %tmp16852 = getelementptr inbounds float, float* %tmp16851, i64 1
+ %tmp16853 = getelementptr inbounds float, float* %tmp16852, i64 1
+ %tmp16854 = getelementptr inbounds float, float* %tmp16853, i64 1
+ %tmp16855 = getelementptr inbounds float, float* %tmp16854, i64 1
+ %tmp16856 = getelementptr inbounds float, float* %tmp16855, i64 1
+ %tmp16857 = getelementptr inbounds float, float* %tmp16856, i64 1
+ %tmp16858 = getelementptr inbounds float, float* %tmp16857, i64 1
+ %tmp16859 = getelementptr inbounds float, float* %tmp16858, i64 1
+ %tmp16860 = getelementptr inbounds float, float* %tmp16859, i64 1
+ %tmp16861 = getelementptr inbounds float, float* %tmp16860, i64 1
+ %tmp16862 = getelementptr inbounds float, float* %tmp16861, i64 1
+ %tmp16863 = getelementptr inbounds float, float* %tmp16862, i64 1
+ %tmp16864 = getelementptr inbounds float, float* %tmp16863, i64 1
+ %tmp16865 = getelementptr inbounds float, float* %tmp16864, i64 1
+ %tmp16866 = getelementptr inbounds float, float* %tmp16865, i64 1
+ %tmp16867 = getelementptr inbounds float, float* %tmp16866, i64 1
+ %tmp16868 = getelementptr inbounds float, float* %tmp16867, i64 1
+ %tmp16869 = getelementptr inbounds float, float* %tmp16868, i64 1
+ %tmp16870 = getelementptr inbounds float, float* %tmp16869, i64 1
+ %tmp16871 = getelementptr inbounds float, float* %tmp16870, i64 1
+ %tmp16872 = getelementptr inbounds float, float* %tmp16871, i64 1
+ %tmp16873 = getelementptr inbounds float, float* %tmp16872, i64 1
+ %tmp16874 = getelementptr inbounds float, float* %tmp16873, i64 1
+ %tmp16875 = getelementptr inbounds float, float* %tmp16874, i64 1
+ %tmp16876 = getelementptr inbounds float, float* %tmp16875, i64 1
+ %tmp16877 = getelementptr inbounds float, float* %tmp16876, i64 1
+ %tmp16878 = getelementptr inbounds float, float* %tmp16877, i64 1
+ %tmp16879 = getelementptr inbounds float, float* %tmp16878, i64 1
+ %tmp16880 = getelementptr inbounds float, float* %tmp16879, i64 1
+ %tmp16881 = getelementptr inbounds float, float* %tmp16880, i64 1
+ %tmp16882 = getelementptr inbounds float, float* %tmp16881, i64 1
+ %tmp16883 = getelementptr inbounds float, float* %tmp16882, i64 1
+ %tmp16884 = getelementptr inbounds float, float* %tmp16883, i64 1
+ %tmp16885 = getelementptr inbounds float, float* %tmp16884, i64 1
+ %tmp16886 = getelementptr inbounds float, float* %tmp16885, i64 1
+ %tmp16887 = getelementptr inbounds float, float* %tmp16886, i64 1
+ %tmp16888 = getelementptr inbounds float, float* %tmp16887, i64 1
+ %tmp16889 = getelementptr inbounds float, float* %tmp16888, i64 1
+ %tmp16890 = getelementptr inbounds float, float* %tmp16889, i64 1
+ %tmp16891 = getelementptr inbounds float, float* %tmp16890, i64 1
+ %tmp16892 = getelementptr inbounds float, float* %tmp16891, i64 1
+ %tmp16893 = getelementptr inbounds float, float* %tmp16892, i64 1
+ %tmp16894 = getelementptr inbounds float, float* %tmp16893, i64 1
+ %tmp16895 = getelementptr inbounds float, float* %tmp16894, i64 1
+ %tmp16896 = getelementptr inbounds float, float* %tmp16895, i64 1
+ %tmp16897 = getelementptr inbounds float, float* %tmp16896, i64 1
+ %tmp16898 = getelementptr inbounds float, float* %tmp16897, i64 1
+ %tmp16899 = getelementptr inbounds float, float* %tmp16898, i64 1
+ %tmp16900 = getelementptr inbounds float, float* %tmp16899, i64 1
+ %tmp16901 = getelementptr inbounds float, float* %tmp16900, i64 1
+ %tmp16902 = getelementptr inbounds float, float* %tmp16901, i64 1
+ %tmp16903 = getelementptr inbounds float, float* %tmp16902, i64 1
+ %tmp16904 = getelementptr inbounds float, float* %tmp16903, i64 1
+ %tmp16905 = getelementptr inbounds float, float* %tmp16904, i64 1
+ %tmp16906 = getelementptr inbounds float, float* %tmp16905, i64 1
+ %tmp16907 = getelementptr inbounds float, float* %tmp16906, i64 1
+ %tmp16908 = getelementptr inbounds float, float* %tmp16907, i64 1
+ %tmp16909 = getelementptr inbounds float, float* %tmp16908, i64 1
+ %tmp16910 = getelementptr inbounds float, float* %tmp16909, i64 1
+ %tmp16911 = getelementptr inbounds float, float* %tmp16910, i64 1
+ %tmp16912 = getelementptr inbounds float, float* %tmp16911, i64 1
+ %tmp16913 = getelementptr inbounds float, float* %tmp16912, i64 1
+ %tmp16914 = getelementptr inbounds float, float* %tmp16913, i64 1
+ %tmp16915 = getelementptr inbounds float, float* %tmp16914, i64 1
+ %tmp16916 = getelementptr inbounds float, float* %tmp16915, i64 1
+ %tmp16917 = getelementptr inbounds float, float* %tmp16916, i64 1
+ %tmp16918 = getelementptr inbounds float, float* %tmp16917, i64 1
+ %tmp16919 = getelementptr inbounds float, float* %tmp16918, i64 1
+ %tmp16920 = getelementptr inbounds float, float* %tmp16919, i64 1
+ %tmp16921 = getelementptr inbounds float, float* %tmp16920, i64 1
+ %tmp16922 = getelementptr inbounds float, float* %tmp16921, i64 1
+ %tmp16923 = getelementptr inbounds float, float* %tmp16922, i64 1
+ %tmp16924 = getelementptr inbounds float, float* %tmp16923, i64 1
+ %tmp16925 = getelementptr inbounds float, float* %tmp16924, i64 1
+ %tmp16926 = getelementptr inbounds float, float* %tmp16925, i64 1
+ %tmp16927 = getelementptr inbounds float, float* %tmp16926, i64 1
+ %tmp16928 = getelementptr inbounds float, float* %tmp16927, i64 1
+ %tmp16929 = getelementptr inbounds float, float* %tmp16928, i64 1
+ %tmp16930 = getelementptr inbounds float, float* %tmp16929, i64 1
+ %tmp16931 = getelementptr inbounds float, float* %tmp16930, i64 1
+ %tmp16932 = getelementptr inbounds float, float* %tmp16931, i64 1
+ %tmp16933 = getelementptr inbounds float, float* %tmp16932, i64 1
+ %tmp16934 = getelementptr inbounds float, float* %tmp16933, i64 1
+ %tmp16935 = getelementptr inbounds float, float* %tmp16934, i64 1
+ %tmp16936 = getelementptr inbounds float, float* %tmp16935, i64 1
+ %tmp16937 = getelementptr inbounds float, float* %tmp16936, i64 1
+ %tmp16938 = getelementptr inbounds float, float* %tmp16937, i64 1
+ %tmp16939 = getelementptr inbounds float, float* %tmp16938, i64 1
+ %tmp16940 = getelementptr inbounds float, float* %tmp16939, i64 1
+ %tmp16941 = getelementptr inbounds float, float* %tmp16940, i64 1
+ %tmp16942 = getelementptr inbounds float, float* %tmp16941, i64 1
+ %tmp16943 = getelementptr inbounds float, float* %tmp16942, i64 1
+ %tmp16944 = getelementptr inbounds float, float* %tmp16943, i64 1
+ %tmp16945 = getelementptr inbounds float, float* %tmp16944, i64 1
+ %tmp16946 = getelementptr inbounds float, float* %tmp16945, i64 1
+ %tmp16947 = getelementptr inbounds float, float* %tmp16946, i64 1
+ %tmp16948 = getelementptr inbounds float, float* %tmp16947, i64 1
+ %tmp16949 = getelementptr inbounds float, float* %tmp16948, i64 1
+ %tmp16950 = getelementptr inbounds float, float* %tmp16949, i64 1
+ %tmp16951 = getelementptr inbounds float, float* %tmp16950, i64 1
+ %tmp16952 = getelementptr inbounds float, float* %tmp16951, i64 1
+ %tmp16953 = getelementptr inbounds float, float* %tmp16952, i64 1
+ %tmp16954 = getelementptr inbounds float, float* %tmp16953, i64 1
+ %tmp16955 = getelementptr inbounds float, float* %tmp16954, i64 1
+ %tmp16956 = getelementptr inbounds float, float* %tmp16955, i64 1
+ %tmp16957 = getelementptr inbounds float, float* %tmp16956, i64 1
+ %tmp16958 = getelementptr inbounds float, float* %tmp16957, i64 1
+ %tmp16959 = getelementptr inbounds float, float* %tmp16958, i64 1
+ %tmp16960 = getelementptr inbounds float, float* %tmp16959, i64 1
+ %tmp16961 = getelementptr inbounds float, float* %tmp16960, i64 1
+ %tmp16962 = getelementptr inbounds float, float* %tmp16961, i64 1
+ %tmp16963 = getelementptr inbounds float, float* %tmp16962, i64 1
+ %tmp16964 = getelementptr inbounds float, float* %tmp16963, i64 1
+ %tmp16965 = getelementptr inbounds float, float* %tmp16964, i64 1
+ %tmp16966 = getelementptr inbounds float, float* %tmp16965, i64 1
+ %tmp16967 = getelementptr inbounds float, float* %tmp16966, i64 1
+ %tmp16968 = getelementptr inbounds float, float* %tmp16967, i64 1
+ %tmp16969 = getelementptr inbounds float, float* %tmp16968, i64 1
+ %tmp16970 = getelementptr inbounds float, float* %tmp16969, i64 1
+ %tmp16971 = getelementptr inbounds float, float* %tmp16970, i64 1
+ %tmp16972 = getelementptr inbounds float, float* %tmp16971, i64 1
+ %tmp16973 = getelementptr inbounds float, float* %tmp16972, i64 1
+ %tmp16974 = getelementptr inbounds float, float* %tmp16973, i64 1
+ %tmp16975 = getelementptr inbounds float, float* %tmp16974, i64 1
+ %tmp16976 = getelementptr inbounds float, float* %tmp16975, i64 1
+ %tmp16977 = getelementptr inbounds float, float* %tmp16976, i64 1
+ %tmp16978 = getelementptr inbounds float, float* %tmp16977, i64 1
+ %tmp16979 = getelementptr inbounds float, float* %tmp16978, i64 1
+ %tmp16980 = getelementptr inbounds float, float* %tmp16979, i64 1
+ %tmp16981 = getelementptr inbounds float, float* %tmp16980, i64 1
+ %tmp16982 = getelementptr inbounds float, float* %tmp16981, i64 1
+ %tmp16983 = getelementptr inbounds float, float* %tmp16982, i64 1
+ %tmp16984 = getelementptr inbounds float, float* %tmp16983, i64 1
+ %tmp16985 = getelementptr inbounds float, float* %tmp16984, i64 1
+ %tmp16986 = getelementptr inbounds float, float* %tmp16985, i64 1
+ %tmp16987 = getelementptr inbounds float, float* %tmp16986, i64 1
+ %tmp16988 = getelementptr inbounds float, float* %tmp16987, i64 1
+ %tmp16989 = getelementptr inbounds float, float* %tmp16988, i64 1
+ %tmp16990 = getelementptr inbounds float, float* %tmp16989, i64 1
+ %tmp16991 = getelementptr inbounds float, float* %tmp16990, i64 1
+ %tmp16992 = getelementptr inbounds float, float* %tmp16991, i64 1
+ %tmp16993 = getelementptr inbounds float, float* %tmp16992, i64 1
+ %tmp16994 = getelementptr inbounds float, float* %tmp16993, i64 1
+ %tmp16995 = getelementptr inbounds float, float* %tmp16994, i64 1
+ %tmp16996 = getelementptr inbounds float, float* %tmp16995, i64 1
+ %tmp16997 = getelementptr inbounds float, float* %tmp16996, i64 1
+ %tmp16998 = getelementptr inbounds float, float* %tmp16997, i64 1
+ %tmp16999 = getelementptr inbounds float, float* %tmp16998, i64 1
+ %tmp17000 = getelementptr inbounds float, float* %tmp16999, i64 1
+ %tmp17001 = getelementptr inbounds float, float* %tmp17000, i64 1
+ %tmp17002 = getelementptr inbounds float, float* %tmp17001, i64 1
+ %tmp17003 = getelementptr inbounds float, float* %tmp17002, i64 1
+ %tmp17004 = getelementptr inbounds float, float* %tmp17003, i64 1
+ %tmp17005 = getelementptr inbounds float, float* %tmp17004, i64 1
+ %tmp17006 = getelementptr inbounds float, float* %tmp17005, i64 1
+ %tmp17007 = getelementptr inbounds float, float* %tmp17006, i64 1
+ %tmp17008 = getelementptr inbounds float, float* %tmp17007, i64 1
+ %tmp17009 = getelementptr inbounds float, float* %tmp17008, i64 1
+ %tmp17010 = getelementptr inbounds float, float* %tmp17009, i64 1
+ %tmp17011 = getelementptr inbounds float, float* %tmp17010, i64 1
+ %tmp17012 = getelementptr inbounds float, float* %tmp17011, i64 1
+ %tmp17013 = getelementptr inbounds float, float* %tmp17012, i64 1
+ %tmp17014 = getelementptr inbounds float, float* %tmp17013, i64 1
+ %tmp17015 = getelementptr inbounds float, float* %tmp17014, i64 1
+ %tmp17016 = getelementptr inbounds float, float* %tmp17015, i64 1
+ %tmp17017 = getelementptr inbounds float, float* %tmp17016, i64 1
+ %tmp17018 = getelementptr inbounds float, float* %tmp17017, i64 1
+ %tmp17019 = getelementptr inbounds float, float* %tmp17018, i64 1
+ %tmp17020 = getelementptr inbounds float, float* %tmp17019, i64 1
+ %tmp17021 = getelementptr inbounds float, float* %tmp17020, i64 1
+ %tmp17022 = getelementptr inbounds float, float* %tmp17021, i64 1
+ %tmp17023 = getelementptr inbounds float, float* %tmp17022, i64 1
+ %tmp17024 = getelementptr inbounds float, float* %tmp17023, i64 1
+ %tmp17025 = getelementptr inbounds float, float* %tmp17024, i64 1
+ %tmp17026 = getelementptr inbounds float, float* %tmp17025, i64 1
+ %tmp17027 = getelementptr inbounds float, float* %tmp17026, i64 1
+ %tmp17028 = getelementptr inbounds float, float* %tmp17027, i64 1
+ %tmp17029 = getelementptr inbounds float, float* %tmp17028, i64 1
+ %tmp17030 = getelementptr inbounds float, float* %tmp17029, i64 1
+ %tmp17031 = getelementptr inbounds float, float* %tmp17030, i64 1
+ %tmp17032 = getelementptr inbounds float, float* %tmp17031, i64 1
+ %tmp17033 = getelementptr inbounds float, float* %tmp17032, i64 1
+ %tmp17034 = getelementptr inbounds float, float* %tmp17033, i64 1
+ %tmp17035 = getelementptr inbounds float, float* %tmp17034, i64 1
+ %tmp17036 = getelementptr inbounds float, float* %tmp17035, i64 1
+ %tmp17037 = getelementptr inbounds float, float* %tmp17036, i64 1
+ %tmp17038 = getelementptr inbounds float, float* %tmp17037, i64 1
+ %tmp17039 = getelementptr inbounds float, float* %tmp17038, i64 1
+ %tmp17040 = getelementptr inbounds float, float* %tmp17039, i64 1
+ %tmp17041 = getelementptr inbounds float, float* %tmp17040, i64 1
+ %tmp17042 = getelementptr inbounds float, float* %tmp17041, i64 1
+ %tmp17043 = getelementptr inbounds float, float* %tmp17042, i64 1
+ %tmp17044 = getelementptr inbounds float, float* %tmp17043, i64 1
+ %tmp17045 = getelementptr inbounds float, float* %tmp17044, i64 1
+ %tmp17046 = getelementptr inbounds float, float* %tmp17045, i64 1
+ %tmp17047 = getelementptr inbounds float, float* %tmp17046, i64 1
+ %tmp17048 = getelementptr inbounds float, float* %tmp17047, i64 1
+ %tmp17049 = getelementptr inbounds float, float* %tmp17048, i64 1
+ %tmp17050 = getelementptr inbounds float, float* %tmp17049, i64 1
+ %tmp17051 = getelementptr inbounds float, float* %tmp17050, i64 1
+ %tmp17052 = getelementptr inbounds float, float* %tmp17051, i64 1
+ %tmp17053 = getelementptr inbounds float, float* %tmp17052, i64 1
+ %tmp17054 = getelementptr inbounds float, float* %tmp17053, i64 1
+ %tmp17055 = getelementptr inbounds float, float* %tmp17054, i64 1
+ %tmp17056 = getelementptr inbounds float, float* %tmp17055, i64 1
+ %tmp17057 = getelementptr inbounds float, float* %tmp17056, i64 1
+ %tmp17058 = getelementptr inbounds float, float* %tmp17057, i64 1
+ %tmp17059 = getelementptr inbounds float, float* %tmp17058, i64 1
+ %tmp17060 = getelementptr inbounds float, float* %tmp17059, i64 1
+ %tmp17061 = getelementptr inbounds float, float* %tmp17060, i64 1
+ %tmp17062 = getelementptr inbounds float, float* %tmp17061, i64 1
+ %tmp17063 = getelementptr inbounds float, float* %tmp17062, i64 1
+ %tmp17064 = getelementptr inbounds float, float* %tmp17063, i64 1
+ %tmp17065 = getelementptr inbounds float, float* %tmp17064, i64 1
+ %tmp17066 = getelementptr inbounds float, float* %tmp17065, i64 1
+ %tmp17067 = getelementptr inbounds float, float* %tmp17066, i64 1
+ %tmp17068 = getelementptr inbounds float, float* %tmp17067, i64 1
+ %tmp17069 = getelementptr inbounds float, float* %tmp17068, i64 1
+ %tmp17070 = getelementptr inbounds float, float* %tmp17069, i64 1
+ %tmp17071 = getelementptr inbounds float, float* %tmp17070, i64 1
+ %tmp17072 = getelementptr inbounds float, float* %tmp17071, i64 1
+ %tmp17073 = getelementptr inbounds float, float* %tmp17072, i64 1
+ %tmp17074 = getelementptr inbounds float, float* %tmp17073, i64 1
+ %tmp17075 = getelementptr inbounds float, float* %tmp17074, i64 1
+ %tmp17076 = getelementptr inbounds float, float* %tmp17075, i64 1
+ %tmp17077 = getelementptr inbounds float, float* %tmp17076, i64 1
+ %tmp17078 = getelementptr inbounds float, float* %tmp17077, i64 1
+ %tmp17079 = getelementptr inbounds float, float* %tmp17078, i64 1
+ %tmp17080 = getelementptr inbounds float, float* %tmp17079, i64 1
+ %tmp17081 = getelementptr inbounds float, float* %tmp17080, i64 1
+ %tmp17082 = getelementptr inbounds float, float* %tmp17081, i64 1
+ %tmp17083 = getelementptr inbounds float, float* %tmp17082, i64 1
+ %tmp17084 = getelementptr inbounds float, float* %tmp17083, i64 1
+ %tmp17085 = getelementptr inbounds float, float* %tmp17084, i64 1
+ %tmp17086 = getelementptr inbounds float, float* %tmp17085, i64 1
+ %tmp17087 = getelementptr inbounds float, float* %tmp17086, i64 1
+ %tmp17088 = getelementptr inbounds float, float* %tmp17087, i64 1
+ %tmp17089 = getelementptr inbounds float, float* %tmp17088, i64 1
+ %tmp17090 = getelementptr inbounds float, float* %tmp17089, i64 1
+ %tmp17091 = getelementptr inbounds float, float* %tmp17090, i64 1
+ %tmp17092 = getelementptr inbounds float, float* %tmp17091, i64 1
+ %tmp17093 = getelementptr inbounds float, float* %tmp17092, i64 1
+ %tmp17094 = getelementptr inbounds float, float* %tmp17093, i64 1
+ %tmp17095 = getelementptr inbounds float, float* %tmp17094, i64 1
+ %tmp17096 = getelementptr inbounds float, float* %tmp17095, i64 1
+ %tmp17097 = getelementptr inbounds float, float* %tmp17096, i64 1
+ %tmp17098 = getelementptr inbounds float, float* %tmp17097, i64 1
+ %tmp17099 = getelementptr inbounds float, float* %tmp17098, i64 1
+ %tmp17100 = getelementptr inbounds float, float* %tmp17099, i64 1
+ %tmp17101 = getelementptr inbounds float, float* %tmp17100, i64 1
+ %tmp17102 = getelementptr inbounds float, float* %tmp17101, i64 1
+ %tmp17103 = getelementptr inbounds float, float* %tmp17102, i64 1
+ %tmp17104 = getelementptr inbounds float, float* %tmp17103, i64 1
+ %tmp17105 = getelementptr inbounds float, float* %tmp17104, i64 1
+ %tmp17106 = getelementptr inbounds float, float* %tmp17105, i64 1
+ %tmp17107 = getelementptr inbounds float, float* %tmp17106, i64 1
+ %tmp17108 = getelementptr inbounds float, float* %tmp17107, i64 1
+ %tmp17109 = getelementptr inbounds float, float* %tmp17108, i64 1
+ %tmp17110 = getelementptr inbounds float, float* %tmp17109, i64 1
+ %tmp17111 = getelementptr inbounds float, float* %tmp17110, i64 1
+ %tmp17112 = getelementptr inbounds float, float* %tmp17111, i64 1
+ %tmp17113 = getelementptr inbounds float, float* %tmp17112, i64 1
+ %tmp17114 = getelementptr inbounds float, float* %tmp17113, i64 1
+ %tmp17115 = getelementptr inbounds float, float* %tmp17114, i64 1
+ %tmp17116 = getelementptr inbounds float, float* %tmp17115, i64 1
+ %tmp17117 = getelementptr inbounds float, float* %tmp17116, i64 1
+ %tmp17118 = getelementptr inbounds float, float* %tmp17117, i64 1
+ %tmp17119 = getelementptr inbounds float, float* %tmp17118, i64 1
+ %tmp17120 = getelementptr inbounds float, float* %tmp17119, i64 1
+ %tmp17121 = getelementptr inbounds float, float* %tmp17120, i64 1
+ %tmp17122 = getelementptr inbounds float, float* %tmp17121, i64 1
+ %tmp17123 = getelementptr inbounds float, float* %tmp17122, i64 1
+ %tmp17124 = getelementptr inbounds float, float* %tmp17123, i64 1
+ %tmp17125 = getelementptr inbounds float, float* %tmp17124, i64 1
+ %tmp17126 = getelementptr inbounds float, float* %tmp17125, i64 1
+ %tmp17127 = getelementptr inbounds float, float* %tmp17126, i64 1
+ %tmp17128 = getelementptr inbounds float, float* %tmp17127, i64 1
+ %tmp17129 = getelementptr inbounds float, float* %tmp17128, i64 1
+ %tmp17130 = getelementptr inbounds float, float* %tmp17129, i64 1
+ %tmp17131 = getelementptr inbounds float, float* %tmp17130, i64 1
+ %tmp17132 = getelementptr inbounds float, float* %tmp17131, i64 1
+ %tmp17133 = getelementptr inbounds float, float* %tmp17132, i64 1
+ %tmp17134 = getelementptr inbounds float, float* %tmp17133, i64 1
+ %tmp17135 = getelementptr inbounds float, float* %tmp17134, i64 1
+ %tmp17136 = getelementptr inbounds float, float* %tmp17135, i64 1
+ %tmp17137 = getelementptr inbounds float, float* %tmp17136, i64 1
+ %tmp17138 = getelementptr inbounds float, float* %tmp17137, i64 1
+ %tmp17139 = getelementptr inbounds float, float* %tmp17138, i64 1
+ %tmp17140 = getelementptr inbounds float, float* %tmp17139, i64 1
+ %tmp17141 = getelementptr inbounds float, float* %tmp17140, i64 1
+ %tmp17142 = getelementptr inbounds float, float* %tmp17141, i64 1
+ %tmp17143 = getelementptr inbounds float, float* %tmp17142, i64 1
+ %tmp17144 = getelementptr inbounds float, float* %tmp17143, i64 1
+ %tmp17145 = getelementptr inbounds float, float* %tmp17144, i64 1
+ %tmp17146 = getelementptr inbounds float, float* %tmp17145, i64 1
+ %tmp17147 = getelementptr inbounds float, float* %tmp17146, i64 1
+ %tmp17148 = getelementptr inbounds float, float* %tmp17147, i64 1
+ %tmp17149 = getelementptr inbounds float, float* %tmp17148, i64 1
+ %tmp17150 = getelementptr inbounds float, float* %tmp17149, i64 1
+ %tmp17151 = getelementptr inbounds float, float* %tmp17150, i64 1
+ %tmp17152 = getelementptr inbounds float, float* %tmp17151, i64 1
+ %tmp17153 = getelementptr inbounds float, float* %tmp17152, i64 1
+ %tmp17154 = getelementptr inbounds float, float* %tmp17153, i64 1
+ %tmp17155 = getelementptr inbounds float, float* %tmp17154, i64 1
+ %tmp17156 = getelementptr inbounds float, float* %tmp17155, i64 1
+ %tmp17157 = getelementptr inbounds float, float* %tmp17156, i64 1
+ %tmp17158 = getelementptr inbounds float, float* %tmp17157, i64 1
+ %tmp17159 = getelementptr inbounds float, float* %tmp17158, i64 1
+ %tmp17160 = getelementptr inbounds float, float* %tmp17159, i64 1
+ %tmp17161 = getelementptr inbounds float, float* %tmp17160, i64 1
+ %tmp17162 = getelementptr inbounds float, float* %tmp17161, i64 1
+ %tmp17163 = getelementptr inbounds float, float* %tmp17162, i64 1
+ %tmp17164 = getelementptr inbounds float, float* %tmp17163, i64 1
+ %tmp17165 = getelementptr inbounds float, float* %tmp17164, i64 1
+ %tmp17166 = getelementptr inbounds float, float* %tmp17165, i64 1
+ %tmp17167 = getelementptr inbounds float, float* %tmp17166, i64 1
+ %tmp17168 = getelementptr inbounds float, float* %tmp17167, i64 1
+ %tmp17169 = getelementptr inbounds float, float* %tmp17168, i64 1
+ %tmp17170 = getelementptr inbounds float, float* %tmp17169, i64 1
+ %tmp17171 = getelementptr inbounds float, float* %tmp17170, i64 1
+ %tmp17172 = getelementptr inbounds float, float* %tmp17171, i64 1
+ %tmp17173 = getelementptr inbounds float, float* %tmp17172, i64 1
+ %tmp17174 = getelementptr inbounds float, float* %tmp17173, i64 1
+ %tmp17175 = getelementptr inbounds float, float* %tmp17174, i64 1
+ %tmp17176 = getelementptr inbounds float, float* %tmp17175, i64 1
+ %tmp17177 = getelementptr inbounds float, float* %tmp17176, i64 1
+ %tmp17178 = getelementptr inbounds float, float* %tmp17177, i64 1
+ %tmp17179 = getelementptr inbounds float, float* %tmp17178, i64 1
+ %tmp17180 = getelementptr inbounds float, float* %tmp17179, i64 1
+ %tmp17181 = getelementptr inbounds float, float* %tmp17180, i64 1
+ %tmp17182 = getelementptr inbounds float, float* %tmp17181, i64 1
+ %tmp17183 = getelementptr inbounds float, float* %tmp17182, i64 1
+ %tmp17184 = getelementptr inbounds float, float* %tmp17183, i64 1
+ %tmp17185 = getelementptr inbounds float, float* %tmp17184, i64 1
+ %tmp17186 = getelementptr inbounds float, float* %tmp17185, i64 1
+ %tmp17187 = getelementptr inbounds float, float* %tmp17186, i64 1
+ %tmp17188 = getelementptr inbounds float, float* %tmp17187, i64 1
+ %tmp17189 = getelementptr inbounds float, float* %tmp17188, i64 1
+ %tmp17190 = getelementptr inbounds float, float* %tmp17189, i64 1
+ %tmp17191 = getelementptr inbounds float, float* %tmp17190, i64 1
+ %tmp17192 = getelementptr inbounds float, float* %tmp17191, i64 1
+ %tmp17193 = getelementptr inbounds float, float* %tmp17192, i64 1
+ %tmp17194 = getelementptr inbounds float, float* %tmp17193, i64 1
+ %tmp17195 = getelementptr inbounds float, float* %tmp17194, i64 1
+ %tmp17196 = getelementptr inbounds float, float* %tmp17195, i64 1
+ %tmp17197 = getelementptr inbounds float, float* %tmp17196, i64 1
+ %tmp17198 = getelementptr inbounds float, float* %tmp17197, i64 1
+ %tmp17199 = getelementptr inbounds float, float* %tmp17198, i64 1
+ %tmp17200 = getelementptr inbounds float, float* %tmp17199, i64 1
+ %tmp17201 = getelementptr inbounds float, float* %tmp17200, i64 1
+ %tmp17202 = getelementptr inbounds float, float* %tmp17201, i64 1
+ %tmp17203 = getelementptr inbounds float, float* %tmp17202, i64 1
+ %tmp17204 = getelementptr inbounds float, float* %tmp17203, i64 1
+ %tmp17205 = getelementptr inbounds float, float* %tmp17204, i64 1
+ %tmp17206 = getelementptr inbounds float, float* %tmp17205, i64 1
+ %tmp17207 = getelementptr inbounds float, float* %tmp17206, i64 1
+ %tmp17208 = getelementptr inbounds float, float* %tmp17207, i64 1
+ %tmp17209 = getelementptr inbounds float, float* %tmp17208, i64 1
+ %tmp17210 = getelementptr inbounds float, float* %tmp17209, i64 1
+ %tmp17211 = getelementptr inbounds float, float* %tmp17210, i64 1
+ %tmp17212 = getelementptr inbounds float, float* %tmp17211, i64 1
+ %tmp17213 = getelementptr inbounds float, float* %tmp17212, i64 1
+ %tmp17214 = getelementptr inbounds float, float* %tmp17213, i64 1
+ %tmp17215 = getelementptr inbounds float, float* %tmp17214, i64 1
+ %tmp17216 = getelementptr inbounds float, float* %tmp17215, i64 1
+ %tmp17217 = getelementptr inbounds float, float* %tmp17216, i64 1
+ %tmp17218 = getelementptr inbounds float, float* %tmp17217, i64 1
+ %tmp17219 = getelementptr inbounds float, float* %tmp17218, i64 1
+ %tmp17220 = getelementptr inbounds float, float* %tmp17219, i64 1
+ %tmp17221 = getelementptr inbounds float, float* %tmp17220, i64 1
+ %tmp17222 = getelementptr inbounds float, float* %tmp17221, i64 1
+ %tmp17223 = getelementptr inbounds float, float* %tmp17222, i64 1
+ %tmp17224 = getelementptr inbounds float, float* %tmp17223, i64 1
+ %tmp17225 = getelementptr inbounds float, float* %tmp17224, i64 1
+ %tmp17226 = getelementptr inbounds float, float* %tmp17225, i64 1
+ %tmp17227 = getelementptr inbounds float, float* %tmp17226, i64 1
+ %tmp17228 = getelementptr inbounds float, float* %tmp17227, i64 1
+ %tmp17229 = getelementptr inbounds float, float* %tmp17228, i64 1
+ %tmp17230 = getelementptr inbounds float, float* %tmp17229, i64 1
+ %tmp17231 = getelementptr inbounds float, float* %tmp17230, i64 1
+ %tmp17232 = getelementptr inbounds float, float* %tmp17231, i64 1
+ %tmp17233 = getelementptr inbounds float, float* %tmp17232, i64 1
+ %tmp17234 = getelementptr inbounds float, float* %tmp17233, i64 1
+ %tmp17235 = getelementptr inbounds float, float* %tmp17234, i64 1
+ %tmp17236 = getelementptr inbounds float, float* %tmp17235, i64 1
+ %tmp17237 = getelementptr inbounds float, float* %tmp17236, i64 1
+ %tmp17238 = getelementptr inbounds float, float* %tmp17237, i64 1
+ %tmp17239 = getelementptr inbounds float, float* %tmp17238, i64 1
+ %tmp17240 = getelementptr inbounds float, float* %tmp17239, i64 1
+ %tmp17241 = getelementptr inbounds float, float* %tmp17240, i64 1
+ %tmp17242 = getelementptr inbounds float, float* %tmp17241, i64 1
+ %tmp17243 = getelementptr inbounds float, float* %tmp17242, i64 1
+ %tmp17244 = getelementptr inbounds float, float* %tmp17243, i64 1
+ %tmp17245 = getelementptr inbounds float, float* %tmp17244, i64 1
+ %tmp17246 = getelementptr inbounds float, float* %tmp17245, i64 1
+ %tmp17247 = getelementptr inbounds float, float* %tmp17246, i64 1
+ %tmp17248 = getelementptr inbounds float, float* %tmp17247, i64 1
+ %tmp17249 = getelementptr inbounds float, float* %tmp17248, i64 1
+ %tmp17250 = getelementptr inbounds float, float* %tmp17249, i64 1
+ %tmp17251 = getelementptr inbounds float, float* %tmp17250, i64 1
+ %tmp17252 = getelementptr inbounds float, float* %tmp17251, i64 1
+ %tmp17253 = getelementptr inbounds float, float* %tmp17252, i64 1
+ %tmp17254 = getelementptr inbounds float, float* %tmp17253, i64 1
+ %tmp17255 = getelementptr inbounds float, float* %tmp17254, i64 1
+ %tmp17256 = getelementptr inbounds float, float* %tmp17255, i64 1
+ %tmp17257 = getelementptr inbounds float, float* %tmp17256, i64 1
+ %tmp17258 = getelementptr inbounds float, float* %tmp17257, i64 1
+ %tmp17259 = getelementptr inbounds float, float* %tmp17258, i64 1
+ %tmp17260 = getelementptr inbounds float, float* %tmp17259, i64 1
+ %tmp17261 = getelementptr inbounds float, float* %tmp17260, i64 1
+ %tmp17262 = getelementptr inbounds float, float* %tmp17261, i64 1
+ %tmp17263 = getelementptr inbounds float, float* %tmp17262, i64 1
+ %tmp17264 = getelementptr inbounds float, float* %tmp17263, i64 1
+ %tmp17265 = getelementptr inbounds float, float* %tmp17264, i64 1
+ %tmp17266 = getelementptr inbounds float, float* %tmp17265, i64 1
+ %tmp17267 = getelementptr inbounds float, float* %tmp17266, i64 1
+ %tmp17268 = getelementptr inbounds float, float* %tmp17267, i64 1
+ %tmp17269 = getelementptr inbounds float, float* %tmp17268, i64 1
+ %tmp17270 = getelementptr inbounds float, float* %tmp17269, i64 1
+ %tmp17271 = getelementptr inbounds float, float* %tmp17270, i64 1
+ %tmp17272 = getelementptr inbounds float, float* %tmp17271, i64 1
+ %tmp17273 = getelementptr inbounds float, float* %tmp17272, i64 1
+ %tmp17274 = getelementptr inbounds float, float* %tmp17273, i64 1
+ %tmp17275 = getelementptr inbounds float, float* %tmp17274, i64 1
+ %tmp17276 = getelementptr inbounds float, float* %tmp17275, i64 1
+ %tmp17277 = getelementptr inbounds float, float* %tmp17276, i64 1
+ %tmp17278 = getelementptr inbounds float, float* %tmp17277, i64 1
+ %tmp17279 = getelementptr inbounds float, float* %tmp17278, i64 1
+ %tmp17280 = getelementptr inbounds float, float* %tmp17279, i64 1
+ %tmp17281 = getelementptr inbounds float, float* %tmp17280, i64 1
+ %tmp17282 = getelementptr inbounds float, float* %tmp17281, i64 1
+ %tmp17283 = getelementptr inbounds float, float* %tmp17282, i64 1
+ %tmp17284 = getelementptr inbounds float, float* %tmp17283, i64 1
+ %tmp17285 = getelementptr inbounds float, float* %tmp17284, i64 1
+ %tmp17286 = getelementptr inbounds float, float* %tmp17285, i64 1
+ %tmp17287 = getelementptr inbounds float, float* %tmp17286, i64 1
+ %tmp17288 = getelementptr inbounds float, float* %tmp17287, i64 1
+ %tmp17289 = getelementptr inbounds float, float* %tmp17288, i64 1
+ %tmp17290 = getelementptr inbounds float, float* %tmp17289, i64 1
+ %tmp17291 = getelementptr inbounds float, float* %tmp17290, i64 1
+ %tmp17292 = getelementptr inbounds float, float* %tmp17291, i64 1
+ %tmp17293 = getelementptr inbounds float, float* %tmp17292, i64 1
+ %tmp17294 = getelementptr inbounds float, float* %tmp17293, i64 1
+ %tmp17295 = getelementptr inbounds float, float* %tmp17294, i64 1
+ %tmp17296 = getelementptr inbounds float, float* %tmp17295, i64 1
+ %tmp17297 = getelementptr inbounds float, float* %tmp17296, i64 1
+ %tmp17298 = getelementptr inbounds float, float* %tmp17297, i64 1
+ %tmp17299 = getelementptr inbounds float, float* %tmp17298, i64 1
+ %tmp17300 = getelementptr inbounds float, float* %tmp17299, i64 1
+ %tmp17301 = getelementptr inbounds float, float* %tmp17300, i64 1
+ %tmp17302 = getelementptr inbounds float, float* %tmp17301, i64 1
+ %tmp17303 = getelementptr inbounds float, float* %tmp17302, i64 1
+ %tmp17304 = getelementptr inbounds float, float* %tmp17303, i64 1
+ %tmp17305 = getelementptr inbounds float, float* %tmp17304, i64 1
+ %tmp17306 = getelementptr inbounds float, float* %tmp17305, i64 1
+ %tmp17307 = getelementptr inbounds float, float* %tmp17306, i64 1
+ %tmp17308 = getelementptr inbounds float, float* %tmp17307, i64 1
+ %tmp17309 = getelementptr inbounds float, float* %tmp17308, i64 1
+ %tmp17310 = getelementptr inbounds float, float* %tmp17309, i64 1
+ %tmp17311 = getelementptr inbounds float, float* %tmp17310, i64 1
+ %tmp17312 = getelementptr inbounds float, float* %tmp17311, i64 1
+ %tmp17313 = getelementptr inbounds float, float* %tmp17312, i64 1
+ %tmp17314 = getelementptr inbounds float, float* %tmp17313, i64 1
+ %tmp17315 = getelementptr inbounds float, float* %tmp17314, i64 1
+ %tmp17316 = getelementptr inbounds float, float* %tmp17315, i64 1
+ %tmp17317 = getelementptr inbounds float, float* %tmp17316, i64 1
+ %tmp17318 = getelementptr inbounds float, float* %tmp17317, i64 1
+ %tmp17319 = getelementptr inbounds float, float* %tmp17318, i64 1
+ %tmp17320 = getelementptr inbounds float, float* %tmp17319, i64 1
+ %tmp17321 = getelementptr inbounds float, float* %tmp17320, i64 1
+ %tmp17322 = getelementptr inbounds float, float* %tmp17321, i64 1
+ %tmp17323 = getelementptr inbounds float, float* %tmp17322, i64 1
+ %tmp17324 = getelementptr inbounds float, float* %tmp17323, i64 1
+ %tmp17325 = getelementptr inbounds float, float* %tmp17324, i64 1
+ %tmp17326 = getelementptr inbounds float, float* %tmp17325, i64 1
+ %tmp17327 = getelementptr inbounds float, float* %tmp17326, i64 1
+ %tmp17328 = getelementptr inbounds float, float* %tmp17327, i64 1
+ %tmp17329 = getelementptr inbounds float, float* %tmp17328, i64 1
+ %tmp17330 = getelementptr inbounds float, float* %tmp17329, i64 1
+ %tmp17331 = getelementptr inbounds float, float* %tmp17330, i64 1
+ %tmp17332 = getelementptr inbounds float, float* %tmp17331, i64 1
+ %tmp17333 = getelementptr inbounds float, float* %tmp17332, i64 1
+ %tmp17334 = getelementptr inbounds float, float* %tmp17333, i64 1
+ %tmp17335 = getelementptr inbounds float, float* %tmp17334, i64 1
+ %tmp17336 = getelementptr inbounds float, float* %tmp17335, i64 1
+ %tmp17337 = getelementptr inbounds float, float* %tmp17336, i64 1
+ %tmp17338 = getelementptr inbounds float, float* %tmp17337, i64 1
+ %tmp17339 = getelementptr inbounds float, float* %tmp17338, i64 1
+ %tmp17340 = getelementptr inbounds float, float* %tmp17339, i64 1
+ %tmp17341 = getelementptr inbounds float, float* %tmp17340, i64 1
+ %tmp17342 = getelementptr inbounds float, float* %tmp17341, i64 1
+ %tmp17343 = getelementptr inbounds float, float* %tmp17342, i64 1
+ %tmp17344 = getelementptr inbounds float, float* %tmp17343, i64 1
+ %tmp17345 = getelementptr inbounds float, float* %tmp17344, i64 1
+ %tmp17346 = getelementptr inbounds float, float* %tmp17345, i64 1
+ %tmp17347 = getelementptr inbounds float, float* %tmp17346, i64 1
+ %tmp17348 = getelementptr inbounds float, float* %tmp17347, i64 1
+ %tmp17349 = getelementptr inbounds float, float* %tmp17348, i64 1
+ %tmp17350 = getelementptr inbounds float, float* %tmp17349, i64 1
+ %tmp17351 = getelementptr inbounds float, float* %tmp17350, i64 1
+ %tmp17352 = getelementptr inbounds float, float* %tmp17351, i64 1
+ %tmp17353 = getelementptr inbounds float, float* %tmp17352, i64 1
+ %tmp17354 = getelementptr inbounds float, float* %tmp17353, i64 1
+ %tmp17355 = getelementptr inbounds float, float* %tmp17354, i64 1
+ %tmp17356 = getelementptr inbounds float, float* %tmp17355, i64 1
+ %tmp17357 = getelementptr inbounds float, float* %tmp17356, i64 1
+ %tmp17358 = getelementptr inbounds float, float* %tmp17357, i64 1
+ %tmp17359 = getelementptr inbounds float, float* %tmp17358, i64 1
+ %tmp17360 = getelementptr inbounds float, float* %tmp17359, i64 1
+ %tmp17361 = getelementptr inbounds float, float* %tmp17360, i64 1
+ %tmp17362 = getelementptr inbounds float, float* %tmp17361, i64 1
+ %tmp17363 = getelementptr inbounds float, float* %tmp17362, i64 1
+ %tmp17364 = getelementptr inbounds float, float* %tmp17363, i64 1
+ %tmp17365 = getelementptr inbounds float, float* %tmp17364, i64 1
+ %tmp17366 = getelementptr inbounds float, float* %tmp17365, i64 1
+ %tmp17367 = getelementptr inbounds float, float* %tmp17366, i64 1
+ %tmp17368 = getelementptr inbounds float, float* %tmp17367, i64 1
+ %tmp17369 = getelementptr inbounds float, float* %tmp17368, i64 1
+ %tmp17370 = getelementptr inbounds float, float* %tmp17369, i64 1
+ %tmp17371 = getelementptr inbounds float, float* %tmp17370, i64 1
+ %tmp17372 = getelementptr inbounds float, float* %tmp17371, i64 1
+ %tmp17373 = getelementptr inbounds float, float* %tmp17372, i64 1
+ %tmp17374 = getelementptr inbounds float, float* %tmp17373, i64 1
+ %tmp17375 = getelementptr inbounds float, float* %tmp17374, i64 1
+ %tmp17376 = getelementptr inbounds float, float* %tmp17375, i64 1
+ %tmp17377 = getelementptr inbounds float, float* %tmp17376, i64 1
+ %tmp17378 = getelementptr inbounds float, float* %tmp17377, i64 1
+ %tmp17379 = getelementptr inbounds float, float* %tmp17378, i64 1
+ %tmp17380 = getelementptr inbounds float, float* %tmp17379, i64 1
+ %tmp17381 = getelementptr inbounds float, float* %tmp17380, i64 1
+ %tmp17382 = getelementptr inbounds float, float* %tmp17381, i64 1
+ %tmp17383 = getelementptr inbounds float, float* %tmp17382, i64 1
+ %tmp17384 = getelementptr inbounds float, float* %tmp17383, i64 1
+ %tmp17385 = getelementptr inbounds float, float* %tmp17384, i64 1
+ %tmp17386 = getelementptr inbounds float, float* %tmp17385, i64 1
+ %tmp17387 = getelementptr inbounds float, float* %tmp17386, i64 1
+ %tmp17388 = getelementptr inbounds float, float* %tmp17387, i64 1
+ %tmp17389 = getelementptr inbounds float, float* %tmp17388, i64 1
+ %tmp17390 = getelementptr inbounds float, float* %tmp17389, i64 1
+ %tmp17391 = getelementptr inbounds float, float* %tmp17390, i64 1
+ %tmp17392 = getelementptr inbounds float, float* %tmp17391, i64 1
+ %tmp17393 = getelementptr inbounds float, float* %tmp17392, i64 1
+ %tmp17394 = getelementptr inbounds float, float* %tmp17393, i64 1
+ %tmp17395 = getelementptr inbounds float, float* %tmp17394, i64 1
+ %tmp17396 = getelementptr inbounds float, float* %tmp17395, i64 1
+ %tmp17397 = getelementptr inbounds float, float* %tmp17396, i64 1
+ %tmp17398 = getelementptr inbounds float, float* %tmp17397, i64 1
+ %tmp17399 = getelementptr inbounds float, float* %tmp17398, i64 1
+ %tmp17400 = getelementptr inbounds float, float* %tmp17399, i64 1
+ %tmp17401 = getelementptr inbounds float, float* %tmp17400, i64 1
+ %tmp17402 = getelementptr inbounds float, float* %tmp17401, i64 1
+ %tmp17403 = getelementptr inbounds float, float* %tmp17402, i64 1
+ %tmp17404 = getelementptr inbounds float, float* %tmp17403, i64 1
+ %tmp17405 = getelementptr inbounds float, float* %tmp17404, i64 1
+ %tmp17406 = getelementptr inbounds float, float* %tmp17405, i64 1
+ %tmp17407 = getelementptr inbounds float, float* %tmp17406, i64 1
+ %tmp17408 = getelementptr inbounds float, float* %tmp17407, i64 1
+ %tmp17409 = getelementptr inbounds float, float* %tmp17408, i64 1
+ %tmp17410 = getelementptr inbounds float, float* %tmp17409, i64 1
+ %tmp17411 = getelementptr inbounds float, float* %tmp17410, i64 1
+ %tmp17412 = getelementptr inbounds float, float* %tmp17411, i64 1
+ %tmp17413 = getelementptr inbounds float, float* %tmp17412, i64 1
+ %tmp17414 = getelementptr inbounds float, float* %tmp17413, i64 1
+ %tmp17415 = getelementptr inbounds float, float* %tmp17414, i64 1
+ %tmp17416 = getelementptr inbounds float, float* %tmp17415, i64 1
+ %tmp17417 = getelementptr inbounds float, float* %tmp17416, i64 1
+ %tmp17418 = getelementptr inbounds float, float* %tmp17417, i64 1
+ %tmp17419 = getelementptr inbounds float, float* %tmp17418, i64 1
+ %tmp17420 = getelementptr inbounds float, float* %tmp17419, i64 1
+ %tmp17421 = getelementptr inbounds float, float* %tmp17420, i64 1
+ %tmp17422 = getelementptr inbounds float, float* %tmp17421, i64 1
+ %tmp17423 = getelementptr inbounds float, float* %tmp17422, i64 1
+ %tmp17424 = getelementptr inbounds float, float* %tmp17423, i64 1
+ %tmp17425 = getelementptr inbounds float, float* %tmp17424, i64 1
+ %tmp17426 = getelementptr inbounds float, float* %tmp17425, i64 1
+ %tmp17427 = getelementptr inbounds float, float* %tmp17426, i64 1
+ %tmp17428 = getelementptr inbounds float, float* %tmp17427, i64 1
+ %tmp17429 = getelementptr inbounds float, float* %tmp17428, i64 1
+ %tmp17430 = getelementptr inbounds float, float* %tmp17429, i64 1
+ %tmp17431 = getelementptr inbounds float, float* %tmp17430, i64 1
+ %tmp17432 = getelementptr inbounds float, float* %tmp17431, i64 1
+ %tmp17433 = getelementptr inbounds float, float* %tmp17432, i64 1
+ %tmp17434 = getelementptr inbounds float, float* %tmp17433, i64 1
+ %tmp17435 = getelementptr inbounds float, float* %tmp17434, i64 1
+ %tmp17436 = getelementptr inbounds float, float* %tmp17435, i64 1
+ %tmp17437 = getelementptr inbounds float, float* %tmp17436, i64 1
+ %tmp17438 = getelementptr inbounds float, float* %tmp17437, i64 1
+ %tmp17439 = getelementptr inbounds float, float* %tmp17438, i64 1
+ %tmp17440 = getelementptr inbounds float, float* %tmp17439, i64 1
+ %tmp17441 = getelementptr inbounds float, float* %tmp17440, i64 1
+ %tmp17442 = getelementptr inbounds float, float* %tmp17441, i64 1
+ %tmp17443 = getelementptr inbounds float, float* %tmp17442, i64 1
+ %tmp17444 = getelementptr inbounds float, float* %tmp17443, i64 1
+ %tmp17445 = getelementptr inbounds float, float* %tmp17444, i64 1
+ %tmp17446 = getelementptr inbounds float, float* %tmp17445, i64 1
+ %tmp17447 = getelementptr inbounds float, float* %tmp17446, i64 1
+ %tmp17448 = getelementptr inbounds float, float* %tmp17447, i64 1
+ %tmp17449 = getelementptr inbounds float, float* %tmp17448, i64 1
+ %tmp17450 = getelementptr inbounds float, float* %tmp17449, i64 1
+ %tmp17451 = getelementptr inbounds float, float* %tmp17450, i64 1
+ %tmp17452 = getelementptr inbounds float, float* %tmp17451, i64 1
+ %tmp17453 = getelementptr inbounds float, float* %tmp17452, i64 1
+ %tmp17454 = getelementptr inbounds float, float* %tmp17453, i64 1
+ %tmp17455 = getelementptr inbounds float, float* %tmp17454, i64 1
+ %tmp17456 = getelementptr inbounds float, float* %tmp17455, i64 1
+ %tmp17457 = getelementptr inbounds float, float* %tmp17456, i64 1
+ %tmp17458 = getelementptr inbounds float, float* %tmp17457, i64 1
+ %tmp17459 = getelementptr inbounds float, float* %tmp17458, i64 1
+ %tmp17460 = getelementptr inbounds float, float* %tmp17459, i64 1
+ %tmp17461 = getelementptr inbounds float, float* %tmp17460, i64 1
+ %tmp17462 = getelementptr inbounds float, float* %tmp17461, i64 1
+ %tmp17463 = getelementptr inbounds float, float* %tmp17462, i64 1
+ %tmp17464 = getelementptr inbounds float, float* %tmp17463, i64 1
+ %tmp17465 = getelementptr inbounds float, float* %tmp17464, i64 1
+ %tmp17466 = getelementptr inbounds float, float* %tmp17465, i64 1
+ %tmp17467 = getelementptr inbounds float, float* %tmp17466, i64 1
+ %tmp17468 = getelementptr inbounds float, float* %tmp17467, i64 1
+ %tmp17469 = getelementptr inbounds float, float* %tmp17468, i64 1
+ %tmp17470 = getelementptr inbounds float, float* %tmp17469, i64 1
+ %tmp17471 = getelementptr inbounds float, float* %tmp17470, i64 1
+ %tmp17472 = getelementptr inbounds float, float* %tmp17471, i64 1
+ %tmp17473 = getelementptr inbounds float, float* %tmp17472, i64 1
+ %tmp17474 = getelementptr inbounds float, float* %tmp17473, i64 1
+ %tmp17475 = getelementptr inbounds float, float* %tmp17474, i64 1
+ %tmp17476 = getelementptr inbounds float, float* %tmp17475, i64 1
+ %tmp17477 = getelementptr inbounds float, float* %tmp17476, i64 1
+ %tmp17478 = getelementptr inbounds float, float* %tmp17477, i64 1
+ %tmp17479 = getelementptr inbounds float, float* %tmp17478, i64 1
+ %tmp17480 = getelementptr inbounds float, float* %tmp17479, i64 1
+ %tmp17481 = getelementptr inbounds float, float* %tmp17480, i64 1
+ %tmp17482 = getelementptr inbounds float, float* %tmp17481, i64 1
+ %tmp17483 = getelementptr inbounds float, float* %tmp17482, i64 1
+ %tmp17484 = getelementptr inbounds float, float* %tmp17483, i64 1
+ %tmp17485 = getelementptr inbounds float, float* %tmp17484, i64 1
+ %tmp17486 = getelementptr inbounds float, float* %tmp17485, i64 1
+ %tmp17487 = getelementptr inbounds float, float* %tmp17486, i64 1
+ %tmp17488 = getelementptr inbounds float, float* %tmp17487, i64 1
+ %tmp17489 = getelementptr inbounds float, float* %tmp17488, i64 1
+ %tmp17490 = getelementptr inbounds float, float* %tmp17489, i64 1
+ %tmp17491 = getelementptr inbounds float, float* %tmp17490, i64 1
+ %tmp17492 = getelementptr inbounds float, float* %tmp17491, i64 1
+ %tmp17493 = getelementptr inbounds float, float* %tmp17492, i64 1
+ %tmp17494 = getelementptr inbounds float, float* %tmp17493, i64 1
+ %tmp17495 = getelementptr inbounds float, float* %tmp17494, i64 1
+ %tmp17496 = getelementptr inbounds float, float* %tmp17495, i64 1
+ %tmp17497 = getelementptr inbounds float, float* %tmp17496, i64 1
+ %tmp17498 = getelementptr inbounds float, float* %tmp17497, i64 1
+ %tmp17499 = getelementptr inbounds float, float* %tmp17498, i64 1
+ %tmp17500 = getelementptr inbounds float, float* %tmp17499, i64 1
+ %tmp17501 = getelementptr inbounds float, float* %tmp17500, i64 1
+ %tmp17502 = getelementptr inbounds float, float* %tmp17501, i64 1
+ %tmp17503 = getelementptr inbounds float, float* %tmp17502, i64 1
+ %tmp17504 = getelementptr inbounds float, float* %tmp17503, i64 1
+ %tmp17505 = getelementptr inbounds float, float* %tmp17504, i64 1
+ %tmp17506 = getelementptr inbounds float, float* %tmp17505, i64 1
+ %tmp17507 = getelementptr inbounds float, float* %tmp17506, i64 1
+ %tmp17508 = getelementptr inbounds float, float* %tmp17507, i64 1
+ %tmp17509 = getelementptr inbounds float, float* %tmp17508, i64 1
+ %tmp17510 = getelementptr inbounds float, float* %tmp17509, i64 1
+ %tmp17511 = getelementptr inbounds float, float* %tmp17510, i64 1
+ %tmp17512 = getelementptr inbounds float, float* %tmp17511, i64 1
+ %tmp17513 = getelementptr inbounds float, float* %tmp17512, i64 1
+ %tmp17514 = getelementptr inbounds float, float* %tmp17513, i64 1
+ %tmp17515 = getelementptr inbounds float, float* %tmp17514, i64 1
+ %tmp17516 = getelementptr inbounds float, float* %tmp17515, i64 1
+ %tmp17517 = getelementptr inbounds float, float* %tmp17516, i64 1
+ %tmp17518 = getelementptr inbounds float, float* %tmp17517, i64 1
+ %tmp17519 = getelementptr inbounds float, float* %tmp17518, i64 1
+ %tmp17520 = getelementptr inbounds float, float* %tmp17519, i64 1
+ %tmp17521 = getelementptr inbounds float, float* %tmp17520, i64 1
+ %tmp17522 = getelementptr inbounds float, float* %tmp17521, i64 1
+ %tmp17523 = getelementptr inbounds float, float* %tmp17522, i64 1
+ %tmp17524 = getelementptr inbounds float, float* %tmp17523, i64 1
+ %tmp17525 = getelementptr inbounds float, float* %tmp17524, i64 1
+ %tmp17526 = getelementptr inbounds float, float* %tmp17525, i64 1
+ %tmp17527 = getelementptr inbounds float, float* %tmp17526, i64 1
+ %tmp17528 = getelementptr inbounds float, float* %tmp17527, i64 1
+ %tmp17529 = getelementptr inbounds float, float* %tmp17528, i64 1
+ %tmp17530 = getelementptr inbounds float, float* %tmp17529, i64 1
+ %tmp17531 = getelementptr inbounds float, float* %tmp17530, i64 1
+ %tmp17532 = getelementptr inbounds float, float* %tmp17531, i64 1
+ %tmp17533 = getelementptr inbounds float, float* %tmp17532, i64 1
+ %tmp17534 = getelementptr inbounds float, float* %tmp17533, i64 1
+ %tmp17535 = getelementptr inbounds float, float* %tmp17534, i64 1
+ %tmp17536 = getelementptr inbounds float, float* %tmp17535, i64 1
+ %tmp17537 = getelementptr inbounds float, float* %tmp17536, i64 1
+ %tmp17538 = getelementptr inbounds float, float* %tmp17537, i64 1
+ %tmp17539 = getelementptr inbounds float, float* %tmp17538, i64 1
+ %tmp17540 = getelementptr inbounds float, float* %tmp17539, i64 1
+ %tmp17541 = getelementptr inbounds float, float* %tmp17540, i64 1
+ %tmp17542 = getelementptr inbounds float, float* %tmp17541, i64 1
+ %tmp17543 = getelementptr inbounds float, float* %tmp17542, i64 1
+ %tmp17544 = getelementptr inbounds float, float* %tmp17543, i64 1
+ %tmp17545 = getelementptr inbounds float, float* %tmp17544, i64 1
+ %tmp17546 = getelementptr inbounds float, float* %tmp17545, i64 1
+ %tmp17547 = getelementptr inbounds float, float* %tmp17546, i64 1
+ %tmp17548 = getelementptr inbounds float, float* %tmp17547, i64 1
+ %tmp17549 = getelementptr inbounds float, float* %tmp17548, i64 1
+ %tmp17550 = getelementptr inbounds float, float* %tmp17549, i64 1
+ %tmp17551 = getelementptr inbounds float, float* %tmp17550, i64 1
+ %tmp17552 = getelementptr inbounds float, float* %tmp17551, i64 1
+ %tmp17553 = getelementptr inbounds float, float* %tmp17552, i64 1
+ %tmp17554 = getelementptr inbounds float, float* %tmp17553, i64 1
+ %tmp17555 = getelementptr inbounds float, float* %tmp17554, i64 1
+ %tmp17556 = getelementptr inbounds float, float* %tmp17555, i64 1
+ %tmp17557 = getelementptr inbounds float, float* %tmp17556, i64 1
+ %tmp17558 = getelementptr inbounds float, float* %tmp17557, i64 1
+ %tmp17559 = getelementptr inbounds float, float* %tmp17558, i64 1
+ %tmp17560 = getelementptr inbounds float, float* %tmp17559, i64 1
+ %tmp17561 = getelementptr inbounds float, float* %tmp17560, i64 1
+ %tmp17562 = getelementptr inbounds float, float* %tmp17561, i64 1
+ %tmp17563 = getelementptr inbounds float, float* %tmp17562, i64 1
+ %tmp17564 = getelementptr inbounds float, float* %tmp17563, i64 1
+ %tmp17565 = getelementptr inbounds float, float* %tmp17564, i64 1
+ %tmp17566 = getelementptr inbounds float, float* %tmp17565, i64 1
+ %tmp17567 = getelementptr inbounds float, float* %tmp17566, i64 1
+ %tmp17568 = getelementptr inbounds float, float* %tmp17567, i64 1
+ %tmp17569 = getelementptr inbounds float, float* %tmp17568, i64 1
+ %tmp17570 = getelementptr inbounds float, float* %tmp17569, i64 1
+ %tmp17571 = getelementptr inbounds float, float* %tmp17570, i64 1
+ %tmp17572 = getelementptr inbounds float, float* %tmp17571, i64 1
+ %tmp17573 = getelementptr inbounds float, float* %tmp17572, i64 1
+ %tmp17574 = getelementptr inbounds float, float* %tmp17573, i64 1
+ %tmp17575 = getelementptr inbounds float, float* %tmp17574, i64 1
+ %tmp17576 = getelementptr inbounds float, float* %tmp17575, i64 1
+ %tmp17577 = getelementptr inbounds float, float* %tmp17576, i64 1
+ %tmp17578 = getelementptr inbounds float, float* %tmp17577, i64 1
+ %tmp17579 = getelementptr inbounds float, float* %tmp17578, i64 1
+ %tmp17580 = getelementptr inbounds float, float* %tmp17579, i64 1
+ %tmp17581 = getelementptr inbounds float, float* %tmp17580, i64 1
+ %tmp17582 = getelementptr inbounds float, float* %tmp17581, i64 1
+ %tmp17583 = getelementptr inbounds float, float* %tmp17582, i64 1
+ %tmp17584 = getelementptr inbounds float, float* %tmp17583, i64 1
+ %tmp17585 = getelementptr inbounds float, float* %tmp17584, i64 1
+ %tmp17586 = getelementptr inbounds float, float* %tmp17585, i64 1
+ %tmp17587 = getelementptr inbounds float, float* %tmp17586, i64 1
+ %tmp17588 = getelementptr inbounds float, float* %tmp17587, i64 1
+ %tmp17589 = getelementptr inbounds float, float* %tmp17588, i64 1
+ %tmp17590 = getelementptr inbounds float, float* %tmp17589, i64 1
+ %tmp17591 = getelementptr inbounds float, float* %tmp17590, i64 1
+ %tmp17592 = getelementptr inbounds float, float* %tmp17591, i64 1
+ %tmp17593 = getelementptr inbounds float, float* %tmp17592, i64 1
+ %tmp17594 = getelementptr inbounds float, float* %tmp17593, i64 1
+ %tmp17595 = getelementptr inbounds float, float* %tmp17594, i64 1
+ %tmp17596 = getelementptr inbounds float, float* %tmp17595, i64 1
+ %tmp17597 = getelementptr inbounds float, float* %tmp17596, i64 1
+ %tmp17598 = getelementptr inbounds float, float* %tmp17597, i64 1
+ %tmp17599 = getelementptr inbounds float, float* %tmp17598, i64 1
+ %tmp17600 = getelementptr inbounds float, float* %tmp17599, i64 1
+ %tmp17601 = getelementptr inbounds float, float* %tmp17600, i64 1
+ %tmp17602 = getelementptr inbounds float, float* %tmp17601, i64 1
+ %tmp17603 = getelementptr inbounds float, float* %tmp17602, i64 1
+ %tmp17604 = getelementptr inbounds float, float* %tmp17603, i64 1
+ %tmp17605 = getelementptr inbounds float, float* %tmp17604, i64 1
+ %tmp17606 = getelementptr inbounds float, float* %tmp17605, i64 1
+ %tmp17607 = getelementptr inbounds float, float* %tmp17606, i64 1
+ %tmp17608 = getelementptr inbounds float, float* %tmp17607, i64 1
+ %tmp17609 = getelementptr inbounds float, float* %tmp17608, i64 1
+ %tmp17610 = getelementptr inbounds float, float* %tmp17609, i64 1
+ %tmp17611 = getelementptr inbounds float, float* %tmp17610, i64 1
+ %tmp17612 = getelementptr inbounds float, float* %tmp17611, i64 1
+ %tmp17613 = getelementptr inbounds float, float* %tmp17612, i64 1
+ %tmp17614 = getelementptr inbounds float, float* %tmp17613, i64 1
+ %tmp17615 = getelementptr inbounds float, float* %tmp17614, i64 1
+ %tmp17616 = getelementptr inbounds float, float* %tmp17615, i64 1
+ %tmp17617 = getelementptr inbounds float, float* %tmp17616, i64 1
+ %tmp17618 = getelementptr inbounds float, float* %tmp17617, i64 1
+ %tmp17619 = getelementptr inbounds float, float* %tmp17618, i64 1
+ %tmp17620 = getelementptr inbounds float, float* %tmp17619, i64 1
+ %tmp17621 = getelementptr inbounds float, float* %tmp17620, i64 1
+ %tmp17622 = getelementptr inbounds float, float* %tmp17621, i64 1
+ %tmp17623 = getelementptr inbounds float, float* %tmp17622, i64 1
+ %tmp17624 = getelementptr inbounds float, float* %tmp17623, i64 1
+ %tmp17625 = getelementptr inbounds float, float* %tmp17624, i64 1
+ %tmp17626 = getelementptr inbounds float, float* %tmp17625, i64 1
+ %tmp17627 = getelementptr inbounds float, float* %tmp17626, i64 1
+ %tmp17628 = getelementptr inbounds float, float* %tmp17627, i64 1
+ %tmp17629 = getelementptr inbounds float, float* %tmp17628, i64 1
+ %tmp17630 = getelementptr inbounds float, float* %tmp17629, i64 1
+ %tmp17631 = getelementptr inbounds float, float* %tmp17630, i64 1
+ %tmp17632 = getelementptr inbounds float, float* %tmp17631, i64 1
+ %tmp17633 = getelementptr inbounds float, float* %tmp17632, i64 1
+ %tmp17634 = getelementptr inbounds float, float* %tmp17633, i64 1
+ %tmp17635 = getelementptr inbounds float, float* %tmp17634, i64 1
+ %tmp17636 = getelementptr inbounds float, float* %tmp17635, i64 1
+ %tmp17637 = getelementptr inbounds float, float* %tmp17636, i64 1
+ %tmp17638 = getelementptr inbounds float, float* %tmp17637, i64 1
+ %tmp17639 = getelementptr inbounds float, float* %tmp17638, i64 1
+ %tmp17640 = getelementptr inbounds float, float* %tmp17639, i64 1
+ %tmp17641 = getelementptr inbounds float, float* %tmp17640, i64 1
+ %tmp17642 = getelementptr inbounds float, float* %tmp17641, i64 1
+ %tmp17643 = getelementptr inbounds float, float* %tmp17642, i64 1
+ %tmp17644 = getelementptr inbounds float, float* %tmp17643, i64 1
+ %tmp17645 = getelementptr inbounds float, float* %tmp17644, i64 1
+ %tmp17646 = getelementptr inbounds float, float* %tmp17645, i64 1
+ %tmp17647 = getelementptr inbounds float, float* %tmp17646, i64 1
+ %tmp17648 = getelementptr inbounds float, float* %tmp17647, i64 1
+ %tmp17649 = getelementptr inbounds float, float* %tmp17648, i64 1
+ %tmp17650 = getelementptr inbounds float, float* %tmp17649, i64 1
+ %tmp17651 = getelementptr inbounds float, float* %tmp17650, i64 1
+ %tmp17652 = getelementptr inbounds float, float* %tmp17651, i64 1
+ %tmp17653 = getelementptr inbounds float, float* %tmp17652, i64 1
+ %tmp17654 = getelementptr inbounds float, float* %tmp17653, i64 1
+ %tmp17655 = getelementptr inbounds float, float* %tmp17654, i64 1
+ %tmp17656 = getelementptr inbounds float, float* %tmp17655, i64 1
+ %tmp17657 = getelementptr inbounds float, float* %tmp17656, i64 1
+ %tmp17658 = getelementptr inbounds float, float* %tmp17657, i64 1
+ %tmp17659 = getelementptr inbounds float, float* %tmp17658, i64 1
+ %tmp17660 = getelementptr inbounds float, float* %tmp17659, i64 1
+ %tmp17661 = getelementptr inbounds float, float* %tmp17660, i64 1
+ %tmp17662 = getelementptr inbounds float, float* %tmp17661, i64 1
+ %tmp17663 = getelementptr inbounds float, float* %tmp17662, i64 1
+ %tmp17664 = getelementptr inbounds float, float* %tmp17663, i64 1
+ %tmp17665 = getelementptr inbounds float, float* %tmp17664, i64 1
+ %tmp17666 = getelementptr inbounds float, float* %tmp17665, i64 1
+ %tmp17667 = getelementptr inbounds float, float* %tmp17666, i64 1
+ %tmp17668 = getelementptr inbounds float, float* %tmp17667, i64 1
+ %tmp17669 = getelementptr inbounds float, float* %tmp17668, i64 1
+ %tmp17670 = getelementptr inbounds float, float* %tmp17669, i64 1
+ %tmp17671 = getelementptr inbounds float, float* %tmp17670, i64 1
+ %tmp17672 = getelementptr inbounds float, float* %tmp17671, i64 1
+ %tmp17673 = getelementptr inbounds float, float* %tmp17672, i64 1
+ %tmp17674 = getelementptr inbounds float, float* %tmp17673, i64 1
+ %tmp17675 = getelementptr inbounds float, float* %tmp17674, i64 1
+ %tmp17676 = getelementptr inbounds float, float* %tmp17675, i64 1
+ %tmp17677 = getelementptr inbounds float, float* %tmp17676, i64 1
+ %tmp17678 = getelementptr inbounds float, float* %tmp17677, i64 1
+ %tmp17679 = getelementptr inbounds float, float* %tmp17678, i64 1
+ %tmp17680 = getelementptr inbounds float, float* %tmp17679, i64 1
+ %tmp17681 = getelementptr inbounds float, float* %tmp17680, i64 1
+ %tmp17682 = getelementptr inbounds float, float* %tmp17681, i64 1
+ %tmp17683 = getelementptr inbounds float, float* %tmp17682, i64 1
+ %tmp17684 = getelementptr inbounds float, float* %tmp17683, i64 1
+ %tmp17685 = getelementptr inbounds float, float* %tmp17684, i64 1
+ %tmp17686 = getelementptr inbounds float, float* %tmp17685, i64 1
+ %tmp17687 = getelementptr inbounds float, float* %tmp17686, i64 1
+ %tmp17688 = getelementptr inbounds float, float* %tmp17687, i64 1
+ %tmp17689 = getelementptr inbounds float, float* %tmp17688, i64 1
+ %tmp17690 = getelementptr inbounds float, float* %tmp17689, i64 1
+ %tmp17691 = getelementptr inbounds float, float* %tmp17690, i64 1
+ %tmp17692 = getelementptr inbounds float, float* %tmp17691, i64 1
+ %tmp17693 = getelementptr inbounds float, float* %tmp17692, i64 1
+ %tmp17694 = getelementptr inbounds float, float* %tmp17693, i64 1
+ %tmp17695 = getelementptr inbounds float, float* %tmp17694, i64 1
+ %tmp17696 = getelementptr inbounds float, float* %tmp17695, i64 1
+ %tmp17697 = getelementptr inbounds float, float* %tmp17696, i64 1
+ %tmp17698 = getelementptr inbounds float, float* %tmp17697, i64 1
+ %tmp17699 = getelementptr inbounds float, float* %tmp17698, i64 1
+ %tmp17700 = getelementptr inbounds float, float* %tmp17699, i64 1
+ %tmp17701 = getelementptr inbounds float, float* %tmp17700, i64 1
+ %tmp17702 = getelementptr inbounds float, float* %tmp17701, i64 1
+ %tmp17703 = getelementptr inbounds float, float* %tmp17702, i64 1
+ %tmp17704 = getelementptr inbounds float, float* %tmp17703, i64 1
+ %tmp17705 = getelementptr inbounds float, float* %tmp17704, i64 1
+ %tmp17706 = getelementptr inbounds float, float* %tmp17705, i64 1
+ %tmp17707 = getelementptr inbounds float, float* %tmp17706, i64 1
+ %tmp17708 = getelementptr inbounds float, float* %tmp17707, i64 1
+ %tmp17709 = getelementptr inbounds float, float* %tmp17708, i64 1
+ %tmp17710 = getelementptr inbounds float, float* %tmp17709, i64 1
+ %tmp17711 = getelementptr inbounds float, float* %tmp17710, i64 1
+ %tmp17712 = getelementptr inbounds float, float* %tmp17711, i64 1
+ %tmp17713 = getelementptr inbounds float, float* %tmp17712, i64 1
+ %tmp17714 = getelementptr inbounds float, float* %tmp17713, i64 1
+ %tmp17715 = getelementptr inbounds float, float* %tmp17714, i64 1
+ %tmp17716 = getelementptr inbounds float, float* %tmp17715, i64 1
+ %tmp17717 = getelementptr inbounds float, float* %tmp17716, i64 1
+ %tmp17718 = getelementptr inbounds float, float* %tmp17717, i64 1
+ %tmp17719 = getelementptr inbounds float, float* %tmp17718, i64 1
+ %tmp17720 = getelementptr inbounds float, float* %tmp17719, i64 1
+ %tmp17721 = getelementptr inbounds float, float* %tmp17720, i64 1
+ %tmp17722 = getelementptr inbounds float, float* %tmp17721, i64 1
+ %tmp17723 = getelementptr inbounds float, float* %tmp17722, i64 1
+ %tmp17724 = getelementptr inbounds float, float* %tmp17723, i64 1
+ %tmp17725 = getelementptr inbounds float, float* %tmp17724, i64 1
+ %tmp17726 = getelementptr inbounds float, float* %tmp17725, i64 1
+ %tmp17727 = getelementptr inbounds float, float* %tmp17726, i64 1
+ %tmp17728 = getelementptr inbounds float, float* %tmp17727, i64 1
+ %tmp17729 = getelementptr inbounds float, float* %tmp17728, i64 1
+ %tmp17730 = getelementptr inbounds float, float* %tmp17729, i64 1
+ %tmp17731 = getelementptr inbounds float, float* %tmp17730, i64 1
+ %tmp17732 = getelementptr inbounds float, float* %tmp17731, i64 1
+ %tmp17733 = getelementptr inbounds float, float* %tmp17732, i64 1
+ %tmp17734 = getelementptr inbounds float, float* %tmp17733, i64 1
+ %tmp17735 = getelementptr inbounds float, float* %tmp17734, i64 1
+ %tmp17736 = getelementptr inbounds float, float* %tmp17735, i64 1
+ %tmp17737 = getelementptr inbounds float, float* %tmp17736, i64 1
+ %tmp17738 = getelementptr inbounds float, float* %tmp17737, i64 1
+ %tmp17739 = getelementptr inbounds float, float* %tmp17738, i64 1
+ %tmp17740 = getelementptr inbounds float, float* %tmp17739, i64 1
+ %tmp17741 = getelementptr inbounds float, float* %tmp17740, i64 1
+ %tmp17742 = getelementptr inbounds float, float* %tmp17741, i64 1
+ %tmp17743 = getelementptr inbounds float, float* %tmp17742, i64 1
+ %tmp17744 = getelementptr inbounds float, float* %tmp17743, i64 1
+ %tmp17745 = getelementptr inbounds float, float* %tmp17744, i64 1
+ %tmp17746 = getelementptr inbounds float, float* %tmp17745, i64 1
+ %tmp17747 = getelementptr inbounds float, float* %tmp17746, i64 1
+ %tmp17748 = getelementptr inbounds float, float* %tmp17747, i64 1
+ %tmp17749 = getelementptr inbounds float, float* %tmp17748, i64 1
+ %tmp17750 = getelementptr inbounds float, float* %tmp17749, i64 1
+ %tmp17751 = getelementptr inbounds float, float* %tmp17750, i64 1
+ %tmp17752 = getelementptr inbounds float, float* %tmp17751, i64 1
+ %tmp17753 = getelementptr inbounds float, float* %tmp17752, i64 1
+ %tmp17754 = getelementptr inbounds float, float* %tmp17753, i64 1
+ %tmp17755 = getelementptr inbounds float, float* %tmp17754, i64 1
+ %tmp17756 = getelementptr inbounds float, float* %tmp17755, i64 1
+ %tmp17757 = getelementptr inbounds float, float* %tmp17756, i64 1
+ %tmp17758 = getelementptr inbounds float, float* %tmp17757, i64 1
+ %tmp17759 = getelementptr inbounds float, float* %tmp17758, i64 1
+ %tmp17760 = getelementptr inbounds float, float* %tmp17759, i64 1
+ %tmp17761 = getelementptr inbounds float, float* %tmp17760, i64 1
+ %tmp17762 = getelementptr inbounds float, float* %tmp17761, i64 1
+ %tmp17763 = getelementptr inbounds float, float* %tmp17762, i64 1
+ %tmp17764 = getelementptr inbounds float, float* %tmp17763, i64 1
+ %tmp17765 = getelementptr inbounds float, float* %tmp17764, i64 1
+ %tmp17766 = getelementptr inbounds float, float* %tmp17765, i64 1
+ %tmp17767 = getelementptr inbounds float, float* %tmp17766, i64 1
+ %tmp17768 = getelementptr inbounds float, float* %tmp17767, i64 1
+ %tmp17769 = getelementptr inbounds float, float* %tmp17768, i64 1
+ %tmp17770 = getelementptr inbounds float, float* %tmp17769, i64 1
+ %tmp17771 = getelementptr inbounds float, float* %tmp17770, i64 1
+ %tmp17772 = getelementptr inbounds float, float* %tmp17771, i64 1
+ %tmp17773 = getelementptr inbounds float, float* %tmp17772, i64 1
+ %tmp17774 = getelementptr inbounds float, float* %tmp17773, i64 1
+ %tmp17775 = getelementptr inbounds float, float* %tmp17774, i64 1
+ %tmp17776 = getelementptr inbounds float, float* %tmp17775, i64 1
+ %tmp17777 = getelementptr inbounds float, float* %tmp17776, i64 1
+ %tmp17778 = getelementptr inbounds float, float* %tmp17777, i64 1
+ %tmp17779 = getelementptr inbounds float, float* %tmp17778, i64 1
+ %tmp17780 = getelementptr inbounds float, float* %tmp17779, i64 1
+ %tmp17781 = getelementptr inbounds float, float* %tmp17780, i64 1
+ %tmp17782 = getelementptr inbounds float, float* %tmp17781, i64 1
+ %tmp17783 = getelementptr inbounds float, float* %tmp17782, i64 1
+ %tmp17784 = getelementptr inbounds float, float* %tmp17783, i64 1
+ %tmp17785 = getelementptr inbounds float, float* %tmp17784, i64 1
+ %tmp17786 = getelementptr inbounds float, float* %tmp17785, i64 1
+ %tmp17787 = getelementptr inbounds float, float* %tmp17786, i64 1
+ %tmp17788 = getelementptr inbounds float, float* %tmp17787, i64 1
+ %tmp17789 = getelementptr inbounds float, float* %tmp17788, i64 1
+ %tmp17790 = getelementptr inbounds float, float* %tmp17789, i64 1
+ %tmp17791 = getelementptr inbounds float, float* %tmp17790, i64 1
+ %tmp17792 = getelementptr inbounds float, float* %tmp17791, i64 1
+ %tmp17793 = getelementptr inbounds float, float* %tmp17792, i64 1
+ %tmp17794 = getelementptr inbounds float, float* %tmp17793, i64 1
+ %tmp17795 = getelementptr inbounds float, float* %tmp17794, i64 1
+ %tmp17796 = getelementptr inbounds float, float* %tmp17795, i64 1
+ %tmp17797 = getelementptr inbounds float, float* %tmp17796, i64 1
+ %tmp17798 = getelementptr inbounds float, float* %tmp17797, i64 1
+ %tmp17799 = getelementptr inbounds float, float* %tmp17798, i64 1
+ %tmp17800 = getelementptr inbounds float, float* %tmp17799, i64 1
+ %tmp17801 = getelementptr inbounds float, float* %tmp17800, i64 1
+ %tmp17802 = getelementptr inbounds float, float* %tmp17801, i64 1
+ %tmp17803 = getelementptr inbounds float, float* %tmp17802, i64 1
+ %tmp17804 = getelementptr inbounds float, float* %tmp17803, i64 1
+ %tmp17805 = getelementptr inbounds float, float* %tmp17804, i64 1
+ %tmp17806 = getelementptr inbounds float, float* %tmp17805, i64 1
+ %tmp17807 = getelementptr inbounds float, float* %tmp17806, i64 1
+ %tmp17808 = getelementptr inbounds float, float* %tmp17807, i64 1
+ %tmp17809 = getelementptr inbounds float, float* %tmp17808, i64 1
+ %tmp17810 = getelementptr inbounds float, float* %tmp17809, i64 1
+ %tmp17811 = getelementptr inbounds float, float* %tmp17810, i64 1
+ %tmp17812 = getelementptr inbounds float, float* %tmp17811, i64 1
+ %tmp17813 = getelementptr inbounds float, float* %tmp17812, i64 1
+ %tmp17814 = getelementptr inbounds float, float* %tmp17813, i64 1
+ %tmp17815 = getelementptr inbounds float, float* %tmp17814, i64 1
+ %tmp17816 = getelementptr inbounds float, float* %tmp17815, i64 1
+ %tmp17817 = getelementptr inbounds float, float* %tmp17816, i64 1
+ %tmp17818 = getelementptr inbounds float, float* %tmp17817, i64 1
+ %tmp17819 = getelementptr inbounds float, float* %tmp17818, i64 1
+ %tmp17820 = getelementptr inbounds float, float* %tmp17819, i64 1
+ %tmp17821 = getelementptr inbounds float, float* %tmp17820, i64 1
+ %tmp17822 = getelementptr inbounds float, float* %tmp17821, i64 1
+ %tmp17823 = getelementptr inbounds float, float* %tmp17822, i64 1
+ %tmp17824 = getelementptr inbounds float, float* %tmp17823, i64 1
+ %tmp17825 = getelementptr inbounds float, float* %tmp17824, i64 1
+ %tmp17826 = getelementptr inbounds float, float* %tmp17825, i64 1
+ %tmp17827 = getelementptr inbounds float, float* %tmp17826, i64 1
+ %tmp17828 = getelementptr inbounds float, float* %tmp17827, i64 1
+ %tmp17829 = getelementptr inbounds float, float* %tmp17828, i64 1
+ %tmp17830 = getelementptr inbounds float, float* %tmp17829, i64 1
+ %tmp17831 = getelementptr inbounds float, float* %tmp17830, i64 1
+ %tmp17832 = getelementptr inbounds float, float* %tmp17831, i64 1
+ %tmp17833 = getelementptr inbounds float, float* %tmp17832, i64 1
+ %tmp17834 = getelementptr inbounds float, float* %tmp17833, i64 1
+ %tmp17835 = getelementptr inbounds float, float* %tmp17834, i64 1
+ %tmp17836 = getelementptr inbounds float, float* %tmp17835, i64 1
+ %tmp17837 = getelementptr inbounds float, float* %tmp17836, i64 1
+ %tmp17838 = getelementptr inbounds float, float* %tmp17837, i64 1
+ %tmp17839 = getelementptr inbounds float, float* %tmp17838, i64 1
+ %tmp17840 = getelementptr inbounds float, float* %tmp17839, i64 1
+ %tmp17841 = getelementptr inbounds float, float* %tmp17840, i64 1
+ %tmp17842 = getelementptr inbounds float, float* %tmp17841, i64 1
+ %tmp17843 = getelementptr inbounds float, float* %tmp17842, i64 1
+ %tmp17844 = getelementptr inbounds float, float* %tmp17843, i64 1
+ %tmp17845 = getelementptr inbounds float, float* %tmp17844, i64 1
+ %tmp17846 = getelementptr inbounds float, float* %tmp17845, i64 1
+ %tmp17847 = getelementptr inbounds float, float* %tmp17846, i64 1
+ %tmp17848 = getelementptr inbounds float, float* %tmp17847, i64 1
+ %tmp17849 = getelementptr inbounds float, float* %tmp17848, i64 1
+ %tmp17850 = getelementptr inbounds float, float* %tmp17849, i64 1
+ %tmp17851 = getelementptr inbounds float, float* %tmp17850, i64 1
+ %tmp17852 = getelementptr inbounds float, float* %tmp17851, i64 1
+ %tmp17853 = getelementptr inbounds float, float* %tmp17852, i64 1
+ %tmp17854 = getelementptr inbounds float, float* %tmp17853, i64 1
+ %tmp17855 = getelementptr inbounds float, float* %tmp17854, i64 1
+ %tmp17856 = getelementptr inbounds float, float* %tmp17855, i64 1
+ %tmp17857 = getelementptr inbounds float, float* %tmp17856, i64 1
+ %tmp17858 = getelementptr inbounds float, float* %tmp17857, i64 1
+ %tmp17859 = getelementptr inbounds float, float* %tmp17858, i64 1
+ %tmp17860 = getelementptr inbounds float, float* %tmp17859, i64 1
+ %tmp17861 = getelementptr inbounds float, float* %tmp17860, i64 1
+ %tmp17862 = getelementptr inbounds float, float* %tmp17861, i64 1
+ %tmp17863 = getelementptr inbounds float, float* %tmp17862, i64 1
+ %tmp17864 = getelementptr inbounds float, float* %tmp17863, i64 1
+ %tmp17865 = getelementptr inbounds float, float* %tmp17864, i64 1
+ %tmp17866 = getelementptr inbounds float, float* %tmp17865, i64 1
+ %tmp17867 = getelementptr inbounds float, float* %tmp17866, i64 1
+ %tmp17868 = getelementptr inbounds float, float* %tmp17867, i64 1
+ %tmp17869 = getelementptr inbounds float, float* %tmp17868, i64 1
+ %tmp17870 = getelementptr inbounds float, float* %tmp17869, i64 1
+ %tmp17871 = getelementptr inbounds float, float* %tmp17870, i64 1
+ %tmp17872 = getelementptr inbounds float, float* %tmp17871, i64 1
+ %tmp17873 = getelementptr inbounds float, float* %tmp17872, i64 1
+ %tmp17874 = getelementptr inbounds float, float* %tmp17873, i64 1
+ %tmp17875 = getelementptr inbounds float, float* %tmp17874, i64 1
+ %tmp17876 = getelementptr inbounds float, float* %tmp17875, i64 1
+ %tmp17877 = getelementptr inbounds float, float* %tmp17876, i64 1
+ %tmp17878 = getelementptr inbounds float, float* %tmp17877, i64 1
+ %tmp17879 = getelementptr inbounds float, float* %tmp17878, i64 1
+ %tmp17880 = getelementptr inbounds float, float* %tmp17879, i64 1
+ %tmp17881 = getelementptr inbounds float, float* %tmp17880, i64 1
+ %tmp17882 = getelementptr inbounds float, float* %tmp17881, i64 1
+ %tmp17883 = getelementptr inbounds float, float* %tmp17882, i64 1
+ %tmp17884 = getelementptr inbounds float, float* %tmp17883, i64 1
+ %tmp17885 = getelementptr inbounds float, float* %tmp17884, i64 1
+ %tmp17886 = getelementptr inbounds float, float* %tmp17885, i64 1
+ %tmp17887 = getelementptr inbounds float, float* %tmp17886, i64 1
+ %tmp17888 = getelementptr inbounds float, float* %tmp17887, i64 1
+ %tmp17889 = getelementptr inbounds float, float* %tmp17888, i64 1
+ %tmp17890 = getelementptr inbounds float, float* %tmp17889, i64 1
+ %tmp17891 = getelementptr inbounds float, float* %tmp17890, i64 1
+ %tmp17892 = getelementptr inbounds float, float* %tmp17891, i64 1
+ %tmp17893 = getelementptr inbounds float, float* %tmp17892, i64 1
+ %tmp17894 = getelementptr inbounds float, float* %tmp17893, i64 1
+ %tmp17895 = getelementptr inbounds float, float* %tmp17894, i64 1
+ %tmp17896 = getelementptr inbounds float, float* %tmp17895, i64 1
+ %tmp17897 = getelementptr inbounds float, float* %tmp17896, i64 1
+ %tmp17898 = getelementptr inbounds float, float* %tmp17897, i64 1
+ %tmp17899 = getelementptr inbounds float, float* %tmp17898, i64 1
+ %tmp17900 = getelementptr inbounds float, float* %tmp17899, i64 1
+ %tmp17901 = getelementptr inbounds float, float* %tmp17900, i64 1
+ %tmp17902 = getelementptr inbounds float, float* %tmp17901, i64 1
+ %tmp17903 = getelementptr inbounds float, float* %tmp17902, i64 1
+ %tmp17904 = getelementptr inbounds float, float* %tmp17903, i64 1
+ %tmp17905 = getelementptr inbounds float, float* %tmp17904, i64 1
+ %tmp17906 = getelementptr inbounds float, float* %tmp17905, i64 1
+ %tmp17907 = getelementptr inbounds float, float* %tmp17906, i64 1
+ %tmp17908 = getelementptr inbounds float, float* %tmp17907, i64 1
+ %tmp17909 = getelementptr inbounds float, float* %tmp17908, i64 1
+ %tmp17910 = getelementptr inbounds float, float* %tmp17909, i64 1
+ %tmp17911 = getelementptr inbounds float, float* %tmp17910, i64 1
+ %tmp17912 = getelementptr inbounds float, float* %tmp17911, i64 1
+ %tmp17913 = getelementptr inbounds float, float* %tmp17912, i64 1
+ %tmp17914 = getelementptr inbounds float, float* %tmp17913, i64 1
+ %tmp17915 = getelementptr inbounds float, float* %tmp17914, i64 1
+ %tmp17916 = getelementptr inbounds float, float* %tmp17915, i64 1
+ %tmp17917 = getelementptr inbounds float, float* %tmp17916, i64 1
+ %tmp17918 = getelementptr inbounds float, float* %tmp17917, i64 1
+ %tmp17919 = getelementptr inbounds float, float* %tmp17918, i64 1
+ %tmp17920 = getelementptr inbounds float, float* %tmp17919, i64 1
+ %tmp17921 = getelementptr inbounds float, float* %tmp17920, i64 1
+ %tmp17922 = getelementptr inbounds float, float* %tmp17921, i64 1
+ %tmp17923 = getelementptr inbounds float, float* %tmp17922, i64 1
+ %tmp17924 = getelementptr inbounds float, float* %tmp17923, i64 1
+ %tmp17925 = getelementptr inbounds float, float* %tmp17924, i64 1
+ %tmp17926 = getelementptr inbounds float, float* %tmp17925, i64 1
+ %tmp17927 = getelementptr inbounds float, float* %tmp17926, i64 1
+ %tmp17928 = getelementptr inbounds float, float* %tmp17927, i64 1
+ %tmp17929 = getelementptr inbounds float, float* %tmp17928, i64 1
+ %tmp17930 = getelementptr inbounds float, float* %tmp17929, i64 1
+ %tmp17931 = getelementptr inbounds float, float* %tmp17930, i64 1
+ %tmp17932 = getelementptr inbounds float, float* %tmp17931, i64 1
+ %tmp17933 = getelementptr inbounds float, float* %tmp17932, i64 1
+ %tmp17934 = getelementptr inbounds float, float* %tmp17933, i64 1
+ %tmp17935 = getelementptr inbounds float, float* %tmp17934, i64 1
+ %tmp17936 = getelementptr inbounds float, float* %tmp17935, i64 1
+ %tmp17937 = getelementptr inbounds float, float* %tmp17936, i64 1
+ %tmp17938 = getelementptr inbounds float, float* %tmp17937, i64 1
+ %tmp17939 = getelementptr inbounds float, float* %tmp17938, i64 1
+ %tmp17940 = getelementptr inbounds float, float* %tmp17939, i64 1
+ %tmp17941 = getelementptr inbounds float, float* %tmp17940, i64 1
+ %tmp17942 = getelementptr inbounds float, float* %tmp17941, i64 1
+ %tmp17943 = getelementptr inbounds float, float* %tmp17942, i64 1
+ %tmp17944 = getelementptr inbounds float, float* %tmp17943, i64 1
+ %tmp17945 = getelementptr inbounds float, float* %tmp17944, i64 1
+ %tmp17946 = getelementptr inbounds float, float* %tmp17945, i64 1
+ %tmp17947 = getelementptr inbounds float, float* %tmp17946, i64 1
+ %tmp17948 = getelementptr inbounds float, float* %tmp17947, i64 1
+ %tmp17949 = getelementptr inbounds float, float* %tmp17948, i64 1
+ %tmp17950 = getelementptr inbounds float, float* %tmp17949, i64 1
+ %tmp17951 = getelementptr inbounds float, float* %tmp17950, i64 1
+ %tmp17952 = getelementptr inbounds float, float* %tmp17951, i64 1
+ %tmp17953 = getelementptr inbounds float, float* %tmp17952, i64 1
+ %tmp17954 = getelementptr inbounds float, float* %tmp17953, i64 1
+ %tmp17955 = getelementptr inbounds float, float* %tmp17954, i64 1
+ %tmp17956 = getelementptr inbounds float, float* %tmp17955, i64 1
+ %tmp17957 = getelementptr inbounds float, float* %tmp17956, i64 1
+ %tmp17958 = getelementptr inbounds float, float* %tmp17957, i64 1
+ %tmp17959 = getelementptr inbounds float, float* %tmp17958, i64 1
+ %tmp17960 = getelementptr inbounds float, float* %tmp17959, i64 1
+ %tmp17961 = getelementptr inbounds float, float* %tmp17960, i64 1
+ %tmp17962 = getelementptr inbounds float, float* %tmp17961, i64 1
+ %tmp17963 = getelementptr inbounds float, float* %tmp17962, i64 1
+ %tmp17964 = getelementptr inbounds float, float* %tmp17963, i64 1
+ %tmp17965 = getelementptr inbounds float, float* %tmp17964, i64 1
+ %tmp17966 = getelementptr inbounds float, float* %tmp17965, i64 1
+ %tmp17967 = getelementptr inbounds float, float* %tmp17966, i64 1
+ %tmp17968 = getelementptr inbounds float, float* %tmp17967, i64 1
+ %tmp17969 = getelementptr inbounds float, float* %tmp17968, i64 1
+ %tmp17970 = getelementptr inbounds float, float* %tmp17969, i64 1
+ %tmp17971 = getelementptr inbounds float, float* %tmp17970, i64 1
+ %tmp17972 = getelementptr inbounds float, float* %tmp17971, i64 1
+ %tmp17973 = getelementptr inbounds float, float* %tmp17972, i64 1
+ %tmp17974 = getelementptr inbounds float, float* %tmp17973, i64 1
+ %tmp17975 = getelementptr inbounds float, float* %tmp17974, i64 1
+ %tmp17976 = getelementptr inbounds float, float* %tmp17975, i64 1
+ %tmp17977 = getelementptr inbounds float, float* %tmp17976, i64 1
+ %tmp17978 = getelementptr inbounds float, float* %tmp17977, i64 1
+ %tmp17979 = getelementptr inbounds float, float* %tmp17978, i64 1
+ %tmp17980 = getelementptr inbounds float, float* %tmp17979, i64 1
+ %tmp17981 = getelementptr inbounds float, float* %tmp17980, i64 1
+ %tmp17982 = getelementptr inbounds float, float* %tmp17981, i64 1
+ %tmp17983 = getelementptr inbounds float, float* %tmp17982, i64 1
+ %tmp17984 = getelementptr inbounds float, float* %tmp17983, i64 1
+ %tmp17985 = getelementptr inbounds float, float* %tmp17984, i64 1
+ %tmp17986 = getelementptr inbounds float, float* %tmp17985, i64 1
+ %tmp17987 = getelementptr inbounds float, float* %tmp17986, i64 1
+ %tmp17988 = getelementptr inbounds float, float* %tmp17987, i64 1
+ %tmp17989 = getelementptr inbounds float, float* %tmp17988, i64 1
+ %tmp17990 = getelementptr inbounds float, float* %tmp17989, i64 1
+ %tmp17991 = getelementptr inbounds float, float* %tmp17990, i64 1
+ %tmp17992 = getelementptr inbounds float, float* %tmp17991, i64 1
+ %tmp17993 = getelementptr inbounds float, float* %tmp17992, i64 1
+ %tmp17994 = getelementptr inbounds float, float* %tmp17993, i64 1
+ %tmp17995 = getelementptr inbounds float, float* %tmp17994, i64 1
+ %tmp17996 = getelementptr inbounds float, float* %tmp17995, i64 1
+ %tmp17997 = getelementptr inbounds float, float* %tmp17996, i64 1
+ %tmp17998 = getelementptr inbounds float, float* %tmp17997, i64 1
+ %tmp17999 = getelementptr inbounds float, float* %tmp17998, i64 1
+ %tmp18000 = getelementptr inbounds float, float* %tmp17999, i64 1
+ %tmp18001 = getelementptr inbounds float, float* %tmp18000, i64 1
+ %tmp18002 = getelementptr inbounds float, float* %tmp18001, i64 1
+ %tmp18003 = getelementptr inbounds float, float* %tmp18002, i64 1
+ %tmp18004 = getelementptr inbounds float, float* %tmp18003, i64 1
+ %tmp18005 = getelementptr inbounds float, float* %tmp18004, i64 1
+ %tmp18006 = getelementptr inbounds float, float* %tmp18005, i64 1
+ %tmp18007 = getelementptr inbounds float, float* %tmp18006, i64 1
+ %tmp18008 = getelementptr inbounds float, float* %tmp18007, i64 1
+ %tmp18009 = getelementptr inbounds float, float* %tmp18008, i64 1
+ %tmp18010 = getelementptr inbounds float, float* %tmp18009, i64 1
+ %tmp18011 = getelementptr inbounds float, float* %tmp18010, i64 1
+ %tmp18012 = getelementptr inbounds float, float* %tmp18011, i64 1
+ %tmp18013 = getelementptr inbounds float, float* %tmp18012, i64 1
+ %tmp18014 = getelementptr inbounds float, float* %tmp18013, i64 1
+ %tmp18015 = getelementptr inbounds float, float* %tmp18014, i64 1
+ %tmp18016 = getelementptr inbounds float, float* %tmp18015, i64 1
+ %tmp18017 = getelementptr inbounds float, float* %tmp18016, i64 1
+ %tmp18018 = getelementptr inbounds float, float* %tmp18017, i64 1
+ %tmp18019 = getelementptr inbounds float, float* %tmp18018, i64 1
+ %tmp18020 = getelementptr inbounds float, float* %tmp18019, i64 1
+ %tmp18021 = getelementptr inbounds float, float* %tmp18020, i64 1
+ %tmp18022 = getelementptr inbounds float, float* %tmp18021, i64 1
+ %tmp18023 = getelementptr inbounds float, float* %tmp18022, i64 1
+ %tmp18024 = getelementptr inbounds float, float* %tmp18023, i64 1
+ %tmp18025 = getelementptr inbounds float, float* %tmp18024, i64 1
+ %tmp18026 = getelementptr inbounds float, float* %tmp18025, i64 1
+ %tmp18027 = getelementptr inbounds float, float* %tmp18026, i64 1
+ %tmp18028 = getelementptr inbounds float, float* %tmp18027, i64 1
+ %tmp18029 = getelementptr inbounds float, float* %tmp18028, i64 1
+ %tmp18030 = getelementptr inbounds float, float* %tmp18029, i64 1
+ %tmp18031 = getelementptr inbounds float, float* %tmp18030, i64 1
+ %tmp18032 = getelementptr inbounds float, float* %tmp18031, i64 1
+ %tmp18033 = getelementptr inbounds float, float* %tmp18032, i64 1
+ %tmp18034 = getelementptr inbounds float, float* %tmp18033, i64 1
+ %tmp18035 = getelementptr inbounds float, float* %tmp18034, i64 1
+ %tmp18036 = getelementptr inbounds float, float* %tmp18035, i64 1
+ %tmp18037 = getelementptr inbounds float, float* %tmp18036, i64 1
+ %tmp18038 = getelementptr inbounds float, float* %tmp18037, i64 1
+ %tmp18039 = getelementptr inbounds float, float* %tmp18038, i64 1
+ %tmp18040 = getelementptr inbounds float, float* %tmp18039, i64 1
+ %tmp18041 = getelementptr inbounds float, float* %tmp18040, i64 1
+ %tmp18042 = getelementptr inbounds float, float* %tmp18041, i64 1
+ %tmp18043 = getelementptr inbounds float, float* %tmp18042, i64 1
+ %tmp18044 = getelementptr inbounds float, float* %tmp18043, i64 1
+ %tmp18045 = getelementptr inbounds float, float* %tmp18044, i64 1
+ %tmp18046 = getelementptr inbounds float, float* %tmp18045, i64 1
+ %tmp18047 = getelementptr inbounds float, float* %tmp18046, i64 1
+ %tmp18048 = getelementptr inbounds float, float* %tmp18047, i64 1
+ %tmp18049 = getelementptr inbounds float, float* %tmp18048, i64 1
+ %tmp18050 = getelementptr inbounds float, float* %tmp18049, i64 1
+ %tmp18051 = getelementptr inbounds float, float* %tmp18050, i64 1
+ %tmp18052 = getelementptr inbounds float, float* %tmp18051, i64 1
+ %tmp18053 = getelementptr inbounds float, float* %tmp18052, i64 1
+ %tmp18054 = getelementptr inbounds float, float* %tmp18053, i64 1
+ %tmp18055 = getelementptr inbounds float, float* %tmp18054, i64 1
+ %tmp18056 = getelementptr inbounds float, float* %tmp18055, i64 1
+ %tmp18057 = getelementptr inbounds float, float* %tmp18056, i64 1
+ %tmp18058 = getelementptr inbounds float, float* %tmp18057, i64 1
+ %tmp18059 = getelementptr inbounds float, float* %tmp18058, i64 1
+ %tmp18060 = getelementptr inbounds float, float* %tmp18059, i64 1
+ %tmp18061 = getelementptr inbounds float, float* %tmp18060, i64 1
+ %tmp18062 = getelementptr inbounds float, float* %tmp18061, i64 1
+ %tmp18063 = getelementptr inbounds float, float* %tmp18062, i64 1
+ %tmp18064 = getelementptr inbounds float, float* %tmp18063, i64 1
+ %tmp18065 = getelementptr inbounds float, float* %tmp18064, i64 1
+ %tmp18066 = getelementptr inbounds float, float* %tmp18065, i64 1
+ %tmp18067 = getelementptr inbounds float, float* %tmp18066, i64 1
+ %tmp18068 = getelementptr inbounds float, float* %tmp18067, i64 1
+ %tmp18069 = getelementptr inbounds float, float* %tmp18068, i64 1
+ %tmp18070 = getelementptr inbounds float, float* %tmp18069, i64 1
+ %tmp18071 = getelementptr inbounds float, float* %tmp18070, i64 1
+ %tmp18072 = getelementptr inbounds float, float* %tmp18071, i64 1
+ %tmp18073 = getelementptr inbounds float, float* %tmp18072, i64 1
+ %tmp18074 = getelementptr inbounds float, float* %tmp18073, i64 1
+ %tmp18075 = getelementptr inbounds float, float* %tmp18074, i64 1
+ %tmp18076 = getelementptr inbounds float, float* %tmp18075, i64 1
+ %tmp18077 = getelementptr inbounds float, float* %tmp18076, i64 1
+ %tmp18078 = getelementptr inbounds float, float* %tmp18077, i64 1
+ %tmp18079 = getelementptr inbounds float, float* %tmp18078, i64 1
+ %tmp18080 = getelementptr inbounds float, float* %tmp18079, i64 1
+ %tmp18081 = getelementptr inbounds float, float* %tmp18080, i64 1
+ %tmp18082 = getelementptr inbounds float, float* %tmp18081, i64 1
+ %tmp18083 = getelementptr inbounds float, float* %tmp18082, i64 1
+ %tmp18084 = getelementptr inbounds float, float* %tmp18083, i64 1
+ %tmp18085 = getelementptr inbounds float, float* %tmp18084, i64 1
+ %tmp18086 = getelementptr inbounds float, float* %tmp18085, i64 1
+ %tmp18087 = getelementptr inbounds float, float* %tmp18086, i64 1
+ %tmp18088 = getelementptr inbounds float, float* %tmp18087, i64 1
+ %tmp18089 = getelementptr inbounds float, float* %tmp18088, i64 1
+ %tmp18090 = getelementptr inbounds float, float* %tmp18089, i64 1
+ %tmp18091 = getelementptr inbounds float, float* %tmp18090, i64 1
+ %tmp18092 = getelementptr inbounds float, float* %tmp18091, i64 1
+ %tmp18093 = getelementptr inbounds float, float* %tmp18092, i64 1
+ %tmp18094 = getelementptr inbounds float, float* %tmp18093, i64 1
+ %tmp18095 = getelementptr inbounds float, float* %tmp18094, i64 1
+ %tmp18096 = getelementptr inbounds float, float* %tmp18095, i64 1
+ %tmp18097 = getelementptr inbounds float, float* %tmp18096, i64 1
+ %tmp18098 = getelementptr inbounds float, float* %tmp18097, i64 1
+ %tmp18099 = getelementptr inbounds float, float* %tmp18098, i64 1
+ %tmp18100 = getelementptr inbounds float, float* %tmp18099, i64 1
+ %tmp18101 = getelementptr inbounds float, float* %tmp18100, i64 1
+ %tmp18102 = getelementptr inbounds float, float* %tmp18101, i64 1
+ %tmp18103 = getelementptr inbounds float, float* %tmp18102, i64 1
+ %tmp18104 = getelementptr inbounds float, float* %tmp18103, i64 1
+ %tmp18105 = getelementptr inbounds float, float* %tmp18104, i64 1
+ %tmp18106 = getelementptr inbounds float, float* %tmp18105, i64 1
+ %tmp18107 = getelementptr inbounds float, float* %tmp18106, i64 1
+ %tmp18108 = getelementptr inbounds float, float* %tmp18107, i64 1
+ %tmp18109 = getelementptr inbounds float, float* %tmp18108, i64 1
+ %tmp18110 = getelementptr inbounds float, float* %tmp18109, i64 1
+ %tmp18111 = getelementptr inbounds float, float* %tmp18110, i64 1
+ %tmp18112 = getelementptr inbounds float, float* %tmp18111, i64 1
+ %tmp18113 = getelementptr inbounds float, float* %tmp18112, i64 1
+ %tmp18114 = getelementptr inbounds float, float* %tmp18113, i64 1
+ %tmp18115 = getelementptr inbounds float, float* %tmp18114, i64 1
+ %tmp18116 = getelementptr inbounds float, float* %tmp18115, i64 1
+ %tmp18117 = getelementptr inbounds float, float* %tmp18116, i64 1
+ %tmp18118 = getelementptr inbounds float, float* %tmp18117, i64 1
+ %tmp18119 = getelementptr inbounds float, float* %tmp18118, i64 1
+ %tmp18120 = getelementptr inbounds float, float* %tmp18119, i64 1
+ %tmp18121 = getelementptr inbounds float, float* %tmp18120, i64 1
+ %tmp18122 = getelementptr inbounds float, float* %tmp18121, i64 1
+ %tmp18123 = getelementptr inbounds float, float* %tmp18122, i64 1
+ %tmp18124 = getelementptr inbounds float, float* %tmp18123, i64 1
+ %tmp18125 = getelementptr inbounds float, float* %tmp18124, i64 1
+ %tmp18126 = getelementptr inbounds float, float* %tmp18125, i64 1
+ %tmp18127 = getelementptr inbounds float, float* %tmp18126, i64 1
+ %tmp18128 = getelementptr inbounds float, float* %tmp18127, i64 1
+ %tmp18129 = getelementptr inbounds float, float* %tmp18128, i64 1
+ %tmp18130 = getelementptr inbounds float, float* %tmp18129, i64 1
+ %tmp18131 = getelementptr inbounds float, float* %tmp18130, i64 1
+ %tmp18132 = getelementptr inbounds float, float* %tmp18131, i64 1
+ %tmp18133 = getelementptr inbounds float, float* %tmp18132, i64 1
+ %tmp18134 = getelementptr inbounds float, float* %tmp18133, i64 1
+ %tmp18135 = getelementptr inbounds float, float* %tmp18134, i64 1
+ %tmp18136 = getelementptr inbounds float, float* %tmp18135, i64 1
+ %tmp18137 = getelementptr inbounds float, float* %tmp18136, i64 1
+ %tmp18138 = getelementptr inbounds float, float* %tmp18137, i64 1
+ %tmp18139 = getelementptr inbounds float, float* %tmp18138, i64 1
+ %tmp18140 = getelementptr inbounds float, float* %tmp18139, i64 1
+ %tmp18141 = getelementptr inbounds float, float* %tmp18140, i64 1
+ %tmp18142 = getelementptr inbounds float, float* %tmp18141, i64 1
+ %tmp18143 = getelementptr inbounds float, float* %tmp18142, i64 1
+ %tmp18144 = getelementptr inbounds float, float* %tmp18143, i64 1
+ %tmp18145 = getelementptr inbounds float, float* %tmp18144, i64 1
+ %tmp18146 = getelementptr inbounds float, float* %tmp18145, i64 1
+ %tmp18147 = getelementptr inbounds float, float* %tmp18146, i64 1
+ %tmp18148 = getelementptr inbounds float, float* %tmp18147, i64 1
+ %tmp18149 = getelementptr inbounds float, float* %tmp18148, i64 1
+ %tmp18150 = getelementptr inbounds float, float* %tmp18149, i64 1
+ %tmp18151 = getelementptr inbounds float, float* %tmp18150, i64 1
+ %tmp18152 = getelementptr inbounds float, float* %tmp18151, i64 1
+ %tmp18153 = getelementptr inbounds float, float* %tmp18152, i64 1
+ %tmp18154 = getelementptr inbounds float, float* %tmp18153, i64 1
+ %tmp18155 = getelementptr inbounds float, float* %tmp18154, i64 1
+ %tmp18156 = getelementptr inbounds float, float* %tmp18155, i64 1
+ %tmp18157 = getelementptr inbounds float, float* %tmp18156, i64 1
+ %tmp18158 = getelementptr inbounds float, float* %tmp18157, i64 1
+ %tmp18159 = getelementptr inbounds float, float* %tmp18158, i64 1
+ %tmp18160 = getelementptr inbounds float, float* %tmp18159, i64 1
+ %tmp18161 = getelementptr inbounds float, float* %tmp18160, i64 1
+ %tmp18162 = getelementptr inbounds float, float* %tmp18161, i64 1
+ %tmp18163 = getelementptr inbounds float, float* %tmp18162, i64 1
+ %tmp18164 = getelementptr inbounds float, float* %tmp18163, i64 1
+ %tmp18165 = getelementptr inbounds float, float* %tmp18164, i64 1
+ %tmp18166 = getelementptr inbounds float, float* %tmp18165, i64 1
+ %tmp18167 = getelementptr inbounds float, float* %tmp18166, i64 1
+ %tmp18168 = getelementptr inbounds float, float* %tmp18167, i64 1
+ %tmp18169 = getelementptr inbounds float, float* %tmp18168, i64 1
+ %tmp18170 = getelementptr inbounds float, float* %tmp18169, i64 1
+ %tmp18171 = getelementptr inbounds float, float* %tmp18170, i64 1
+ %tmp18172 = getelementptr inbounds float, float* %tmp18171, i64 1
+ %tmp18173 = getelementptr inbounds float, float* %tmp18172, i64 1
+ %tmp18174 = getelementptr inbounds float, float* %tmp18173, i64 1
+ %tmp18175 = getelementptr inbounds float, float* %tmp18174, i64 1
+ %tmp18176 = getelementptr inbounds float, float* %tmp18175, i64 1
+ %tmp18177 = getelementptr inbounds float, float* %tmp18176, i64 1
+ %tmp18178 = getelementptr inbounds float, float* %tmp18177, i64 1
+ %tmp18179 = getelementptr inbounds float, float* %tmp18178, i64 1
+ %tmp18180 = getelementptr inbounds float, float* %tmp18179, i64 1
+ %tmp18181 = getelementptr inbounds float, float* %tmp18180, i64 1
+ %tmp18182 = getelementptr inbounds float, float* %tmp18181, i64 1
+ %tmp18183 = getelementptr inbounds float, float* %tmp18182, i64 1
+ %tmp18184 = getelementptr inbounds float, float* %tmp18183, i64 1
+ %tmp18185 = getelementptr inbounds float, float* %tmp18184, i64 1
+ %tmp18186 = getelementptr inbounds float, float* %tmp18185, i64 1
+ %tmp18187 = getelementptr inbounds float, float* %tmp18186, i64 1
+ %tmp18188 = getelementptr inbounds float, float* %tmp18187, i64 1
+ %tmp18189 = getelementptr inbounds float, float* %tmp18188, i64 1
+ %tmp18190 = getelementptr inbounds float, float* %tmp18189, i64 1
+ %tmp18191 = getelementptr inbounds float, float* %tmp18190, i64 1
+ %tmp18192 = getelementptr inbounds float, float* %tmp18191, i64 1
+ %tmp18193 = getelementptr inbounds float, float* %tmp18192, i64 1
+ %tmp18194 = getelementptr inbounds float, float* %tmp18193, i64 1
+ %tmp18195 = getelementptr inbounds float, float* %tmp18194, i64 1
+ %tmp18196 = getelementptr inbounds float, float* %tmp18195, i64 1
+ %tmp18197 = getelementptr inbounds float, float* %tmp18196, i64 1
+ %tmp18198 = getelementptr inbounds float, float* %tmp18197, i64 1
+ %tmp18199 = getelementptr inbounds float, float* %tmp18198, i64 1
+ %tmp18200 = getelementptr inbounds float, float* %tmp18199, i64 1
+ %tmp18201 = getelementptr inbounds float, float* %tmp18200, i64 1
+ %tmp18202 = getelementptr inbounds float, float* %tmp18201, i64 1
+ %tmp18203 = getelementptr inbounds float, float* %tmp18202, i64 1
+ %tmp18204 = getelementptr inbounds float, float* %tmp18203, i64 1
+ %tmp18205 = getelementptr inbounds float, float* %tmp18204, i64 1
+ %tmp18206 = getelementptr inbounds float, float* %tmp18205, i64 1
+ %tmp18207 = getelementptr inbounds float, float* %tmp18206, i64 1
+ %tmp18208 = getelementptr inbounds float, float* %tmp18207, i64 1
+ %tmp18209 = getelementptr inbounds float, float* %tmp18208, i64 1
+ %tmp18210 = getelementptr inbounds float, float* %tmp18209, i64 1
+ %tmp18211 = getelementptr inbounds float, float* %tmp18210, i64 1
+ %tmp18212 = getelementptr inbounds float, float* %tmp18211, i64 1
+ %tmp18213 = getelementptr inbounds float, float* %tmp18212, i64 1
+ %tmp18214 = getelementptr inbounds float, float* %tmp18213, i64 1
+ %tmp18215 = getelementptr inbounds float, float* %tmp18214, i64 1
+ %tmp18216 = getelementptr inbounds float, float* %tmp18215, i64 1
+ %tmp18217 = getelementptr inbounds float, float* %tmp18216, i64 1
+ %tmp18218 = getelementptr inbounds float, float* %tmp18217, i64 1
+ %tmp18219 = getelementptr inbounds float, float* %tmp18218, i64 1
+ %tmp18220 = getelementptr inbounds float, float* %tmp18219, i64 1
+ %tmp18221 = getelementptr inbounds float, float* %tmp18220, i64 1
+ %tmp18222 = getelementptr inbounds float, float* %tmp18221, i64 1
+ %tmp18223 = getelementptr inbounds float, float* %tmp18222, i64 1
+ %tmp18224 = getelementptr inbounds float, float* %tmp18223, i64 1
+ %tmp18225 = getelementptr inbounds float, float* %tmp18224, i64 1
+ %tmp18226 = getelementptr inbounds float, float* %tmp18225, i64 1
+ %tmp18227 = getelementptr inbounds float, float* %tmp18226, i64 1
+ %tmp18228 = getelementptr inbounds float, float* %tmp18227, i64 1
+ %tmp18229 = getelementptr inbounds float, float* %tmp18228, i64 1
+ %tmp18230 = getelementptr inbounds float, float* %tmp18229, i64 1
+ %tmp18231 = getelementptr inbounds float, float* %tmp18230, i64 1
+ %tmp18232 = getelementptr inbounds float, float* %tmp18231, i64 1
+ %tmp18233 = getelementptr inbounds float, float* %tmp18232, i64 1
+ %tmp18234 = getelementptr inbounds float, float* %tmp18233, i64 1
+ %tmp18235 = getelementptr inbounds float, float* %tmp18234, i64 1
+ %tmp18236 = getelementptr inbounds float, float* %tmp18235, i64 1
+ %tmp18237 = getelementptr inbounds float, float* %tmp18236, i64 1
+ %tmp18238 = getelementptr inbounds float, float* %tmp18237, i64 1
+ %tmp18239 = getelementptr inbounds float, float* %tmp18238, i64 1
+ %tmp18240 = getelementptr inbounds float, float* %tmp18239, i64 1
+ %tmp18241 = getelementptr inbounds float, float* %tmp18240, i64 1
+ %tmp18242 = getelementptr inbounds float, float* %tmp18241, i64 1
+ %tmp18243 = getelementptr inbounds float, float* %tmp18242, i64 1
+ %tmp18244 = getelementptr inbounds float, float* %tmp18243, i64 1
+ %tmp18245 = getelementptr inbounds float, float* %tmp18244, i64 1
+ %tmp18246 = getelementptr inbounds float, float* %tmp18245, i64 1
+ %tmp18247 = getelementptr inbounds float, float* %tmp18246, i64 1
+ %tmp18248 = getelementptr inbounds float, float* %tmp18247, i64 1
+ %tmp18249 = getelementptr inbounds float, float* %tmp18248, i64 1
+ %tmp18250 = getelementptr inbounds float, float* %tmp18249, i64 1
+ %tmp18251 = getelementptr inbounds float, float* %tmp18250, i64 1
+ %tmp18252 = getelementptr inbounds float, float* %tmp18251, i64 1
+ %tmp18253 = getelementptr inbounds float, float* %tmp18252, i64 1
+ %tmp18254 = getelementptr inbounds float, float* %tmp18253, i64 1
+ %tmp18255 = getelementptr inbounds float, float* %tmp18254, i64 1
+ %tmp18256 = getelementptr inbounds float, float* %tmp18255, i64 1
+ %tmp18257 = getelementptr inbounds float, float* %tmp18256, i64 1
+ %tmp18258 = getelementptr inbounds float, float* %tmp18257, i64 1
+ %tmp18259 = getelementptr inbounds float, float* %tmp18258, i64 1
+ %tmp18260 = getelementptr inbounds float, float* %tmp18259, i64 1
+ %tmp18261 = getelementptr inbounds float, float* %tmp18260, i64 1
+ %tmp18262 = getelementptr inbounds float, float* %tmp18261, i64 1
+ %tmp18263 = getelementptr inbounds float, float* %tmp18262, i64 1
+ %tmp18264 = getelementptr inbounds float, float* %tmp18263, i64 1
+ %tmp18265 = getelementptr inbounds float, float* %tmp18264, i64 1
+ %tmp18266 = getelementptr inbounds float, float* %tmp18265, i64 1
+ %tmp18267 = getelementptr inbounds float, float* %tmp18266, i64 1
+ %tmp18268 = getelementptr inbounds float, float* %tmp18267, i64 1
+ %tmp18269 = getelementptr inbounds float, float* %tmp18268, i64 1
+ %tmp18270 = getelementptr inbounds float, float* %tmp18269, i64 1
+ %tmp18271 = getelementptr inbounds float, float* %tmp18270, i64 1
+ %tmp18272 = getelementptr inbounds float, float* %tmp18271, i64 1
+ %tmp18273 = getelementptr inbounds float, float* %tmp18272, i64 1
+ %tmp18274 = getelementptr inbounds float, float* %tmp18273, i64 1
+ %tmp18275 = getelementptr inbounds float, float* %tmp18274, i64 1
+ %tmp18276 = getelementptr inbounds float, float* %tmp18275, i64 1
+ %tmp18277 = getelementptr inbounds float, float* %tmp18276, i64 1
+ %tmp18278 = getelementptr inbounds float, float* %tmp18277, i64 1
+ %tmp18279 = getelementptr inbounds float, float* %tmp18278, i64 1
+ %tmp18280 = getelementptr inbounds float, float* %tmp18279, i64 1
+ %tmp18281 = getelementptr inbounds float, float* %tmp18280, i64 1
+ %tmp18282 = getelementptr inbounds float, float* %tmp18281, i64 1
+ %tmp18283 = getelementptr inbounds float, float* %tmp18282, i64 1
+ %tmp18284 = getelementptr inbounds float, float* %tmp18283, i64 1
+ %tmp18285 = getelementptr inbounds float, float* %tmp18284, i64 1
+ %tmp18286 = getelementptr inbounds float, float* %tmp18285, i64 1
+ %tmp18287 = getelementptr inbounds float, float* %tmp18286, i64 1
+ %tmp18288 = getelementptr inbounds float, float* %tmp18287, i64 1
+ %tmp18289 = getelementptr inbounds float, float* %tmp18288, i64 1
+ %tmp18290 = getelementptr inbounds float, float* %tmp18289, i64 1
+ %tmp18291 = getelementptr inbounds float, float* %tmp18290, i64 1
+ %tmp18292 = getelementptr inbounds float, float* %tmp18291, i64 1
+ %tmp18293 = getelementptr inbounds float, float* %tmp18292, i64 1
+ %tmp18294 = getelementptr inbounds float, float* %tmp18293, i64 1
+ %tmp18295 = getelementptr inbounds float, float* %tmp18294, i64 1
+ %tmp18296 = getelementptr inbounds float, float* %tmp18295, i64 1
+ %tmp18297 = getelementptr inbounds float, float* %tmp18296, i64 1
+ %tmp18298 = getelementptr inbounds float, float* %tmp18297, i64 1
+ %tmp18299 = getelementptr inbounds float, float* %tmp18298, i64 1
+ %tmp18300 = getelementptr inbounds float, float* %tmp18299, i64 1
+ %tmp18301 = getelementptr inbounds float, float* %tmp18300, i64 1
+ %tmp18302 = getelementptr inbounds float, float* %tmp18301, i64 1
+ %tmp18303 = getelementptr inbounds float, float* %tmp18302, i64 1
+ %tmp18304 = getelementptr inbounds float, float* %tmp18303, i64 1
+ %tmp18305 = getelementptr inbounds float, float* %tmp18304, i64 1
+ %tmp18306 = getelementptr inbounds float, float* %tmp18305, i64 1
+ %tmp18307 = getelementptr inbounds float, float* %tmp18306, i64 1
+ %tmp18308 = getelementptr inbounds float, float* %tmp18307, i64 1
+ %tmp18309 = getelementptr inbounds float, float* %tmp18308, i64 1
+ %tmp18310 = getelementptr inbounds float, float* %tmp18309, i64 1
+ %tmp18311 = getelementptr inbounds float, float* %tmp18310, i64 1
+ %tmp18312 = getelementptr inbounds float, float* %tmp18311, i64 1
+ %tmp18313 = getelementptr inbounds float, float* %tmp18312, i64 1
+ %tmp18314 = getelementptr inbounds float, float* %tmp18313, i64 1
+ %tmp18315 = getelementptr inbounds float, float* %tmp18314, i64 1
+ %tmp18316 = getelementptr inbounds float, float* %tmp18315, i64 1
+ %tmp18317 = getelementptr inbounds float, float* %tmp18316, i64 1
+ %tmp18318 = getelementptr inbounds float, float* %tmp18317, i64 1
+ %tmp18319 = getelementptr inbounds float, float* %tmp18318, i64 1
+ %tmp18320 = getelementptr inbounds float, float* %tmp18319, i64 1
+ %tmp18321 = getelementptr inbounds float, float* %tmp18320, i64 1
+ %tmp18322 = getelementptr inbounds float, float* %tmp18321, i64 1
+ %tmp18323 = getelementptr inbounds float, float* %tmp18322, i64 1
+ %tmp18324 = getelementptr inbounds float, float* %tmp18323, i64 1
+ %tmp18325 = getelementptr inbounds float, float* %tmp18324, i64 1
+ %tmp18326 = getelementptr inbounds float, float* %tmp18325, i64 1
+ %tmp18327 = getelementptr inbounds float, float* %tmp18326, i64 1
+ %tmp18328 = getelementptr inbounds float, float* %tmp18327, i64 1
+ %tmp18329 = getelementptr inbounds float, float* %tmp18328, i64 1
+ %tmp18330 = getelementptr inbounds float, float* %tmp18329, i64 1
+ %tmp18331 = getelementptr inbounds float, float* %tmp18330, i64 1
+ %tmp18332 = getelementptr inbounds float, float* %tmp18331, i64 1
+ %tmp18333 = getelementptr inbounds float, float* %tmp18332, i64 1
+ %tmp18334 = getelementptr inbounds float, float* %tmp18333, i64 1
+ %tmp18335 = getelementptr inbounds float, float* %tmp18334, i64 1
+ %tmp18336 = getelementptr inbounds float, float* %tmp18335, i64 1
+ %tmp18337 = getelementptr inbounds float, float* %tmp18336, i64 1
+ %tmp18338 = getelementptr inbounds float, float* %tmp18337, i64 1
+ %tmp18339 = getelementptr inbounds float, float* %tmp18338, i64 1
+ %tmp18340 = getelementptr inbounds float, float* %tmp18339, i64 1
+ %tmp18341 = getelementptr inbounds float, float* %tmp18340, i64 1
+ %tmp18342 = getelementptr inbounds float, float* %tmp18341, i64 1
+ %tmp18343 = getelementptr inbounds float, float* %tmp18342, i64 1
+ %tmp18344 = getelementptr inbounds float, float* %tmp18343, i64 1
+ %tmp18345 = getelementptr inbounds float, float* %tmp18344, i64 1
+ %tmp18346 = getelementptr inbounds float, float* %tmp18345, i64 1
+ %tmp18347 = getelementptr inbounds float, float* %tmp18346, i64 1
+ %tmp18348 = getelementptr inbounds float, float* %tmp18347, i64 1
+ %tmp18349 = getelementptr inbounds float, float* %tmp18348, i64 1
+ %tmp18350 = getelementptr inbounds float, float* %tmp18349, i64 1
+ %tmp18351 = getelementptr inbounds float, float* %tmp18350, i64 1
+ %tmp18352 = getelementptr inbounds float, float* %tmp18351, i64 1
+ %tmp18353 = getelementptr inbounds float, float* %tmp18352, i64 1
+ %tmp18354 = getelementptr inbounds float, float* %tmp18353, i64 1
+ %tmp18355 = getelementptr inbounds float, float* %tmp18354, i64 1
+ %tmp18356 = getelementptr inbounds float, float* %tmp18355, i64 1
+ %tmp18357 = getelementptr inbounds float, float* %tmp18356, i64 1
+ %tmp18358 = getelementptr inbounds float, float* %tmp18357, i64 1
+ %tmp18359 = getelementptr inbounds float, float* %tmp18358, i64 1
+ %tmp18360 = getelementptr inbounds float, float* %tmp18359, i64 1
+ %tmp18361 = getelementptr inbounds float, float* %tmp18360, i64 1
+ %tmp18362 = getelementptr inbounds float, float* %tmp18361, i64 1
+ %tmp18363 = getelementptr inbounds float, float* %tmp18362, i64 1
+ %tmp18364 = getelementptr inbounds float, float* %tmp18363, i64 1
+ %tmp18365 = getelementptr inbounds float, float* %tmp18364, i64 1
+ %tmp18366 = getelementptr inbounds float, float* %tmp18365, i64 1
+ %tmp18367 = getelementptr inbounds float, float* %tmp18366, i64 1
+ %tmp18368 = getelementptr inbounds float, float* %tmp18367, i64 1
+ %tmp18369 = getelementptr inbounds float, float* %tmp18368, i64 1
+ %tmp18370 = getelementptr inbounds float, float* %tmp18369, i64 1
+ %tmp18371 = getelementptr inbounds float, float* %tmp18370, i64 1
+ %tmp18372 = getelementptr inbounds float, float* %tmp18371, i64 1
+ %tmp18373 = getelementptr inbounds float, float* %tmp18372, i64 1
+ %tmp18374 = getelementptr inbounds float, float* %tmp18373, i64 1
+ %tmp18375 = getelementptr inbounds float, float* %tmp18374, i64 1
+ %tmp18376 = getelementptr inbounds float, float* %tmp18375, i64 1
+ %tmp18377 = getelementptr inbounds float, float* %tmp18376, i64 1
+ %tmp18378 = getelementptr inbounds float, float* %tmp18377, i64 1
+ %tmp18379 = getelementptr inbounds float, float* %tmp18378, i64 1
+ %tmp18380 = getelementptr inbounds float, float* %tmp18379, i64 1
+ %tmp18381 = getelementptr inbounds float, float* %tmp18380, i64 1
+ %tmp18382 = getelementptr inbounds float, float* %tmp18381, i64 1
+ %tmp18383 = getelementptr inbounds float, float* %tmp18382, i64 1
+ %tmp18384 = getelementptr inbounds float, float* %tmp18383, i64 1
+ %tmp18385 = getelementptr inbounds float, float* %tmp18384, i64 1
+ %tmp18386 = getelementptr inbounds float, float* %tmp18385, i64 1
+ %tmp18387 = getelementptr inbounds float, float* %tmp18386, i64 1
+ %tmp18388 = getelementptr inbounds float, float* %tmp18387, i64 1
+ %tmp18389 = getelementptr inbounds float, float* %tmp18388, i64 1
+ %tmp18390 = getelementptr inbounds float, float* %tmp18389, i64 1
+ %tmp18391 = getelementptr inbounds float, float* %tmp18390, i64 1
+ %tmp18392 = getelementptr inbounds float, float* %tmp18391, i64 1
+ %tmp18393 = getelementptr inbounds float, float* %tmp18392, i64 1
+ %tmp18394 = getelementptr inbounds float, float* %tmp18393, i64 1
+ %tmp18395 = getelementptr inbounds float, float* %tmp18394, i64 1
+ %tmp18396 = getelementptr inbounds float, float* %tmp18395, i64 1
+ %tmp18397 = getelementptr inbounds float, float* %tmp18396, i64 1
+ %tmp18398 = getelementptr inbounds float, float* %tmp18397, i64 1
+ %tmp18399 = getelementptr inbounds float, float* %tmp18398, i64 1
+ %tmp18400 = getelementptr inbounds float, float* %tmp18399, i64 1
+ %tmp18401 = getelementptr inbounds float, float* %tmp18400, i64 1
+ %tmp18402 = getelementptr inbounds float, float* %tmp18401, i64 1
+ %tmp18403 = getelementptr inbounds float, float* %tmp18402, i64 1
+ %tmp18404 = getelementptr inbounds float, float* %tmp18403, i64 1
+ %tmp18405 = getelementptr inbounds float, float* %tmp18404, i64 1
+ %tmp18406 = getelementptr inbounds float, float* %tmp18405, i64 1
+ %tmp18407 = getelementptr inbounds float, float* %tmp18406, i64 1
+ %tmp18408 = getelementptr inbounds float, float* %tmp18407, i64 1
+ %tmp18409 = getelementptr inbounds float, float* %tmp18408, i64 1
+ %tmp18410 = getelementptr inbounds float, float* %tmp18409, i64 1
+ %tmp18411 = getelementptr inbounds float, float* %tmp18410, i64 1
+ %tmp18412 = getelementptr inbounds float, float* %tmp18411, i64 1
+ %tmp18413 = getelementptr inbounds float, float* %tmp18412, i64 1
+ %tmp18414 = getelementptr inbounds float, float* %tmp18413, i64 1
+ %tmp18415 = getelementptr inbounds float, float* %tmp18414, i64 1
+ %tmp18416 = getelementptr inbounds float, float* %tmp18415, i64 1
+ %tmp18417 = getelementptr inbounds float, float* %tmp18416, i64 1
+ %tmp18418 = getelementptr inbounds float, float* %tmp18417, i64 1
+ %tmp18419 = getelementptr inbounds float, float* %tmp18418, i64 1
+ %tmp18420 = getelementptr inbounds float, float* %tmp18419, i64 1
+ %tmp18421 = getelementptr inbounds float, float* %tmp18420, i64 1
+ %tmp18422 = getelementptr inbounds float, float* %tmp18421, i64 1
+ %tmp18423 = getelementptr inbounds float, float* %tmp18422, i64 1
+ %tmp18424 = getelementptr inbounds float, float* %tmp18423, i64 1
+ %tmp18425 = getelementptr inbounds float, float* %tmp18424, i64 1
+ %tmp18426 = getelementptr inbounds float, float* %tmp18425, i64 1
+ %tmp18427 = getelementptr inbounds float, float* %tmp18426, i64 1
+ %tmp18428 = getelementptr inbounds float, float* %tmp18427, i64 1
+ %tmp18429 = getelementptr inbounds float, float* %tmp18428, i64 1
+ %tmp18430 = getelementptr inbounds float, float* %tmp18429, i64 1
+ %tmp18431 = getelementptr inbounds float, float* %tmp18430, i64 1
+ %tmp18432 = getelementptr inbounds float, float* %tmp18431, i64 1
+ %tmp18433 = getelementptr inbounds float, float* %tmp18432, i64 1
+ %tmp18434 = getelementptr inbounds float, float* %tmp18433, i64 1
+ %tmp18435 = getelementptr inbounds float, float* %tmp18434, i64 1
+ %tmp18436 = getelementptr inbounds float, float* %tmp18435, i64 1
+ %tmp18437 = getelementptr inbounds float, float* %tmp18436, i64 1
+ %tmp18438 = getelementptr inbounds float, float* %tmp18437, i64 1
+ %tmp18439 = getelementptr inbounds float, float* %tmp18438, i64 1
+ %tmp18440 = getelementptr inbounds float, float* %tmp18439, i64 1
+ %tmp18441 = getelementptr inbounds float, float* %tmp18440, i64 1
+ %tmp18442 = getelementptr inbounds float, float* %tmp18441, i64 1
+ %tmp18443 = getelementptr inbounds float, float* %tmp18442, i64 1
+ %tmp18444 = getelementptr inbounds float, float* %tmp18443, i64 1
+ %tmp18445 = getelementptr inbounds float, float* %tmp18444, i64 1
+ %tmp18446 = getelementptr inbounds float, float* %tmp18445, i64 1
+ %tmp18447 = getelementptr inbounds float, float* %tmp18446, i64 1
+ %tmp18448 = getelementptr inbounds float, float* %tmp18447, i64 1
+ %tmp18449 = getelementptr inbounds float, float* %tmp18448, i64 1
+ %tmp18450 = getelementptr inbounds float, float* %tmp18449, i64 1
+ %tmp18451 = getelementptr inbounds float, float* %tmp18450, i64 1
+ %tmp18452 = getelementptr inbounds float, float* %tmp18451, i64 1
+ %tmp18453 = getelementptr inbounds float, float* %tmp18452, i64 1
+ %tmp18454 = getelementptr inbounds float, float* %tmp18453, i64 1
+ %tmp18455 = getelementptr inbounds float, float* %tmp18454, i64 1
+ %tmp18456 = getelementptr inbounds float, float* %tmp18455, i64 1
+ %tmp18457 = getelementptr inbounds float, float* %tmp18456, i64 1
+ %tmp18458 = getelementptr inbounds float, float* %tmp18457, i64 1
+ %tmp18459 = getelementptr inbounds float, float* %tmp18458, i64 1
+ %tmp18460 = getelementptr inbounds float, float* %tmp18459, i64 1
+ %tmp18461 = getelementptr inbounds float, float* %tmp18460, i64 1
+ %tmp18462 = getelementptr inbounds float, float* %tmp18461, i64 1
+ %tmp18463 = getelementptr inbounds float, float* %tmp18462, i64 1
+ %tmp18464 = getelementptr inbounds float, float* %tmp18463, i64 1
+ %tmp18465 = getelementptr inbounds float, float* %tmp18464, i64 1
+ %tmp18466 = getelementptr inbounds float, float* %tmp18465, i64 1
+ %tmp18467 = getelementptr inbounds float, float* %tmp18466, i64 1
+ %tmp18468 = getelementptr inbounds float, float* %tmp18467, i64 1
+ %tmp18469 = getelementptr inbounds float, float* %tmp18468, i64 1
+ %tmp18470 = getelementptr inbounds float, float* %tmp18469, i64 1
+ %tmp18471 = getelementptr inbounds float, float* %tmp18470, i64 1
+ %tmp18472 = getelementptr inbounds float, float* %tmp18471, i64 1
+ %tmp18473 = getelementptr inbounds float, float* %tmp18472, i64 1
+ %tmp18474 = getelementptr inbounds float, float* %tmp18473, i64 1
+ %tmp18475 = getelementptr inbounds float, float* %tmp18474, i64 1
+ %tmp18476 = getelementptr inbounds float, float* %tmp18475, i64 1
+ %tmp18477 = getelementptr inbounds float, float* %tmp18476, i64 1
+ %tmp18478 = getelementptr inbounds float, float* %tmp18477, i64 1
+ %tmp18479 = getelementptr inbounds float, float* %tmp18478, i64 1
+ %tmp18480 = getelementptr inbounds float, float* %tmp18479, i64 1
+ %tmp18481 = getelementptr inbounds float, float* %tmp18480, i64 1
+ %tmp18482 = getelementptr inbounds float, float* %tmp18481, i64 1
+ %tmp18483 = getelementptr inbounds float, float* %tmp18482, i64 1
+ %tmp18484 = getelementptr inbounds float, float* %tmp18483, i64 1
+ %tmp18485 = getelementptr inbounds float, float* %tmp18484, i64 1
+ %tmp18486 = getelementptr inbounds float, float* %tmp18485, i64 1
+ %tmp18487 = getelementptr inbounds float, float* %tmp18486, i64 1
+ %tmp18488 = getelementptr inbounds float, float* %tmp18487, i64 1
+ %tmp18489 = getelementptr inbounds float, float* %tmp18488, i64 1
+ %tmp18490 = getelementptr inbounds float, float* %tmp18489, i64 1
+ %tmp18491 = getelementptr inbounds float, float* %tmp18490, i64 1
+ %tmp18492 = getelementptr inbounds float, float* %tmp18491, i64 1
+ %tmp18493 = getelementptr inbounds float, float* %tmp18492, i64 1
+ %tmp18494 = getelementptr inbounds float, float* %tmp18493, i64 1
+ %tmp18495 = getelementptr inbounds float, float* %tmp18494, i64 1
+ %tmp18496 = getelementptr inbounds float, float* %tmp18495, i64 1
+ %tmp18497 = getelementptr inbounds float, float* %tmp18496, i64 1
+ %tmp18498 = getelementptr inbounds float, float* %tmp18497, i64 1
+ %tmp18499 = getelementptr inbounds float, float* %tmp18498, i64 1
+ %tmp18500 = getelementptr inbounds float, float* %tmp18499, i64 1
+ %tmp18501 = getelementptr inbounds float, float* %tmp18500, i64 1
+ %tmp18502 = getelementptr inbounds float, float* %tmp18501, i64 1
+ %tmp18503 = getelementptr inbounds float, float* %tmp18502, i64 1
+ %tmp18504 = getelementptr inbounds float, float* %tmp18503, i64 1
+ %tmp18505 = getelementptr inbounds float, float* %tmp18504, i64 1
+ %tmp18506 = getelementptr inbounds float, float* %tmp18505, i64 1
+ %tmp18507 = getelementptr inbounds float, float* %tmp18506, i64 1
+ %tmp18508 = getelementptr inbounds float, float* %tmp18507, i64 1
+ %tmp18509 = getelementptr inbounds float, float* %tmp18508, i64 1
+ %tmp18510 = getelementptr inbounds float, float* %tmp18509, i64 1
+ %tmp18511 = getelementptr inbounds float, float* %tmp18510, i64 1
+ %tmp18512 = getelementptr inbounds float, float* %tmp18511, i64 1
+ %tmp18513 = getelementptr inbounds float, float* %tmp18512, i64 1
+ %tmp18514 = getelementptr inbounds float, float* %tmp18513, i64 1
+ %tmp18515 = getelementptr inbounds float, float* %tmp18514, i64 1
+ %tmp18516 = getelementptr inbounds float, float* %tmp18515, i64 1
+ %tmp18517 = getelementptr inbounds float, float* %tmp18516, i64 1
+ %tmp18518 = getelementptr inbounds float, float* %tmp18517, i64 1
+ %tmp18519 = getelementptr inbounds float, float* %tmp18518, i64 1
+ %tmp18520 = getelementptr inbounds float, float* %tmp18519, i64 1
+ %tmp18521 = getelementptr inbounds float, float* %tmp18520, i64 1
+ %tmp18522 = getelementptr inbounds float, float* %tmp18521, i64 1
+ %tmp18523 = getelementptr inbounds float, float* %tmp18522, i64 1
+ %tmp18524 = getelementptr inbounds float, float* %tmp18523, i64 1
+ %tmp18525 = getelementptr inbounds float, float* %tmp18524, i64 1
+ %tmp18526 = getelementptr inbounds float, float* %tmp18525, i64 1
+ %tmp18527 = getelementptr inbounds float, float* %tmp18526, i64 1
+ %tmp18528 = getelementptr inbounds float, float* %tmp18527, i64 1
+ %tmp18529 = getelementptr inbounds float, float* %tmp18528, i64 1
+ %tmp18530 = getelementptr inbounds float, float* %tmp18529, i64 1
+ %tmp18531 = getelementptr inbounds float, float* %tmp18530, i64 1
+ %tmp18532 = getelementptr inbounds float, float* %tmp18531, i64 1
+ %tmp18533 = getelementptr inbounds float, float* %tmp18532, i64 1
+ %tmp18534 = getelementptr inbounds float, float* %tmp18533, i64 1
+ %tmp18535 = getelementptr inbounds float, float* %tmp18534, i64 1
+ %tmp18536 = getelementptr inbounds float, float* %tmp18535, i64 1
+ %tmp18537 = getelementptr inbounds float, float* %tmp18536, i64 1
+ %tmp18538 = getelementptr inbounds float, float* %tmp18537, i64 1
+ %tmp18539 = getelementptr inbounds float, float* %tmp18538, i64 1
+ %tmp18540 = getelementptr inbounds float, float* %tmp18539, i64 1
+ %tmp18541 = getelementptr inbounds float, float* %tmp18540, i64 1
+ %tmp18542 = getelementptr inbounds float, float* %tmp18541, i64 1
+ %tmp18543 = getelementptr inbounds float, float* %tmp18542, i64 1
+ %tmp18544 = getelementptr inbounds float, float* %tmp18543, i64 1
+ %tmp18545 = getelementptr inbounds float, float* %tmp18544, i64 1
+ %tmp18546 = getelementptr inbounds float, float* %tmp18545, i64 1
+ %tmp18547 = getelementptr inbounds float, float* %tmp18546, i64 1
+ %tmp18548 = getelementptr inbounds float, float* %tmp18547, i64 1
+ %tmp18549 = getelementptr inbounds float, float* %tmp18548, i64 1
+ %tmp18550 = getelementptr inbounds float, float* %tmp18549, i64 1
+ %tmp18551 = getelementptr inbounds float, float* %tmp18550, i64 1
+ %tmp18552 = getelementptr inbounds float, float* %tmp18551, i64 1
+ %tmp18553 = getelementptr inbounds float, float* %tmp18552, i64 1
+ %tmp18554 = getelementptr inbounds float, float* %tmp18553, i64 1
+ %tmp18555 = getelementptr inbounds float, float* %tmp18554, i64 1
+ %tmp18556 = getelementptr inbounds float, float* %tmp18555, i64 1
+ %tmp18557 = getelementptr inbounds float, float* %tmp18556, i64 1
+ %tmp18558 = getelementptr inbounds float, float* %tmp18557, i64 1
+ %tmp18559 = getelementptr inbounds float, float* %tmp18558, i64 1
+ %tmp18560 = getelementptr inbounds float, float* %tmp18559, i64 1
+ %tmp18561 = getelementptr inbounds float, float* %tmp18560, i64 1
+ %tmp18562 = getelementptr inbounds float, float* %tmp18561, i64 1
+ %tmp18563 = getelementptr inbounds float, float* %tmp18562, i64 1
+ %tmp18564 = getelementptr inbounds float, float* %tmp18563, i64 1
+ %tmp18565 = getelementptr inbounds float, float* %tmp18564, i64 1
+ %tmp18566 = getelementptr inbounds float, float* %tmp18565, i64 1
+ %tmp18567 = getelementptr inbounds float, float* %tmp18566, i64 1
+ %tmp18568 = getelementptr inbounds float, float* %tmp18567, i64 1
+ %tmp18569 = getelementptr inbounds float, float* %tmp18568, i64 1
+ %tmp18570 = getelementptr inbounds float, float* %tmp18569, i64 1
+ %tmp18571 = getelementptr inbounds float, float* %tmp18570, i64 1
+ %tmp18572 = getelementptr inbounds float, float* %tmp18571, i64 1
+ %tmp18573 = getelementptr inbounds float, float* %tmp18572, i64 1
+ %tmp18574 = getelementptr inbounds float, float* %tmp18573, i64 1
+ %tmp18575 = getelementptr inbounds float, float* %tmp18574, i64 1
+ %tmp18576 = getelementptr inbounds float, float* %tmp18575, i64 1
+ %tmp18577 = getelementptr inbounds float, float* %tmp18576, i64 1
+ %tmp18578 = getelementptr inbounds float, float* %tmp18577, i64 1
+ %tmp18579 = getelementptr inbounds float, float* %tmp18578, i64 1
+ %tmp18580 = getelementptr inbounds float, float* %tmp18579, i64 1
+ %tmp18581 = getelementptr inbounds float, float* %tmp18580, i64 1
+ %tmp18582 = getelementptr inbounds float, float* %tmp18581, i64 1
+ %tmp18583 = getelementptr inbounds float, float* %tmp18582, i64 1
+ %tmp18584 = getelementptr inbounds float, float* %tmp18583, i64 1
+ %tmp18585 = getelementptr inbounds float, float* %tmp18584, i64 1
+ %tmp18586 = getelementptr inbounds float, float* %tmp18585, i64 1
+ %tmp18587 = getelementptr inbounds float, float* %tmp18586, i64 1
+ %tmp18588 = getelementptr inbounds float, float* %tmp18587, i64 1
+ %tmp18589 = getelementptr inbounds float, float* %tmp18588, i64 1
+ %tmp18590 = getelementptr inbounds float, float* %tmp18589, i64 1
+ %tmp18591 = getelementptr inbounds float, float* %tmp18590, i64 1
+ %tmp18592 = getelementptr inbounds float, float* %tmp18591, i64 1
+ %tmp18593 = getelementptr inbounds float, float* %tmp18592, i64 1
+ %tmp18594 = getelementptr inbounds float, float* %tmp18593, i64 1
+ %tmp18595 = getelementptr inbounds float, float* %tmp18594, i64 1
+ %tmp18596 = getelementptr inbounds float, float* %tmp18595, i64 1
+ %tmp18597 = getelementptr inbounds float, float* %tmp18596, i64 1
+ %tmp18598 = getelementptr inbounds float, float* %tmp18597, i64 1
+ %tmp18599 = getelementptr inbounds float, float* %tmp18598, i64 1
+ %tmp18600 = getelementptr inbounds float, float* %tmp18599, i64 1
+ %tmp18601 = getelementptr inbounds float, float* %tmp18600, i64 1
+ %tmp18602 = getelementptr inbounds float, float* %tmp18601, i64 1
+ %tmp18603 = getelementptr inbounds float, float* %tmp18602, i64 1
+ %tmp18604 = getelementptr inbounds float, float* %tmp18603, i64 1
+ %tmp18605 = getelementptr inbounds float, float* %tmp18604, i64 1
+ %tmp18606 = getelementptr inbounds float, float* %tmp18605, i64 1
+ %tmp18607 = getelementptr inbounds float, float* %tmp18606, i64 1
+ %tmp18608 = getelementptr inbounds float, float* %tmp18607, i64 1
+ %tmp18609 = getelementptr inbounds float, float* %tmp18608, i64 1
+ %tmp18610 = getelementptr inbounds float, float* %tmp18609, i64 1
+ %tmp18611 = getelementptr inbounds float, float* %tmp18610, i64 1
+ %tmp18612 = getelementptr inbounds float, float* %tmp18611, i64 1
+ %tmp18613 = getelementptr inbounds float, float* %tmp18612, i64 1
+ %tmp18614 = getelementptr inbounds float, float* %tmp18613, i64 1
+ %tmp18615 = getelementptr inbounds float, float* %tmp18614, i64 1
+ %tmp18616 = getelementptr inbounds float, float* %tmp18615, i64 1
+ %tmp18617 = getelementptr inbounds float, float* %tmp18616, i64 1
+ %tmp18618 = getelementptr inbounds float, float* %tmp18617, i64 1
+ %tmp18619 = getelementptr inbounds float, float* %tmp18618, i64 1
+ %tmp18620 = getelementptr inbounds float, float* %tmp18619, i64 1
+ %tmp18621 = getelementptr inbounds float, float* %tmp18620, i64 1
+ %tmp18622 = getelementptr inbounds float, float* %tmp18621, i64 1
+ %tmp18623 = getelementptr inbounds float, float* %tmp18622, i64 1
+ %tmp18624 = getelementptr inbounds float, float* %tmp18623, i64 1
+ %tmp18625 = getelementptr inbounds float, float* %tmp18624, i64 1
+ %tmp18626 = getelementptr inbounds float, float* %tmp18625, i64 1
+ %tmp18627 = getelementptr inbounds float, float* %tmp18626, i64 1
+ %tmp18628 = getelementptr inbounds float, float* %tmp18627, i64 1
+ %tmp18629 = getelementptr inbounds float, float* %tmp18628, i64 1
+ %tmp18630 = getelementptr inbounds float, float* %tmp18629, i64 1
+ %tmp18631 = getelementptr inbounds float, float* %tmp18630, i64 1
+ %tmp18632 = getelementptr inbounds float, float* %tmp18631, i64 1
+ %tmp18633 = getelementptr inbounds float, float* %tmp18632, i64 1
+ %tmp18634 = getelementptr inbounds float, float* %tmp18633, i64 1
+ %tmp18635 = getelementptr inbounds float, float* %tmp18634, i64 1
+ %tmp18636 = getelementptr inbounds float, float* %tmp18635, i64 1
+ %tmp18637 = getelementptr inbounds float, float* %tmp18636, i64 1
+ %tmp18638 = getelementptr inbounds float, float* %tmp18637, i64 1
+ %tmp18639 = getelementptr inbounds float, float* %tmp18638, i64 1
+ %tmp18640 = getelementptr inbounds float, float* %tmp18639, i64 1
+ %tmp18641 = getelementptr inbounds float, float* %tmp18640, i64 1
+ %tmp18642 = getelementptr inbounds float, float* %tmp18641, i64 1
+ %tmp18643 = getelementptr inbounds float, float* %tmp18642, i64 1
+ %tmp18644 = getelementptr inbounds float, float* %tmp18643, i64 1
+ %tmp18645 = getelementptr inbounds float, float* %tmp18644, i64 1
+ %tmp18646 = getelementptr inbounds float, float* %tmp18645, i64 1
+ %tmp18647 = getelementptr inbounds float, float* %tmp18646, i64 1
+ %tmp18648 = getelementptr inbounds float, float* %tmp18647, i64 1
+ %tmp18649 = getelementptr inbounds float, float* %tmp18648, i64 1
+ %tmp18650 = getelementptr inbounds float, float* %tmp18649, i64 1
+ %tmp18651 = getelementptr inbounds float, float* %tmp18650, i64 1
+ %tmp18652 = getelementptr inbounds float, float* %tmp18651, i64 1
+ %tmp18653 = getelementptr inbounds float, float* %tmp18652, i64 1
+ %tmp18654 = getelementptr inbounds float, float* %tmp18653, i64 1
+ %tmp18655 = getelementptr inbounds float, float* %tmp18654, i64 1
+ %tmp18656 = getelementptr inbounds float, float* %tmp18655, i64 1
+ %tmp18657 = getelementptr inbounds float, float* %tmp18656, i64 1
+ %tmp18658 = getelementptr inbounds float, float* %tmp18657, i64 1
+ %tmp18659 = getelementptr inbounds float, float* %tmp18658, i64 1
+ %tmp18660 = getelementptr inbounds float, float* %tmp18659, i64 1
+ %tmp18661 = getelementptr inbounds float, float* %tmp18660, i64 1
+ %tmp18662 = getelementptr inbounds float, float* %tmp18661, i64 1
+ %tmp18663 = getelementptr inbounds float, float* %tmp18662, i64 1
+ %tmp18664 = getelementptr inbounds float, float* %tmp18663, i64 1
+ %tmp18665 = getelementptr inbounds float, float* %tmp18664, i64 1
+ %tmp18666 = getelementptr inbounds float, float* %tmp18665, i64 1
+ %tmp18667 = getelementptr inbounds float, float* %tmp18666, i64 1
+ %tmp18668 = getelementptr inbounds float, float* %tmp18667, i64 1
+ %tmp18669 = getelementptr inbounds float, float* %tmp18668, i64 1
+ %tmp18670 = getelementptr inbounds float, float* %tmp18669, i64 1
+ %tmp18671 = getelementptr inbounds float, float* %tmp18670, i64 1
+ %tmp18672 = getelementptr inbounds float, float* %tmp18671, i64 1
+ %tmp18673 = getelementptr inbounds float, float* %tmp18672, i64 1
+ %tmp18674 = getelementptr inbounds float, float* %tmp18673, i64 1
+ %tmp18675 = getelementptr inbounds float, float* %tmp18674, i64 1
+ %tmp18676 = getelementptr inbounds float, float* %tmp18675, i64 1
+ %tmp18677 = getelementptr inbounds float, float* %tmp18676, i64 1
+ %tmp18678 = getelementptr inbounds float, float* %tmp18677, i64 1
+ %tmp18679 = getelementptr inbounds float, float* %tmp18678, i64 1
+ %tmp18680 = getelementptr inbounds float, float* %tmp18679, i64 1
+ %tmp18681 = getelementptr inbounds float, float* %tmp18680, i64 1
+ %tmp18682 = getelementptr inbounds float, float* %tmp18681, i64 1
+ %tmp18683 = getelementptr inbounds float, float* %tmp18682, i64 1
+ %tmp18684 = getelementptr inbounds float, float* %tmp18683, i64 1
+ %tmp18685 = getelementptr inbounds float, float* %tmp18684, i64 1
+ %tmp18686 = getelementptr inbounds float, float* %tmp18685, i64 1
+ %tmp18687 = getelementptr inbounds float, float* %tmp18686, i64 1
+ %tmp18688 = getelementptr inbounds float, float* %tmp18687, i64 1
+ %tmp18689 = getelementptr inbounds float, float* %tmp18688, i64 1
+ %tmp18690 = getelementptr inbounds float, float* %tmp18689, i64 1
+ %tmp18691 = getelementptr inbounds float, float* %tmp18690, i64 1
+ %tmp18692 = getelementptr inbounds float, float* %tmp18691, i64 1
+ %tmp18693 = getelementptr inbounds float, float* %tmp18692, i64 1
+ %tmp18694 = getelementptr inbounds float, float* %tmp18693, i64 1
+ %tmp18695 = getelementptr inbounds float, float* %tmp18694, i64 1
+ %tmp18696 = getelementptr inbounds float, float* %tmp18695, i64 1
+ %tmp18697 = getelementptr inbounds float, float* %tmp18696, i64 1
+ %tmp18698 = getelementptr inbounds float, float* %tmp18697, i64 1
+ %tmp18699 = getelementptr inbounds float, float* %tmp18698, i64 1
+ %tmp18700 = getelementptr inbounds float, float* %tmp18699, i64 1
+ %tmp18701 = getelementptr inbounds float, float* %tmp18700, i64 1
+ %tmp18702 = getelementptr inbounds float, float* %tmp18701, i64 1
+ %tmp18703 = getelementptr inbounds float, float* %tmp18702, i64 1
+ %tmp18704 = getelementptr inbounds float, float* %tmp18703, i64 1
+ %tmp18705 = getelementptr inbounds float, float* %tmp18704, i64 1
+ %tmp18706 = getelementptr inbounds float, float* %tmp18705, i64 1
+ %tmp18707 = getelementptr inbounds float, float* %tmp18706, i64 1
+ %tmp18708 = getelementptr inbounds float, float* %tmp18707, i64 1
+ %tmp18709 = getelementptr inbounds float, float* %tmp18708, i64 1
+ %tmp18710 = getelementptr inbounds float, float* %tmp18709, i64 1
+ %tmp18711 = getelementptr inbounds float, float* %tmp18710, i64 1
+ %tmp18712 = getelementptr inbounds float, float* %tmp18711, i64 1
+ %tmp18713 = getelementptr inbounds float, float* %tmp18712, i64 1
+ %tmp18714 = getelementptr inbounds float, float* %tmp18713, i64 1
+ %tmp18715 = getelementptr inbounds float, float* %tmp18714, i64 1
+ %tmp18716 = getelementptr inbounds float, float* %tmp18715, i64 1
+ %tmp18717 = getelementptr inbounds float, float* %tmp18716, i64 1
+ %tmp18718 = getelementptr inbounds float, float* %tmp18717, i64 1
+ %tmp18719 = getelementptr inbounds float, float* %tmp18718, i64 1
+ %tmp18720 = getelementptr inbounds float, float* %tmp18719, i64 1
+ %tmp18721 = getelementptr inbounds float, float* %tmp18720, i64 1
+ %tmp18722 = getelementptr inbounds float, float* %tmp18721, i64 1
+ %tmp18723 = getelementptr inbounds float, float* %tmp18722, i64 1
+ %tmp18724 = getelementptr inbounds float, float* %tmp18723, i64 1
+ %tmp18725 = getelementptr inbounds float, float* %tmp18724, i64 1
+ %tmp18726 = getelementptr inbounds float, float* %tmp18725, i64 1
+ %tmp18727 = getelementptr inbounds float, float* %tmp18726, i64 1
+ %tmp18728 = getelementptr inbounds float, float* %tmp18727, i64 1
+ %tmp18729 = getelementptr inbounds float, float* %tmp18728, i64 1
+ %tmp18730 = getelementptr inbounds float, float* %tmp18729, i64 1
+ %tmp18731 = getelementptr inbounds float, float* %tmp18730, i64 1
+ %tmp18732 = getelementptr inbounds float, float* %tmp18731, i64 1
+ %tmp18733 = getelementptr inbounds float, float* %tmp18732, i64 1
+ %tmp18734 = getelementptr inbounds float, float* %tmp18733, i64 1
+ %tmp18735 = getelementptr inbounds float, float* %tmp18734, i64 1
+ %tmp18736 = getelementptr inbounds float, float* %tmp18735, i64 1
+ %tmp18737 = getelementptr inbounds float, float* %tmp18736, i64 1
+ %tmp18738 = getelementptr inbounds float, float* %tmp18737, i64 1
+ %tmp18739 = getelementptr inbounds float, float* %tmp18738, i64 1
+ %tmp18740 = getelementptr inbounds float, float* %tmp18739, i64 1
+ %tmp18741 = getelementptr inbounds float, float* %tmp18740, i64 1
+ %tmp18742 = getelementptr inbounds float, float* %tmp18741, i64 1
+ %tmp18743 = getelementptr inbounds float, float* %tmp18742, i64 1
+ %tmp18744 = getelementptr inbounds float, float* %tmp18743, i64 1
+ %tmp18745 = getelementptr inbounds float, float* %tmp18744, i64 1
+ %tmp18746 = getelementptr inbounds float, float* %tmp18745, i64 1
+ %tmp18747 = getelementptr inbounds float, float* %tmp18746, i64 1
+ %tmp18748 = getelementptr inbounds float, float* %tmp18747, i64 1
+ %tmp18749 = getelementptr inbounds float, float* %tmp18748, i64 1
+ %tmp18750 = getelementptr inbounds float, float* %tmp18749, i64 1
+ %tmp18751 = getelementptr inbounds float, float* %tmp18750, i64 1
+ %tmp18752 = getelementptr inbounds float, float* %tmp18751, i64 1
+ %tmp18753 = getelementptr inbounds float, float* %tmp18752, i64 1
+ %tmp18754 = getelementptr inbounds float, float* %tmp18753, i64 1
+ %tmp18755 = getelementptr inbounds float, float* %tmp18754, i64 1
+ %tmp18756 = getelementptr inbounds float, float* %tmp18755, i64 1
+ %tmp18757 = getelementptr inbounds float, float* %tmp18756, i64 1
+ %tmp18758 = getelementptr inbounds float, float* %tmp18757, i64 1
+ %tmp18759 = getelementptr inbounds float, float* %tmp18758, i64 1
+ %tmp18760 = getelementptr inbounds float, float* %tmp18759, i64 1
+ %tmp18761 = getelementptr inbounds float, float* %tmp18760, i64 1
+ %tmp18762 = getelementptr inbounds float, float* %tmp18761, i64 1
+ %tmp18763 = getelementptr inbounds float, float* %tmp18762, i64 1
+ %tmp18764 = getelementptr inbounds float, float* %tmp18763, i64 1
+ %tmp18765 = getelementptr inbounds float, float* %tmp18764, i64 1
+ %tmp18766 = getelementptr inbounds float, float* %tmp18765, i64 1
+ %tmp18767 = getelementptr inbounds float, float* %tmp18766, i64 1
+ %tmp18768 = getelementptr inbounds float, float* %tmp18767, i64 1
+ %tmp18769 = getelementptr inbounds float, float* %tmp18768, i64 1
+ %tmp18770 = getelementptr inbounds float, float* %tmp18769, i64 1
+ %tmp18771 = getelementptr inbounds float, float* %tmp18770, i64 1
+ %tmp18772 = getelementptr inbounds float, float* %tmp18771, i64 1
+ %tmp18773 = getelementptr inbounds float, float* %tmp18772, i64 1
+ %tmp18774 = getelementptr inbounds float, float* %tmp18773, i64 1
+ %tmp18775 = getelementptr inbounds float, float* %tmp18774, i64 1
+ %tmp18776 = getelementptr inbounds float, float* %tmp18775, i64 1
+ %tmp18777 = getelementptr inbounds float, float* %tmp18776, i64 1
+ %tmp18778 = getelementptr inbounds float, float* %tmp18777, i64 1
+ %tmp18779 = getelementptr inbounds float, float* %tmp18778, i64 1
+ %tmp18780 = getelementptr inbounds float, float* %tmp18779, i64 1
+ %tmp18781 = getelementptr inbounds float, float* %tmp18780, i64 1
+ %tmp18782 = getelementptr inbounds float, float* %tmp18781, i64 1
+ %tmp18783 = getelementptr inbounds float, float* %tmp18782, i64 1
+ %tmp18784 = getelementptr inbounds float, float* %tmp18783, i64 1
+ %tmp18785 = getelementptr inbounds float, float* %tmp18784, i64 1
+ %tmp18786 = getelementptr inbounds float, float* %tmp18785, i64 1
+ %tmp18787 = getelementptr inbounds float, float* %tmp18786, i64 1
+ %tmp18788 = getelementptr inbounds float, float* %tmp18787, i64 1
+ %tmp18789 = getelementptr inbounds float, float* %tmp18788, i64 1
+ %tmp18790 = getelementptr inbounds float, float* %tmp18789, i64 1
+ %tmp18791 = getelementptr inbounds float, float* %tmp18790, i64 1
+ %tmp18792 = getelementptr inbounds float, float* %tmp18791, i64 1
+ %tmp18793 = getelementptr inbounds float, float* %tmp18792, i64 1
+ %tmp18794 = getelementptr inbounds float, float* %tmp18793, i64 1
+ %tmp18795 = getelementptr inbounds float, float* %tmp18794, i64 1
+ %tmp18796 = getelementptr inbounds float, float* %tmp18795, i64 1
+ %tmp18797 = getelementptr inbounds float, float* %tmp18796, i64 1
+ %tmp18798 = getelementptr inbounds float, float* %tmp18797, i64 1
+ %tmp18799 = getelementptr inbounds float, float* %tmp18798, i64 1
+ %tmp18800 = getelementptr inbounds float, float* %tmp18799, i64 1
+ %tmp18801 = getelementptr inbounds float, float* %tmp18800, i64 1
+ %tmp18802 = getelementptr inbounds float, float* %tmp18801, i64 1
+ %tmp18803 = getelementptr inbounds float, float* %tmp18802, i64 1
+ %tmp18804 = getelementptr inbounds float, float* %tmp18803, i64 1
+ %tmp18805 = getelementptr inbounds float, float* %tmp18804, i64 1
+ %tmp18806 = getelementptr inbounds float, float* %tmp18805, i64 1
+ %tmp18807 = getelementptr inbounds float, float* %tmp18806, i64 1
+ %tmp18808 = getelementptr inbounds float, float* %tmp18807, i64 1
+ %tmp18809 = getelementptr inbounds float, float* %tmp18808, i64 1
+ %tmp18810 = getelementptr inbounds float, float* %tmp18809, i64 1
+ %tmp18811 = getelementptr inbounds float, float* %tmp18810, i64 1
+ %tmp18812 = getelementptr inbounds float, float* %tmp18811, i64 1
+ %tmp18813 = getelementptr inbounds float, float* %tmp18812, i64 1
+ %tmp18814 = getelementptr inbounds float, float* %tmp18813, i64 1
+ %tmp18815 = getelementptr inbounds float, float* %tmp18814, i64 1
+ %tmp18816 = getelementptr inbounds float, float* %tmp18815, i64 1
+ %tmp18817 = getelementptr inbounds float, float* %tmp18816, i64 1
+ %tmp18818 = getelementptr inbounds float, float* %tmp18817, i64 1
+ %tmp18819 = getelementptr inbounds float, float* %tmp18818, i64 1
+ %tmp18820 = getelementptr inbounds float, float* %tmp18819, i64 1
+ %tmp18821 = getelementptr inbounds float, float* %tmp18820, i64 1
+ %tmp18822 = getelementptr inbounds float, float* %tmp18821, i64 1
+ %tmp18823 = getelementptr inbounds float, float* %tmp18822, i64 1
+ %tmp18824 = getelementptr inbounds float, float* %tmp18823, i64 1
+ %tmp18825 = getelementptr inbounds float, float* %tmp18824, i64 1
+ %tmp18826 = getelementptr inbounds float, float* %tmp18825, i64 1
+ %tmp18827 = getelementptr inbounds float, float* %tmp18826, i64 1
+ %tmp18828 = getelementptr inbounds float, float* %tmp18827, i64 1
+ %tmp18829 = getelementptr inbounds float, float* %tmp18828, i64 1
+ %tmp18830 = getelementptr inbounds float, float* %tmp18829, i64 1
+ %tmp18831 = getelementptr inbounds float, float* %tmp18830, i64 1
+ %tmp18832 = getelementptr inbounds float, float* %tmp18831, i64 1
+ %tmp18833 = getelementptr inbounds float, float* %tmp18832, i64 1
+ %tmp18834 = getelementptr inbounds float, float* %tmp18833, i64 1
+ %tmp18835 = getelementptr inbounds float, float* %tmp18834, i64 1
+ %tmp18836 = getelementptr inbounds float, float* %tmp18835, i64 1
+ %tmp18837 = getelementptr inbounds float, float* %tmp18836, i64 1
+ %tmp18838 = getelementptr inbounds float, float* %tmp18837, i64 1
+ %tmp18839 = getelementptr inbounds float, float* %tmp18838, i64 1
+ %tmp18840 = getelementptr inbounds float, float* %tmp18839, i64 1
+ %tmp18841 = getelementptr inbounds float, float* %tmp18840, i64 1
+ %tmp18842 = getelementptr inbounds float, float* %tmp18841, i64 1
+ %tmp18843 = getelementptr inbounds float, float* %tmp18842, i64 1
+ %tmp18844 = getelementptr inbounds float, float* %tmp18843, i64 1
+ %tmp18845 = getelementptr inbounds float, float* %tmp18844, i64 1
+ %tmp18846 = getelementptr inbounds float, float* %tmp18845, i64 1
+ %tmp18847 = getelementptr inbounds float, float* %tmp18846, i64 1
+ %tmp18848 = getelementptr inbounds float, float* %tmp18847, i64 1
+ %tmp18849 = getelementptr inbounds float, float* %tmp18848, i64 1
+ %tmp18850 = getelementptr inbounds float, float* %tmp18849, i64 1
+ %tmp18851 = getelementptr inbounds float, float* %tmp18850, i64 1
+ %tmp18852 = getelementptr inbounds float, float* %tmp18851, i64 1
+ %tmp18853 = getelementptr inbounds float, float* %tmp18852, i64 1
+ %tmp18854 = getelementptr inbounds float, float* %tmp18853, i64 1
+ %tmp18855 = getelementptr inbounds float, float* %tmp18854, i64 1
+ %tmp18856 = getelementptr inbounds float, float* %tmp18855, i64 1
+ %tmp18857 = getelementptr inbounds float, float* %tmp18856, i64 1
+ %tmp18858 = getelementptr inbounds float, float* %tmp18857, i64 1
+ %tmp18859 = getelementptr inbounds float, float* %tmp18858, i64 1
+ %tmp18860 = getelementptr inbounds float, float* %tmp18859, i64 1
+ %tmp18861 = getelementptr inbounds float, float* %tmp18860, i64 1
+ %tmp18862 = getelementptr inbounds float, float* %tmp18861, i64 1
+ %tmp18863 = getelementptr inbounds float, float* %tmp18862, i64 1
+ %tmp18864 = getelementptr inbounds float, float* %tmp18863, i64 1
+ %tmp18865 = getelementptr inbounds float, float* %tmp18864, i64 1
+ %tmp18866 = getelementptr inbounds float, float* %tmp18865, i64 1
+ %tmp18867 = getelementptr inbounds float, float* %tmp18866, i64 1
+ %tmp18868 = getelementptr inbounds float, float* %tmp18867, i64 1
+ %tmp18869 = getelementptr inbounds float, float* %tmp18868, i64 1
+ %tmp18870 = getelementptr inbounds float, float* %tmp18869, i64 1
+ %tmp18871 = getelementptr inbounds float, float* %tmp18870, i64 1
+ %tmp18872 = getelementptr inbounds float, float* %tmp18871, i64 1
+ %tmp18873 = getelementptr inbounds float, float* %tmp18872, i64 1
+ %tmp18874 = getelementptr inbounds float, float* %tmp18873, i64 1
+ %tmp18875 = getelementptr inbounds float, float* %tmp18874, i64 1
+ %tmp18876 = getelementptr inbounds float, float* %tmp18875, i64 1
+ %tmp18877 = getelementptr inbounds float, float* %tmp18876, i64 1
+ %tmp18878 = getelementptr inbounds float, float* %tmp18877, i64 1
+ %tmp18879 = getelementptr inbounds float, float* %tmp18878, i64 1
+ %tmp18880 = getelementptr inbounds float, float* %tmp18879, i64 1
+ %tmp18881 = getelementptr inbounds float, float* %tmp18880, i64 1
+ %tmp18882 = getelementptr inbounds float, float* %tmp18881, i64 1
+ %tmp18883 = getelementptr inbounds float, float* %tmp18882, i64 1
+ %tmp18884 = getelementptr inbounds float, float* %tmp18883, i64 1
+ %tmp18885 = getelementptr inbounds float, float* %tmp18884, i64 1
+ %tmp18886 = getelementptr inbounds float, float* %tmp18885, i64 1
+ %tmp18887 = getelementptr inbounds float, float* %tmp18886, i64 1
+ %tmp18888 = getelementptr inbounds float, float* %tmp18887, i64 1
+ %tmp18889 = getelementptr inbounds float, float* %tmp18888, i64 1
+ %tmp18890 = getelementptr inbounds float, float* %tmp18889, i64 1
+ %tmp18891 = getelementptr inbounds float, float* %tmp18890, i64 1
+ %tmp18892 = getelementptr inbounds float, float* %tmp18891, i64 1
+ %tmp18893 = getelementptr inbounds float, float* %tmp18892, i64 1
+ %tmp18894 = getelementptr inbounds float, float* %tmp18893, i64 1
+ %tmp18895 = getelementptr inbounds float, float* %tmp18894, i64 1
+ %tmp18896 = getelementptr inbounds float, float* %tmp18895, i64 1
+ %tmp18897 = getelementptr inbounds float, float* %tmp18896, i64 1
+ %tmp18898 = getelementptr inbounds float, float* %tmp18897, i64 1
+ %tmp18899 = getelementptr inbounds float, float* %tmp18898, i64 1
+ %tmp18900 = getelementptr inbounds float, float* %tmp18899, i64 1
+ %tmp18901 = getelementptr inbounds float, float* %tmp18900, i64 1
+ %tmp18902 = getelementptr inbounds float, float* %tmp18901, i64 1
+ %tmp18903 = getelementptr inbounds float, float* %tmp18902, i64 1
+ %tmp18904 = getelementptr inbounds float, float* %tmp18903, i64 1
+ %tmp18905 = getelementptr inbounds float, float* %tmp18904, i64 1
+ %tmp18906 = getelementptr inbounds float, float* %tmp18905, i64 1
+ %tmp18907 = getelementptr inbounds float, float* %tmp18906, i64 1
+ %tmp18908 = getelementptr inbounds float, float* %tmp18907, i64 1
+ %tmp18909 = getelementptr inbounds float, float* %tmp18908, i64 1
+ %tmp18910 = getelementptr inbounds float, float* %tmp18909, i64 1
+ %tmp18911 = getelementptr inbounds float, float* %tmp18910, i64 1
+ %tmp18912 = getelementptr inbounds float, float* %tmp18911, i64 1
+ %tmp18913 = getelementptr inbounds float, float* %tmp18912, i64 1
+ %tmp18914 = getelementptr inbounds float, float* %tmp18913, i64 1
+ %tmp18915 = getelementptr inbounds float, float* %tmp18914, i64 1
+ %tmp18916 = getelementptr inbounds float, float* %tmp18915, i64 1
+ %tmp18917 = getelementptr inbounds float, float* %tmp18916, i64 1
+ %tmp18918 = getelementptr inbounds float, float* %tmp18917, i64 1
+ %tmp18919 = getelementptr inbounds float, float* %tmp18918, i64 1
+ %tmp18920 = getelementptr inbounds float, float* %tmp18919, i64 1
+ %tmp18921 = getelementptr inbounds float, float* %tmp18920, i64 1
+ %tmp18922 = getelementptr inbounds float, float* %tmp18921, i64 1
+ %tmp18923 = getelementptr inbounds float, float* %tmp18922, i64 1
+ %tmp18924 = getelementptr inbounds float, float* %tmp18923, i64 1
+ %tmp18925 = getelementptr inbounds float, float* %tmp18924, i64 1
+ %tmp18926 = getelementptr inbounds float, float* %tmp18925, i64 1
+ %tmp18927 = getelementptr inbounds float, float* %tmp18926, i64 1
+ %tmp18928 = getelementptr inbounds float, float* %tmp18927, i64 1
+ %tmp18929 = getelementptr inbounds float, float* %tmp18928, i64 1
+ %tmp18930 = getelementptr inbounds float, float* %tmp18929, i64 1
+ %tmp18931 = getelementptr inbounds float, float* %tmp18930, i64 1
+ %tmp18932 = getelementptr inbounds float, float* %tmp18931, i64 1
+ %tmp18933 = getelementptr inbounds float, float* %tmp18932, i64 1
+ %tmp18934 = getelementptr inbounds float, float* %tmp18933, i64 1
+ %tmp18935 = getelementptr inbounds float, float* %tmp18934, i64 1
+ %tmp18936 = getelementptr inbounds float, float* %tmp18935, i64 1
+ %tmp18937 = getelementptr inbounds float, float* %tmp18936, i64 1
+ %tmp18938 = getelementptr inbounds float, float* %tmp18937, i64 1
+ %tmp18939 = getelementptr inbounds float, float* %tmp18938, i64 1
+ %tmp18940 = getelementptr inbounds float, float* %tmp18939, i64 1
+ %tmp18941 = getelementptr inbounds float, float* %tmp18940, i64 1
+ %tmp18942 = getelementptr inbounds float, float* %tmp18941, i64 1
+ %tmp18943 = getelementptr inbounds float, float* %tmp18942, i64 1
+ %tmp18944 = getelementptr inbounds float, float* %tmp18943, i64 1
+ %tmp18945 = getelementptr inbounds float, float* %tmp18944, i64 1
+ %tmp18946 = getelementptr inbounds float, float* %tmp18945, i64 1
+ %tmp18947 = getelementptr inbounds float, float* %tmp18946, i64 1
+ %tmp18948 = getelementptr inbounds float, float* %tmp18947, i64 1
+ %tmp18949 = getelementptr inbounds float, float* %tmp18948, i64 1
+ %tmp18950 = getelementptr inbounds float, float* %tmp18949, i64 1
+ %tmp18951 = getelementptr inbounds float, float* %tmp18950, i64 1
+ %tmp18952 = getelementptr inbounds float, float* %tmp18951, i64 1
+ %tmp18953 = getelementptr inbounds float, float* %tmp18952, i64 1
+ %tmp18954 = getelementptr inbounds float, float* %tmp18953, i64 1
+ %tmp18955 = getelementptr inbounds float, float* %tmp18954, i64 1
+ %tmp18956 = getelementptr inbounds float, float* %tmp18955, i64 1
+ %tmp18957 = getelementptr inbounds float, float* %tmp18956, i64 1
+ %tmp18958 = getelementptr inbounds float, float* %tmp18957, i64 1
+ %tmp18959 = getelementptr inbounds float, float* %tmp18958, i64 1
+ %tmp18960 = getelementptr inbounds float, float* %tmp18959, i64 1
+ %tmp18961 = getelementptr inbounds float, float* %tmp18960, i64 1
+ %tmp18962 = getelementptr inbounds float, float* %tmp18961, i64 1
+ %tmp18963 = getelementptr inbounds float, float* %tmp18962, i64 1
+ %tmp18964 = getelementptr inbounds float, float* %tmp18963, i64 1
+ %tmp18965 = getelementptr inbounds float, float* %tmp18964, i64 1
+ %tmp18966 = getelementptr inbounds float, float* %tmp18965, i64 1
+ %tmp18967 = getelementptr inbounds float, float* %tmp18966, i64 1
+ %tmp18968 = getelementptr inbounds float, float* %tmp18967, i64 1
+ %tmp18969 = getelementptr inbounds float, float* %tmp18968, i64 1
+ %tmp18970 = getelementptr inbounds float, float* %tmp18969, i64 1
+ %tmp18971 = getelementptr inbounds float, float* %tmp18970, i64 1
+ %tmp18972 = getelementptr inbounds float, float* %tmp18971, i64 1
+ %tmp18973 = getelementptr inbounds float, float* %tmp18972, i64 1
+ %tmp18974 = getelementptr inbounds float, float* %tmp18973, i64 1
+ %tmp18975 = getelementptr inbounds float, float* %tmp18974, i64 1
+ %tmp18976 = getelementptr inbounds float, float* %tmp18975, i64 1
+ %tmp18977 = getelementptr inbounds float, float* %tmp18976, i64 1
+ %tmp18978 = getelementptr inbounds float, float* %tmp18977, i64 1
+ %tmp18979 = getelementptr inbounds float, float* %tmp18978, i64 1
+ %tmp18980 = getelementptr inbounds float, float* %tmp18979, i64 1
+ %tmp18981 = getelementptr inbounds float, float* %tmp18980, i64 1
+ %tmp18982 = getelementptr inbounds float, float* %tmp18981, i64 1
+ %tmp18983 = getelementptr inbounds float, float* %tmp18982, i64 1
+ %tmp18984 = getelementptr inbounds float, float* %tmp18983, i64 1
+ %tmp18985 = getelementptr inbounds float, float* %tmp18984, i64 1
+ %tmp18986 = getelementptr inbounds float, float* %tmp18985, i64 1
+ %tmp18987 = getelementptr inbounds float, float* %tmp18986, i64 1
+ %tmp18988 = getelementptr inbounds float, float* %tmp18987, i64 1
+ %tmp18989 = getelementptr inbounds float, float* %tmp18988, i64 1
+ %tmp18990 = getelementptr inbounds float, float* %tmp18989, i64 1
+ %tmp18991 = getelementptr inbounds float, float* %tmp18990, i64 1
+ %tmp18992 = getelementptr inbounds float, float* %tmp18991, i64 1
+ %tmp18993 = getelementptr inbounds float, float* %tmp18992, i64 1
+ %tmp18994 = getelementptr inbounds float, float* %tmp18993, i64 1
+ %tmp18995 = getelementptr inbounds float, float* %tmp18994, i64 1
+ %tmp18996 = getelementptr inbounds float, float* %tmp18995, i64 1
+ %tmp18997 = getelementptr inbounds float, float* %tmp18996, i64 1
+ %tmp18998 = getelementptr inbounds float, float* %tmp18997, i64 1
+ %tmp18999 = getelementptr inbounds float, float* %tmp18998, i64 1
+ %tmp19000 = getelementptr inbounds float, float* %tmp18999, i64 1
+ %tmp19001 = getelementptr inbounds float, float* %tmp19000, i64 1
+ %tmp19002 = getelementptr inbounds float, float* %tmp19001, i64 1
+ %tmp19003 = getelementptr inbounds float, float* %tmp19002, i64 1
+ %tmp19004 = getelementptr inbounds float, float* %tmp19003, i64 1
+ %tmp19005 = getelementptr inbounds float, float* %tmp19004, i64 1
+ %tmp19006 = getelementptr inbounds float, float* %tmp19005, i64 1
+ %tmp19007 = getelementptr inbounds float, float* %tmp19006, i64 1
+ %tmp19008 = getelementptr inbounds float, float* %tmp19007, i64 1
+ %tmp19009 = getelementptr inbounds float, float* %tmp19008, i64 1
+ %tmp19010 = getelementptr inbounds float, float* %tmp19009, i64 1
+ %tmp19011 = getelementptr inbounds float, float* %tmp19010, i64 1
+ %tmp19012 = getelementptr inbounds float, float* %tmp19011, i64 1
+ %tmp19013 = getelementptr inbounds float, float* %tmp19012, i64 1
+ %tmp19014 = getelementptr inbounds float, float* %tmp19013, i64 1
+ %tmp19015 = getelementptr inbounds float, float* %tmp19014, i64 1
+ %tmp19016 = getelementptr inbounds float, float* %tmp19015, i64 1
+ %tmp19017 = getelementptr inbounds float, float* %tmp19016, i64 1
+ %tmp19018 = getelementptr inbounds float, float* %tmp19017, i64 1
+ %tmp19019 = getelementptr inbounds float, float* %tmp19018, i64 1
+ %tmp19020 = getelementptr inbounds float, float* %tmp19019, i64 1
+ %tmp19021 = getelementptr inbounds float, float* %tmp19020, i64 1
+ %tmp19022 = getelementptr inbounds float, float* %tmp19021, i64 1
+ %tmp19023 = getelementptr inbounds float, float* %tmp19022, i64 1
+ %tmp19024 = getelementptr inbounds float, float* %tmp19023, i64 1
+ %tmp19025 = getelementptr inbounds float, float* %tmp19024, i64 1
+ %tmp19026 = getelementptr inbounds float, float* %tmp19025, i64 1
+ %tmp19027 = getelementptr inbounds float, float* %tmp19026, i64 1
+ %tmp19028 = getelementptr inbounds float, float* %tmp19027, i64 1
+ %tmp19029 = getelementptr inbounds float, float* %tmp19028, i64 1
+ %tmp19030 = getelementptr inbounds float, float* %tmp19029, i64 1
+ %tmp19031 = getelementptr inbounds float, float* %tmp19030, i64 1
+ %tmp19032 = getelementptr inbounds float, float* %tmp19031, i64 1
+ %tmp19033 = getelementptr inbounds float, float* %tmp19032, i64 1
+ %tmp19034 = getelementptr inbounds float, float* %tmp19033, i64 1
+ %tmp19035 = getelementptr inbounds float, float* %tmp19034, i64 1
+ %tmp19036 = getelementptr inbounds float, float* %tmp19035, i64 1
+ %tmp19037 = getelementptr inbounds float, float* %tmp19036, i64 1
+ %tmp19038 = getelementptr inbounds float, float* %tmp19037, i64 1
+ %tmp19039 = getelementptr inbounds float, float* %tmp19038, i64 1
+ %tmp19040 = getelementptr inbounds float, float* %tmp19039, i64 1
+ %tmp19041 = getelementptr inbounds float, float* %tmp19040, i64 1
+ %tmp19042 = getelementptr inbounds float, float* %tmp19041, i64 1
+ %tmp19043 = getelementptr inbounds float, float* %tmp19042, i64 1
+ %tmp19044 = getelementptr inbounds float, float* %tmp19043, i64 1
+ %tmp19045 = getelementptr inbounds float, float* %tmp19044, i64 1
+ %tmp19046 = getelementptr inbounds float, float* %tmp19045, i64 1
+ %tmp19047 = getelementptr inbounds float, float* %tmp19046, i64 1
+ %tmp19048 = getelementptr inbounds float, float* %tmp19047, i64 1
+ %tmp19049 = getelementptr inbounds float, float* %tmp19048, i64 1
+ %tmp19050 = getelementptr inbounds float, float* %tmp19049, i64 1
+ %tmp19051 = getelementptr inbounds float, float* %tmp19050, i64 1
+ %tmp19052 = getelementptr inbounds float, float* %tmp19051, i64 1
+ %tmp19053 = getelementptr inbounds float, float* %tmp19052, i64 1
+ %tmp19054 = getelementptr inbounds float, float* %tmp19053, i64 1
+ %tmp19055 = getelementptr inbounds float, float* %tmp19054, i64 1
+ %tmp19056 = getelementptr inbounds float, float* %tmp19055, i64 1
+ %tmp19057 = getelementptr inbounds float, float* %tmp19056, i64 1
+ %tmp19058 = getelementptr inbounds float, float* %tmp19057, i64 1
+ %tmp19059 = getelementptr inbounds float, float* %tmp19058, i64 1
+ %tmp19060 = getelementptr inbounds float, float* %tmp19059, i64 1
+ %tmp19061 = getelementptr inbounds float, float* %tmp19060, i64 1
+ %tmp19062 = getelementptr inbounds float, float* %tmp19061, i64 1
+ %tmp19063 = getelementptr inbounds float, float* %tmp19062, i64 1
+ %tmp19064 = getelementptr inbounds float, float* %tmp19063, i64 1
+ %tmp19065 = getelementptr inbounds float, float* %tmp19064, i64 1
+ %tmp19066 = getelementptr inbounds float, float* %tmp19065, i64 1
+ %tmp19067 = getelementptr inbounds float, float* %tmp19066, i64 1
+ %tmp19068 = getelementptr inbounds float, float* %tmp19067, i64 1
+ %tmp19069 = getelementptr inbounds float, float* %tmp19068, i64 1
+ %tmp19070 = getelementptr inbounds float, float* %tmp19069, i64 1
+ %tmp19071 = getelementptr inbounds float, float* %tmp19070, i64 1
+ %tmp19072 = getelementptr inbounds float, float* %tmp19071, i64 1
+ %tmp19073 = getelementptr inbounds float, float* %tmp19072, i64 1
+ %tmp19074 = getelementptr inbounds float, float* %tmp19073, i64 1
+ %tmp19075 = getelementptr inbounds float, float* %tmp19074, i64 1
+ %tmp19076 = getelementptr inbounds float, float* %tmp19075, i64 1
+ %tmp19077 = getelementptr inbounds float, float* %tmp19076, i64 1
+ %tmp19078 = getelementptr inbounds float, float* %tmp19077, i64 1
+ %tmp19079 = getelementptr inbounds float, float* %tmp19078, i64 1
+ %tmp19080 = getelementptr inbounds float, float* %tmp19079, i64 1
+ %tmp19081 = getelementptr inbounds float, float* %tmp19080, i64 1
+ %tmp19082 = getelementptr inbounds float, float* %tmp19081, i64 1
+ %tmp19083 = getelementptr inbounds float, float* %tmp19082, i64 1
+ %tmp19084 = getelementptr inbounds float, float* %tmp19083, i64 1
+ %tmp19085 = getelementptr inbounds float, float* %tmp19084, i64 1
+ %tmp19086 = getelementptr inbounds float, float* %tmp19085, i64 1
+ %tmp19087 = getelementptr inbounds float, float* %tmp19086, i64 1
+ %tmp19088 = getelementptr inbounds float, float* %tmp19087, i64 1
+ %tmp19089 = getelementptr inbounds float, float* %tmp19088, i64 1
+ %tmp19090 = getelementptr inbounds float, float* %tmp19089, i64 1
+ %tmp19091 = getelementptr inbounds float, float* %tmp19090, i64 1
+ %tmp19092 = getelementptr inbounds float, float* %tmp19091, i64 1
+ %tmp19093 = getelementptr inbounds float, float* %tmp19092, i64 1
+ %tmp19094 = getelementptr inbounds float, float* %tmp19093, i64 1
+ %tmp19095 = getelementptr inbounds float, float* %tmp19094, i64 1
+ %tmp19096 = getelementptr inbounds float, float* %tmp19095, i64 1
+ %tmp19097 = getelementptr inbounds float, float* %tmp19096, i64 1
+ %tmp19098 = getelementptr inbounds float, float* %tmp19097, i64 1
+ %tmp19099 = getelementptr inbounds float, float* %tmp19098, i64 1
+ %tmp19100 = getelementptr inbounds float, float* %tmp19099, i64 1
+ %tmp19101 = getelementptr inbounds float, float* %tmp19100, i64 1
+ %tmp19102 = getelementptr inbounds float, float* %tmp19101, i64 1
+ %tmp19103 = getelementptr inbounds float, float* %tmp19102, i64 1
+ %tmp19104 = getelementptr inbounds float, float* %tmp19103, i64 1
+ %tmp19105 = getelementptr inbounds float, float* %tmp19104, i64 1
+ %tmp19106 = getelementptr inbounds float, float* %tmp19105, i64 1
+ %tmp19107 = getelementptr inbounds float, float* %tmp19106, i64 1
+ %tmp19108 = getelementptr inbounds float, float* %tmp19107, i64 1
+ %tmp19109 = getelementptr inbounds float, float* %tmp19108, i64 1
+ %tmp19110 = getelementptr inbounds float, float* %tmp19109, i64 1
+ %tmp19111 = getelementptr inbounds float, float* %tmp19110, i64 1
+ %tmp19112 = getelementptr inbounds float, float* %tmp19111, i64 1
+ %tmp19113 = getelementptr inbounds float, float* %tmp19112, i64 1
+ %tmp19114 = getelementptr inbounds float, float* %tmp19113, i64 1
+ %tmp19115 = getelementptr inbounds float, float* %tmp19114, i64 1
+ %tmp19116 = getelementptr inbounds float, float* %tmp19115, i64 1
+ %tmp19117 = getelementptr inbounds float, float* %tmp19116, i64 1
+ %tmp19118 = getelementptr inbounds float, float* %tmp19117, i64 1
+ %tmp19119 = getelementptr inbounds float, float* %tmp19118, i64 1
+ %tmp19120 = getelementptr inbounds float, float* %tmp19119, i64 1
+ %tmp19121 = getelementptr inbounds float, float* %tmp19120, i64 1
+ %tmp19122 = getelementptr inbounds float, float* %tmp19121, i64 1
+ %tmp19123 = getelementptr inbounds float, float* %tmp19122, i64 1
+ %tmp19124 = getelementptr inbounds float, float* %tmp19123, i64 1
+ %tmp19125 = getelementptr inbounds float, float* %tmp19124, i64 1
+ %tmp19126 = getelementptr inbounds float, float* %tmp19125, i64 1
+ %tmp19127 = getelementptr inbounds float, float* %tmp19126, i64 1
+ %tmp19128 = getelementptr inbounds float, float* %tmp19127, i64 1
+ %tmp19129 = getelementptr inbounds float, float* %tmp19128, i64 1
+ %tmp19130 = getelementptr inbounds float, float* %tmp19129, i64 1
+ %tmp19131 = getelementptr inbounds float, float* %tmp19130, i64 1
+ %tmp19132 = getelementptr inbounds float, float* %tmp19131, i64 1
+ %tmp19133 = getelementptr inbounds float, float* %tmp19132, i64 1
+ %tmp19134 = getelementptr inbounds float, float* %tmp19133, i64 1
+ %tmp19135 = getelementptr inbounds float, float* %tmp19134, i64 1
+ %tmp19136 = getelementptr inbounds float, float* %tmp19135, i64 1
+ %tmp19137 = getelementptr inbounds float, float* %tmp19136, i64 1
+ %tmp19138 = getelementptr inbounds float, float* %tmp19137, i64 1
+ %tmp19139 = getelementptr inbounds float, float* %tmp19138, i64 1
+ %tmp19140 = getelementptr inbounds float, float* %tmp19139, i64 1
+ %tmp19141 = getelementptr inbounds float, float* %tmp19140, i64 1
+ %tmp19142 = getelementptr inbounds float, float* %tmp19141, i64 1
+ %tmp19143 = getelementptr inbounds float, float* %tmp19142, i64 1
+ %tmp19144 = getelementptr inbounds float, float* %tmp19143, i64 1
+ %tmp19145 = getelementptr inbounds float, float* %tmp19144, i64 1
+ %tmp19146 = getelementptr inbounds float, float* %tmp19145, i64 1
+ %tmp19147 = getelementptr inbounds float, float* %tmp19146, i64 1
+ %tmp19148 = getelementptr inbounds float, float* %tmp19147, i64 1
+ %tmp19149 = getelementptr inbounds float, float* %tmp19148, i64 1
+ %tmp19150 = getelementptr inbounds float, float* %tmp19149, i64 1
+ %tmp19151 = getelementptr inbounds float, float* %tmp19150, i64 1
+ %tmp19152 = getelementptr inbounds float, float* %tmp19151, i64 1
+ %tmp19153 = getelementptr inbounds float, float* %tmp19152, i64 1
+ %tmp19154 = getelementptr inbounds float, float* %tmp19153, i64 1
+ %tmp19155 = getelementptr inbounds float, float* %tmp19154, i64 1
+ %tmp19156 = getelementptr inbounds float, float* %tmp19155, i64 1
+ %tmp19157 = getelementptr inbounds float, float* %tmp19156, i64 1
+ %tmp19158 = getelementptr inbounds float, float* %tmp19157, i64 1
+ %tmp19159 = getelementptr inbounds float, float* %tmp19158, i64 1
+ %tmp19160 = getelementptr inbounds float, float* %tmp19159, i64 1
+ %tmp19161 = getelementptr inbounds float, float* %tmp19160, i64 1
+ %tmp19162 = getelementptr inbounds float, float* %tmp19161, i64 1
+ %tmp19163 = getelementptr inbounds float, float* %tmp19162, i64 1
+ %tmp19164 = getelementptr inbounds float, float* %tmp19163, i64 1
+ %tmp19165 = getelementptr inbounds float, float* %tmp19164, i64 1
+ %tmp19166 = getelementptr inbounds float, float* %tmp19165, i64 1
+ %tmp19167 = getelementptr inbounds float, float* %tmp19166, i64 1
+ %tmp19168 = getelementptr inbounds float, float* %tmp19167, i64 1
+ %tmp19169 = getelementptr inbounds float, float* %tmp19168, i64 1
+ %tmp19170 = getelementptr inbounds float, float* %tmp19169, i64 1
+ %tmp19171 = getelementptr inbounds float, float* %tmp19170, i64 1
+ %tmp19172 = getelementptr inbounds float, float* %tmp19171, i64 1
+ %tmp19173 = getelementptr inbounds float, float* %tmp19172, i64 1
+ %tmp19174 = getelementptr inbounds float, float* %tmp19173, i64 1
+ %tmp19175 = getelementptr inbounds float, float* %tmp19174, i64 1
+ %tmp19176 = getelementptr inbounds float, float* %tmp19175, i64 1
+ %tmp19177 = getelementptr inbounds float, float* %tmp19176, i64 1
+ %tmp19178 = getelementptr inbounds float, float* %tmp19177, i64 1
+ %tmp19179 = getelementptr inbounds float, float* %tmp19178, i64 1
+ %tmp19180 = getelementptr inbounds float, float* %tmp19179, i64 1
+ %tmp19181 = getelementptr inbounds float, float* %tmp19180, i64 1
+ %tmp19182 = getelementptr inbounds float, float* %tmp19181, i64 1
+ %tmp19183 = getelementptr inbounds float, float* %tmp19182, i64 1
+ %tmp19184 = getelementptr inbounds float, float* %tmp19183, i64 1
+ %tmp19185 = getelementptr inbounds float, float* %tmp19184, i64 1
+ %tmp19186 = getelementptr inbounds float, float* %tmp19185, i64 1
+ %tmp19187 = getelementptr inbounds float, float* %tmp19186, i64 1
+ %tmp19188 = getelementptr inbounds float, float* %tmp19187, i64 1
+ %tmp19189 = getelementptr inbounds float, float* %tmp19188, i64 1
+ %tmp19190 = getelementptr inbounds float, float* %tmp19189, i64 1
+ %tmp19191 = getelementptr inbounds float, float* %tmp19190, i64 1
+ %tmp19192 = getelementptr inbounds float, float* %tmp19191, i64 1
+ %tmp19193 = getelementptr inbounds float, float* %tmp19192, i64 1
+ %tmp19194 = getelementptr inbounds float, float* %tmp19193, i64 1
+ %tmp19195 = getelementptr inbounds float, float* %tmp19194, i64 1
+ %tmp19196 = getelementptr inbounds float, float* %tmp19195, i64 1
+ %tmp19197 = getelementptr inbounds float, float* %tmp19196, i64 1
+ %tmp19198 = getelementptr inbounds float, float* %tmp19197, i64 1
+ %tmp19199 = getelementptr inbounds float, float* %tmp19198, i64 1
+ %tmp19200 = getelementptr inbounds float, float* %tmp19199, i64 1
+ %tmp19201 = getelementptr inbounds float, float* %tmp19200, i64 1
+ %tmp19202 = getelementptr inbounds float, float* %tmp19201, i64 1
+ %tmp19203 = getelementptr inbounds float, float* %tmp19202, i64 1
+ %tmp19204 = getelementptr inbounds float, float* %tmp19203, i64 1
+ %tmp19205 = getelementptr inbounds float, float* %tmp19204, i64 1
+ %tmp19206 = getelementptr inbounds float, float* %tmp19205, i64 1
+ %tmp19207 = getelementptr inbounds float, float* %tmp19206, i64 1
+ %tmp19208 = getelementptr inbounds float, float* %tmp19207, i64 1
+ %tmp19209 = getelementptr inbounds float, float* %tmp19208, i64 1
+ %tmp19210 = getelementptr inbounds float, float* %tmp19209, i64 1
+ %tmp19211 = getelementptr inbounds float, float* %tmp19210, i64 1
+ %tmp19212 = getelementptr inbounds float, float* %tmp19211, i64 1
+ %tmp19213 = getelementptr inbounds float, float* %tmp19212, i64 1
+ %tmp19214 = getelementptr inbounds float, float* %tmp19213, i64 1
+ %tmp19215 = getelementptr inbounds float, float* %tmp19214, i64 1
+ %tmp19216 = getelementptr inbounds float, float* %tmp19215, i64 1
+ %tmp19217 = getelementptr inbounds float, float* %tmp19216, i64 1
+ %tmp19218 = getelementptr inbounds float, float* %tmp19217, i64 1
+ %tmp19219 = getelementptr inbounds float, float* %tmp19218, i64 1
+ %tmp19220 = getelementptr inbounds float, float* %tmp19219, i64 1
+ %tmp19221 = getelementptr inbounds float, float* %tmp19220, i64 1
+ %tmp19222 = getelementptr inbounds float, float* %tmp19221, i64 1
+ %tmp19223 = getelementptr inbounds float, float* %tmp19222, i64 1
+ %tmp19224 = getelementptr inbounds float, float* %tmp19223, i64 1
+ %tmp19225 = getelementptr inbounds float, float* %tmp19224, i64 1
+ %tmp19226 = getelementptr inbounds float, float* %tmp19225, i64 1
+ %tmp19227 = getelementptr inbounds float, float* %tmp19226, i64 1
+ %tmp19228 = getelementptr inbounds float, float* %tmp19227, i64 1
+ %tmp19229 = getelementptr inbounds float, float* %tmp19228, i64 1
+ %tmp19230 = getelementptr inbounds float, float* %tmp19229, i64 1
+ %tmp19231 = getelementptr inbounds float, float* %tmp19230, i64 1
+ %tmp19232 = getelementptr inbounds float, float* %tmp19231, i64 1
+ %tmp19233 = getelementptr inbounds float, float* %tmp19232, i64 1
+ %tmp19234 = getelementptr inbounds float, float* %tmp19233, i64 1
+ %tmp19235 = getelementptr inbounds float, float* %tmp19234, i64 1
+ %tmp19236 = getelementptr inbounds float, float* %tmp19235, i64 1
+ %tmp19237 = getelementptr inbounds float, float* %tmp19236, i64 1
+ %tmp19238 = getelementptr inbounds float, float* %tmp19237, i64 1
+ %tmp19239 = getelementptr inbounds float, float* %tmp19238, i64 1
+ %tmp19240 = getelementptr inbounds float, float* %tmp19239, i64 1
+ %tmp19241 = getelementptr inbounds float, float* %tmp19240, i64 1
+ %tmp19242 = getelementptr inbounds float, float* %tmp19241, i64 1
+ %tmp19243 = getelementptr inbounds float, float* %tmp19242, i64 1
+ %tmp19244 = getelementptr inbounds float, float* %tmp19243, i64 1
+ %tmp19245 = getelementptr inbounds float, float* %tmp19244, i64 1
+ %tmp19246 = getelementptr inbounds float, float* %tmp19245, i64 1
+ %tmp19247 = getelementptr inbounds float, float* %tmp19246, i64 1
+ %tmp19248 = getelementptr inbounds float, float* %tmp19247, i64 1
+ %tmp19249 = getelementptr inbounds float, float* %tmp19248, i64 1
+ %tmp19250 = getelementptr inbounds float, float* %tmp19249, i64 1
+ %tmp19251 = getelementptr inbounds float, float* %tmp19250, i64 1
+ %tmp19252 = getelementptr inbounds float, float* %tmp19251, i64 1
+ %tmp19253 = getelementptr inbounds float, float* %tmp19252, i64 1
+ %tmp19254 = getelementptr inbounds float, float* %tmp19253, i64 1
+ %tmp19255 = getelementptr inbounds float, float* %tmp19254, i64 1
+ %tmp19256 = getelementptr inbounds float, float* %tmp19255, i64 1
+ %tmp19257 = getelementptr inbounds float, float* %tmp19256, i64 1
+ %tmp19258 = getelementptr inbounds float, float* %tmp19257, i64 1
+ %tmp19259 = getelementptr inbounds float, float* %tmp19258, i64 1
+ %tmp19260 = getelementptr inbounds float, float* %tmp19259, i64 1
+ %tmp19261 = getelementptr inbounds float, float* %tmp19260, i64 1
+ %tmp19262 = getelementptr inbounds float, float* %tmp19261, i64 1
+ %tmp19263 = getelementptr inbounds float, float* %tmp19262, i64 1
+ %tmp19264 = getelementptr inbounds float, float* %tmp19263, i64 1
+ %tmp19265 = getelementptr inbounds float, float* %tmp19264, i64 1
+ %tmp19266 = getelementptr inbounds float, float* %tmp19265, i64 1
+ %tmp19267 = getelementptr inbounds float, float* %tmp19266, i64 1
+ %tmp19268 = getelementptr inbounds float, float* %tmp19267, i64 1
+ %tmp19269 = getelementptr inbounds float, float* %tmp19268, i64 1
+ %tmp19270 = getelementptr inbounds float, float* %tmp19269, i64 1
+ %tmp19271 = getelementptr inbounds float, float* %tmp19270, i64 1
+ %tmp19272 = getelementptr inbounds float, float* %tmp19271, i64 1
+ %tmp19273 = getelementptr inbounds float, float* %tmp19272, i64 1
+ %tmp19274 = getelementptr inbounds float, float* %tmp19273, i64 1
+ %tmp19275 = getelementptr inbounds float, float* %tmp19274, i64 1
+ %tmp19276 = getelementptr inbounds float, float* %tmp19275, i64 1
+ %tmp19277 = getelementptr inbounds float, float* %tmp19276, i64 1
+ %tmp19278 = getelementptr inbounds float, float* %tmp19277, i64 1
+ %tmp19279 = getelementptr inbounds float, float* %tmp19278, i64 1
+ %tmp19280 = getelementptr inbounds float, float* %tmp19279, i64 1
+ %tmp19281 = getelementptr inbounds float, float* %tmp19280, i64 1
+ %tmp19282 = getelementptr inbounds float, float* %tmp19281, i64 1
+ %tmp19283 = getelementptr inbounds float, float* %tmp19282, i64 1
+ %tmp19284 = getelementptr inbounds float, float* %tmp19283, i64 1
+ %tmp19285 = getelementptr inbounds float, float* %tmp19284, i64 1
+ %tmp19286 = getelementptr inbounds float, float* %tmp19285, i64 1
+ %tmp19287 = getelementptr inbounds float, float* %tmp19286, i64 1
+ %tmp19288 = getelementptr inbounds float, float* %tmp19287, i64 1
+ %tmp19289 = getelementptr inbounds float, float* %tmp19288, i64 1
+ %tmp19290 = getelementptr inbounds float, float* %tmp19289, i64 1
+ %tmp19291 = getelementptr inbounds float, float* %tmp19290, i64 1
+ %tmp19292 = getelementptr inbounds float, float* %tmp19291, i64 1
+ %tmp19293 = getelementptr inbounds float, float* %tmp19292, i64 1
+ %tmp19294 = getelementptr inbounds float, float* %tmp19293, i64 1
+ %tmp19295 = getelementptr inbounds float, float* %tmp19294, i64 1
+ %tmp19296 = getelementptr inbounds float, float* %tmp19295, i64 1
+ %tmp19297 = getelementptr inbounds float, float* %tmp19296, i64 1
+ %tmp19298 = getelementptr inbounds float, float* %tmp19297, i64 1
+ %tmp19299 = getelementptr inbounds float, float* %tmp19298, i64 1
+ %tmp19300 = getelementptr inbounds float, float* %tmp19299, i64 1
+ %tmp19301 = getelementptr inbounds float, float* %tmp19300, i64 1
+ %tmp19302 = getelementptr inbounds float, float* %tmp19301, i64 1
+ %tmp19303 = getelementptr inbounds float, float* %tmp19302, i64 1
+ %tmp19304 = getelementptr inbounds float, float* %tmp19303, i64 1
+ %tmp19305 = getelementptr inbounds float, float* %tmp19304, i64 1
+ %tmp19306 = getelementptr inbounds float, float* %tmp19305, i64 1
+ %tmp19307 = getelementptr inbounds float, float* %tmp19306, i64 1
+ %tmp19308 = getelementptr inbounds float, float* %tmp19307, i64 1
+ %tmp19309 = getelementptr inbounds float, float* %tmp19308, i64 1
+ %tmp19310 = getelementptr inbounds float, float* %tmp19309, i64 1
+ %tmp19311 = getelementptr inbounds float, float* %tmp19310, i64 1
+ %tmp19312 = getelementptr inbounds float, float* %tmp19311, i64 1
+ %tmp19313 = getelementptr inbounds float, float* %tmp19312, i64 1
+ %tmp19314 = getelementptr inbounds float, float* %tmp19313, i64 1
+ %tmp19315 = getelementptr inbounds float, float* %tmp19314, i64 1
+ %tmp19316 = getelementptr inbounds float, float* %tmp19315, i64 1
+ %tmp19317 = getelementptr inbounds float, float* %tmp19316, i64 1
+ %tmp19318 = getelementptr inbounds float, float* %tmp19317, i64 1
+ %tmp19319 = getelementptr inbounds float, float* %tmp19318, i64 1
+ %tmp19320 = getelementptr inbounds float, float* %tmp19319, i64 1
+ %tmp19321 = getelementptr inbounds float, float* %tmp19320, i64 1
+ %tmp19322 = getelementptr inbounds float, float* %tmp19321, i64 1
+ %tmp19323 = getelementptr inbounds float, float* %tmp19322, i64 1
+ %tmp19324 = getelementptr inbounds float, float* %tmp19323, i64 1
+ %tmp19325 = getelementptr inbounds float, float* %tmp19324, i64 1
+ %tmp19326 = getelementptr inbounds float, float* %tmp19325, i64 1
+ %tmp19327 = getelementptr inbounds float, float* %tmp19326, i64 1
+ %tmp19328 = getelementptr inbounds float, float* %tmp19327, i64 1
+ %tmp19329 = getelementptr inbounds float, float* %tmp19328, i64 1
+ %tmp19330 = getelementptr inbounds float, float* %tmp19329, i64 1
+ %tmp19331 = getelementptr inbounds float, float* %tmp19330, i64 1
+ %tmp19332 = getelementptr inbounds float, float* %tmp19331, i64 1
+ %tmp19333 = getelementptr inbounds float, float* %tmp19332, i64 1
+ %tmp19334 = getelementptr inbounds float, float* %tmp19333, i64 1
+ %tmp19335 = getelementptr inbounds float, float* %tmp19334, i64 1
+ %tmp19336 = getelementptr inbounds float, float* %tmp19335, i64 1
+ %tmp19337 = getelementptr inbounds float, float* %tmp19336, i64 1
+ %tmp19338 = getelementptr inbounds float, float* %tmp19337, i64 1
+ %tmp19339 = getelementptr inbounds float, float* %tmp19338, i64 1
+ %tmp19340 = getelementptr inbounds float, float* %tmp19339, i64 1
+ %tmp19341 = getelementptr inbounds float, float* %tmp19340, i64 1
+ %tmp19342 = getelementptr inbounds float, float* %tmp19341, i64 1
+ %tmp19343 = getelementptr inbounds float, float* %tmp19342, i64 1
+ %tmp19344 = getelementptr inbounds float, float* %tmp19343, i64 1
+ %tmp19345 = getelementptr inbounds float, float* %tmp19344, i64 1
+ %tmp19346 = getelementptr inbounds float, float* %tmp19345, i64 1
+ %tmp19347 = getelementptr inbounds float, float* %tmp19346, i64 1
+ %tmp19348 = getelementptr inbounds float, float* %tmp19347, i64 1
+ %tmp19349 = getelementptr inbounds float, float* %tmp19348, i64 1
+ %tmp19350 = getelementptr inbounds float, float* %tmp19349, i64 1
+ %tmp19351 = getelementptr inbounds float, float* %tmp19350, i64 1
+ %tmp19352 = getelementptr inbounds float, float* %tmp19351, i64 1
+ %tmp19353 = getelementptr inbounds float, float* %tmp19352, i64 1
+ %tmp19354 = getelementptr inbounds float, float* %tmp19353, i64 1
+ %tmp19355 = getelementptr inbounds float, float* %tmp19354, i64 1
+ %tmp19356 = getelementptr inbounds float, float* %tmp19355, i64 1
+ %tmp19357 = getelementptr inbounds float, float* %tmp19356, i64 1
+ %tmp19358 = getelementptr inbounds float, float* %tmp19357, i64 1
+ %tmp19359 = getelementptr inbounds float, float* %tmp19358, i64 1
+ %tmp19360 = getelementptr inbounds float, float* %tmp19359, i64 1
+ %tmp19361 = getelementptr inbounds float, float* %tmp19360, i64 1
+ %tmp19362 = getelementptr inbounds float, float* %tmp19361, i64 1
+ %tmp19363 = getelementptr inbounds float, float* %tmp19362, i64 1
+ %tmp19364 = getelementptr inbounds float, float* %tmp19363, i64 1
+ %tmp19365 = getelementptr inbounds float, float* %tmp19364, i64 1
+ %tmp19366 = getelementptr inbounds float, float* %tmp19365, i64 1
+ %tmp19367 = getelementptr inbounds float, float* %tmp19366, i64 1
+ %tmp19368 = getelementptr inbounds float, float* %tmp19367, i64 1
+ %tmp19369 = getelementptr inbounds float, float* %tmp19368, i64 1
+ %tmp19370 = getelementptr inbounds float, float* %tmp19369, i64 1
+ %tmp19371 = getelementptr inbounds float, float* %tmp19370, i64 1
+ %tmp19372 = getelementptr inbounds float, float* %tmp19371, i64 1
+ %tmp19373 = getelementptr inbounds float, float* %tmp19372, i64 1
+ %tmp19374 = getelementptr inbounds float, float* %tmp19373, i64 1
+ %tmp19375 = getelementptr inbounds float, float* %tmp19374, i64 1
+ %tmp19376 = getelementptr inbounds float, float* %tmp19375, i64 1
+ %tmp19377 = getelementptr inbounds float, float* %tmp19376, i64 1
+ %tmp19378 = getelementptr inbounds float, float* %tmp19377, i64 1
+ %tmp19379 = getelementptr inbounds float, float* %tmp19378, i64 1
+ %tmp19380 = getelementptr inbounds float, float* %tmp19379, i64 1
+ %tmp19381 = getelementptr inbounds float, float* %tmp19380, i64 1
+ %tmp19382 = getelementptr inbounds float, float* %tmp19381, i64 1
+ %tmp19383 = getelementptr inbounds float, float* %tmp19382, i64 1
+ %tmp19384 = getelementptr inbounds float, float* %tmp19383, i64 1
+ %tmp19385 = getelementptr inbounds float, float* %tmp19384, i64 1
+ %tmp19386 = getelementptr inbounds float, float* %tmp19385, i64 1
+ %tmp19387 = getelementptr inbounds float, float* %tmp19386, i64 1
+ %tmp19388 = getelementptr inbounds float, float* %tmp19387, i64 1
+ %tmp19389 = getelementptr inbounds float, float* %tmp19388, i64 1
+ %tmp19390 = getelementptr inbounds float, float* %tmp19389, i64 1
+ %tmp19391 = getelementptr inbounds float, float* %tmp19390, i64 1
+ %tmp19392 = getelementptr inbounds float, float* %tmp19391, i64 1
+ %tmp19393 = getelementptr inbounds float, float* %tmp19392, i64 1
+ %tmp19394 = getelementptr inbounds float, float* %tmp19393, i64 1
+ %tmp19395 = getelementptr inbounds float, float* %tmp19394, i64 1
+ %tmp19396 = getelementptr inbounds float, float* %tmp19395, i64 1
+ %tmp19397 = getelementptr inbounds float, float* %tmp19396, i64 1
+ %tmp19398 = getelementptr inbounds float, float* %tmp19397, i64 1
+ %tmp19399 = getelementptr inbounds float, float* %tmp19398, i64 1
+ %tmp19400 = getelementptr inbounds float, float* %tmp19399, i64 1
+ %tmp19401 = getelementptr inbounds float, float* %tmp19400, i64 1
+ %tmp19402 = getelementptr inbounds float, float* %tmp19401, i64 1
+ %tmp19403 = getelementptr inbounds float, float* %tmp19402, i64 1
+ %tmp19404 = getelementptr inbounds float, float* %tmp19403, i64 1
+ %tmp19405 = getelementptr inbounds float, float* %tmp19404, i64 1
+ %tmp19406 = getelementptr inbounds float, float* %tmp19405, i64 1
+ %tmp19407 = getelementptr inbounds float, float* %tmp19406, i64 1
+ %tmp19408 = getelementptr inbounds float, float* %tmp19407, i64 1
+ %tmp19409 = getelementptr inbounds float, float* %tmp19408, i64 1
+ %tmp19410 = getelementptr inbounds float, float* %tmp19409, i64 1
+ %tmp19411 = getelementptr inbounds float, float* %tmp19410, i64 1
+ %tmp19412 = getelementptr inbounds float, float* %tmp19411, i64 1
+ %tmp19413 = getelementptr inbounds float, float* %tmp19412, i64 1
+ %tmp19414 = getelementptr inbounds float, float* %tmp19413, i64 1
+ %tmp19415 = getelementptr inbounds float, float* %tmp19414, i64 1
+ %tmp19416 = getelementptr inbounds float, float* %tmp19415, i64 1
+ %tmp19417 = getelementptr inbounds float, float* %tmp19416, i64 1
+ %tmp19418 = getelementptr inbounds float, float* %tmp19417, i64 1
+ %tmp19419 = getelementptr inbounds float, float* %tmp19418, i64 1
+ %tmp19420 = getelementptr inbounds float, float* %tmp19419, i64 1
+ %tmp19421 = getelementptr inbounds float, float* %tmp19420, i64 1
+ %tmp19422 = getelementptr inbounds float, float* %tmp19421, i64 1
+ %tmp19423 = getelementptr inbounds float, float* %tmp19422, i64 1
+ %tmp19424 = getelementptr inbounds float, float* %tmp19423, i64 1
+ %tmp19425 = getelementptr inbounds float, float* %tmp19424, i64 1
+ %tmp19426 = getelementptr inbounds float, float* %tmp19425, i64 1
+ %tmp19427 = getelementptr inbounds float, float* %tmp19426, i64 1
+ %tmp19428 = getelementptr inbounds float, float* %tmp19427, i64 1
+ %tmp19429 = getelementptr inbounds float, float* %tmp19428, i64 1
+ %tmp19430 = getelementptr inbounds float, float* %tmp19429, i64 1
+ %tmp19431 = getelementptr inbounds float, float* %tmp19430, i64 1
+ %tmp19432 = getelementptr inbounds float, float* %tmp19431, i64 1
+ %tmp19433 = getelementptr inbounds float, float* %tmp19432, i64 1
+ %tmp19434 = getelementptr inbounds float, float* %tmp19433, i64 1
+ %tmp19435 = getelementptr inbounds float, float* %tmp19434, i64 1
+ %tmp19436 = getelementptr inbounds float, float* %tmp19435, i64 1
+ %tmp19437 = getelementptr inbounds float, float* %tmp19436, i64 1
+ %tmp19438 = getelementptr inbounds float, float* %tmp19437, i64 1
+ %tmp19439 = getelementptr inbounds float, float* %tmp19438, i64 1
+ %tmp19440 = getelementptr inbounds float, float* %tmp19439, i64 1
+ %tmp19441 = getelementptr inbounds float, float* %tmp19440, i64 1
+ %tmp19442 = getelementptr inbounds float, float* %tmp19441, i64 1
+ %tmp19443 = getelementptr inbounds float, float* %tmp19442, i64 1
+ %tmp19444 = getelementptr inbounds float, float* %tmp19443, i64 1
+ %tmp19445 = getelementptr inbounds float, float* %tmp19444, i64 1
+ %tmp19446 = getelementptr inbounds float, float* %tmp19445, i64 1
+ %tmp19447 = getelementptr inbounds float, float* %tmp19446, i64 1
+ %tmp19448 = getelementptr inbounds float, float* %tmp19447, i64 1
+ %tmp19449 = getelementptr inbounds float, float* %tmp19448, i64 1
+ %tmp19450 = getelementptr inbounds float, float* %tmp19449, i64 1
+ %tmp19451 = getelementptr inbounds float, float* %tmp19450, i64 1
+ %tmp19452 = getelementptr inbounds float, float* %tmp19451, i64 1
+ %tmp19453 = getelementptr inbounds float, float* %tmp19452, i64 1
+ %tmp19454 = getelementptr inbounds float, float* %tmp19453, i64 1
+ %tmp19455 = getelementptr inbounds float, float* %tmp19454, i64 1
+ %tmp19456 = getelementptr inbounds float, float* %tmp19455, i64 1
+ %tmp19457 = getelementptr inbounds float, float* %tmp19456, i64 1
+ %tmp19458 = getelementptr inbounds float, float* %tmp19457, i64 1
+ %tmp19459 = getelementptr inbounds float, float* %tmp19458, i64 1
+ %tmp19460 = getelementptr inbounds float, float* %tmp19459, i64 1
+ %tmp19461 = getelementptr inbounds float, float* %tmp19460, i64 1
+ %tmp19462 = getelementptr inbounds float, float* %tmp19461, i64 1
+ %tmp19463 = getelementptr inbounds float, float* %tmp19462, i64 1
+ %tmp19464 = getelementptr inbounds float, float* %tmp19463, i64 1
+ %tmp19465 = getelementptr inbounds float, float* %tmp19464, i64 1
+ %tmp19466 = getelementptr inbounds float, float* %tmp19465, i64 1
+ %tmp19467 = getelementptr inbounds float, float* %tmp19466, i64 1
+ %tmp19468 = getelementptr inbounds float, float* %tmp19467, i64 1
+ %tmp19469 = getelementptr inbounds float, float* %tmp19468, i64 1
+ %tmp19470 = getelementptr inbounds float, float* %tmp19469, i64 1
+ %tmp19471 = getelementptr inbounds float, float* %tmp19470, i64 1
+ %tmp19472 = getelementptr inbounds float, float* %tmp19471, i64 1
+ %tmp19473 = getelementptr inbounds float, float* %tmp19472, i64 1
+ %tmp19474 = getelementptr inbounds float, float* %tmp19473, i64 1
+ %tmp19475 = getelementptr inbounds float, float* %tmp19474, i64 1
+ %tmp19476 = getelementptr inbounds float, float* %tmp19475, i64 1
+ %tmp19477 = getelementptr inbounds float, float* %tmp19476, i64 1
+ %tmp19478 = getelementptr inbounds float, float* %tmp19477, i64 1
+ %tmp19479 = getelementptr inbounds float, float* %tmp19478, i64 1
+ %tmp19480 = getelementptr inbounds float, float* %tmp19479, i64 1
+ %tmp19481 = getelementptr inbounds float, float* %tmp19480, i64 1
+ %tmp19482 = getelementptr inbounds float, float* %tmp19481, i64 1
+ %tmp19483 = getelementptr inbounds float, float* %tmp19482, i64 1
+ %tmp19484 = getelementptr inbounds float, float* %tmp19483, i64 1
+ %tmp19485 = getelementptr inbounds float, float* %tmp19484, i64 1
+ %tmp19486 = getelementptr inbounds float, float* %tmp19485, i64 1
+ %tmp19487 = getelementptr inbounds float, float* %tmp19486, i64 1
+ %tmp19488 = getelementptr inbounds float, float* %tmp19487, i64 1
+ %tmp19489 = getelementptr inbounds float, float* %tmp19488, i64 1
+ %tmp19490 = getelementptr inbounds float, float* %tmp19489, i64 1
+ %tmp19491 = getelementptr inbounds float, float* %tmp19490, i64 1
+ %tmp19492 = getelementptr inbounds float, float* %tmp19491, i64 1
+ %tmp19493 = getelementptr inbounds float, float* %tmp19492, i64 1
+ %tmp19494 = getelementptr inbounds float, float* %tmp19493, i64 1
+ %tmp19495 = getelementptr inbounds float, float* %tmp19494, i64 1
+ %tmp19496 = getelementptr inbounds float, float* %tmp19495, i64 1
+ %tmp19497 = getelementptr inbounds float, float* %tmp19496, i64 1
+ %tmp19498 = getelementptr inbounds float, float* %tmp19497, i64 1
+ %tmp19499 = getelementptr inbounds float, float* %tmp19498, i64 1
+ %tmp19500 = getelementptr inbounds float, float* %tmp19499, i64 1
+ %tmp19501 = getelementptr inbounds float, float* %tmp19500, i64 1
+ %tmp19502 = getelementptr inbounds float, float* %tmp19501, i64 1
+ %tmp19503 = getelementptr inbounds float, float* %tmp19502, i64 1
+ %tmp19504 = getelementptr inbounds float, float* %tmp19503, i64 1
+ %tmp19505 = getelementptr inbounds float, float* %tmp19504, i64 1
+ %tmp19506 = getelementptr inbounds float, float* %tmp19505, i64 1
+ %tmp19507 = getelementptr inbounds float, float* %tmp19506, i64 1
+ %tmp19508 = getelementptr inbounds float, float* %tmp19507, i64 1
+ %tmp19509 = getelementptr inbounds float, float* %tmp19508, i64 1
+ %tmp19510 = getelementptr inbounds float, float* %tmp19509, i64 1
+ %tmp19511 = getelementptr inbounds float, float* %tmp19510, i64 1
+ %tmp19512 = getelementptr inbounds float, float* %tmp19511, i64 1
+ %tmp19513 = getelementptr inbounds float, float* %tmp19512, i64 1
+ %tmp19514 = getelementptr inbounds float, float* %tmp19513, i64 1
+ %tmp19515 = getelementptr inbounds float, float* %tmp19514, i64 1
+ %tmp19516 = getelementptr inbounds float, float* %tmp19515, i64 1
+ %tmp19517 = getelementptr inbounds float, float* %tmp19516, i64 1
+ %tmp19518 = getelementptr inbounds float, float* %tmp19517, i64 1
+ %tmp19519 = getelementptr inbounds float, float* %tmp19518, i64 1
+ %tmp19520 = getelementptr inbounds float, float* %tmp19519, i64 1
+ %tmp19521 = getelementptr inbounds float, float* %tmp19520, i64 1
+ %tmp19522 = getelementptr inbounds float, float* %tmp19521, i64 1
+ %tmp19523 = getelementptr inbounds float, float* %tmp19522, i64 1
+ %tmp19524 = getelementptr inbounds float, float* %tmp19523, i64 1
+ %tmp19525 = getelementptr inbounds float, float* %tmp19524, i64 1
+ %tmp19526 = getelementptr inbounds float, float* %tmp19525, i64 1
+ %tmp19527 = getelementptr inbounds float, float* %tmp19526, i64 1
+ %tmp19528 = getelementptr inbounds float, float* %tmp19527, i64 1
+ %tmp19529 = getelementptr inbounds float, float* %tmp19528, i64 1
+ %tmp19530 = getelementptr inbounds float, float* %tmp19529, i64 1
+ %tmp19531 = getelementptr inbounds float, float* %tmp19530, i64 1
+ %tmp19532 = getelementptr inbounds float, float* %tmp19531, i64 1
+ %tmp19533 = getelementptr inbounds float, float* %tmp19532, i64 1
+ %tmp19534 = getelementptr inbounds float, float* %tmp19533, i64 1
+ %tmp19535 = getelementptr inbounds float, float* %tmp19534, i64 1
+ %tmp19536 = getelementptr inbounds float, float* %tmp19535, i64 1
+ %tmp19537 = getelementptr inbounds float, float* %tmp19536, i64 1
+ %tmp19538 = getelementptr inbounds float, float* %tmp19537, i64 1
+ %tmp19539 = getelementptr inbounds float, float* %tmp19538, i64 1
+ %tmp19540 = getelementptr inbounds float, float* %tmp19539, i64 1
+ %tmp19541 = getelementptr inbounds float, float* %tmp19540, i64 1
+ %tmp19542 = getelementptr inbounds float, float* %tmp19541, i64 1
+ %tmp19543 = getelementptr inbounds float, float* %tmp19542, i64 1
+ %tmp19544 = getelementptr inbounds float, float* %tmp19543, i64 1
+ %tmp19545 = getelementptr inbounds float, float* %tmp19544, i64 1
+ %tmp19546 = getelementptr inbounds float, float* %tmp19545, i64 1
+ %tmp19547 = getelementptr inbounds float, float* %tmp19546, i64 1
+ %tmp19548 = getelementptr inbounds float, float* %tmp19547, i64 1
+ %tmp19549 = getelementptr inbounds float, float* %tmp19548, i64 1
+ %tmp19550 = getelementptr inbounds float, float* %tmp19549, i64 1
+ %tmp19551 = getelementptr inbounds float, float* %tmp19550, i64 1
+ %tmp19552 = getelementptr inbounds float, float* %tmp19551, i64 1
+ %tmp19553 = getelementptr inbounds float, float* %tmp19552, i64 1
+ %tmp19554 = getelementptr inbounds float, float* %tmp19553, i64 1
+ %tmp19555 = getelementptr inbounds float, float* %tmp19554, i64 1
+ %tmp19556 = getelementptr inbounds float, float* %tmp19555, i64 1
+ %tmp19557 = getelementptr inbounds float, float* %tmp19556, i64 1
+ %tmp19558 = getelementptr inbounds float, float* %tmp19557, i64 1
+ %tmp19559 = getelementptr inbounds float, float* %tmp19558, i64 1
+ %tmp19560 = getelementptr inbounds float, float* %tmp19559, i64 1
+ %tmp19561 = getelementptr inbounds float, float* %tmp19560, i64 1
+ %tmp19562 = getelementptr inbounds float, float* %tmp19561, i64 1
+ %tmp19563 = getelementptr inbounds float, float* %tmp19562, i64 1
+ %tmp19564 = getelementptr inbounds float, float* %tmp19563, i64 1
+ %tmp19565 = getelementptr inbounds float, float* %tmp19564, i64 1
+ %tmp19566 = getelementptr inbounds float, float* %tmp19565, i64 1
+ %tmp19567 = getelementptr inbounds float, float* %tmp19566, i64 1
+ %tmp19568 = getelementptr inbounds float, float* %tmp19567, i64 1
+ %tmp19569 = getelementptr inbounds float, float* %tmp19568, i64 1
+ %tmp19570 = getelementptr inbounds float, float* %tmp19569, i64 1
+ %tmp19571 = getelementptr inbounds float, float* %tmp19570, i64 1
+ %tmp19572 = getelementptr inbounds float, float* %tmp19571, i64 1
+ %tmp19573 = getelementptr inbounds float, float* %tmp19572, i64 1
+ %tmp19574 = getelementptr inbounds float, float* %tmp19573, i64 1
+ %tmp19575 = getelementptr inbounds float, float* %tmp19574, i64 1
+ %tmp19576 = getelementptr inbounds float, float* %tmp19575, i64 1
+ %tmp19577 = getelementptr inbounds float, float* %tmp19576, i64 1
+ %tmp19578 = getelementptr inbounds float, float* %tmp19577, i64 1
+ %tmp19579 = getelementptr inbounds float, float* %tmp19578, i64 1
+ %tmp19580 = getelementptr inbounds float, float* %tmp19579, i64 1
+ %tmp19581 = getelementptr inbounds float, float* %tmp19580, i64 1
+ %tmp19582 = getelementptr inbounds float, float* %tmp19581, i64 1
+ %tmp19583 = getelementptr inbounds float, float* %tmp19582, i64 1
+ %tmp19584 = getelementptr inbounds float, float* %tmp19583, i64 1
+ %tmp19585 = getelementptr inbounds float, float* %tmp19584, i64 1
+ %tmp19586 = getelementptr inbounds float, float* %tmp19585, i64 1
+ %tmp19587 = getelementptr inbounds float, float* %tmp19586, i64 1
+ %tmp19588 = getelementptr inbounds float, float* %tmp19587, i64 1
+ %tmp19589 = getelementptr inbounds float, float* %tmp19588, i64 1
+ %tmp19590 = getelementptr inbounds float, float* %tmp19589, i64 1
+ %tmp19591 = getelementptr inbounds float, float* %tmp19590, i64 1
+ %tmp19592 = getelementptr inbounds float, float* %tmp19591, i64 1
+ %tmp19593 = getelementptr inbounds float, float* %tmp19592, i64 1
+ %tmp19594 = getelementptr inbounds float, float* %tmp19593, i64 1
+ %tmp19595 = getelementptr inbounds float, float* %tmp19594, i64 1
+ %tmp19596 = getelementptr inbounds float, float* %tmp19595, i64 1
+ %tmp19597 = getelementptr inbounds float, float* %tmp19596, i64 1
+ %tmp19598 = getelementptr inbounds float, float* %tmp19597, i64 1
+ %tmp19599 = getelementptr inbounds float, float* %tmp19598, i64 1
+ %tmp19600 = getelementptr inbounds float, float* %tmp19599, i64 1
+ %tmp19601 = getelementptr inbounds float, float* %tmp19600, i64 1
+ %tmp19602 = getelementptr inbounds float, float* %tmp19601, i64 1
+ %tmp19603 = getelementptr inbounds float, float* %tmp19602, i64 1
+ %tmp19604 = getelementptr inbounds float, float* %tmp19603, i64 1
+ %tmp19605 = getelementptr inbounds float, float* %tmp19604, i64 1
+ %tmp19606 = getelementptr inbounds float, float* %tmp19605, i64 1
+ %tmp19607 = getelementptr inbounds float, float* %tmp19606, i64 1
+ %tmp19608 = getelementptr inbounds float, float* %tmp19607, i64 1
+ %tmp19609 = getelementptr inbounds float, float* %tmp19608, i64 1
+ %tmp19610 = getelementptr inbounds float, float* %tmp19609, i64 1
+ %tmp19611 = getelementptr inbounds float, float* %tmp19610, i64 1
+ %tmp19612 = getelementptr inbounds float, float* %tmp19611, i64 1
+ %tmp19613 = getelementptr inbounds float, float* %tmp19612, i64 1
+ %tmp19614 = getelementptr inbounds float, float* %tmp19613, i64 1
+ %tmp19615 = getelementptr inbounds float, float* %tmp19614, i64 1
+ %tmp19616 = getelementptr inbounds float, float* %tmp19615, i64 1
+ %tmp19617 = getelementptr inbounds float, float* %tmp19616, i64 1
+ %tmp19618 = getelementptr inbounds float, float* %tmp19617, i64 1
+ %tmp19619 = getelementptr inbounds float, float* %tmp19618, i64 1
+ %tmp19620 = getelementptr inbounds float, float* %tmp19619, i64 1
+ %tmp19621 = getelementptr inbounds float, float* %tmp19620, i64 1
+ %tmp19622 = getelementptr inbounds float, float* %tmp19621, i64 1
+ %tmp19623 = getelementptr inbounds float, float* %tmp19622, i64 1
+ %tmp19624 = getelementptr inbounds float, float* %tmp19623, i64 1
+ %tmp19625 = getelementptr inbounds float, float* %tmp19624, i64 1
+ %tmp19626 = getelementptr inbounds float, float* %tmp19625, i64 1
+ %tmp19627 = getelementptr inbounds float, float* %tmp19626, i64 1
+ %tmp19628 = getelementptr inbounds float, float* %tmp19627, i64 1
+ %tmp19629 = getelementptr inbounds float, float* %tmp19628, i64 1
+ %tmp19630 = getelementptr inbounds float, float* %tmp19629, i64 1
+ %tmp19631 = getelementptr inbounds float, float* %tmp19630, i64 1
+ %tmp19632 = getelementptr inbounds float, float* %tmp19631, i64 1
+ %tmp19633 = getelementptr inbounds float, float* %tmp19632, i64 1
+ %tmp19634 = getelementptr inbounds float, float* %tmp19633, i64 1
+ %tmp19635 = getelementptr inbounds float, float* %tmp19634, i64 1
+ %tmp19636 = getelementptr inbounds float, float* %tmp19635, i64 1
+ %tmp19637 = getelementptr inbounds float, float* %tmp19636, i64 1
+ %tmp19638 = getelementptr inbounds float, float* %tmp19637, i64 1
+ %tmp19639 = getelementptr inbounds float, float* %tmp19638, i64 1
+ %tmp19640 = getelementptr inbounds float, float* %tmp19639, i64 1
+ %tmp19641 = getelementptr inbounds float, float* %tmp19640, i64 1
+ %tmp19642 = getelementptr inbounds float, float* %tmp19641, i64 1
+ %tmp19643 = getelementptr inbounds float, float* %tmp19642, i64 1
+ %tmp19644 = getelementptr inbounds float, float* %tmp19643, i64 1
+ %tmp19645 = getelementptr inbounds float, float* %tmp19644, i64 1
+ %tmp19646 = getelementptr inbounds float, float* %tmp19645, i64 1
+ %tmp19647 = getelementptr inbounds float, float* %tmp19646, i64 1
+ %tmp19648 = getelementptr inbounds float, float* %tmp19647, i64 1
+ %tmp19649 = getelementptr inbounds float, float* %tmp19648, i64 1
+ %tmp19650 = getelementptr inbounds float, float* %tmp19649, i64 1
+ %tmp19651 = getelementptr inbounds float, float* %tmp19650, i64 1
+ %tmp19652 = getelementptr inbounds float, float* %tmp19651, i64 1
+ %tmp19653 = getelementptr inbounds float, float* %tmp19652, i64 1
+ %tmp19654 = getelementptr inbounds float, float* %tmp19653, i64 1
+ %tmp19655 = getelementptr inbounds float, float* %tmp19654, i64 1
+ %tmp19656 = getelementptr inbounds float, float* %tmp19655, i64 1
+ %tmp19657 = getelementptr inbounds float, float* %tmp19656, i64 1
+ %tmp19658 = getelementptr inbounds float, float* %tmp19657, i64 1
+ %tmp19659 = getelementptr inbounds float, float* %tmp19658, i64 1
+ %tmp19660 = getelementptr inbounds float, float* %tmp19659, i64 1
+ %tmp19661 = getelementptr inbounds float, float* %tmp19660, i64 1
+ %tmp19662 = getelementptr inbounds float, float* %tmp19661, i64 1
+ %tmp19663 = getelementptr inbounds float, float* %tmp19662, i64 1
+ %tmp19664 = getelementptr inbounds float, float* %tmp19663, i64 1
+ %tmp19665 = getelementptr inbounds float, float* %tmp19664, i64 1
+ %tmp19666 = getelementptr inbounds float, float* %tmp19665, i64 1
+ %tmp19667 = getelementptr inbounds float, float* %tmp19666, i64 1
+ %tmp19668 = getelementptr inbounds float, float* %tmp19667, i64 1
+ %tmp19669 = getelementptr inbounds float, float* %tmp19668, i64 1
+ %tmp19670 = getelementptr inbounds float, float* %tmp19669, i64 1
+ %tmp19671 = getelementptr inbounds float, float* %tmp19670, i64 1
+ %tmp19672 = getelementptr inbounds float, float* %tmp19671, i64 1
+ %tmp19673 = getelementptr inbounds float, float* %tmp19672, i64 1
+ %tmp19674 = getelementptr inbounds float, float* %tmp19673, i64 1
+ %tmp19675 = getelementptr inbounds float, float* %tmp19674, i64 1
+ %tmp19676 = getelementptr inbounds float, float* %tmp19675, i64 1
+ %tmp19677 = getelementptr inbounds float, float* %tmp19676, i64 1
+ %tmp19678 = getelementptr inbounds float, float* %tmp19677, i64 1
+ %tmp19679 = getelementptr inbounds float, float* %tmp19678, i64 1
+ %tmp19680 = getelementptr inbounds float, float* %tmp19679, i64 1
+ %tmp19681 = getelementptr inbounds float, float* %tmp19680, i64 1
+ %tmp19682 = getelementptr inbounds float, float* %tmp19681, i64 1
+ %tmp19683 = getelementptr inbounds float, float* %tmp19682, i64 1
+ %tmp19684 = getelementptr inbounds float, float* %tmp19683, i64 1
+ %tmp19685 = getelementptr inbounds float, float* %tmp19684, i64 1
+ %tmp19686 = getelementptr inbounds float, float* %tmp19685, i64 1
+ %tmp19687 = getelementptr inbounds float, float* %tmp19686, i64 1
+ %tmp19688 = getelementptr inbounds float, float* %tmp19687, i64 1
+ %tmp19689 = getelementptr inbounds float, float* %tmp19688, i64 1
+ %tmp19690 = getelementptr inbounds float, float* %tmp19689, i64 1
+ %tmp19691 = getelementptr inbounds float, float* %tmp19690, i64 1
+ %tmp19692 = getelementptr inbounds float, float* %tmp19691, i64 1
+ %tmp19693 = getelementptr inbounds float, float* %tmp19692, i64 1
+ %tmp19694 = getelementptr inbounds float, float* %tmp19693, i64 1
+ %tmp19695 = getelementptr inbounds float, float* %tmp19694, i64 1
+ %tmp19696 = getelementptr inbounds float, float* %tmp19695, i64 1
+ %tmp19697 = getelementptr inbounds float, float* %tmp19696, i64 1
+ %tmp19698 = getelementptr inbounds float, float* %tmp19697, i64 1
+ %tmp19699 = getelementptr inbounds float, float* %tmp19698, i64 1
+ %tmp19700 = getelementptr inbounds float, float* %tmp19699, i64 1
+ %tmp19701 = getelementptr inbounds float, float* %tmp19700, i64 1
+ %tmp19702 = getelementptr inbounds float, float* %tmp19701, i64 1
+ %tmp19703 = getelementptr inbounds float, float* %tmp19702, i64 1
+ %tmp19704 = getelementptr inbounds float, float* %tmp19703, i64 1
+ %tmp19705 = getelementptr inbounds float, float* %tmp19704, i64 1
+ %tmp19706 = getelementptr inbounds float, float* %tmp19705, i64 1
+ %tmp19707 = getelementptr inbounds float, float* %tmp19706, i64 1
+ %tmp19708 = getelementptr inbounds float, float* %tmp19707, i64 1
+ %tmp19709 = getelementptr inbounds float, float* %tmp19708, i64 1
+ %tmp19710 = getelementptr inbounds float, float* %tmp19709, i64 1
+ %tmp19711 = getelementptr inbounds float, float* %tmp19710, i64 1
+ %tmp19712 = getelementptr inbounds float, float* %tmp19711, i64 1
+ %tmp19713 = getelementptr inbounds float, float* %tmp19712, i64 1
+ %tmp19714 = getelementptr inbounds float, float* %tmp19713, i64 1
+ %tmp19715 = getelementptr inbounds float, float* %tmp19714, i64 1
+ %tmp19716 = getelementptr inbounds float, float* %tmp19715, i64 1
+ %tmp19717 = getelementptr inbounds float, float* %tmp19716, i64 1
+ %tmp19718 = getelementptr inbounds float, float* %tmp19717, i64 1
+ %tmp19719 = getelementptr inbounds float, float* %tmp19718, i64 1
+ %tmp19720 = getelementptr inbounds float, float* %tmp19719, i64 1
+ %tmp19721 = getelementptr inbounds float, float* %tmp19720, i64 1
+ %tmp19722 = getelementptr inbounds float, float* %tmp19721, i64 1
+ %tmp19723 = getelementptr inbounds float, float* %tmp19722, i64 1
+ %tmp19724 = getelementptr inbounds float, float* %tmp19723, i64 1
+ %tmp19725 = getelementptr inbounds float, float* %tmp19724, i64 1
+ %tmp19726 = getelementptr inbounds float, float* %tmp19725, i64 1
+ %tmp19727 = getelementptr inbounds float, float* %tmp19726, i64 1
+ %tmp19728 = getelementptr inbounds float, float* %tmp19727, i64 1
+ %tmp19729 = getelementptr inbounds float, float* %tmp19728, i64 1
+ %tmp19730 = getelementptr inbounds float, float* %tmp19729, i64 1
+ %tmp19731 = getelementptr inbounds float, float* %tmp19730, i64 1
+ %tmp19732 = getelementptr inbounds float, float* %tmp19731, i64 1
+ %tmp19733 = getelementptr inbounds float, float* %tmp19732, i64 1
+ %tmp19734 = getelementptr inbounds float, float* %tmp19733, i64 1
+ %tmp19735 = getelementptr inbounds float, float* %tmp19734, i64 1
+ %tmp19736 = getelementptr inbounds float, float* %tmp19735, i64 1
+ %tmp19737 = getelementptr inbounds float, float* %tmp19736, i64 1
+ %tmp19738 = getelementptr inbounds float, float* %tmp19737, i64 1
+ %tmp19739 = getelementptr inbounds float, float* %tmp19738, i64 1
+ %tmp19740 = getelementptr inbounds float, float* %tmp19739, i64 1
+ %tmp19741 = getelementptr inbounds float, float* %tmp19740, i64 1
+ %tmp19742 = getelementptr inbounds float, float* %tmp19741, i64 1
+ %tmp19743 = getelementptr inbounds float, float* %tmp19742, i64 1
+ %tmp19744 = getelementptr inbounds float, float* %tmp19743, i64 1
+ %tmp19745 = getelementptr inbounds float, float* %tmp19744, i64 1
+ %tmp19746 = getelementptr inbounds float, float* %tmp19745, i64 1
+ %tmp19747 = getelementptr inbounds float, float* %tmp19746, i64 1
+ %tmp19748 = getelementptr inbounds float, float* %tmp19747, i64 1
+ %tmp19749 = getelementptr inbounds float, float* %tmp19748, i64 1
+ %tmp19750 = getelementptr inbounds float, float* %tmp19749, i64 1
+ %tmp19751 = getelementptr inbounds float, float* %tmp19750, i64 1
+ %tmp19752 = getelementptr inbounds float, float* %tmp19751, i64 1
+ %tmp19753 = getelementptr inbounds float, float* %tmp19752, i64 1
+ %tmp19754 = getelementptr inbounds float, float* %tmp19753, i64 1
+ %tmp19755 = getelementptr inbounds float, float* %tmp19754, i64 1
+ %tmp19756 = getelementptr inbounds float, float* %tmp19755, i64 1
+ %tmp19757 = getelementptr inbounds float, float* %tmp19756, i64 1
+ %tmp19758 = getelementptr inbounds float, float* %tmp19757, i64 1
+ %tmp19759 = getelementptr inbounds float, float* %tmp19758, i64 1
+ %tmp19760 = getelementptr inbounds float, float* %tmp19759, i64 1
+ %tmp19761 = getelementptr inbounds float, float* %tmp19760, i64 1
+ %tmp19762 = getelementptr inbounds float, float* %tmp19761, i64 1
+ %tmp19763 = getelementptr inbounds float, float* %tmp19762, i64 1
+ %tmp19764 = getelementptr inbounds float, float* %tmp19763, i64 1
+ %tmp19765 = getelementptr inbounds float, float* %tmp19764, i64 1
+ %tmp19766 = getelementptr inbounds float, float* %tmp19765, i64 1
+ %tmp19767 = getelementptr inbounds float, float* %tmp19766, i64 1
+ %tmp19768 = getelementptr inbounds float, float* %tmp19767, i64 1
+ %tmp19769 = getelementptr inbounds float, float* %tmp19768, i64 1
+ %tmp19770 = getelementptr inbounds float, float* %tmp19769, i64 1
+ %tmp19771 = getelementptr inbounds float, float* %tmp19770, i64 1
+ %tmp19772 = getelementptr inbounds float, float* %tmp19771, i64 1
+ %tmp19773 = getelementptr inbounds float, float* %tmp19772, i64 1
+ %tmp19774 = getelementptr inbounds float, float* %tmp19773, i64 1
+ %tmp19775 = getelementptr inbounds float, float* %tmp19774, i64 1
+ %tmp19776 = getelementptr inbounds float, float* %tmp19775, i64 1
+ %tmp19777 = getelementptr inbounds float, float* %tmp19776, i64 1
+ %tmp19778 = getelementptr inbounds float, float* %tmp19777, i64 1
+ %tmp19779 = getelementptr inbounds float, float* %tmp19778, i64 1
+ %tmp19780 = getelementptr inbounds float, float* %tmp19779, i64 1
+ %tmp19781 = getelementptr inbounds float, float* %tmp19780, i64 1
+ %tmp19782 = getelementptr inbounds float, float* %tmp19781, i64 1
+ %tmp19783 = getelementptr inbounds float, float* %tmp19782, i64 1
+ %tmp19784 = getelementptr inbounds float, float* %tmp19783, i64 1
+ %tmp19785 = getelementptr inbounds float, float* %tmp19784, i64 1
+ %tmp19786 = getelementptr inbounds float, float* %tmp19785, i64 1
+ %tmp19787 = getelementptr inbounds float, float* %tmp19786, i64 1
+ %tmp19788 = getelementptr inbounds float, float* %tmp19787, i64 1
+ %tmp19789 = getelementptr inbounds float, float* %tmp19788, i64 1
+ %tmp19790 = getelementptr inbounds float, float* %tmp19789, i64 1
+ %tmp19791 = getelementptr inbounds float, float* %tmp19790, i64 1
+ %tmp19792 = getelementptr inbounds float, float* %tmp19791, i64 1
+ %tmp19793 = getelementptr inbounds float, float* %tmp19792, i64 1
+ %tmp19794 = getelementptr inbounds float, float* %tmp19793, i64 1
+ %tmp19795 = getelementptr inbounds float, float* %tmp19794, i64 1
+ %tmp19796 = getelementptr inbounds float, float* %tmp19795, i64 1
+ %tmp19797 = getelementptr inbounds float, float* %tmp19796, i64 1
+ %tmp19798 = getelementptr inbounds float, float* %tmp19797, i64 1
+ %tmp19799 = getelementptr inbounds float, float* %tmp19798, i64 1
+ %tmp19800 = getelementptr inbounds float, float* %tmp19799, i64 1
+ %tmp19801 = getelementptr inbounds float, float* %tmp19800, i64 1
+ %tmp19802 = getelementptr inbounds float, float* %tmp19801, i64 1
+ %tmp19803 = getelementptr inbounds float, float* %tmp19802, i64 1
+ %tmp19804 = getelementptr inbounds float, float* %tmp19803, i64 1
+ %tmp19805 = getelementptr inbounds float, float* %tmp19804, i64 1
+ %tmp19806 = getelementptr inbounds float, float* %tmp19805, i64 1
+ %tmp19807 = getelementptr inbounds float, float* %tmp19806, i64 1
+ %tmp19808 = getelementptr inbounds float, float* %tmp19807, i64 1
+ %tmp19809 = getelementptr inbounds float, float* %tmp19808, i64 1
+ %tmp19810 = getelementptr inbounds float, float* %tmp19809, i64 1
+ %tmp19811 = getelementptr inbounds float, float* %tmp19810, i64 1
+ %tmp19812 = getelementptr inbounds float, float* %tmp19811, i64 1
+ %tmp19813 = getelementptr inbounds float, float* %tmp19812, i64 1
+ %tmp19814 = getelementptr inbounds float, float* %tmp19813, i64 1
+ %tmp19815 = getelementptr inbounds float, float* %tmp19814, i64 1
+ %tmp19816 = getelementptr inbounds float, float* %tmp19815, i64 1
+ %tmp19817 = getelementptr inbounds float, float* %tmp19816, i64 1
+ %tmp19818 = getelementptr inbounds float, float* %tmp19817, i64 1
+ %tmp19819 = getelementptr inbounds float, float* %tmp19818, i64 1
+ %tmp19820 = getelementptr inbounds float, float* %tmp19819, i64 1
+ %tmp19821 = getelementptr inbounds float, float* %tmp19820, i64 1
+ %tmp19822 = getelementptr inbounds float, float* %tmp19821, i64 1
+ %tmp19823 = getelementptr inbounds float, float* %tmp19822, i64 1
+ %tmp19824 = getelementptr inbounds float, float* %tmp19823, i64 1
+ %tmp19825 = getelementptr inbounds float, float* %tmp19824, i64 1
+ %tmp19826 = getelementptr inbounds float, float* %tmp19825, i64 1
+ %tmp19827 = getelementptr inbounds float, float* %tmp19826, i64 1
+ %tmp19828 = getelementptr inbounds float, float* %tmp19827, i64 1
+ %tmp19829 = getelementptr inbounds float, float* %tmp19828, i64 1
+ %tmp19830 = getelementptr inbounds float, float* %tmp19829, i64 1
+ %tmp19831 = getelementptr inbounds float, float* %tmp19830, i64 1
+ %tmp19832 = getelementptr inbounds float, float* %tmp19831, i64 1
+ %tmp19833 = getelementptr inbounds float, float* %tmp19832, i64 1
+ %tmp19834 = getelementptr inbounds float, float* %tmp19833, i64 1
+ %tmp19835 = getelementptr inbounds float, float* %tmp19834, i64 1
+ %tmp19836 = getelementptr inbounds float, float* %tmp19835, i64 1
+ %tmp19837 = getelementptr inbounds float, float* %tmp19836, i64 1
+ %tmp19838 = getelementptr inbounds float, float* %tmp19837, i64 1
+ %tmp19839 = getelementptr inbounds float, float* %tmp19838, i64 1
+ %tmp19840 = getelementptr inbounds float, float* %tmp19839, i64 1
+ %tmp19841 = getelementptr inbounds float, float* %tmp19840, i64 1
+ %tmp19842 = getelementptr inbounds float, float* %tmp19841, i64 1
+ %tmp19843 = getelementptr inbounds float, float* %tmp19842, i64 1
+ %tmp19844 = getelementptr inbounds float, float* %tmp19843, i64 1
+ %tmp19845 = getelementptr inbounds float, float* %tmp19844, i64 1
+ %tmp19846 = getelementptr inbounds float, float* %tmp19845, i64 1
+ %tmp19847 = getelementptr inbounds float, float* %tmp19846, i64 1
+ %tmp19848 = getelementptr inbounds float, float* %tmp19847, i64 1
+ %tmp19849 = getelementptr inbounds float, float* %tmp19848, i64 1
+ %tmp19850 = getelementptr inbounds float, float* %tmp19849, i64 1
+ %tmp19851 = getelementptr inbounds float, float* %tmp19850, i64 1
+ %tmp19852 = getelementptr inbounds float, float* %tmp19851, i64 1
+ %tmp19853 = getelementptr inbounds float, float* %tmp19852, i64 1
+ %tmp19854 = getelementptr inbounds float, float* %tmp19853, i64 1
+ %tmp19855 = getelementptr inbounds float, float* %tmp19854, i64 1
+ %tmp19856 = getelementptr inbounds float, float* %tmp19855, i64 1
+ %tmp19857 = getelementptr inbounds float, float* %tmp19856, i64 1
+ %tmp19858 = getelementptr inbounds float, float* %tmp19857, i64 1
+ %tmp19859 = getelementptr inbounds float, float* %tmp19858, i64 1
+ %tmp19860 = getelementptr inbounds float, float* %tmp19859, i64 1
+ %tmp19861 = getelementptr inbounds float, float* %tmp19860, i64 1
+ %tmp19862 = getelementptr inbounds float, float* %tmp19861, i64 1
+ %tmp19863 = getelementptr inbounds float, float* %tmp19862, i64 1
+ %tmp19864 = getelementptr inbounds float, float* %tmp19863, i64 1
+ %tmp19865 = getelementptr inbounds float, float* %tmp19864, i64 1
+ %tmp19866 = getelementptr inbounds float, float* %tmp19865, i64 1
+ %tmp19867 = getelementptr inbounds float, float* %tmp19866, i64 1
+ %tmp19868 = getelementptr inbounds float, float* %tmp19867, i64 1
+ %tmp19869 = getelementptr inbounds float, float* %tmp19868, i64 1
+ %tmp19870 = getelementptr inbounds float, float* %tmp19869, i64 1
+ %tmp19871 = getelementptr inbounds float, float* %tmp19870, i64 1
+ %tmp19872 = getelementptr inbounds float, float* %tmp19871, i64 1
+ %tmp19873 = getelementptr inbounds float, float* %tmp19872, i64 1
+ %tmp19874 = getelementptr inbounds float, float* %tmp19873, i64 1
+ %tmp19875 = getelementptr inbounds float, float* %tmp19874, i64 1
+ %tmp19876 = getelementptr inbounds float, float* %tmp19875, i64 1
+ %tmp19877 = getelementptr inbounds float, float* %tmp19876, i64 1
+ %tmp19878 = getelementptr inbounds float, float* %tmp19877, i64 1
+ %tmp19879 = getelementptr inbounds float, float* %tmp19878, i64 1
+ %tmp19880 = getelementptr inbounds float, float* %tmp19879, i64 1
+ %tmp19881 = getelementptr inbounds float, float* %tmp19880, i64 1
+ %tmp19882 = getelementptr inbounds float, float* %tmp19881, i64 1
+ %tmp19883 = getelementptr inbounds float, float* %tmp19882, i64 1
+ %tmp19884 = getelementptr inbounds float, float* %tmp19883, i64 1
+ %tmp19885 = getelementptr inbounds float, float* %tmp19884, i64 1
+ %tmp19886 = getelementptr inbounds float, float* %tmp19885, i64 1
+ %tmp19887 = getelementptr inbounds float, float* %tmp19886, i64 1
+ %tmp19888 = getelementptr inbounds float, float* %tmp19887, i64 1
+ %tmp19889 = getelementptr inbounds float, float* %tmp19888, i64 1
+ %tmp19890 = getelementptr inbounds float, float* %tmp19889, i64 1
+ %tmp19891 = getelementptr inbounds float, float* %tmp19890, i64 1
+ %tmp19892 = getelementptr inbounds float, float* %tmp19891, i64 1
+ %tmp19893 = getelementptr inbounds float, float* %tmp19892, i64 1
+ %tmp19894 = getelementptr inbounds float, float* %tmp19893, i64 1
+ %tmp19895 = getelementptr inbounds float, float* %tmp19894, i64 1
+ %tmp19896 = getelementptr inbounds float, float* %tmp19895, i64 1
+ %tmp19897 = getelementptr inbounds float, float* %tmp19896, i64 1
+ %tmp19898 = getelementptr inbounds float, float* %tmp19897, i64 1
+ %tmp19899 = getelementptr inbounds float, float* %tmp19898, i64 1
+ %tmp19900 = getelementptr inbounds float, float* %tmp19899, i64 1
+ %tmp19901 = getelementptr inbounds float, float* %tmp19900, i64 1
+ %tmp19902 = getelementptr inbounds float, float* %tmp19901, i64 1
+ %tmp19903 = getelementptr inbounds float, float* %tmp19902, i64 1
+ %tmp19904 = getelementptr inbounds float, float* %tmp19903, i64 1
+ %tmp19905 = getelementptr inbounds float, float* %tmp19904, i64 1
+ %tmp19906 = getelementptr inbounds float, float* %tmp19905, i64 1
+ %tmp19907 = getelementptr inbounds float, float* %tmp19906, i64 1
+ %tmp19908 = getelementptr inbounds float, float* %tmp19907, i64 1
+ %tmp19909 = getelementptr inbounds float, float* %tmp19908, i64 1
+ %tmp19910 = getelementptr inbounds float, float* %tmp19909, i64 1
+ %tmp19911 = getelementptr inbounds float, float* %tmp19910, i64 1
+ %tmp19912 = getelementptr inbounds float, float* %tmp19911, i64 1
+ %tmp19913 = getelementptr inbounds float, float* %tmp19912, i64 1
+ %tmp19914 = getelementptr inbounds float, float* %tmp19913, i64 1
+ %tmp19915 = getelementptr inbounds float, float* %tmp19914, i64 1
+ %tmp19916 = getelementptr inbounds float, float* %tmp19915, i64 1
+ %tmp19917 = getelementptr inbounds float, float* %tmp19916, i64 1
+ %tmp19918 = getelementptr inbounds float, float* %tmp19917, i64 1
+ %tmp19919 = getelementptr inbounds float, float* %tmp19918, i64 1
+ %tmp19920 = getelementptr inbounds float, float* %tmp19919, i64 1
+ %tmp19921 = getelementptr inbounds float, float* %tmp19920, i64 1
+ %tmp19922 = getelementptr inbounds float, float* %tmp19921, i64 1
+ %tmp19923 = getelementptr inbounds float, float* %tmp19922, i64 1
+ %tmp19924 = getelementptr inbounds float, float* %tmp19923, i64 1
+ %tmp19925 = getelementptr inbounds float, float* %tmp19924, i64 1
+ %tmp19926 = getelementptr inbounds float, float* %tmp19925, i64 1
+ %tmp19927 = getelementptr inbounds float, float* %tmp19926, i64 1
+ %tmp19928 = getelementptr inbounds float, float* %tmp19927, i64 1
+ %tmp19929 = getelementptr inbounds float, float* %tmp19928, i64 1
+ %tmp19930 = getelementptr inbounds float, float* %tmp19929, i64 1
+ %tmp19931 = getelementptr inbounds float, float* %tmp19930, i64 1
+ %tmp19932 = getelementptr inbounds float, float* %tmp19931, i64 1
+ %tmp19933 = getelementptr inbounds float, float* %tmp19932, i64 1
+ %tmp19934 = getelementptr inbounds float, float* %tmp19933, i64 1
+ %tmp19935 = getelementptr inbounds float, float* %tmp19934, i64 1
+ %tmp19936 = getelementptr inbounds float, float* %tmp19935, i64 1
+ %tmp19937 = getelementptr inbounds float, float* %tmp19936, i64 1
+ %tmp19938 = getelementptr inbounds float, float* %tmp19937, i64 1
+ %tmp19939 = getelementptr inbounds float, float* %tmp19938, i64 1
+ %tmp19940 = getelementptr inbounds float, float* %tmp19939, i64 1
+ %tmp19941 = getelementptr inbounds float, float* %tmp19940, i64 1
+ %tmp19942 = getelementptr inbounds float, float* %tmp19941, i64 1
+ %tmp19943 = getelementptr inbounds float, float* %tmp19942, i64 1
+ %tmp19944 = getelementptr inbounds float, float* %tmp19943, i64 1
+ %tmp19945 = getelementptr inbounds float, float* %tmp19944, i64 1
+ %tmp19946 = getelementptr inbounds float, float* %tmp19945, i64 1
+ %tmp19947 = getelementptr inbounds float, float* %tmp19946, i64 1
+ %tmp19948 = getelementptr inbounds float, float* %tmp19947, i64 1
+ %tmp19949 = getelementptr inbounds float, float* %tmp19948, i64 1
+ %tmp19950 = getelementptr inbounds float, float* %tmp19949, i64 1
+ %tmp19951 = getelementptr inbounds float, float* %tmp19950, i64 1
+ %tmp19952 = getelementptr inbounds float, float* %tmp19951, i64 1
+ %tmp19953 = getelementptr inbounds float, float* %tmp19952, i64 1
+ %tmp19954 = getelementptr inbounds float, float* %tmp19953, i64 1
+ %tmp19955 = getelementptr inbounds float, float* %tmp19954, i64 1
+ %tmp19956 = getelementptr inbounds float, float* %tmp19955, i64 1
+ %tmp19957 = getelementptr inbounds float, float* %tmp19956, i64 1
+ %tmp19958 = getelementptr inbounds float, float* %tmp19957, i64 1
+ %tmp19959 = getelementptr inbounds float, float* %tmp19958, i64 1
+ %tmp19960 = getelementptr inbounds float, float* %tmp19959, i64 1
+ %tmp19961 = getelementptr inbounds float, float* %tmp19960, i64 1
+ %tmp19962 = getelementptr inbounds float, float* %tmp19961, i64 1
+ %tmp19963 = getelementptr inbounds float, float* %tmp19962, i64 1
+ %tmp19964 = getelementptr inbounds float, float* %tmp19963, i64 1
+ %tmp19965 = getelementptr inbounds float, float* %tmp19964, i64 1
+ %tmp19966 = getelementptr inbounds float, float* %tmp19965, i64 1
+ %tmp19967 = getelementptr inbounds float, float* %tmp19966, i64 1
+ %tmp19968 = getelementptr inbounds float, float* %tmp19967, i64 1
+ %tmp19969 = getelementptr inbounds float, float* %tmp19968, i64 1
+ %tmp19970 = getelementptr inbounds float, float* %tmp19969, i64 1
+ %tmp19971 = getelementptr inbounds float, float* %tmp19970, i64 1
+ %tmp19972 = getelementptr inbounds float, float* %tmp19971, i64 1
+ %tmp19973 = getelementptr inbounds float, float* %tmp19972, i64 1
+ %tmp19974 = getelementptr inbounds float, float* %tmp19973, i64 1
+ %tmp19975 = getelementptr inbounds float, float* %tmp19974, i64 1
+ %tmp19976 = getelementptr inbounds float, float* %tmp19975, i64 1
+ %tmp19977 = getelementptr inbounds float, float* %tmp19976, i64 1
+ %tmp19978 = getelementptr inbounds float, float* %tmp19977, i64 1
+ %tmp19979 = getelementptr inbounds float, float* %tmp19978, i64 1
+ %tmp19980 = getelementptr inbounds float, float* %tmp19979, i64 1
+ %tmp19981 = getelementptr inbounds float, float* %tmp19980, i64 1
+ %tmp19982 = getelementptr inbounds float, float* %tmp19981, i64 1
+ %tmp19983 = getelementptr inbounds float, float* %tmp19982, i64 1
+ %tmp19984 = getelementptr inbounds float, float* %tmp19983, i64 1
+ %tmp19985 = getelementptr inbounds float, float* %tmp19984, i64 1
+ %tmp19986 = getelementptr inbounds float, float* %tmp19985, i64 1
+ %tmp19987 = getelementptr inbounds float, float* %tmp19986, i64 1
+ %tmp19988 = getelementptr inbounds float, float* %tmp19987, i64 1
+ %tmp19989 = getelementptr inbounds float, float* %tmp19988, i64 1
+ %tmp19990 = getelementptr inbounds float, float* %tmp19989, i64 1
+ %tmp19991 = getelementptr inbounds float, float* %tmp19990, i64 1
+ %tmp19992 = getelementptr inbounds float, float* %tmp19991, i64 1
+ %tmp19993 = getelementptr inbounds float, float* %tmp19992, i64 1
+ %tmp19994 = getelementptr inbounds float, float* %tmp19993, i64 1
+ %tmp19995 = getelementptr inbounds float, float* %tmp19994, i64 1
+ %tmp19996 = getelementptr inbounds float, float* %tmp19995, i64 1
+ %tmp19997 = getelementptr inbounds float, float* %tmp19996, i64 1
+ %tmp19998 = getelementptr inbounds float, float* %tmp19997, i64 1
+ %tmp19999 = getelementptr inbounds float, float* %tmp19998, i64 1
+ %tmp20000 = getelementptr inbounds float, float* %tmp19999, i64 1
+ %tmp20001 = getelementptr inbounds float, float* %tmp20000, i64 1
+ %tmp20002 = getelementptr inbounds float, float* %tmp20001, i64 1
+ %tmp20003 = getelementptr inbounds float, float* %tmp20002, i64 1
+ %tmp20004 = getelementptr inbounds float, float* %tmp20003, i64 1
+ %tmp20005 = getelementptr inbounds float, float* %tmp20004, i64 1
+ %tmp20006 = getelementptr inbounds float, float* %tmp20005, i64 1
+ %tmp20007 = getelementptr inbounds float, float* %tmp20006, i64 1
+ %tmp20008 = getelementptr inbounds float, float* %tmp20007, i64 1
+ %tmp20009 = getelementptr inbounds float, float* %tmp20008, i64 1
+ %tmp20010 = getelementptr inbounds float, float* %tmp20009, i64 1
+ %tmp20011 = getelementptr inbounds float, float* %tmp20010, i64 1
+ %tmp20012 = getelementptr inbounds float, float* %tmp20011, i64 1
+ %tmp20013 = getelementptr inbounds float, float* %tmp20012, i64 1
+ %tmp20014 = getelementptr inbounds float, float* %tmp20013, i64 1
+ %tmp20015 = getelementptr inbounds float, float* %tmp20014, i64 1
+ %tmp20016 = getelementptr inbounds float, float* %tmp20015, i64 1
+ %tmp20017 = getelementptr inbounds float, float* %tmp20016, i64 1
+ %tmp20018 = getelementptr inbounds float, float* %tmp20017, i64 1
+ %tmp20019 = getelementptr inbounds float, float* %tmp20018, i64 1
+ %tmp20020 = getelementptr inbounds float, float* %tmp20019, i64 1
+ %tmp20021 = getelementptr inbounds float, float* %tmp20020, i64 1
+ %tmp20022 = getelementptr inbounds float, float* %tmp20021, i64 1
+ %tmp20023 = getelementptr inbounds float, float* %tmp20022, i64 1
+ %tmp20024 = getelementptr inbounds float, float* %tmp20023, i64 1
+ %tmp20025 = getelementptr inbounds float, float* %tmp20024, i64 1
+ %tmp20026 = getelementptr inbounds float, float* %tmp20025, i64 1
+ %tmp20027 = getelementptr inbounds float, float* %tmp20026, i64 1
+ %tmp20028 = getelementptr inbounds float, float* %tmp20027, i64 1
+ %tmp20029 = getelementptr inbounds float, float* %tmp20028, i64 1
+ %tmp20030 = getelementptr inbounds float, float* %tmp20029, i64 1
+ %tmp20031 = getelementptr inbounds float, float* %tmp20030, i64 1
+ %tmp20032 = getelementptr inbounds float, float* %tmp20031, i64 1
+ %tmp20033 = getelementptr inbounds float, float* %tmp20032, i64 1
+ %tmp20034 = getelementptr inbounds float, float* %tmp20033, i64 1
+ %tmp20035 = getelementptr inbounds float, float* %tmp20034, i64 1
+ %tmp20036 = getelementptr inbounds float, float* %tmp20035, i64 1
+ %tmp20037 = getelementptr inbounds float, float* %tmp20036, i64 1
+ %tmp20038 = getelementptr inbounds float, float* %tmp20037, i64 1
+ %tmp20039 = getelementptr inbounds float, float* %tmp20038, i64 1
+ %tmp20040 = getelementptr inbounds float, float* %tmp20039, i64 1
+ %tmp20041 = getelementptr inbounds float, float* %tmp20040, i64 1
+ %tmp20042 = getelementptr inbounds float, float* %tmp20041, i64 1
+ %tmp20043 = getelementptr inbounds float, float* %tmp20042, i64 1
+ %tmp20044 = getelementptr inbounds float, float* %tmp20043, i64 1
+ %tmp20045 = getelementptr inbounds float, float* %tmp20044, i64 1
+ %tmp20046 = getelementptr inbounds float, float* %tmp20045, i64 1
+ %tmp20047 = getelementptr inbounds float, float* %tmp20046, i64 1
+ %tmp20048 = getelementptr inbounds float, float* %tmp20047, i64 1
+ %tmp20049 = getelementptr inbounds float, float* %tmp20048, i64 1
+ %tmp20050 = getelementptr inbounds float, float* %tmp20049, i64 1
+ %tmp20051 = getelementptr inbounds float, float* %tmp20050, i64 1
+ %tmp20052 = getelementptr inbounds float, float* %tmp20051, i64 1
+ %tmp20053 = getelementptr inbounds float, float* %tmp20052, i64 1
+ %tmp20054 = getelementptr inbounds float, float* %tmp20053, i64 1
+ %tmp20055 = getelementptr inbounds float, float* %tmp20054, i64 1
+ %tmp20056 = getelementptr inbounds float, float* %tmp20055, i64 1
+ %tmp20057 = getelementptr inbounds float, float* %tmp20056, i64 1
+ %tmp20058 = getelementptr inbounds float, float* %tmp20057, i64 1
+ %tmp20059 = getelementptr inbounds float, float* %tmp20058, i64 1
+ %tmp20060 = getelementptr inbounds float, float* %tmp20059, i64 1
+ %tmp20061 = getelementptr inbounds float, float* %tmp20060, i64 1
+ %tmp20062 = getelementptr inbounds float, float* %tmp20061, i64 1
+ %tmp20063 = getelementptr inbounds float, float* %tmp20062, i64 1
+ %tmp20064 = getelementptr inbounds float, float* %tmp20063, i64 1
+ %tmp20065 = getelementptr inbounds float, float* %tmp20064, i64 1
+ %tmp20066 = getelementptr inbounds float, float* %tmp20065, i64 1
+ %tmp20067 = getelementptr inbounds float, float* %tmp20066, i64 1
+ %tmp20068 = getelementptr inbounds float, float* %tmp20067, i64 1
+ %tmp20069 = getelementptr inbounds float, float* %tmp20068, i64 1
+ %tmp20070 = getelementptr inbounds float, float* %tmp20069, i64 1
+ %tmp20071 = getelementptr inbounds float, float* %tmp20070, i64 1
+ %tmp20072 = getelementptr inbounds float, float* %tmp20071, i64 1
+ %tmp20073 = getelementptr inbounds float, float* %tmp20072, i64 1
+ %tmp20074 = getelementptr inbounds float, float* %tmp20073, i64 1
+ %tmp20075 = getelementptr inbounds float, float* %tmp20074, i64 1
+ %tmp20076 = getelementptr inbounds float, float* %tmp20075, i64 1
+ %tmp20077 = getelementptr inbounds float, float* %tmp20076, i64 1
+ %tmp20078 = getelementptr inbounds float, float* %tmp20077, i64 1
+ %tmp20079 = getelementptr inbounds float, float* %tmp20078, i64 1
+ %tmp20080 = getelementptr inbounds float, float* %tmp20079, i64 1
+ %tmp20081 = getelementptr inbounds float, float* %tmp20080, i64 1
+ %tmp20082 = getelementptr inbounds float, float* %tmp20081, i64 1
+ %tmp20083 = getelementptr inbounds float, float* %tmp20082, i64 1
+ %tmp20084 = getelementptr inbounds float, float* %tmp20083, i64 1
+ %tmp20085 = getelementptr inbounds float, float* %tmp20084, i64 1
+ %tmp20086 = getelementptr inbounds float, float* %tmp20085, i64 1
+ %tmp20087 = getelementptr inbounds float, float* %tmp20086, i64 1
+ %tmp20088 = getelementptr inbounds float, float* %tmp20087, i64 1
+ %tmp20089 = getelementptr inbounds float, float* %tmp20088, i64 1
+ %tmp20090 = getelementptr inbounds float, float* %tmp20089, i64 1
+ %tmp20091 = getelementptr inbounds float, float* %tmp20090, i64 1
+ %tmp20092 = getelementptr inbounds float, float* %tmp20091, i64 1
+ %tmp20093 = getelementptr inbounds float, float* %tmp20092, i64 1
+ %tmp20094 = getelementptr inbounds float, float* %tmp20093, i64 1
+ %tmp20095 = getelementptr inbounds float, float* %tmp20094, i64 1
+ %tmp20096 = getelementptr inbounds float, float* %tmp20095, i64 1
+ %tmp20097 = getelementptr inbounds float, float* %tmp20096, i64 1
+ %tmp20098 = getelementptr inbounds float, float* %tmp20097, i64 1
+ %tmp20099 = getelementptr inbounds float, float* %tmp20098, i64 1
+ %tmp20100 = getelementptr inbounds float, float* %tmp20099, i64 1
+ %tmp20101 = getelementptr inbounds float, float* %tmp20100, i64 1
+ %tmp20102 = getelementptr inbounds float, float* %tmp20101, i64 1
+ %tmp20103 = getelementptr inbounds float, float* %tmp20102, i64 1
+ %tmp20104 = getelementptr inbounds float, float* %tmp20103, i64 1
+ %tmp20105 = getelementptr inbounds float, float* %tmp20104, i64 1
+ %tmp20106 = getelementptr inbounds float, float* %tmp20105, i64 1
+ %tmp20107 = getelementptr inbounds float, float* %tmp20106, i64 1
+ %tmp20108 = getelementptr inbounds float, float* %tmp20107, i64 1
+ %tmp20109 = getelementptr inbounds float, float* %tmp20108, i64 1
+ %tmp20110 = getelementptr inbounds float, float* %tmp20109, i64 1
+ %tmp20111 = getelementptr inbounds float, float* %tmp20110, i64 1
+ %tmp20112 = getelementptr inbounds float, float* %tmp20111, i64 1
+ %tmp20113 = getelementptr inbounds float, float* %tmp20112, i64 1
+ %tmp20114 = getelementptr inbounds float, float* %tmp20113, i64 1
+ %tmp20115 = getelementptr inbounds float, float* %tmp20114, i64 1
+ %tmp20116 = getelementptr inbounds float, float* %tmp20115, i64 1
+ %tmp20117 = getelementptr inbounds float, float* %tmp20116, i64 1
+ %tmp20118 = getelementptr inbounds float, float* %tmp20117, i64 1
+ %tmp20119 = getelementptr inbounds float, float* %tmp20118, i64 1
+ %tmp20120 = getelementptr inbounds float, float* %tmp20119, i64 1
+ %tmp20121 = getelementptr inbounds float, float* %tmp20120, i64 1
+ %tmp20122 = getelementptr inbounds float, float* %tmp20121, i64 1
+ %tmp20123 = getelementptr inbounds float, float* %tmp20122, i64 1
+ %tmp20124 = getelementptr inbounds float, float* %tmp20123, i64 1
+ %tmp20125 = getelementptr inbounds float, float* %tmp20124, i64 1
+ %tmp20126 = getelementptr inbounds float, float* %tmp20125, i64 1
+ %tmp20127 = getelementptr inbounds float, float* %tmp20126, i64 1
+ %tmp20128 = getelementptr inbounds float, float* %tmp20127, i64 1
+ %tmp20129 = getelementptr inbounds float, float* %tmp20128, i64 1
+ %tmp20130 = getelementptr inbounds float, float* %tmp20129, i64 1
+ %tmp20131 = getelementptr inbounds float, float* %tmp20130, i64 1
+ %tmp20132 = getelementptr inbounds float, float* %tmp20131, i64 1
+ %tmp20133 = getelementptr inbounds float, float* %tmp20132, i64 1
+ %tmp20134 = getelementptr inbounds float, float* %tmp20133, i64 1
+ %tmp20135 = getelementptr inbounds float, float* %tmp20134, i64 1
+ %tmp20136 = getelementptr inbounds float, float* %tmp20135, i64 1
+ %tmp20137 = getelementptr inbounds float, float* %tmp20136, i64 1
+ %tmp20138 = getelementptr inbounds float, float* %tmp20137, i64 1
+ %tmp20139 = getelementptr inbounds float, float* %tmp20138, i64 1
+ %tmp20140 = getelementptr inbounds float, float* %tmp20139, i64 1
+ %tmp20141 = getelementptr inbounds float, float* %tmp20140, i64 1
+ %tmp20142 = getelementptr inbounds float, float* %tmp20141, i64 1
+ %tmp20143 = getelementptr inbounds float, float* %tmp20142, i64 1
+ %tmp20144 = getelementptr inbounds float, float* %tmp20143, i64 1
+ %tmp20145 = getelementptr inbounds float, float* %tmp20144, i64 1
+ %tmp20146 = getelementptr inbounds float, float* %tmp20145, i64 1
+ %tmp20147 = getelementptr inbounds float, float* %tmp20146, i64 1
+ %tmp20148 = getelementptr inbounds float, float* %tmp20147, i64 1
+ %tmp20149 = getelementptr inbounds float, float* %tmp20148, i64 1
+ %tmp20150 = getelementptr inbounds float, float* %tmp20149, i64 1
+ %tmp20151 = getelementptr inbounds float, float* %tmp20150, i64 1
+ %tmp20152 = getelementptr inbounds float, float* %tmp20151, i64 1
+ %tmp20153 = getelementptr inbounds float, float* %tmp20152, i64 1
+ %tmp20154 = getelementptr inbounds float, float* %tmp20153, i64 1
+ %tmp20155 = getelementptr inbounds float, float* %tmp20154, i64 1
+ %tmp20156 = getelementptr inbounds float, float* %tmp20155, i64 1
+ %tmp20157 = getelementptr inbounds float, float* %tmp20156, i64 1
+ %tmp20158 = getelementptr inbounds float, float* %tmp20157, i64 1
+ %tmp20159 = getelementptr inbounds float, float* %tmp20158, i64 1
+ %tmp20160 = getelementptr inbounds float, float* %tmp20159, i64 1
+ %tmp20161 = getelementptr inbounds float, float* %tmp20160, i64 1
+ %tmp20162 = getelementptr inbounds float, float* %tmp20161, i64 1
+ %tmp20163 = getelementptr inbounds float, float* %tmp20162, i64 1
+ %tmp20164 = getelementptr inbounds float, float* %tmp20163, i64 1
+ %tmp20165 = getelementptr inbounds float, float* %tmp20164, i64 1
+ %tmp20166 = getelementptr inbounds float, float* %tmp20165, i64 1
+ %tmp20167 = getelementptr inbounds float, float* %tmp20166, i64 1
+ %tmp20168 = getelementptr inbounds float, float* %tmp20167, i64 1
+ %tmp20169 = getelementptr inbounds float, float* %tmp20168, i64 1
+ %tmp20170 = getelementptr inbounds float, float* %tmp20169, i64 1
+ %tmp20171 = getelementptr inbounds float, float* %tmp20170, i64 1
+ %tmp20172 = getelementptr inbounds float, float* %tmp20171, i64 1
+ %tmp20173 = getelementptr inbounds float, float* %tmp20172, i64 1
+ %tmp20174 = getelementptr inbounds float, float* %tmp20173, i64 1
+ %tmp20175 = getelementptr inbounds float, float* %tmp20174, i64 1
+ %tmp20176 = getelementptr inbounds float, float* %tmp20175, i64 1
+ %tmp20177 = getelementptr inbounds float, float* %tmp20176, i64 1
+ %tmp20178 = getelementptr inbounds float, float* %tmp20177, i64 1
+ %tmp20179 = getelementptr inbounds float, float* %tmp20178, i64 1
+ %tmp20180 = getelementptr inbounds float, float* %tmp20179, i64 1
+ %tmp20181 = getelementptr inbounds float, float* %tmp20180, i64 1
+ %tmp20182 = getelementptr inbounds float, float* %tmp20181, i64 1
+ %tmp20183 = getelementptr inbounds float, float* %tmp20182, i64 1
+ %tmp20184 = getelementptr inbounds float, float* %tmp20183, i64 1
+ %tmp20185 = getelementptr inbounds float, float* %tmp20184, i64 1
+ %tmp20186 = getelementptr inbounds float, float* %tmp20185, i64 1
+ %tmp20187 = getelementptr inbounds float, float* %tmp20186, i64 1
+ %tmp20188 = getelementptr inbounds float, float* %tmp20187, i64 1
+ %tmp20189 = getelementptr inbounds float, float* %tmp20188, i64 1
+ %tmp20190 = getelementptr inbounds float, float* %tmp20189, i64 1
+ %tmp20191 = getelementptr inbounds float, float* %tmp20190, i64 1
+ %tmp20192 = getelementptr inbounds float, float* %tmp20191, i64 1
+ %tmp20193 = getelementptr inbounds float, float* %tmp20192, i64 1
+ %tmp20194 = getelementptr inbounds float, float* %tmp20193, i64 1
+ %tmp20195 = getelementptr inbounds float, float* %tmp20194, i64 1
+ %tmp20196 = getelementptr inbounds float, float* %tmp20195, i64 1
+ %tmp20197 = getelementptr inbounds float, float* %tmp20196, i64 1
+ %tmp20198 = getelementptr inbounds float, float* %tmp20197, i64 1
+ %tmp20199 = getelementptr inbounds float, float* %tmp20198, i64 1
+ %tmp20200 = getelementptr inbounds float, float* %tmp20199, i64 1
+ %tmp20201 = getelementptr inbounds float, float* %tmp20200, i64 1
+ %tmp20202 = getelementptr inbounds float, float* %tmp20201, i64 1
+ %tmp20203 = getelementptr inbounds float, float* %tmp20202, i64 1
+ %tmp20204 = getelementptr inbounds float, float* %tmp20203, i64 1
+ %tmp20205 = getelementptr inbounds float, float* %tmp20204, i64 1
+ %tmp20206 = getelementptr inbounds float, float* %tmp20205, i64 1
+ %tmp20207 = getelementptr inbounds float, float* %tmp20206, i64 1
+ %tmp20208 = getelementptr inbounds float, float* %tmp20207, i64 1
+ %tmp20209 = getelementptr inbounds float, float* %tmp20208, i64 1
+ %tmp20210 = getelementptr inbounds float, float* %tmp20209, i64 1
+ %tmp20211 = getelementptr inbounds float, float* %tmp20210, i64 1
+ %tmp20212 = getelementptr inbounds float, float* %tmp20211, i64 1
+ %tmp20213 = getelementptr inbounds float, float* %tmp20212, i64 1
+ %tmp20214 = getelementptr inbounds float, float* %tmp20213, i64 1
+ %tmp20215 = getelementptr inbounds float, float* %tmp20214, i64 1
+ %tmp20216 = getelementptr inbounds float, float* %tmp20215, i64 1
+ %tmp20217 = getelementptr inbounds float, float* %tmp20216, i64 1
+ %tmp20218 = getelementptr inbounds float, float* %tmp20217, i64 1
+ %tmp20219 = getelementptr inbounds float, float* %tmp20218, i64 1
+ %tmp20220 = getelementptr inbounds float, float* %tmp20219, i64 1
+ %tmp20221 = getelementptr inbounds float, float* %tmp20220, i64 1
+ %tmp20222 = getelementptr inbounds float, float* %tmp20221, i64 1
+ %tmp20223 = getelementptr inbounds float, float* %tmp20222, i64 1
+ %tmp20224 = getelementptr inbounds float, float* %tmp20223, i64 1
+ %tmp20225 = getelementptr inbounds float, float* %tmp20224, i64 1
+ %tmp20226 = getelementptr inbounds float, float* %tmp20225, i64 1
+ %tmp20227 = getelementptr inbounds float, float* %tmp20226, i64 1
+ %tmp20228 = getelementptr inbounds float, float* %tmp20227, i64 1
+ %tmp20229 = getelementptr inbounds float, float* %tmp20228, i64 1
+ %tmp20230 = getelementptr inbounds float, float* %tmp20229, i64 1
+ %tmp20231 = getelementptr inbounds float, float* %tmp20230, i64 1
+ %tmp20232 = getelementptr inbounds float, float* %tmp20231, i64 1
+ %tmp20233 = getelementptr inbounds float, float* %tmp20232, i64 1
+ %tmp20234 = getelementptr inbounds float, float* %tmp20233, i64 1
+ %tmp20235 = getelementptr inbounds float, float* %tmp20234, i64 1
+ %tmp20236 = getelementptr inbounds float, float* %tmp20235, i64 1
+ %tmp20237 = getelementptr inbounds float, float* %tmp20236, i64 1
+ %tmp20238 = getelementptr inbounds float, float* %tmp20237, i64 1
+ %tmp20239 = getelementptr inbounds float, float* %tmp20238, i64 1
+ %tmp20240 = getelementptr inbounds float, float* %tmp20239, i64 1
+ %tmp20241 = getelementptr inbounds float, float* %tmp20240, i64 1
+ %tmp20242 = getelementptr inbounds float, float* %tmp20241, i64 1
+ %tmp20243 = getelementptr inbounds float, float* %tmp20242, i64 1
+ %tmp20244 = getelementptr inbounds float, float* %tmp20243, i64 1
+ %tmp20245 = getelementptr inbounds float, float* %tmp20244, i64 1
+ %tmp20246 = getelementptr inbounds float, float* %tmp20245, i64 1
+ %tmp20247 = getelementptr inbounds float, float* %tmp20246, i64 1
+ %tmp20248 = getelementptr inbounds float, float* %tmp20247, i64 1
+ %tmp20249 = getelementptr inbounds float, float* %tmp20248, i64 1
+ %tmp20250 = getelementptr inbounds float, float* %tmp20249, i64 1
+ %tmp20251 = getelementptr inbounds float, float* %tmp20250, i64 1
+ %tmp20252 = getelementptr inbounds float, float* %tmp20251, i64 1
+ %tmp20253 = getelementptr inbounds float, float* %tmp20252, i64 1
+ %tmp20254 = getelementptr inbounds float, float* %tmp20253, i64 1
+ %tmp20255 = getelementptr inbounds float, float* %tmp20254, i64 1
+ %tmp20256 = getelementptr inbounds float, float* %tmp20255, i64 1
+ %tmp20257 = getelementptr inbounds float, float* %tmp20256, i64 1
+ %tmp20258 = getelementptr inbounds float, float* %tmp20257, i64 1
+ %tmp20259 = getelementptr inbounds float, float* %tmp20258, i64 1
+ %tmp20260 = getelementptr inbounds float, float* %tmp20259, i64 1
+ %tmp20261 = getelementptr inbounds float, float* %tmp20260, i64 1
+ %tmp20262 = getelementptr inbounds float, float* %tmp20261, i64 1
+ %tmp20263 = getelementptr inbounds float, float* %tmp20262, i64 1
+ %tmp20264 = getelementptr inbounds float, float* %tmp20263, i64 1
+ %tmp20265 = getelementptr inbounds float, float* %tmp20264, i64 1
+ %tmp20266 = getelementptr inbounds float, float* %tmp20265, i64 1
+ %tmp20267 = getelementptr inbounds float, float* %tmp20266, i64 1
+ %tmp20268 = getelementptr inbounds float, float* %tmp20267, i64 1
+ %tmp20269 = getelementptr inbounds float, float* %tmp20268, i64 1
+ %tmp20270 = getelementptr inbounds float, float* %tmp20269, i64 1
+ %tmp20271 = getelementptr inbounds float, float* %tmp20270, i64 1
+ %tmp20272 = getelementptr inbounds float, float* %tmp20271, i64 1
+ %tmp20273 = getelementptr inbounds float, float* %tmp20272, i64 1
+ %tmp20274 = getelementptr inbounds float, float* %tmp20273, i64 1
+ %tmp20275 = getelementptr inbounds float, float* %tmp20274, i64 1
+ %tmp20276 = getelementptr inbounds float, float* %tmp20275, i64 1
+ %tmp20277 = getelementptr inbounds float, float* %tmp20276, i64 1
+ %tmp20278 = getelementptr inbounds float, float* %tmp20277, i64 1
+ %tmp20279 = getelementptr inbounds float, float* %tmp20278, i64 1
+ %tmp20280 = getelementptr inbounds float, float* %tmp20279, i64 1
+ %tmp20281 = getelementptr inbounds float, float* %tmp20280, i64 1
+ %tmp20282 = getelementptr inbounds float, float* %tmp20281, i64 1
+ %tmp20283 = getelementptr inbounds float, float* %tmp20282, i64 1
+ %tmp20284 = getelementptr inbounds float, float* %tmp20283, i64 1
+ %tmp20285 = getelementptr inbounds float, float* %tmp20284, i64 1
+ %tmp20286 = getelementptr inbounds float, float* %tmp20285, i64 1
+ %tmp20287 = getelementptr inbounds float, float* %tmp20286, i64 1
+ %tmp20288 = getelementptr inbounds float, float* %tmp20287, i64 1
+ %tmp20289 = getelementptr inbounds float, float* %tmp20288, i64 1
+ %tmp20290 = getelementptr inbounds float, float* %tmp20289, i64 1
+ %tmp20291 = getelementptr inbounds float, float* %tmp20290, i64 1
+ %tmp20292 = getelementptr inbounds float, float* %tmp20291, i64 1
+ %tmp20293 = getelementptr inbounds float, float* %tmp20292, i64 1
+ %tmp20294 = getelementptr inbounds float, float* %tmp20293, i64 1
+ %tmp20295 = getelementptr inbounds float, float* %tmp20294, i64 1
+ %tmp20296 = getelementptr inbounds float, float* %tmp20295, i64 1
+ %tmp20297 = getelementptr inbounds float, float* %tmp20296, i64 1
+ %tmp20298 = getelementptr inbounds float, float* %tmp20297, i64 1
+ %tmp20299 = getelementptr inbounds float, float* %tmp20298, i64 1
+ %tmp20300 = getelementptr inbounds float, float* %tmp20299, i64 1
+ %tmp20301 = getelementptr inbounds float, float* %tmp20300, i64 1
+ %tmp20302 = getelementptr inbounds float, float* %tmp20301, i64 1
+ %tmp20303 = getelementptr inbounds float, float* %tmp20302, i64 1
+ %tmp20304 = getelementptr inbounds float, float* %tmp20303, i64 1
+ %tmp20305 = getelementptr inbounds float, float* %tmp20304, i64 1
+ %tmp20306 = getelementptr inbounds float, float* %tmp20305, i64 1
+ %tmp20307 = getelementptr inbounds float, float* %tmp20306, i64 1
+ %tmp20308 = getelementptr inbounds float, float* %tmp20307, i64 1
+ %tmp20309 = getelementptr inbounds float, float* %tmp20308, i64 1
+ %tmp20310 = getelementptr inbounds float, float* %tmp20309, i64 1
+ %tmp20311 = getelementptr inbounds float, float* %tmp20310, i64 1
+ %tmp20312 = getelementptr inbounds float, float* %tmp20311, i64 1
+ %tmp20313 = getelementptr inbounds float, float* %tmp20312, i64 1
+ %tmp20314 = getelementptr inbounds float, float* %tmp20313, i64 1
+ %tmp20315 = getelementptr inbounds float, float* %tmp20314, i64 1
+ %tmp20316 = getelementptr inbounds float, float* %tmp20315, i64 1
+ %tmp20317 = getelementptr inbounds float, float* %tmp20316, i64 1
+ %tmp20318 = getelementptr inbounds float, float* %tmp20317, i64 1
+ %tmp20319 = getelementptr inbounds float, float* %tmp20318, i64 1
+ %tmp20320 = getelementptr inbounds float, float* %tmp20319, i64 1
+ %tmp20321 = getelementptr inbounds float, float* %tmp20320, i64 1
+ %tmp20322 = getelementptr inbounds float, float* %tmp20321, i64 1
+ %tmp20323 = getelementptr inbounds float, float* %tmp20322, i64 1
+ %tmp20324 = getelementptr inbounds float, float* %tmp20323, i64 1
+ %tmp20325 = getelementptr inbounds float, float* %tmp20324, i64 1
+ %tmp20326 = getelementptr inbounds float, float* %tmp20325, i64 1
+ %tmp20327 = getelementptr inbounds float, float* %tmp20326, i64 1
+ %tmp20328 = getelementptr inbounds float, float* %tmp20327, i64 1
+ %tmp20329 = getelementptr inbounds float, float* %tmp20328, i64 1
+ %tmp20330 = getelementptr inbounds float, float* %tmp20329, i64 1
+ %tmp20331 = getelementptr inbounds float, float* %tmp20330, i64 1
+ %tmp20332 = getelementptr inbounds float, float* %tmp20331, i64 1
+ %tmp20333 = getelementptr inbounds float, float* %tmp20332, i64 1
+ %tmp20334 = getelementptr inbounds float, float* %tmp20333, i64 1
+ %tmp20335 = getelementptr inbounds float, float* %tmp20334, i64 1
+ %tmp20336 = getelementptr inbounds float, float* %tmp20335, i64 1
+ %tmp20337 = getelementptr inbounds float, float* %tmp20336, i64 1
+ %tmp20338 = getelementptr inbounds float, float* %tmp20337, i64 1
+ %tmp20339 = getelementptr inbounds float, float* %tmp20338, i64 1
+ %tmp20340 = getelementptr inbounds float, float* %tmp20339, i64 1
+ %tmp20341 = getelementptr inbounds float, float* %tmp20340, i64 1
+ %tmp20342 = getelementptr inbounds float, float* %tmp20341, i64 1
+ %tmp20343 = getelementptr inbounds float, float* %tmp20342, i64 1
+ %tmp20344 = getelementptr inbounds float, float* %tmp20343, i64 1
+ %tmp20345 = getelementptr inbounds float, float* %tmp20344, i64 1
+ %tmp20346 = getelementptr inbounds float, float* %tmp20345, i64 1
+ %tmp20347 = getelementptr inbounds float, float* %tmp20346, i64 1
+ %tmp20348 = getelementptr inbounds float, float* %tmp20347, i64 1
+ %tmp20349 = getelementptr inbounds float, float* %tmp20348, i64 1
+ %tmp20350 = getelementptr inbounds float, float* %tmp20349, i64 1
+ %tmp20351 = getelementptr inbounds float, float* %tmp20350, i64 1
+ %tmp20352 = getelementptr inbounds float, float* %tmp20351, i64 1
+ %tmp20353 = getelementptr inbounds float, float* %tmp20352, i64 1
+ %tmp20354 = getelementptr inbounds float, float* %tmp20353, i64 1
+ %tmp20355 = getelementptr inbounds float, float* %tmp20354, i64 1
+ %tmp20356 = getelementptr inbounds float, float* %tmp20355, i64 1
+ %tmp20357 = getelementptr inbounds float, float* %tmp20356, i64 1
+ %tmp20358 = getelementptr inbounds float, float* %tmp20357, i64 1
+ %tmp20359 = getelementptr inbounds float, float* %tmp20358, i64 1
+ %tmp20360 = getelementptr inbounds float, float* %tmp20359, i64 1
+ %tmp20361 = getelementptr inbounds float, float* %tmp20360, i64 1
+ %tmp20362 = getelementptr inbounds float, float* %tmp20361, i64 1
+ %tmp20363 = getelementptr inbounds float, float* %tmp20362, i64 1
+ %tmp20364 = getelementptr inbounds float, float* %tmp20363, i64 1
+ %tmp20365 = getelementptr inbounds float, float* %tmp20364, i64 1
+ %tmp20366 = getelementptr inbounds float, float* %tmp20365, i64 1
+ %tmp20367 = getelementptr inbounds float, float* %tmp20366, i64 1
+ %tmp20368 = getelementptr inbounds float, float* %tmp20367, i64 1
+ %tmp20369 = getelementptr inbounds float, float* %tmp20368, i64 1
+ %tmp20370 = getelementptr inbounds float, float* %tmp20369, i64 1
+ %tmp20371 = getelementptr inbounds float, float* %tmp20370, i64 1
+ %tmp20372 = getelementptr inbounds float, float* %tmp20371, i64 1
+ %tmp20373 = getelementptr inbounds float, float* %tmp20372, i64 1
+ %tmp20374 = getelementptr inbounds float, float* %tmp20373, i64 1
+ %tmp20375 = getelementptr inbounds float, float* %tmp20374, i64 1
+ %tmp20376 = getelementptr inbounds float, float* %tmp20375, i64 1
+ %tmp20377 = getelementptr inbounds float, float* %tmp20376, i64 1
+ %tmp20378 = getelementptr inbounds float, float* %tmp20377, i64 1
+ %tmp20379 = getelementptr inbounds float, float* %tmp20378, i64 1
+ %tmp20380 = getelementptr inbounds float, float* %tmp20379, i64 1
+ %tmp20381 = getelementptr inbounds float, float* %tmp20380, i64 1
+ %tmp20382 = getelementptr inbounds float, float* %tmp20381, i64 1
+ %tmp20383 = getelementptr inbounds float, float* %tmp20382, i64 1
+ %tmp20384 = getelementptr inbounds float, float* %tmp20383, i64 1
+ %tmp20385 = getelementptr inbounds float, float* %tmp20384, i64 1
+ %tmp20386 = getelementptr inbounds float, float* %tmp20385, i64 1
+ %tmp20387 = getelementptr inbounds float, float* %tmp20386, i64 1
+ %tmp20388 = getelementptr inbounds float, float* %tmp20387, i64 1
+ %tmp20389 = getelementptr inbounds float, float* %tmp20388, i64 1
+ %tmp20390 = getelementptr inbounds float, float* %tmp20389, i64 1
+ %tmp20391 = getelementptr inbounds float, float* %tmp20390, i64 1
+ %tmp20392 = getelementptr inbounds float, float* %tmp20391, i64 1
+ %tmp20393 = getelementptr inbounds float, float* %tmp20392, i64 1
+ %tmp20394 = getelementptr inbounds float, float* %tmp20393, i64 1
+ %tmp20395 = getelementptr inbounds float, float* %tmp20394, i64 1
+ %tmp20396 = getelementptr inbounds float, float* %tmp20395, i64 1
+ %tmp20397 = getelementptr inbounds float, float* %tmp20396, i64 1
+ %tmp20398 = getelementptr inbounds float, float* %tmp20397, i64 1
+ %tmp20399 = getelementptr inbounds float, float* %tmp20398, i64 1
+ %tmp20400 = getelementptr inbounds float, float* %tmp20399, i64 1
+ %tmp20401 = getelementptr inbounds float, float* %tmp20400, i64 1
+ %tmp20402 = getelementptr inbounds float, float* %tmp20401, i64 1
+ %tmp20403 = getelementptr inbounds float, float* %tmp20402, i64 1
+ %tmp20404 = getelementptr inbounds float, float* %tmp20403, i64 1
+ %tmp20405 = getelementptr inbounds float, float* %tmp20404, i64 1
+ %tmp20406 = getelementptr inbounds float, float* %tmp20405, i64 1
+ %tmp20407 = getelementptr inbounds float, float* %tmp20406, i64 1
+ %tmp20408 = getelementptr inbounds float, float* %tmp20407, i64 1
+ %tmp20409 = getelementptr inbounds float, float* %tmp20408, i64 1
+ %tmp20410 = getelementptr inbounds float, float* %tmp20409, i64 1
+ %tmp20411 = getelementptr inbounds float, float* %tmp20410, i64 1
+ %tmp20412 = getelementptr inbounds float, float* %tmp20411, i64 1
+ %tmp20413 = getelementptr inbounds float, float* %tmp20412, i64 1
+ %tmp20414 = getelementptr inbounds float, float* %tmp20413, i64 1
+ %tmp20415 = getelementptr inbounds float, float* %tmp20414, i64 1
+ %tmp20416 = getelementptr inbounds float, float* %tmp20415, i64 1
+ %tmp20417 = getelementptr inbounds float, float* %tmp20416, i64 1
+ %tmp20418 = getelementptr inbounds float, float* %tmp20417, i64 1
+ %tmp20419 = getelementptr inbounds float, float* %tmp20418, i64 1
+ %tmp20420 = getelementptr inbounds float, float* %tmp20419, i64 1
+ %tmp20421 = getelementptr inbounds float, float* %tmp20420, i64 1
+ %tmp20422 = getelementptr inbounds float, float* %tmp20421, i64 1
+ %tmp20423 = getelementptr inbounds float, float* %tmp20422, i64 1
+ %tmp20424 = getelementptr inbounds float, float* %tmp20423, i64 1
+ %tmp20425 = getelementptr inbounds float, float* %tmp20424, i64 1
+ %tmp20426 = getelementptr inbounds float, float* %tmp20425, i64 1
+ %tmp20427 = getelementptr inbounds float, float* %tmp20426, i64 1
+ %tmp20428 = getelementptr inbounds float, float* %tmp20427, i64 1
+ %tmp20429 = getelementptr inbounds float, float* %tmp20428, i64 1
+ %tmp20430 = getelementptr inbounds float, float* %tmp20429, i64 1
+ %tmp20431 = getelementptr inbounds float, float* %tmp20430, i64 1
+ %tmp20432 = getelementptr inbounds float, float* %tmp20431, i64 1
+ %tmp20433 = getelementptr inbounds float, float* %tmp20432, i64 1
+ %tmp20434 = getelementptr inbounds float, float* %tmp20433, i64 1
+ %tmp20435 = getelementptr inbounds float, float* %tmp20434, i64 1
+ %tmp20436 = getelementptr inbounds float, float* %tmp20435, i64 1
+ %tmp20437 = getelementptr inbounds float, float* %tmp20436, i64 1
+ %tmp20438 = getelementptr inbounds float, float* %tmp20437, i64 1
+ %tmp20439 = getelementptr inbounds float, float* %tmp20438, i64 1
+ %tmp20440 = getelementptr inbounds float, float* %tmp20439, i64 1
+ %tmp20441 = getelementptr inbounds float, float* %tmp20440, i64 1
+ %tmp20442 = getelementptr inbounds float, float* %tmp20441, i64 1
+ %tmp20443 = getelementptr inbounds float, float* %tmp20442, i64 1
+ %tmp20444 = getelementptr inbounds float, float* %tmp20443, i64 1
+ %tmp20445 = getelementptr inbounds float, float* %tmp20444, i64 1
+ %tmp20446 = getelementptr inbounds float, float* %tmp20445, i64 1
+ %tmp20447 = getelementptr inbounds float, float* %tmp20446, i64 1
+ %tmp20448 = getelementptr inbounds float, float* %tmp20447, i64 1
+ %tmp20449 = getelementptr inbounds float, float* %tmp20448, i64 1
+ %tmp20450 = getelementptr inbounds float, float* %tmp20449, i64 1
+ %tmp20451 = getelementptr inbounds float, float* %tmp20450, i64 1
+ %tmp20452 = getelementptr inbounds float, float* %tmp20451, i64 1
+ %tmp20453 = getelementptr inbounds float, float* %tmp20452, i64 1
+ %tmp20454 = getelementptr inbounds float, float* %tmp20453, i64 1
+ %tmp20455 = getelementptr inbounds float, float* %tmp20454, i64 1
+ %tmp20456 = getelementptr inbounds float, float* %tmp20455, i64 1
+ %tmp20457 = getelementptr inbounds float, float* %tmp20456, i64 1
+ %tmp20458 = getelementptr inbounds float, float* %tmp20457, i64 1
+ %tmp20459 = getelementptr inbounds float, float* %tmp20458, i64 1
+ %tmp20460 = getelementptr inbounds float, float* %tmp20459, i64 1
+ %tmp20461 = getelementptr inbounds float, float* %tmp20460, i64 1
+ %tmp20462 = getelementptr inbounds float, float* %tmp20461, i64 1
+ %tmp20463 = getelementptr inbounds float, float* %tmp20462, i64 1
+ %tmp20464 = getelementptr inbounds float, float* %tmp20463, i64 1
+ %tmp20465 = getelementptr inbounds float, float* %tmp20464, i64 1
+ %tmp20466 = getelementptr inbounds float, float* %tmp20465, i64 1
+ %tmp20467 = getelementptr inbounds float, float* %tmp20466, i64 1
+ %tmp20468 = getelementptr inbounds float, float* %tmp20467, i64 1
+ %tmp20469 = getelementptr inbounds float, float* %tmp20468, i64 1
+ %tmp20470 = getelementptr inbounds float, float* %tmp20469, i64 1
+ %tmp20471 = getelementptr inbounds float, float* %tmp20470, i64 1
+ %tmp20472 = getelementptr inbounds float, float* %tmp20471, i64 1
+ %tmp20473 = getelementptr inbounds float, float* %tmp20472, i64 1
+ %tmp20474 = getelementptr inbounds float, float* %tmp20473, i64 1
+ %tmp20475 = getelementptr inbounds float, float* %tmp20474, i64 1
+ %tmp20476 = getelementptr inbounds float, float* %tmp20475, i64 1
+ %tmp20477 = getelementptr inbounds float, float* %tmp20476, i64 1
+ %tmp20478 = getelementptr inbounds float, float* %tmp20477, i64 1
+ %tmp20479 = getelementptr inbounds float, float* %tmp20478, i64 1
+ %tmp20480 = getelementptr inbounds float, float* %tmp20479, i64 1
+ %tmp20481 = getelementptr inbounds float, float* %tmp20480, i64 1
+ %tmp20482 = getelementptr inbounds float, float* %tmp20481, i64 1
+ %tmp20483 = getelementptr inbounds float, float* %tmp20482, i64 1
+ %tmp20484 = getelementptr inbounds float, float* %tmp20483, i64 1
+ %tmp20485 = getelementptr inbounds float, float* %tmp20484, i64 1
+ %tmp20486 = getelementptr inbounds float, float* %tmp20485, i64 1
+ %tmp20487 = getelementptr inbounds float, float* %tmp20486, i64 1
+ %tmp20488 = getelementptr inbounds float, float* %tmp20487, i64 1
+ %tmp20489 = getelementptr inbounds float, float* %tmp20488, i64 1
+ %tmp20490 = getelementptr inbounds float, float* %tmp20489, i64 1
+ %tmp20491 = getelementptr inbounds float, float* %tmp20490, i64 1
+ %tmp20492 = getelementptr inbounds float, float* %tmp20491, i64 1
+ %tmp20493 = getelementptr inbounds float, float* %tmp20492, i64 1
+ %tmp20494 = getelementptr inbounds float, float* %tmp20493, i64 1
+ %tmp20495 = getelementptr inbounds float, float* %tmp20494, i64 1
+ %tmp20496 = getelementptr inbounds float, float* %tmp20495, i64 1
+ %tmp20497 = getelementptr inbounds float, float* %tmp20496, i64 1
+ %tmp20498 = getelementptr inbounds float, float* %tmp20497, i64 1
+ %tmp20499 = getelementptr inbounds float, float* %tmp20498, i64 1
+ %tmp20500 = getelementptr inbounds float, float* %tmp20499, i64 1
+ %tmp20501 = getelementptr inbounds float, float* %tmp20500, i64 1
+ %tmp20502 = getelementptr inbounds float, float* %tmp20501, i64 1
+ %tmp20503 = getelementptr inbounds float, float* %tmp20502, i64 1
+ %tmp20504 = getelementptr inbounds float, float* %tmp20503, i64 1
+ %tmp20505 = getelementptr inbounds float, float* %tmp20504, i64 1
+ %tmp20506 = getelementptr inbounds float, float* %tmp20505, i64 1
+ %tmp20507 = getelementptr inbounds float, float* %tmp20506, i64 1
+ %tmp20508 = getelementptr inbounds float, float* %tmp20507, i64 1
+ %tmp20509 = getelementptr inbounds float, float* %tmp20508, i64 1
+ %tmp20510 = getelementptr inbounds float, float* %tmp20509, i64 1
+ %tmp20511 = getelementptr inbounds float, float* %tmp20510, i64 1
+ %tmp20512 = getelementptr inbounds float, float* %tmp20511, i64 1
+ %tmp20513 = getelementptr inbounds float, float* %tmp20512, i64 1
+ %tmp20514 = getelementptr inbounds float, float* %tmp20513, i64 1
+ %tmp20515 = getelementptr inbounds float, float* %tmp20514, i64 1
+ %tmp20516 = getelementptr inbounds float, float* %tmp20515, i64 1
+ %tmp20517 = getelementptr inbounds float, float* %tmp20516, i64 1
+ %tmp20518 = getelementptr inbounds float, float* %tmp20517, i64 1
+ %tmp20519 = getelementptr inbounds float, float* %tmp20518, i64 1
+ %tmp20520 = getelementptr inbounds float, float* %tmp20519, i64 1
+ %tmp20521 = getelementptr inbounds float, float* %tmp20520, i64 1
+ %tmp20522 = getelementptr inbounds float, float* %tmp20521, i64 1
+ %tmp20523 = getelementptr inbounds float, float* %tmp20522, i64 1
+ %tmp20524 = getelementptr inbounds float, float* %tmp20523, i64 1
+ %tmp20525 = getelementptr inbounds float, float* %tmp20524, i64 1
+ %tmp20526 = getelementptr inbounds float, float* %tmp20525, i64 1
+ %tmp20527 = getelementptr inbounds float, float* %tmp20526, i64 1
+ %tmp20528 = getelementptr inbounds float, float* %tmp20527, i64 1
+ %tmp20529 = getelementptr inbounds float, float* %tmp20528, i64 1
+ %tmp20530 = getelementptr inbounds float, float* %tmp20529, i64 1
+ %tmp20531 = getelementptr inbounds float, float* %tmp20530, i64 1
+ %tmp20532 = getelementptr inbounds float, float* %tmp20531, i64 1
+ %tmp20533 = getelementptr inbounds float, float* %tmp20532, i64 1
+ %tmp20534 = getelementptr inbounds float, float* %tmp20533, i64 1
+ %tmp20535 = getelementptr inbounds float, float* %tmp20534, i64 1
+ %tmp20536 = getelementptr inbounds float, float* %tmp20535, i64 1
+ %tmp20537 = getelementptr inbounds float, float* %tmp20536, i64 1
+ %tmp20538 = getelementptr inbounds float, float* %tmp20537, i64 1
+ %tmp20539 = getelementptr inbounds float, float* %tmp20538, i64 1
+ %tmp20540 = getelementptr inbounds float, float* %tmp20539, i64 1
+ %tmp20541 = getelementptr inbounds float, float* %tmp20540, i64 1
+ %tmp20542 = getelementptr inbounds float, float* %tmp20541, i64 1
+ %tmp20543 = getelementptr inbounds float, float* %tmp20542, i64 1
+ %tmp20544 = getelementptr inbounds float, float* %tmp20543, i64 1
+ %tmp20545 = getelementptr inbounds float, float* %tmp20544, i64 1
+ %tmp20546 = getelementptr inbounds float, float* %tmp20545, i64 1
+ %tmp20547 = getelementptr inbounds float, float* %tmp20546, i64 1
+ %tmp20548 = getelementptr inbounds float, float* %tmp20547, i64 1
+ %tmp20549 = getelementptr inbounds float, float* %tmp20548, i64 1
+ %tmp20550 = getelementptr inbounds float, float* %tmp20549, i64 1
+ %tmp20551 = getelementptr inbounds float, float* %tmp20550, i64 1
+ %tmp20552 = getelementptr inbounds float, float* %tmp20551, i64 1
+ %tmp20553 = getelementptr inbounds float, float* %tmp20552, i64 1
+ %tmp20554 = getelementptr inbounds float, float* %tmp20553, i64 1
+ %tmp20555 = getelementptr inbounds float, float* %tmp20554, i64 1
+ %tmp20556 = getelementptr inbounds float, float* %tmp20555, i64 1
+ %tmp20557 = getelementptr inbounds float, float* %tmp20556, i64 1
+ %tmp20558 = getelementptr inbounds float, float* %tmp20557, i64 1
+ %tmp20559 = getelementptr inbounds float, float* %tmp20558, i64 1
+ %tmp20560 = getelementptr inbounds float, float* %tmp20559, i64 1
+ %tmp20561 = getelementptr inbounds float, float* %tmp20560, i64 1
+ %tmp20562 = getelementptr inbounds float, float* %tmp20561, i64 1
+ %tmp20563 = getelementptr inbounds float, float* %tmp20562, i64 1
+ %tmp20564 = getelementptr inbounds float, float* %tmp20563, i64 1
+ %tmp20565 = getelementptr inbounds float, float* %tmp20564, i64 1
+ %tmp20566 = getelementptr inbounds float, float* %tmp20565, i64 1
+ %tmp20567 = getelementptr inbounds float, float* %tmp20566, i64 1
+ %tmp20568 = getelementptr inbounds float, float* %tmp20567, i64 1
+ %tmp20569 = getelementptr inbounds float, float* %tmp20568, i64 1
+ %tmp20570 = getelementptr inbounds float, float* %tmp20569, i64 1
+ %tmp20571 = getelementptr inbounds float, float* %tmp20570, i64 1
+ %tmp20572 = getelementptr inbounds float, float* %tmp20571, i64 1
+ %tmp20573 = getelementptr inbounds float, float* %tmp20572, i64 1
+ %tmp20574 = getelementptr inbounds float, float* %tmp20573, i64 1
+ %tmp20575 = getelementptr inbounds float, float* %tmp20574, i64 1
+ %tmp20576 = getelementptr inbounds float, float* %tmp20575, i64 1
+ %tmp20577 = getelementptr inbounds float, float* %tmp20576, i64 1
+ %tmp20578 = getelementptr inbounds float, float* %tmp20577, i64 1
+ %tmp20579 = getelementptr inbounds float, float* %tmp20578, i64 1
+ %tmp20580 = getelementptr inbounds float, float* %tmp20579, i64 1
+ %tmp20581 = getelementptr inbounds float, float* %tmp20580, i64 1
+ %tmp20582 = getelementptr inbounds float, float* %tmp20581, i64 1
+ %tmp20583 = getelementptr inbounds float, float* %tmp20582, i64 1
+ %tmp20584 = getelementptr inbounds float, float* %tmp20583, i64 1
+ %tmp20585 = getelementptr inbounds float, float* %tmp20584, i64 1
+ %tmp20586 = getelementptr inbounds float, float* %tmp20585, i64 1
+ %tmp20587 = getelementptr inbounds float, float* %tmp20586, i64 1
+ %tmp20588 = getelementptr inbounds float, float* %tmp20587, i64 1
+ %tmp20589 = getelementptr inbounds float, float* %tmp20588, i64 1
+ %tmp20590 = getelementptr inbounds float, float* %tmp20589, i64 1
+ %tmp20591 = getelementptr inbounds float, float* %tmp20590, i64 1
+ %tmp20592 = getelementptr inbounds float, float* %tmp20591, i64 1
+ %tmp20593 = getelementptr inbounds float, float* %tmp20592, i64 1
+ %tmp20594 = getelementptr inbounds float, float* %tmp20593, i64 1
+ %tmp20595 = getelementptr inbounds float, float* %tmp20594, i64 1
+ %tmp20596 = getelementptr inbounds float, float* %tmp20595, i64 1
+ %tmp20597 = getelementptr inbounds float, float* %tmp20596, i64 1
+ %tmp20598 = getelementptr inbounds float, float* %tmp20597, i64 1
+ %tmp20599 = getelementptr inbounds float, float* %tmp20598, i64 1
+ %tmp20600 = getelementptr inbounds float, float* %tmp20599, i64 1
+ %tmp20601 = getelementptr inbounds float, float* %tmp20600, i64 1
+ %tmp20602 = getelementptr inbounds float, float* %tmp20601, i64 1
+ %tmp20603 = getelementptr inbounds float, float* %tmp20602, i64 1
+ %tmp20604 = getelementptr inbounds float, float* %tmp20603, i64 1
+ %tmp20605 = getelementptr inbounds float, float* %tmp20604, i64 1
+ %tmp20606 = getelementptr inbounds float, float* %tmp20605, i64 1
+ %tmp20607 = getelementptr inbounds float, float* %tmp20606, i64 1
+ %tmp20608 = getelementptr inbounds float, float* %tmp20607, i64 1
+ %tmp20609 = getelementptr inbounds float, float* %tmp20608, i64 1
+ %tmp20610 = getelementptr inbounds float, float* %tmp20609, i64 1
+ %tmp20611 = getelementptr inbounds float, float* %tmp20610, i64 1
+ %tmp20612 = getelementptr inbounds float, float* %tmp20611, i64 1
+ %tmp20613 = getelementptr inbounds float, float* %tmp20612, i64 1
+ %tmp20614 = getelementptr inbounds float, float* %tmp20613, i64 1
+ %tmp20615 = getelementptr inbounds float, float* %tmp20614, i64 1
+ %tmp20616 = getelementptr inbounds float, float* %tmp20615, i64 1
+ %tmp20617 = getelementptr inbounds float, float* %tmp20616, i64 1
+ %tmp20618 = getelementptr inbounds float, float* %tmp20617, i64 1
+ %tmp20619 = getelementptr inbounds float, float* %tmp20618, i64 1
+ %tmp20620 = getelementptr inbounds float, float* %tmp20619, i64 1
+ %tmp20621 = getelementptr inbounds float, float* %tmp20620, i64 1
+ %tmp20622 = getelementptr inbounds float, float* %tmp20621, i64 1
+ %tmp20623 = getelementptr inbounds float, float* %tmp20622, i64 1
+ %tmp20624 = getelementptr inbounds float, float* %tmp20623, i64 1
+ %tmp20625 = getelementptr inbounds float, float* %tmp20624, i64 1
+ %tmp20626 = getelementptr inbounds float, float* %tmp20625, i64 1
+ %tmp20627 = getelementptr inbounds float, float* %tmp20626, i64 1
+ %tmp20628 = getelementptr inbounds float, float* %tmp20627, i64 1
+ %tmp20629 = getelementptr inbounds float, float* %tmp20628, i64 1
+ %tmp20630 = getelementptr inbounds float, float* %tmp20629, i64 1
+ %tmp20631 = getelementptr inbounds float, float* %tmp20630, i64 1
+ %tmp20632 = getelementptr inbounds float, float* %tmp20631, i64 1
+ %tmp20633 = getelementptr inbounds float, float* %tmp20632, i64 1
+ %tmp20634 = getelementptr inbounds float, float* %tmp20633, i64 1
+ %tmp20635 = getelementptr inbounds float, float* %tmp20634, i64 1
+ %tmp20636 = getelementptr inbounds float, float* %tmp20635, i64 1
+ %tmp20637 = getelementptr inbounds float, float* %tmp20636, i64 1
+ %tmp20638 = getelementptr inbounds float, float* %tmp20637, i64 1
+ %tmp20639 = getelementptr inbounds float, float* %tmp20638, i64 1
+ %tmp20640 = getelementptr inbounds float, float* %tmp20639, i64 1
+ %tmp20641 = getelementptr inbounds float, float* %tmp20640, i64 1
+ %tmp20642 = getelementptr inbounds float, float* %tmp20641, i64 1
+ %tmp20643 = getelementptr inbounds float, float* %tmp20642, i64 1
+ %tmp20644 = getelementptr inbounds float, float* %tmp20643, i64 1
+ %tmp20645 = getelementptr inbounds float, float* %tmp20644, i64 1
+ %tmp20646 = getelementptr inbounds float, float* %tmp20645, i64 1
+ %tmp20647 = getelementptr inbounds float, float* %tmp20646, i64 1
+ %tmp20648 = getelementptr inbounds float, float* %tmp20647, i64 1
+ %tmp20649 = getelementptr inbounds float, float* %tmp20648, i64 1
+ %tmp20650 = getelementptr inbounds float, float* %tmp20649, i64 1
+ %tmp20651 = getelementptr inbounds float, float* %tmp20650, i64 1
+ %tmp20652 = getelementptr inbounds float, float* %tmp20651, i64 1
+ %tmp20653 = getelementptr inbounds float, float* %tmp20652, i64 1
+ %tmp20654 = getelementptr inbounds float, float* %tmp20653, i64 1
+ %tmp20655 = getelementptr inbounds float, float* %tmp20654, i64 1
+ %tmp20656 = getelementptr inbounds float, float* %tmp20655, i64 1
+ %tmp20657 = getelementptr inbounds float, float* %tmp20656, i64 1
+ %tmp20658 = getelementptr inbounds float, float* %tmp20657, i64 1
+ %tmp20659 = getelementptr inbounds float, float* %tmp20658, i64 1
+ %tmp20660 = getelementptr inbounds float, float* %tmp20659, i64 1
+ %tmp20661 = getelementptr inbounds float, float* %tmp20660, i64 1
+ %tmp20662 = getelementptr inbounds float, float* %tmp20661, i64 1
+ %tmp20663 = getelementptr inbounds float, float* %tmp20662, i64 1
+ %tmp20664 = getelementptr inbounds float, float* %tmp20663, i64 1
+ %tmp20665 = getelementptr inbounds float, float* %tmp20664, i64 1
+ %tmp20666 = getelementptr inbounds float, float* %tmp20665, i64 1
+ %tmp20667 = getelementptr inbounds float, float* %tmp20666, i64 1
+ %tmp20668 = getelementptr inbounds float, float* %tmp20667, i64 1
+ %tmp20669 = getelementptr inbounds float, float* %tmp20668, i64 1
+ %tmp20670 = getelementptr inbounds float, float* %tmp20669, i64 1
+ %tmp20671 = getelementptr inbounds float, float* %tmp20670, i64 1
+ %tmp20672 = getelementptr inbounds float, float* %tmp20671, i64 1
+ %tmp20673 = getelementptr inbounds float, float* %tmp20672, i64 1
+ %tmp20674 = getelementptr inbounds float, float* %tmp20673, i64 1
+ %tmp20675 = getelementptr inbounds float, float* %tmp20674, i64 1
+ %tmp20676 = getelementptr inbounds float, float* %tmp20675, i64 1
+ %tmp20677 = getelementptr inbounds float, float* %tmp20676, i64 1
+ %tmp20678 = getelementptr inbounds float, float* %tmp20677, i64 1
+ %tmp20679 = getelementptr inbounds float, float* %tmp20678, i64 1
+ %tmp20680 = getelementptr inbounds float, float* %tmp20679, i64 1
+ %tmp20681 = getelementptr inbounds float, float* %tmp20680, i64 1
+ %tmp20682 = getelementptr inbounds float, float* %tmp20681, i64 1
+ %tmp20683 = getelementptr inbounds float, float* %tmp20682, i64 1
+ %tmp20684 = getelementptr inbounds float, float* %tmp20683, i64 1
+ %tmp20685 = getelementptr inbounds float, float* %tmp20684, i64 1
+ %tmp20686 = getelementptr inbounds float, float* %tmp20685, i64 1
+ %tmp20687 = getelementptr inbounds float, float* %tmp20686, i64 1
+ %tmp20688 = getelementptr inbounds float, float* %tmp20687, i64 1
+ %tmp20689 = getelementptr inbounds float, float* %tmp20688, i64 1
+ %tmp20690 = getelementptr inbounds float, float* %tmp20689, i64 1
+ %tmp20691 = getelementptr inbounds float, float* %tmp20690, i64 1
+ %tmp20692 = getelementptr inbounds float, float* %tmp20691, i64 1
+ %tmp20693 = getelementptr inbounds float, float* %tmp20692, i64 1
+ %tmp20694 = getelementptr inbounds float, float* %tmp20693, i64 1
+ %tmp20695 = getelementptr inbounds float, float* %tmp20694, i64 1
+ %tmp20696 = getelementptr inbounds float, float* %tmp20695, i64 1
+ %tmp20697 = getelementptr inbounds float, float* %tmp20696, i64 1
+ %tmp20698 = getelementptr inbounds float, float* %tmp20697, i64 1
+ %tmp20699 = getelementptr inbounds float, float* %tmp20698, i64 1
+ %tmp20700 = getelementptr inbounds float, float* %tmp20699, i64 1
+ %tmp20701 = getelementptr inbounds float, float* %tmp20700, i64 1
+ %tmp20702 = getelementptr inbounds float, float* %tmp20701, i64 1
+ %tmp20703 = getelementptr inbounds float, float* %tmp20702, i64 1
+ %tmp20704 = getelementptr inbounds float, float* %tmp20703, i64 1
+ %tmp20705 = getelementptr inbounds float, float* %tmp20704, i64 1
+ %tmp20706 = getelementptr inbounds float, float* %tmp20705, i64 1
+ %tmp20707 = getelementptr inbounds float, float* %tmp20706, i64 1
+ %tmp20708 = getelementptr inbounds float, float* %tmp20707, i64 1
+ %tmp20709 = getelementptr inbounds float, float* %tmp20708, i64 1
+ %tmp20710 = getelementptr inbounds float, float* %tmp20709, i64 1
+ %tmp20711 = getelementptr inbounds float, float* %tmp20710, i64 1
+ %tmp20712 = getelementptr inbounds float, float* %tmp20711, i64 1
+ %tmp20713 = getelementptr inbounds float, float* %tmp20712, i64 1
+ %tmp20714 = getelementptr inbounds float, float* %tmp20713, i64 1
+ %tmp20715 = getelementptr inbounds float, float* %tmp20714, i64 1
+ %tmp20716 = getelementptr inbounds float, float* %tmp20715, i64 1
+ %tmp20717 = getelementptr inbounds float, float* %tmp20716, i64 1
+ %tmp20718 = getelementptr inbounds float, float* %tmp20717, i64 1
+ %tmp20719 = getelementptr inbounds float, float* %tmp20718, i64 1
+ %tmp20720 = getelementptr inbounds float, float* %tmp20719, i64 1
+ %tmp20721 = getelementptr inbounds float, float* %tmp20720, i64 1
+ %tmp20722 = getelementptr inbounds float, float* %tmp20721, i64 1
+ %tmp20723 = getelementptr inbounds float, float* %tmp20722, i64 1
+ %tmp20724 = getelementptr inbounds float, float* %tmp20723, i64 1
+ %tmp20725 = getelementptr inbounds float, float* %tmp20724, i64 1
+ %tmp20726 = getelementptr inbounds float, float* %tmp20725, i64 1
+ %tmp20727 = getelementptr inbounds float, float* %tmp20726, i64 1
+ %tmp20728 = getelementptr inbounds float, float* %tmp20727, i64 1
+ %tmp20729 = getelementptr inbounds float, float* %tmp20728, i64 1
+ %tmp20730 = getelementptr inbounds float, float* %tmp20729, i64 1
+ %tmp20731 = getelementptr inbounds float, float* %tmp20730, i64 1
+ %tmp20732 = getelementptr inbounds float, float* %tmp20731, i64 1
+ %tmp20733 = getelementptr inbounds float, float* %tmp20732, i64 1
+ %tmp20734 = getelementptr inbounds float, float* %tmp20733, i64 1
+ %tmp20735 = getelementptr inbounds float, float* %tmp20734, i64 1
+ %tmp20736 = getelementptr inbounds float, float* %tmp20735, i64 1
+ %tmp20737 = getelementptr inbounds float, float* %tmp20736, i64 1
+ %tmp20738 = getelementptr inbounds float, float* %tmp20737, i64 1
+ %tmp20739 = getelementptr inbounds float, float* %tmp20738, i64 1
+ %tmp20740 = getelementptr inbounds float, float* %tmp20739, i64 1
+ %tmp20741 = getelementptr inbounds float, float* %tmp20740, i64 1
+ %tmp20742 = getelementptr inbounds float, float* %tmp20741, i64 1
+ %tmp20743 = getelementptr inbounds float, float* %tmp20742, i64 1
+ %tmp20744 = getelementptr inbounds float, float* %tmp20743, i64 1
+ %tmp20745 = getelementptr inbounds float, float* %tmp20744, i64 1
+ %tmp20746 = getelementptr inbounds float, float* %tmp20745, i64 1
+ %tmp20747 = getelementptr inbounds float, float* %tmp20746, i64 1
+ %tmp20748 = getelementptr inbounds float, float* %tmp20747, i64 1
+ %tmp20749 = getelementptr inbounds float, float* %tmp20748, i64 1
+ %tmp20750 = getelementptr inbounds float, float* %tmp20749, i64 1
+ %tmp20751 = getelementptr inbounds float, float* %tmp20750, i64 1
+ %tmp20752 = getelementptr inbounds float, float* %tmp20751, i64 1
+ %tmp20753 = getelementptr inbounds float, float* %tmp20752, i64 1
+ %tmp20754 = getelementptr inbounds float, float* %tmp20753, i64 1
+ %tmp20755 = getelementptr inbounds float, float* %tmp20754, i64 1
+ %tmp20756 = getelementptr inbounds float, float* %tmp20755, i64 1
+ %tmp20757 = getelementptr inbounds float, float* %tmp20756, i64 1
+ %tmp20758 = getelementptr inbounds float, float* %tmp20757, i64 1
+ %tmp20759 = getelementptr inbounds float, float* %tmp20758, i64 1
+ %tmp20760 = getelementptr inbounds float, float* %tmp20759, i64 1
+ %tmp20761 = getelementptr inbounds float, float* %tmp20760, i64 1
+ %tmp20762 = getelementptr inbounds float, float* %tmp20761, i64 1
+ %tmp20763 = getelementptr inbounds float, float* %tmp20762, i64 1
+ %tmp20764 = getelementptr inbounds float, float* %tmp20763, i64 1
+ %tmp20765 = getelementptr inbounds float, float* %tmp20764, i64 1
+ %tmp20766 = getelementptr inbounds float, float* %tmp20765, i64 1
+ %tmp20767 = getelementptr inbounds float, float* %tmp20766, i64 1
+ %tmp20768 = getelementptr inbounds float, float* %tmp20767, i64 1
+ %tmp20769 = getelementptr inbounds float, float* %tmp20768, i64 1
+ %tmp20770 = getelementptr inbounds float, float* %tmp20769, i64 1
+ %tmp20771 = getelementptr inbounds float, float* %tmp20770, i64 1
+ %tmp20772 = getelementptr inbounds float, float* %tmp20771, i64 1
+ %tmp20773 = getelementptr inbounds float, float* %tmp20772, i64 1
+ %tmp20774 = getelementptr inbounds float, float* %tmp20773, i64 1
+ %tmp20775 = getelementptr inbounds float, float* %tmp20774, i64 1
+ %tmp20776 = getelementptr inbounds float, float* %tmp20775, i64 1
+ %tmp20777 = getelementptr inbounds float, float* %tmp20776, i64 1
+ %tmp20778 = getelementptr inbounds float, float* %tmp20777, i64 1
+ %tmp20779 = getelementptr inbounds float, float* %tmp20778, i64 1
+ %tmp20780 = getelementptr inbounds float, float* %tmp20779, i64 1
+ %tmp20781 = getelementptr inbounds float, float* %tmp20780, i64 1
+ %tmp20782 = getelementptr inbounds float, float* %tmp20781, i64 1
+ %tmp20783 = getelementptr inbounds float, float* %tmp20782, i64 1
+ %tmp20784 = getelementptr inbounds float, float* %tmp20783, i64 1
+ %tmp20785 = getelementptr inbounds float, float* %tmp20784, i64 1
+ %tmp20786 = getelementptr inbounds float, float* %tmp20785, i64 1
+ %tmp20787 = getelementptr inbounds float, float* %tmp20786, i64 1
+ %tmp20788 = getelementptr inbounds float, float* %tmp20787, i64 1
+ %tmp20789 = getelementptr inbounds float, float* %tmp20788, i64 1
+ %tmp20790 = getelementptr inbounds float, float* %tmp20789, i64 1
+ %tmp20791 = getelementptr inbounds float, float* %tmp20790, i64 1
+ %tmp20792 = getelementptr inbounds float, float* %tmp20791, i64 1
+ %tmp20793 = getelementptr inbounds float, float* %tmp20792, i64 1
+ %tmp20794 = getelementptr inbounds float, float* %tmp20793, i64 1
+ %tmp20795 = getelementptr inbounds float, float* %tmp20794, i64 1
+ %tmp20796 = getelementptr inbounds float, float* %tmp20795, i64 1
+ %tmp20797 = getelementptr inbounds float, float* %tmp20796, i64 1
+ %tmp20798 = getelementptr inbounds float, float* %tmp20797, i64 1
+ %tmp20799 = getelementptr inbounds float, float* %tmp20798, i64 1
+ %tmp20800 = getelementptr inbounds float, float* %tmp20799, i64 1
+ %tmp20801 = getelementptr inbounds float, float* %tmp20800, i64 1
+ %tmp20802 = getelementptr inbounds float, float* %tmp20801, i64 1
+ %tmp20803 = getelementptr inbounds float, float* %tmp20802, i64 1
+ %tmp20804 = getelementptr inbounds float, float* %tmp20803, i64 1
+ %tmp20805 = getelementptr inbounds float, float* %tmp20804, i64 1
+ %tmp20806 = getelementptr inbounds float, float* %tmp20805, i64 1
+ %tmp20807 = getelementptr inbounds float, float* %tmp20806, i64 1
+ %tmp20808 = getelementptr inbounds float, float* %tmp20807, i64 1
+ %tmp20809 = getelementptr inbounds float, float* %tmp20808, i64 1
+ %tmp20810 = getelementptr inbounds float, float* %tmp20809, i64 1
+ %tmp20811 = getelementptr inbounds float, float* %tmp20810, i64 1
+ %tmp20812 = getelementptr inbounds float, float* %tmp20811, i64 1
+ %tmp20813 = getelementptr inbounds float, float* %tmp20812, i64 1
+ %tmp20814 = getelementptr inbounds float, float* %tmp20813, i64 1
+ %tmp20815 = getelementptr inbounds float, float* %tmp20814, i64 1
+ %tmp20816 = getelementptr inbounds float, float* %tmp20815, i64 1
+ %tmp20817 = getelementptr inbounds float, float* %tmp20816, i64 1
+ %tmp20818 = getelementptr inbounds float, float* %tmp20817, i64 1
+ %tmp20819 = getelementptr inbounds float, float* %tmp20818, i64 1
+ %tmp20820 = getelementptr inbounds float, float* %tmp20819, i64 1
+ %tmp20821 = getelementptr inbounds float, float* %tmp20820, i64 1
+ %tmp20822 = getelementptr inbounds float, float* %tmp20821, i64 1
+ %tmp20823 = getelementptr inbounds float, float* %tmp20822, i64 1
+ %tmp20824 = getelementptr inbounds float, float* %tmp20823, i64 1
+ %tmp20825 = getelementptr inbounds float, float* %tmp20824, i64 1
+ %tmp20826 = getelementptr inbounds float, float* %tmp20825, i64 1
+ %tmp20827 = getelementptr inbounds float, float* %tmp20826, i64 1
+ %tmp20828 = getelementptr inbounds float, float* %tmp20827, i64 1
+ %tmp20829 = getelementptr inbounds float, float* %tmp20828, i64 1
+ %tmp20830 = getelementptr inbounds float, float* %tmp20829, i64 1
+ %tmp20831 = getelementptr inbounds float, float* %tmp20830, i64 1
+ %tmp20832 = getelementptr inbounds float, float* %tmp20831, i64 1
+ %tmp20833 = getelementptr inbounds float, float* %tmp20832, i64 1
+ %tmp20834 = getelementptr inbounds float, float* %tmp20833, i64 1
+ %tmp20835 = getelementptr inbounds float, float* %tmp20834, i64 1
+ %tmp20836 = getelementptr inbounds float, float* %tmp20835, i64 1
+ %tmp20837 = getelementptr inbounds float, float* %tmp20836, i64 1
+ %tmp20838 = getelementptr inbounds float, float* %tmp20837, i64 1
+ %tmp20839 = getelementptr inbounds float, float* %tmp20838, i64 1
+ %tmp20840 = getelementptr inbounds float, float* %tmp20839, i64 1
+ %tmp20841 = getelementptr inbounds float, float* %tmp20840, i64 1
+ %tmp20842 = getelementptr inbounds float, float* %tmp20841, i64 1
+ %tmp20843 = getelementptr inbounds float, float* %tmp20842, i64 1
+ %tmp20844 = getelementptr inbounds float, float* %tmp20843, i64 1
+ %tmp20845 = getelementptr inbounds float, float* %tmp20844, i64 1
+ %tmp20846 = getelementptr inbounds float, float* %tmp20845, i64 1
+ %tmp20847 = getelementptr inbounds float, float* %tmp20846, i64 1
+ %tmp20848 = getelementptr inbounds float, float* %tmp20847, i64 1
+ %tmp20849 = getelementptr inbounds float, float* %tmp20848, i64 1
+ %tmp20850 = getelementptr inbounds float, float* %tmp20849, i64 1
+ %tmp20851 = getelementptr inbounds float, float* %tmp20850, i64 1
+ %tmp20852 = getelementptr inbounds float, float* %tmp20851, i64 1
+ %tmp20853 = getelementptr inbounds float, float* %tmp20852, i64 1
+ %tmp20854 = getelementptr inbounds float, float* %tmp20853, i64 1
+ %tmp20855 = getelementptr inbounds float, float* %tmp20854, i64 1
+ %tmp20856 = getelementptr inbounds float, float* %tmp20855, i64 1
+ %tmp20857 = getelementptr inbounds float, float* %tmp20856, i64 1
+ %tmp20858 = getelementptr inbounds float, float* %tmp20857, i64 1
+ %tmp20859 = getelementptr inbounds float, float* %tmp20858, i64 1
+ %tmp20860 = getelementptr inbounds float, float* %tmp20859, i64 1
+ %tmp20861 = getelementptr inbounds float, float* %tmp20860, i64 1
+ %tmp20862 = getelementptr inbounds float, float* %tmp20861, i64 1
+ %tmp20863 = getelementptr inbounds float, float* %tmp20862, i64 1
+ %tmp20864 = getelementptr inbounds float, float* %tmp20863, i64 1
+ %tmp20865 = getelementptr inbounds float, float* %tmp20864, i64 1
+ %tmp20866 = getelementptr inbounds float, float* %tmp20865, i64 1
+ %tmp20867 = getelementptr inbounds float, float* %tmp20866, i64 1
+ %tmp20868 = getelementptr inbounds float, float* %tmp20867, i64 1
+ %tmp20869 = getelementptr inbounds float, float* %tmp20868, i64 1
+ %tmp20870 = getelementptr inbounds float, float* %tmp20869, i64 1
+ %tmp20871 = getelementptr inbounds float, float* %tmp20870, i64 1
+ %tmp20872 = getelementptr inbounds float, float* %tmp20871, i64 1
+ %tmp20873 = getelementptr inbounds float, float* %tmp20872, i64 1
+ %tmp20874 = getelementptr inbounds float, float* %tmp20873, i64 1
+ %tmp20875 = getelementptr inbounds float, float* %tmp20874, i64 1
+ %tmp20876 = getelementptr inbounds float, float* %tmp20875, i64 1
+ %tmp20877 = getelementptr inbounds float, float* %tmp20876, i64 1
+ %tmp20878 = getelementptr inbounds float, float* %tmp20877, i64 1
+ %tmp20879 = getelementptr inbounds float, float* %tmp20878, i64 1
+ %tmp20880 = getelementptr inbounds float, float* %tmp20879, i64 1
+ %tmp20881 = getelementptr inbounds float, float* %tmp20880, i64 1
+ %tmp20882 = getelementptr inbounds float, float* %tmp20881, i64 1
+ %tmp20883 = getelementptr inbounds float, float* %tmp20882, i64 1
+ %tmp20884 = getelementptr inbounds float, float* %tmp20883, i64 1
+ %tmp20885 = getelementptr inbounds float, float* %tmp20884, i64 1
+ %tmp20886 = getelementptr inbounds float, float* %tmp20885, i64 1
+ %tmp20887 = getelementptr inbounds float, float* %tmp20886, i64 1
+ %tmp20888 = getelementptr inbounds float, float* %tmp20887, i64 1
+ %tmp20889 = getelementptr inbounds float, float* %tmp20888, i64 1
+ %tmp20890 = getelementptr inbounds float, float* %tmp20889, i64 1
+ %tmp20891 = getelementptr inbounds float, float* %tmp20890, i64 1
+ %tmp20892 = getelementptr inbounds float, float* %tmp20891, i64 1
+ %tmp20893 = getelementptr inbounds float, float* %tmp20892, i64 1
+ %tmp20894 = getelementptr inbounds float, float* %tmp20893, i64 1
+ %tmp20895 = getelementptr inbounds float, float* %tmp20894, i64 1
+ %tmp20896 = getelementptr inbounds float, float* %tmp20895, i64 1
+ %tmp20897 = getelementptr inbounds float, float* %tmp20896, i64 1
+ %tmp20898 = getelementptr inbounds float, float* %tmp20897, i64 1
+ %tmp20899 = getelementptr inbounds float, float* %tmp20898, i64 1
+ %tmp20900 = getelementptr inbounds float, float* %tmp20899, i64 1
+ %tmp20901 = getelementptr inbounds float, float* %tmp20900, i64 1
+ %tmp20902 = getelementptr inbounds float, float* %tmp20901, i64 1
+ %tmp20903 = getelementptr inbounds float, float* %tmp20902, i64 1
+ %tmp20904 = getelementptr inbounds float, float* %tmp20903, i64 1
+ %tmp20905 = getelementptr inbounds float, float* %tmp20904, i64 1
+ %tmp20906 = getelementptr inbounds float, float* %tmp20905, i64 1
+ %tmp20907 = getelementptr inbounds float, float* %tmp20906, i64 1
+ %tmp20908 = getelementptr inbounds float, float* %tmp20907, i64 1
+ %tmp20909 = getelementptr inbounds float, float* %tmp20908, i64 1
+ %tmp20910 = getelementptr inbounds float, float* %tmp20909, i64 1
+ %tmp20911 = getelementptr inbounds float, float* %tmp20910, i64 1
+ %tmp20912 = getelementptr inbounds float, float* %tmp20911, i64 1
+ %tmp20913 = getelementptr inbounds float, float* %tmp20912, i64 1
+ %tmp20914 = getelementptr inbounds float, float* %tmp20913, i64 1
+ %tmp20915 = getelementptr inbounds float, float* %tmp20914, i64 1
+ %tmp20916 = getelementptr inbounds float, float* %tmp20915, i64 1
+ %tmp20917 = getelementptr inbounds float, float* %tmp20916, i64 1
+ %tmp20918 = getelementptr inbounds float, float* %tmp20917, i64 1
+ %tmp20919 = getelementptr inbounds float, float* %tmp20918, i64 1
+ %tmp20920 = getelementptr inbounds float, float* %tmp20919, i64 1
+ %tmp20921 = getelementptr inbounds float, float* %tmp20920, i64 1
+ %tmp20922 = getelementptr inbounds float, float* %tmp20921, i64 1
+ %tmp20923 = getelementptr inbounds float, float* %tmp20922, i64 1
+ %tmp20924 = getelementptr inbounds float, float* %tmp20923, i64 1
+ %tmp20925 = getelementptr inbounds float, float* %tmp20924, i64 1
+ %tmp20926 = getelementptr inbounds float, float* %tmp20925, i64 1
+ %tmp20927 = getelementptr inbounds float, float* %tmp20926, i64 1
+ %tmp20928 = getelementptr inbounds float, float* %tmp20927, i64 1
+ %tmp20929 = getelementptr inbounds float, float* %tmp20928, i64 1
+ %tmp20930 = getelementptr inbounds float, float* %tmp20929, i64 1
+ %tmp20931 = getelementptr inbounds float, float* %tmp20930, i64 1
+ %tmp20932 = getelementptr inbounds float, float* %tmp20931, i64 1
+ %tmp20933 = getelementptr inbounds float, float* %tmp20932, i64 1
+ %tmp20934 = getelementptr inbounds float, float* %tmp20933, i64 1
+ %tmp20935 = getelementptr inbounds float, float* %tmp20934, i64 1
+ %tmp20936 = getelementptr inbounds float, float* %tmp20935, i64 1
+ %tmp20937 = getelementptr inbounds float, float* %tmp20936, i64 1
+ %tmp20938 = getelementptr inbounds float, float* %tmp20937, i64 1
+ %tmp20939 = getelementptr inbounds float, float* %tmp20938, i64 1
+ %tmp20940 = getelementptr inbounds float, float* %tmp20939, i64 1
+ %tmp20941 = getelementptr inbounds float, float* %tmp20940, i64 1
+ %tmp20942 = getelementptr inbounds float, float* %tmp20941, i64 1
+ %tmp20943 = getelementptr inbounds float, float* %tmp20942, i64 1
+ %tmp20944 = getelementptr inbounds float, float* %tmp20943, i64 1
+ %tmp20945 = getelementptr inbounds float, float* %tmp20944, i64 1
+ %tmp20946 = getelementptr inbounds float, float* %tmp20945, i64 1
+ %tmp20947 = getelementptr inbounds float, float* %tmp20946, i64 1
+ %tmp20948 = getelementptr inbounds float, float* %tmp20947, i64 1
+ %tmp20949 = getelementptr inbounds float, float* %tmp20948, i64 1
+ %tmp20950 = getelementptr inbounds float, float* %tmp20949, i64 1
+ %tmp20951 = getelementptr inbounds float, float* %tmp20950, i64 1
+ %tmp20952 = getelementptr inbounds float, float* %tmp20951, i64 1
+ %tmp20953 = getelementptr inbounds float, float* %tmp20952, i64 1
+ %tmp20954 = getelementptr inbounds float, float* %tmp20953, i64 1
+ %tmp20955 = getelementptr inbounds float, float* %tmp20954, i64 1
+ %tmp20956 = getelementptr inbounds float, float* %tmp20955, i64 1
+ %tmp20957 = getelementptr inbounds float, float* %tmp20956, i64 1
+ %tmp20958 = getelementptr inbounds float, float* %tmp20957, i64 1
+ %tmp20959 = getelementptr inbounds float, float* %tmp20958, i64 1
+ %tmp20960 = getelementptr inbounds float, float* %tmp20959, i64 1
+ %tmp20961 = getelementptr inbounds float, float* %tmp20960, i64 1
+ %tmp20962 = getelementptr inbounds float, float* %tmp20961, i64 1
+ %tmp20963 = getelementptr inbounds float, float* %tmp20962, i64 1
+ %tmp20964 = getelementptr inbounds float, float* %tmp20963, i64 1
+ %tmp20965 = getelementptr inbounds float, float* %tmp20964, i64 1
+ %tmp20966 = getelementptr inbounds float, float* %tmp20965, i64 1
+ %tmp20967 = getelementptr inbounds float, float* %tmp20966, i64 1
+ %tmp20968 = getelementptr inbounds float, float* %tmp20967, i64 1
+ %tmp20969 = getelementptr inbounds float, float* %tmp20968, i64 1
+ %tmp20970 = getelementptr inbounds float, float* %tmp20969, i64 1
+ %tmp20971 = getelementptr inbounds float, float* %tmp20970, i64 1
+ %tmp20972 = getelementptr inbounds float, float* %tmp20971, i64 1
+ %tmp20973 = getelementptr inbounds float, float* %tmp20972, i64 1
+ %tmp20974 = getelementptr inbounds float, float* %tmp20973, i64 1
+ %tmp20975 = getelementptr inbounds float, float* %tmp20974, i64 1
+ %tmp20976 = getelementptr inbounds float, float* %tmp20975, i64 1
+ %tmp20977 = getelementptr inbounds float, float* %tmp20976, i64 1
+ %tmp20978 = getelementptr inbounds float, float* %tmp20977, i64 1
+ %tmp20979 = getelementptr inbounds float, float* %tmp20978, i64 1
+ %tmp20980 = getelementptr inbounds float, float* %tmp20979, i64 1
+ %tmp20981 = getelementptr inbounds float, float* %tmp20980, i64 1
+ %tmp20982 = getelementptr inbounds float, float* %tmp20981, i64 1
+ %tmp20983 = getelementptr inbounds float, float* %tmp20982, i64 1
+ %tmp20984 = getelementptr inbounds float, float* %tmp20983, i64 1
+ %tmp20985 = getelementptr inbounds float, float* %tmp20984, i64 1
+ %tmp20986 = getelementptr inbounds float, float* %tmp20985, i64 1
+ %tmp20987 = getelementptr inbounds float, float* %tmp20986, i64 1
+ %tmp20988 = getelementptr inbounds float, float* %tmp20987, i64 1
+ %tmp20989 = getelementptr inbounds float, float* %tmp20988, i64 1
+ %tmp20990 = getelementptr inbounds float, float* %tmp20989, i64 1
+ %tmp20991 = getelementptr inbounds float, float* %tmp20990, i64 1
+ %tmp20992 = getelementptr inbounds float, float* %tmp20991, i64 1
+ %tmp20993 = getelementptr inbounds float, float* %tmp20992, i64 1
+ %tmp20994 = getelementptr inbounds float, float* %tmp20993, i64 1
+ %tmp20995 = getelementptr inbounds float, float* %tmp20994, i64 1
+ %tmp20996 = getelementptr inbounds float, float* %tmp20995, i64 1
+ %tmp20997 = getelementptr inbounds float, float* %tmp20996, i64 1
+ %tmp20998 = getelementptr inbounds float, float* %tmp20997, i64 1
+ %tmp20999 = getelementptr inbounds float, float* %tmp20998, i64 1
+ %tmp21000 = getelementptr inbounds float, float* %tmp20999, i64 1
+ %tmp21001 = getelementptr inbounds float, float* %tmp21000, i64 1
+ %tmp21002 = getelementptr inbounds float, float* %tmp21001, i64 1
+ %tmp21003 = getelementptr inbounds float, float* %tmp21002, i64 1
+ %tmp21004 = getelementptr inbounds float, float* %tmp21003, i64 1
+ %tmp21005 = getelementptr inbounds float, float* %tmp21004, i64 1
+ %tmp21006 = getelementptr inbounds float, float* %tmp21005, i64 1
+ %tmp21007 = getelementptr inbounds float, float* %tmp21006, i64 1
+ %tmp21008 = getelementptr inbounds float, float* %tmp21007, i64 1
+ %tmp21009 = getelementptr inbounds float, float* %tmp21008, i64 1
+ %tmp21010 = getelementptr inbounds float, float* %tmp21009, i64 1
+ %tmp21011 = getelementptr inbounds float, float* %tmp21010, i64 1
+ %tmp21012 = getelementptr inbounds float, float* %tmp21011, i64 1
+ %tmp21013 = getelementptr inbounds float, float* %tmp21012, i64 1
+ %tmp21014 = getelementptr inbounds float, float* %tmp21013, i64 1
+ %tmp21015 = getelementptr inbounds float, float* %tmp21014, i64 1
+ %tmp21016 = getelementptr inbounds float, float* %tmp21015, i64 1
+ %tmp21017 = getelementptr inbounds float, float* %tmp21016, i64 1
+ %tmp21018 = getelementptr inbounds float, float* %tmp21017, i64 1
+ %tmp21019 = getelementptr inbounds float, float* %tmp21018, i64 1
+ %tmp21020 = getelementptr inbounds float, float* %tmp21019, i64 1
+ %tmp21021 = getelementptr inbounds float, float* %tmp21020, i64 1
+ %tmp21022 = getelementptr inbounds float, float* %tmp21021, i64 1
+ %tmp21023 = getelementptr inbounds float, float* %tmp21022, i64 1
+ %tmp21024 = getelementptr inbounds float, float* %tmp21023, i64 1
+ %tmp21025 = getelementptr inbounds float, float* %tmp21024, i64 1
+ %tmp21026 = getelementptr inbounds float, float* %tmp21025, i64 1
+ %tmp21027 = getelementptr inbounds float, float* %tmp21026, i64 1
+ %tmp21028 = getelementptr inbounds float, float* %tmp21027, i64 1
+ %tmp21029 = getelementptr inbounds float, float* %tmp21028, i64 1
+ %tmp21030 = getelementptr inbounds float, float* %tmp21029, i64 1
+ %tmp21031 = getelementptr inbounds float, float* %tmp21030, i64 1
+ %tmp21032 = getelementptr inbounds float, float* %tmp21031, i64 1
+ %tmp21033 = getelementptr inbounds float, float* %tmp21032, i64 1
+ %tmp21034 = getelementptr inbounds float, float* %tmp21033, i64 1
+ %tmp21035 = getelementptr inbounds float, float* %tmp21034, i64 1
+ %tmp21036 = getelementptr inbounds float, float* %tmp21035, i64 1
+ %tmp21037 = getelementptr inbounds float, float* %tmp21036, i64 1
+ %tmp21038 = getelementptr inbounds float, float* %tmp21037, i64 1
+ %tmp21039 = getelementptr inbounds float, float* %tmp21038, i64 1
+ %tmp21040 = getelementptr inbounds float, float* %tmp21039, i64 1
+ %tmp21041 = getelementptr inbounds float, float* %tmp21040, i64 1
+ %tmp21042 = getelementptr inbounds float, float* %tmp21041, i64 1
+ %tmp21043 = getelementptr inbounds float, float* %tmp21042, i64 1
+ %tmp21044 = getelementptr inbounds float, float* %tmp21043, i64 1
+ %tmp21045 = getelementptr inbounds float, float* %tmp21044, i64 1
+ %tmp21046 = getelementptr inbounds float, float* %tmp21045, i64 1
+ %tmp21047 = getelementptr inbounds float, float* %tmp21046, i64 1
+ %tmp21048 = getelementptr inbounds float, float* %tmp21047, i64 1
+ %tmp21049 = getelementptr inbounds float, float* %tmp21048, i64 1
+ %tmp21050 = getelementptr inbounds float, float* %tmp21049, i64 1
+ %tmp21051 = getelementptr inbounds float, float* %tmp21050, i64 1
+ %tmp21052 = getelementptr inbounds float, float* %tmp21051, i64 1
+ %tmp21053 = getelementptr inbounds float, float* %tmp21052, i64 1
+ %tmp21054 = getelementptr inbounds float, float* %tmp21053, i64 1
+ %tmp21055 = getelementptr inbounds float, float* %tmp21054, i64 1
+ %tmp21056 = getelementptr inbounds float, float* %tmp21055, i64 1
+ %tmp21057 = getelementptr inbounds float, float* %tmp21056, i64 1
+ %tmp21058 = getelementptr inbounds float, float* %tmp21057, i64 1
+ %tmp21059 = getelementptr inbounds float, float* %tmp21058, i64 1
+ %tmp21060 = getelementptr inbounds float, float* %tmp21059, i64 1
+ %tmp21061 = getelementptr inbounds float, float* %tmp21060, i64 1
+ %tmp21062 = getelementptr inbounds float, float* %tmp21061, i64 1
+ %tmp21063 = getelementptr inbounds float, float* %tmp21062, i64 1
+ %tmp21064 = getelementptr inbounds float, float* %tmp21063, i64 1
+ %tmp21065 = getelementptr inbounds float, float* %tmp21064, i64 1
+ %tmp21066 = getelementptr inbounds float, float* %tmp21065, i64 1
+ %tmp21067 = getelementptr inbounds float, float* %tmp21066, i64 1
+ %tmp21068 = getelementptr inbounds float, float* %tmp21067, i64 1
+ %tmp21069 = getelementptr inbounds float, float* %tmp21068, i64 1
+ %tmp21070 = getelementptr inbounds float, float* %tmp21069, i64 1
+ %tmp21071 = getelementptr inbounds float, float* %tmp21070, i64 1
+ %tmp21072 = getelementptr inbounds float, float* %tmp21071, i64 1
+ %tmp21073 = getelementptr inbounds float, float* %tmp21072, i64 1
+ %tmp21074 = getelementptr inbounds float, float* %tmp21073, i64 1
+ %tmp21075 = getelementptr inbounds float, float* %tmp21074, i64 1
+ %tmp21076 = getelementptr inbounds float, float* %tmp21075, i64 1
+ %tmp21077 = getelementptr inbounds float, float* %tmp21076, i64 1
+ %tmp21078 = getelementptr inbounds float, float* %tmp21077, i64 1
+ %tmp21079 = getelementptr inbounds float, float* %tmp21078, i64 1
+ %tmp21080 = getelementptr inbounds float, float* %tmp21079, i64 1
+ %tmp21081 = getelementptr inbounds float, float* %tmp21080, i64 1
+ %tmp21082 = getelementptr inbounds float, float* %tmp21081, i64 1
+ %tmp21083 = getelementptr inbounds float, float* %tmp21082, i64 1
+ %tmp21084 = getelementptr inbounds float, float* %tmp21083, i64 1
+ %tmp21085 = getelementptr inbounds float, float* %tmp21084, i64 1
+ %tmp21086 = getelementptr inbounds float, float* %tmp21085, i64 1
+ %tmp21087 = getelementptr inbounds float, float* %tmp21086, i64 1
+ %tmp21088 = getelementptr inbounds float, float* %tmp21087, i64 1
+ %tmp21089 = getelementptr inbounds float, float* %tmp21088, i64 1
+ %tmp21090 = getelementptr inbounds float, float* %tmp21089, i64 1
+ %tmp21091 = getelementptr inbounds float, float* %tmp21090, i64 1
+ %tmp21092 = getelementptr inbounds float, float* %tmp21091, i64 1
+ %tmp21093 = getelementptr inbounds float, float* %tmp21092, i64 1
+ %tmp21094 = getelementptr inbounds float, float* %tmp21093, i64 1
+ %tmp21095 = getelementptr inbounds float, float* %tmp21094, i64 1
+ %tmp21096 = getelementptr inbounds float, float* %tmp21095, i64 1
+ %tmp21097 = getelementptr inbounds float, float* %tmp21096, i64 1
+ %tmp21098 = getelementptr inbounds float, float* %tmp21097, i64 1
+ %tmp21099 = getelementptr inbounds float, float* %tmp21098, i64 1
+ %tmp21100 = getelementptr inbounds float, float* %tmp21099, i64 1
+ %tmp21101 = getelementptr inbounds float, float* %tmp21100, i64 1
+ %tmp21102 = getelementptr inbounds float, float* %tmp21101, i64 1
+ %tmp21103 = getelementptr inbounds float, float* %tmp21102, i64 1
+ %tmp21104 = getelementptr inbounds float, float* %tmp21103, i64 1
+ %tmp21105 = getelementptr inbounds float, float* %tmp21104, i64 1
+ %tmp21106 = getelementptr inbounds float, float* %tmp21105, i64 1
+ %tmp21107 = getelementptr inbounds float, float* %tmp21106, i64 1
+ %tmp21108 = getelementptr inbounds float, float* %tmp21107, i64 1
+ %tmp21109 = getelementptr inbounds float, float* %tmp21108, i64 1
+ %tmp21110 = getelementptr inbounds float, float* %tmp21109, i64 1
+ %tmp21111 = getelementptr inbounds float, float* %tmp21110, i64 1
+ %tmp21112 = getelementptr inbounds float, float* %tmp21111, i64 1
+ %tmp21113 = getelementptr inbounds float, float* %tmp21112, i64 1
+ %tmp21114 = getelementptr inbounds float, float* %tmp21113, i64 1
+ %tmp21115 = getelementptr inbounds float, float* %tmp21114, i64 1
+ %tmp21116 = getelementptr inbounds float, float* %tmp21115, i64 1
+ %tmp21117 = getelementptr inbounds float, float* %tmp21116, i64 1
+ %tmp21118 = getelementptr inbounds float, float* %tmp21117, i64 1
+ %tmp21119 = getelementptr inbounds float, float* %tmp21118, i64 1
+ %tmp21120 = getelementptr inbounds float, float* %tmp21119, i64 1
+ %tmp21121 = getelementptr inbounds float, float* %tmp21120, i64 1
+ %tmp21122 = getelementptr inbounds float, float* %tmp21121, i64 1
+ %tmp21123 = getelementptr inbounds float, float* %tmp21122, i64 1
+ %tmp21124 = getelementptr inbounds float, float* %tmp21123, i64 1
+ %tmp21125 = getelementptr inbounds float, float* %tmp21124, i64 1
+ %tmp21126 = getelementptr inbounds float, float* %tmp21125, i64 1
+ %tmp21127 = getelementptr inbounds float, float* %tmp21126, i64 1
+ %tmp21128 = getelementptr inbounds float, float* %tmp21127, i64 1
+ %tmp21129 = getelementptr inbounds float, float* %tmp21128, i64 1
+ %tmp21130 = getelementptr inbounds float, float* %tmp21129, i64 1
+ %tmp21131 = getelementptr inbounds float, float* %tmp21130, i64 1
+ %tmp21132 = getelementptr inbounds float, float* %tmp21131, i64 1
+ %tmp21133 = getelementptr inbounds float, float* %tmp21132, i64 1
+ %tmp21134 = getelementptr inbounds float, float* %tmp21133, i64 1
+ %tmp21135 = getelementptr inbounds float, float* %tmp21134, i64 1
+ %tmp21136 = getelementptr inbounds float, float* %tmp21135, i64 1
+ %tmp21137 = getelementptr inbounds float, float* %tmp21136, i64 1
+ %tmp21138 = getelementptr inbounds float, float* %tmp21137, i64 1
+ %tmp21139 = getelementptr inbounds float, float* %tmp21138, i64 1
+ %tmp21140 = getelementptr inbounds float, float* %tmp21139, i64 1
+ %tmp21141 = getelementptr inbounds float, float* %tmp21140, i64 1
+ %tmp21142 = getelementptr inbounds float, float* %tmp21141, i64 1
+ %tmp21143 = getelementptr inbounds float, float* %tmp21142, i64 1
+ %tmp21144 = getelementptr inbounds float, float* %tmp21143, i64 1
+ %tmp21145 = getelementptr inbounds float, float* %tmp21144, i64 1
+ %tmp21146 = getelementptr inbounds float, float* %tmp21145, i64 1
+ %tmp21147 = getelementptr inbounds float, float* %tmp21146, i64 1
+ %tmp21148 = getelementptr inbounds float, float* %tmp21147, i64 1
+ %tmp21149 = getelementptr inbounds float, float* %tmp21148, i64 1
+ %tmp21150 = getelementptr inbounds float, float* %tmp21149, i64 1
+ %tmp21151 = getelementptr inbounds float, float* %tmp21150, i64 1
+ %tmp21152 = getelementptr inbounds float, float* %tmp21151, i64 1
+ %tmp21153 = getelementptr inbounds float, float* %tmp21152, i64 1
+ %tmp21154 = getelementptr inbounds float, float* %tmp21153, i64 1
+ %tmp21155 = getelementptr inbounds float, float* %tmp21154, i64 1
+ %tmp21156 = getelementptr inbounds float, float* %tmp21155, i64 1
+ %tmp21157 = getelementptr inbounds float, float* %tmp21156, i64 1
+ %tmp21158 = getelementptr inbounds float, float* %tmp21157, i64 1
+ %tmp21159 = getelementptr inbounds float, float* %tmp21158, i64 1
+ %tmp21160 = getelementptr inbounds float, float* %tmp21159, i64 1
+ %tmp21161 = getelementptr inbounds float, float* %tmp21160, i64 1
+ %tmp21162 = getelementptr inbounds float, float* %tmp21161, i64 1
+ %tmp21163 = getelementptr inbounds float, float* %tmp21162, i64 1
+ %tmp21164 = getelementptr inbounds float, float* %tmp21163, i64 1
+ %tmp21165 = getelementptr inbounds float, float* %tmp21164, i64 1
+ %tmp21166 = getelementptr inbounds float, float* %tmp21165, i64 1
+ %tmp21167 = getelementptr inbounds float, float* %tmp21166, i64 1
+ %tmp21168 = getelementptr inbounds float, float* %tmp21167, i64 1
+ %tmp21169 = getelementptr inbounds float, float* %tmp21168, i64 1
+ %tmp21170 = getelementptr inbounds float, float* %tmp21169, i64 1
+ %tmp21171 = getelementptr inbounds float, float* %tmp21170, i64 1
+ %tmp21172 = getelementptr inbounds float, float* %tmp21171, i64 1
+ %tmp21173 = getelementptr inbounds float, float* %tmp21172, i64 1
+ %tmp21174 = getelementptr inbounds float, float* %tmp21173, i64 1
+ %tmp21175 = getelementptr inbounds float, float* %tmp21174, i64 1
+ %tmp21176 = getelementptr inbounds float, float* %tmp21175, i64 1
+ %tmp21177 = getelementptr inbounds float, float* %tmp21176, i64 1
+ %tmp21178 = getelementptr inbounds float, float* %tmp21177, i64 1
+ %tmp21179 = getelementptr inbounds float, float* %tmp21178, i64 1
+ %tmp21180 = getelementptr inbounds float, float* %tmp21179, i64 1
+ %tmp21181 = getelementptr inbounds float, float* %tmp21180, i64 1
+ %tmp21182 = getelementptr inbounds float, float* %tmp21181, i64 1
+ %tmp21183 = getelementptr inbounds float, float* %tmp21182, i64 1
+ %tmp21184 = getelementptr inbounds float, float* %tmp21183, i64 1
+ %tmp21185 = getelementptr inbounds float, float* %tmp21184, i64 1
+ %tmp21186 = getelementptr inbounds float, float* %tmp21185, i64 1
+ %tmp21187 = getelementptr inbounds float, float* %tmp21186, i64 1
+ %tmp21188 = getelementptr inbounds float, float* %tmp21187, i64 1
+ %tmp21189 = getelementptr inbounds float, float* %tmp21188, i64 1
+ %tmp21190 = getelementptr inbounds float, float* %tmp21189, i64 1
+ %tmp21191 = getelementptr inbounds float, float* %tmp21190, i64 1
+ %tmp21192 = getelementptr inbounds float, float* %tmp21191, i64 1
+ %tmp21193 = getelementptr inbounds float, float* %tmp21192, i64 1
+ %tmp21194 = getelementptr inbounds float, float* %tmp21193, i64 1
+ %tmp21195 = getelementptr inbounds float, float* %tmp21194, i64 1
+ %tmp21196 = getelementptr inbounds float, float* %tmp21195, i64 1
+ %tmp21197 = getelementptr inbounds float, float* %tmp21196, i64 1
+ %tmp21198 = getelementptr inbounds float, float* %tmp21197, i64 1
+ %tmp21199 = getelementptr inbounds float, float* %tmp21198, i64 1
+ %tmp21200 = getelementptr inbounds float, float* %tmp21199, i64 1
+ %tmp21201 = getelementptr inbounds float, float* %tmp21200, i64 1
+ %tmp21202 = getelementptr inbounds float, float* %tmp21201, i64 1
+ %tmp21203 = getelementptr inbounds float, float* %tmp21202, i64 1
+ %tmp21204 = getelementptr inbounds float, float* %tmp21203, i64 1
+ %tmp21205 = getelementptr inbounds float, float* %tmp21204, i64 1
+ %tmp21206 = getelementptr inbounds float, float* %tmp21205, i64 1
+ %tmp21207 = getelementptr inbounds float, float* %tmp21206, i64 1
+ %tmp21208 = getelementptr inbounds float, float* %tmp21207, i64 1
+ %tmp21209 = getelementptr inbounds float, float* %tmp21208, i64 1
+ %tmp21210 = getelementptr inbounds float, float* %tmp21209, i64 1
+ %tmp21211 = getelementptr inbounds float, float* %tmp21210, i64 1
+ %tmp21212 = getelementptr inbounds float, float* %tmp21211, i64 1
+ %tmp21213 = getelementptr inbounds float, float* %tmp21212, i64 1
+ %tmp21214 = getelementptr inbounds float, float* %tmp21213, i64 1
+ %tmp21215 = getelementptr inbounds float, float* %tmp21214, i64 1
+ %tmp21216 = getelementptr inbounds float, float* %tmp21215, i64 1
+ %tmp21217 = getelementptr inbounds float, float* %tmp21216, i64 1
+ %tmp21218 = getelementptr inbounds float, float* %tmp21217, i64 1
+ %tmp21219 = getelementptr inbounds float, float* %tmp21218, i64 1
+ %tmp21220 = getelementptr inbounds float, float* %tmp21219, i64 1
+ %tmp21221 = getelementptr inbounds float, float* %tmp21220, i64 1
+ %tmp21222 = getelementptr inbounds float, float* %tmp21221, i64 1
+ %tmp21223 = getelementptr inbounds float, float* %tmp21222, i64 1
+ %tmp21224 = getelementptr inbounds float, float* %tmp21223, i64 1
+ %tmp21225 = getelementptr inbounds float, float* %tmp21224, i64 1
+ %tmp21226 = getelementptr inbounds float, float* %tmp21225, i64 1
+ %tmp21227 = getelementptr inbounds float, float* %tmp21226, i64 1
+ %tmp21228 = getelementptr inbounds float, float* %tmp21227, i64 1
+ %tmp21229 = getelementptr inbounds float, float* %tmp21228, i64 1
+ %tmp21230 = getelementptr inbounds float, float* %tmp21229, i64 1
+ %tmp21231 = getelementptr inbounds float, float* %tmp21230, i64 1
+ %tmp21232 = getelementptr inbounds float, float* %tmp21231, i64 1
+ %tmp21233 = getelementptr inbounds float, float* %tmp21232, i64 1
+ %tmp21234 = getelementptr inbounds float, float* %tmp21233, i64 1
+ %tmp21235 = getelementptr inbounds float, float* %tmp21234, i64 1
+ %tmp21236 = getelementptr inbounds float, float* %tmp21235, i64 1
+ %tmp21237 = getelementptr inbounds float, float* %tmp21236, i64 1
+ %tmp21238 = getelementptr inbounds float, float* %tmp21237, i64 1
+ %tmp21239 = getelementptr inbounds float, float* %tmp21238, i64 1
+ %tmp21240 = getelementptr inbounds float, float* %tmp21239, i64 1
+ %tmp21241 = getelementptr inbounds float, float* %tmp21240, i64 1
+ %tmp21242 = getelementptr inbounds float, float* %tmp21241, i64 1
+ %tmp21243 = getelementptr inbounds float, float* %tmp21242, i64 1
+ %tmp21244 = getelementptr inbounds float, float* %tmp21243, i64 1
+ %tmp21245 = getelementptr inbounds float, float* %tmp21244, i64 1
+ %tmp21246 = getelementptr inbounds float, float* %tmp21245, i64 1
+ %tmp21247 = getelementptr inbounds float, float* %tmp21246, i64 1
+ %tmp21248 = getelementptr inbounds float, float* %tmp21247, i64 1
+ %tmp21249 = getelementptr inbounds float, float* %tmp21248, i64 1
+ %tmp21250 = getelementptr inbounds float, float* %tmp21249, i64 1
+ %tmp21251 = getelementptr inbounds float, float* %tmp21250, i64 1
+ %tmp21252 = getelementptr inbounds float, float* %tmp21251, i64 1
+ %tmp21253 = getelementptr inbounds float, float* %tmp21252, i64 1
+ %tmp21254 = getelementptr inbounds float, float* %tmp21253, i64 1
+ %tmp21255 = getelementptr inbounds float, float* %tmp21254, i64 1
+ %tmp21256 = getelementptr inbounds float, float* %tmp21255, i64 1
+ %tmp21257 = getelementptr inbounds float, float* %tmp21256, i64 1
+ %tmp21258 = getelementptr inbounds float, float* %tmp21257, i64 1
+ %tmp21259 = getelementptr inbounds float, float* %tmp21258, i64 1
+ %tmp21260 = getelementptr inbounds float, float* %tmp21259, i64 1
+ %tmp21261 = getelementptr inbounds float, float* %tmp21260, i64 1
+ %tmp21262 = getelementptr inbounds float, float* %tmp21261, i64 1
+ %tmp21263 = getelementptr inbounds float, float* %tmp21262, i64 1
+ %tmp21264 = getelementptr inbounds float, float* %tmp21263, i64 1
+ %tmp21265 = getelementptr inbounds float, float* %tmp21264, i64 1
+ %tmp21266 = getelementptr inbounds float, float* %tmp21265, i64 1
+ %tmp21267 = getelementptr inbounds float, float* %tmp21266, i64 1
+ %tmp21268 = getelementptr inbounds float, float* %tmp21267, i64 1
+ %tmp21269 = getelementptr inbounds float, float* %tmp21268, i64 1
+ %tmp21270 = getelementptr inbounds float, float* %tmp21269, i64 1
+ %tmp21271 = getelementptr inbounds float, float* %tmp21270, i64 1
+ %tmp21272 = getelementptr inbounds float, float* %tmp21271, i64 1
+ %tmp21273 = getelementptr inbounds float, float* %tmp21272, i64 1
+ %tmp21274 = getelementptr inbounds float, float* %tmp21273, i64 1
+ %tmp21275 = getelementptr inbounds float, float* %tmp21274, i64 1
+ %tmp21276 = getelementptr inbounds float, float* %tmp21275, i64 1
+ %tmp21277 = getelementptr inbounds float, float* %tmp21276, i64 1
+ %tmp21278 = getelementptr inbounds float, float* %tmp21277, i64 1
+ %tmp21279 = getelementptr inbounds float, float* %tmp21278, i64 1
+ %tmp21280 = getelementptr inbounds float, float* %tmp21279, i64 1
+ %tmp21281 = getelementptr inbounds float, float* %tmp21280, i64 1
+ %tmp21282 = getelementptr inbounds float, float* %tmp21281, i64 1
+ %tmp21283 = getelementptr inbounds float, float* %tmp21282, i64 1
+ %tmp21284 = getelementptr inbounds float, float* %tmp21283, i64 1
+ %tmp21285 = getelementptr inbounds float, float* %tmp21284, i64 1
+ %tmp21286 = getelementptr inbounds float, float* %tmp21285, i64 1
+ %tmp21287 = getelementptr inbounds float, float* %tmp21286, i64 1
+ %tmp21288 = getelementptr inbounds float, float* %tmp21287, i64 1
+ %tmp21289 = getelementptr inbounds float, float* %tmp21288, i64 1
+ %tmp21290 = getelementptr inbounds float, float* %tmp21289, i64 1
+ %tmp21291 = getelementptr inbounds float, float* %tmp21290, i64 1
+ %tmp21292 = getelementptr inbounds float, float* %tmp21291, i64 1
+ %tmp21293 = getelementptr inbounds float, float* %tmp21292, i64 1
+ %tmp21294 = getelementptr inbounds float, float* %tmp21293, i64 1
+ %tmp21295 = getelementptr inbounds float, float* %tmp21294, i64 1
+ %tmp21296 = getelementptr inbounds float, float* %tmp21295, i64 1
+ %tmp21297 = getelementptr inbounds float, float* %tmp21296, i64 1
+ %tmp21298 = getelementptr inbounds float, float* %tmp21297, i64 1
+ %tmp21299 = getelementptr inbounds float, float* %tmp21298, i64 1
+ %tmp21300 = getelementptr inbounds float, float* %tmp21299, i64 1
+ %tmp21301 = getelementptr inbounds float, float* %tmp21300, i64 1
+ %tmp21302 = getelementptr inbounds float, float* %tmp21301, i64 1
+ %tmp21303 = getelementptr inbounds float, float* %tmp21302, i64 1
+ %tmp21304 = getelementptr inbounds float, float* %tmp21303, i64 1
+ %tmp21305 = getelementptr inbounds float, float* %tmp21304, i64 1
+ %tmp21306 = getelementptr inbounds float, float* %tmp21305, i64 1
+ %tmp21307 = getelementptr inbounds float, float* %tmp21306, i64 1
+ %tmp21308 = getelementptr inbounds float, float* %tmp21307, i64 1
+ %tmp21309 = getelementptr inbounds float, float* %tmp21308, i64 1
+ %tmp21310 = getelementptr inbounds float, float* %tmp21309, i64 1
+ %tmp21311 = getelementptr inbounds float, float* %tmp21310, i64 1
+ %tmp21312 = getelementptr inbounds float, float* %tmp21311, i64 1
+ %tmp21313 = getelementptr inbounds float, float* %tmp21312, i64 1
+ %tmp21314 = getelementptr inbounds float, float* %tmp21313, i64 1
+ %tmp21315 = getelementptr inbounds float, float* %tmp21314, i64 1
+ %tmp21316 = getelementptr inbounds float, float* %tmp21315, i64 1
+ %tmp21317 = getelementptr inbounds float, float* %tmp21316, i64 1
+ %tmp21318 = getelementptr inbounds float, float* %tmp21317, i64 1
+ %tmp21319 = getelementptr inbounds float, float* %tmp21318, i64 1
+ %tmp21320 = getelementptr inbounds float, float* %tmp21319, i64 1
+ %tmp21321 = getelementptr inbounds float, float* %tmp21320, i64 1
+ %tmp21322 = getelementptr inbounds float, float* %tmp21321, i64 1
+ %tmp21323 = getelementptr inbounds float, float* %tmp21322, i64 1
+ %tmp21324 = getelementptr inbounds float, float* %tmp21323, i64 1
+ %tmp21325 = getelementptr inbounds float, float* %tmp21324, i64 1
+ %tmp21326 = getelementptr inbounds float, float* %tmp21325, i64 1
+ %tmp21327 = getelementptr inbounds float, float* %tmp21326, i64 1
+ %tmp21328 = getelementptr inbounds float, float* %tmp21327, i64 1
+ %tmp21329 = getelementptr inbounds float, float* %tmp21328, i64 1
+ %tmp21330 = getelementptr inbounds float, float* %tmp21329, i64 1
+ %tmp21331 = getelementptr inbounds float, float* %tmp21330, i64 1
+ %tmp21332 = getelementptr inbounds float, float* %tmp21331, i64 1
+ %tmp21333 = getelementptr inbounds float, float* %tmp21332, i64 1
+ %tmp21334 = getelementptr inbounds float, float* %tmp21333, i64 1
+ %tmp21335 = getelementptr inbounds float, float* %tmp21334, i64 1
+ %tmp21336 = getelementptr inbounds float, float* %tmp21335, i64 1
+ %tmp21337 = getelementptr inbounds float, float* %tmp21336, i64 1
+ %tmp21338 = getelementptr inbounds float, float* %tmp21337, i64 1
+ %tmp21339 = getelementptr inbounds float, float* %tmp21338, i64 1
+ %tmp21340 = getelementptr inbounds float, float* %tmp21339, i64 1
+ %tmp21341 = getelementptr inbounds float, float* %tmp21340, i64 1
+ %tmp21342 = getelementptr inbounds float, float* %tmp21341, i64 1
+ %tmp21343 = getelementptr inbounds float, float* %tmp21342, i64 1
+ %tmp21344 = getelementptr inbounds float, float* %tmp21343, i64 1
+ %tmp21345 = getelementptr inbounds float, float* %tmp21344, i64 1
+ %tmp21346 = getelementptr inbounds float, float* %tmp21345, i64 1
+ %tmp21347 = getelementptr inbounds float, float* %tmp21346, i64 1
+ %tmp21348 = getelementptr inbounds float, float* %tmp21347, i64 1
+ %tmp21349 = getelementptr inbounds float, float* %tmp21348, i64 1
+ %tmp21350 = getelementptr inbounds float, float* %tmp21349, i64 1
+ %tmp21351 = getelementptr inbounds float, float* %tmp21350, i64 1
+ %tmp21352 = getelementptr inbounds float, float* %tmp21351, i64 1
+ %tmp21353 = getelementptr inbounds float, float* %tmp21352, i64 1
+ %tmp21354 = getelementptr inbounds float, float* %tmp21353, i64 1
+ %tmp21355 = getelementptr inbounds float, float* %tmp21354, i64 1
+ %tmp21356 = getelementptr inbounds float, float* %tmp21355, i64 1
+ %tmp21357 = getelementptr inbounds float, float* %tmp21356, i64 1
+ %tmp21358 = getelementptr inbounds float, float* %tmp21357, i64 1
+ %tmp21359 = getelementptr inbounds float, float* %tmp21358, i64 1
+ %tmp21360 = getelementptr inbounds float, float* %tmp21359, i64 1
+ %tmp21361 = getelementptr inbounds float, float* %tmp21360, i64 1
+ %tmp21362 = getelementptr inbounds float, float* %tmp21361, i64 1
+ %tmp21363 = getelementptr inbounds float, float* %tmp21362, i64 1
+ %tmp21364 = getelementptr inbounds float, float* %tmp21363, i64 1
+ %tmp21365 = getelementptr inbounds float, float* %tmp21364, i64 1
+ %tmp21366 = getelementptr inbounds float, float* %tmp21365, i64 1
+ %tmp21367 = getelementptr inbounds float, float* %tmp21366, i64 1
+ %tmp21368 = getelementptr inbounds float, float* %tmp21367, i64 1
+ %tmp21369 = getelementptr inbounds float, float* %tmp21368, i64 1
+ %tmp21370 = getelementptr inbounds float, float* %tmp21369, i64 1
+ %tmp21371 = getelementptr inbounds float, float* %tmp21370, i64 1
+ %tmp21372 = getelementptr inbounds float, float* %tmp21371, i64 1
+ %tmp21373 = getelementptr inbounds float, float* %tmp21372, i64 1
+ %tmp21374 = getelementptr inbounds float, float* %tmp21373, i64 1
+ %tmp21375 = getelementptr inbounds float, float* %tmp21374, i64 1
+ %tmp21376 = getelementptr inbounds float, float* %tmp21375, i64 1
+ %tmp21377 = getelementptr inbounds float, float* %tmp21376, i64 1
+ %tmp21378 = getelementptr inbounds float, float* %tmp21377, i64 1
+ %tmp21379 = getelementptr inbounds float, float* %tmp21378, i64 1
+ %tmp21380 = getelementptr inbounds float, float* %tmp21379, i64 1
+ %tmp21381 = getelementptr inbounds float, float* %tmp21380, i64 1
+ %tmp21382 = getelementptr inbounds float, float* %tmp21381, i64 1
+ %tmp21383 = getelementptr inbounds float, float* %tmp21382, i64 1
+ %tmp21384 = getelementptr inbounds float, float* %tmp21383, i64 1
+ %tmp21385 = getelementptr inbounds float, float* %tmp21384, i64 1
+ %tmp21386 = getelementptr inbounds float, float* %tmp21385, i64 1
+ %tmp21387 = getelementptr inbounds float, float* %tmp21386, i64 1
+ %tmp21388 = getelementptr inbounds float, float* %tmp21387, i64 1
+ %tmp21389 = getelementptr inbounds float, float* %tmp21388, i64 1
+ %tmp21390 = getelementptr inbounds float, float* %tmp21389, i64 1
+ %tmp21391 = getelementptr inbounds float, float* %tmp21390, i64 1
+ %tmp21392 = getelementptr inbounds float, float* %tmp21391, i64 1
+ %tmp21393 = getelementptr inbounds float, float* %tmp21392, i64 1
+ %tmp21394 = getelementptr inbounds float, float* %tmp21393, i64 1
+ %tmp21395 = getelementptr inbounds float, float* %tmp21394, i64 1
+ %tmp21396 = getelementptr inbounds float, float* %tmp21395, i64 1
+ %tmp21397 = getelementptr inbounds float, float* %tmp21396, i64 1
+ %tmp21398 = getelementptr inbounds float, float* %tmp21397, i64 1
+ %tmp21399 = getelementptr inbounds float, float* %tmp21398, i64 1
+ %tmp21400 = getelementptr inbounds float, float* %tmp21399, i64 1
+ %tmp21401 = getelementptr inbounds float, float* %tmp21400, i64 1
+ %tmp21402 = getelementptr inbounds float, float* %tmp21401, i64 1
+ %tmp21403 = getelementptr inbounds float, float* %tmp21402, i64 1
+ %tmp21404 = getelementptr inbounds float, float* %tmp21403, i64 1
+ %tmp21405 = getelementptr inbounds float, float* %tmp21404, i64 1
+ %tmp21406 = getelementptr inbounds float, float* %tmp21405, i64 1
+ %tmp21407 = getelementptr inbounds float, float* %tmp21406, i64 1
+ %tmp21408 = getelementptr inbounds float, float* %tmp21407, i64 1
+ %tmp21409 = getelementptr inbounds float, float* %tmp21408, i64 1
+ %tmp21410 = getelementptr inbounds float, float* %tmp21409, i64 1
+ %tmp21411 = getelementptr inbounds float, float* %tmp21410, i64 1
+ %tmp21412 = getelementptr inbounds float, float* %tmp21411, i64 1
+ %tmp21413 = getelementptr inbounds float, float* %tmp21412, i64 1
+ %tmp21414 = getelementptr inbounds float, float* %tmp21413, i64 1
+ %tmp21415 = getelementptr inbounds float, float* %tmp21414, i64 1
+ %tmp21416 = getelementptr inbounds float, float* %tmp21415, i64 1
+ %tmp21417 = getelementptr inbounds float, float* %tmp21416, i64 1
+ %tmp21418 = getelementptr inbounds float, float* %tmp21417, i64 1
+ %tmp21419 = getelementptr inbounds float, float* %tmp21418, i64 1
+ %tmp21420 = getelementptr inbounds float, float* %tmp21419, i64 1
+ %tmp21421 = getelementptr inbounds float, float* %tmp21420, i64 1
+ %tmp21422 = getelementptr inbounds float, float* %tmp21421, i64 1
+ %tmp21423 = getelementptr inbounds float, float* %tmp21422, i64 1
+ %tmp21424 = getelementptr inbounds float, float* %tmp21423, i64 1
+ %tmp21425 = getelementptr inbounds float, float* %tmp21424, i64 1
+ %tmp21426 = getelementptr inbounds float, float* %tmp21425, i64 1
+ %tmp21427 = getelementptr inbounds float, float* %tmp21426, i64 1
+ %tmp21428 = getelementptr inbounds float, float* %tmp21427, i64 1
+ %tmp21429 = getelementptr inbounds float, float* %tmp21428, i64 1
+ %tmp21430 = getelementptr inbounds float, float* %tmp21429, i64 1
+ %tmp21431 = getelementptr inbounds float, float* %tmp21430, i64 1
+ %tmp21432 = getelementptr inbounds float, float* %tmp21431, i64 1
+ %tmp21433 = getelementptr inbounds float, float* %tmp21432, i64 1
+ %tmp21434 = getelementptr inbounds float, float* %tmp21433, i64 1
+ %tmp21435 = getelementptr inbounds float, float* %tmp21434, i64 1
+ %tmp21436 = getelementptr inbounds float, float* %tmp21435, i64 1
+ %tmp21437 = getelementptr inbounds float, float* %tmp21436, i64 1
+ %tmp21438 = getelementptr inbounds float, float* %tmp21437, i64 1
+ %tmp21439 = getelementptr inbounds float, float* %tmp21438, i64 1
+ %tmp21440 = getelementptr inbounds float, float* %tmp21439, i64 1
+ %tmp21441 = getelementptr inbounds float, float* %tmp21440, i64 1
+ %tmp21442 = getelementptr inbounds float, float* %tmp21441, i64 1
+ %tmp21443 = getelementptr inbounds float, float* %tmp21442, i64 1
+ %tmp21444 = getelementptr inbounds float, float* %tmp21443, i64 1
+ %tmp21445 = getelementptr inbounds float, float* %tmp21444, i64 1
+ %tmp21446 = getelementptr inbounds float, float* %tmp21445, i64 1
+ %tmp21447 = getelementptr inbounds float, float* %tmp21446, i64 1
+ %tmp21448 = getelementptr inbounds float, float* %tmp21447, i64 1
+ %tmp21449 = getelementptr inbounds float, float* %tmp21448, i64 1
+ %tmp21450 = getelementptr inbounds float, float* %tmp21449, i64 1
+ %tmp21451 = getelementptr inbounds float, float* %tmp21450, i64 1
+ %tmp21452 = getelementptr inbounds float, float* %tmp21451, i64 1
+ %tmp21453 = getelementptr inbounds float, float* %tmp21452, i64 1
+ %tmp21454 = getelementptr inbounds float, float* %tmp21453, i64 1
+ %tmp21455 = getelementptr inbounds float, float* %tmp21454, i64 1
+ %tmp21456 = getelementptr inbounds float, float* %tmp21455, i64 1
+ %tmp21457 = getelementptr inbounds float, float* %tmp21456, i64 1
+ %tmp21458 = getelementptr inbounds float, float* %tmp21457, i64 1
+ %tmp21459 = getelementptr inbounds float, float* %tmp21458, i64 1
+ %tmp21460 = getelementptr inbounds float, float* %tmp21459, i64 1
+ %tmp21461 = getelementptr inbounds float, float* %tmp21460, i64 1
+ %tmp21462 = getelementptr inbounds float, float* %tmp21461, i64 1
+ %tmp21463 = getelementptr inbounds float, float* %tmp21462, i64 1
+ %tmp21464 = getelementptr inbounds float, float* %tmp21463, i64 1
+ %tmp21465 = getelementptr inbounds float, float* %tmp21464, i64 1
+ %tmp21466 = getelementptr inbounds float, float* %tmp21465, i64 1
+ %tmp21467 = getelementptr inbounds float, float* %tmp21466, i64 1
+ %tmp21468 = getelementptr inbounds float, float* %tmp21467, i64 1
+ %tmp21469 = getelementptr inbounds float, float* %tmp21468, i64 1
+ %tmp21470 = getelementptr inbounds float, float* %tmp21469, i64 1
+ %tmp21471 = getelementptr inbounds float, float* %tmp21470, i64 1
+ %tmp21472 = getelementptr inbounds float, float* %tmp21471, i64 1
+ %tmp21473 = getelementptr inbounds float, float* %tmp21472, i64 1
+ %tmp21474 = getelementptr inbounds float, float* %tmp21473, i64 1
+ %tmp21475 = getelementptr inbounds float, float* %tmp21474, i64 1
+ %tmp21476 = getelementptr inbounds float, float* %tmp21475, i64 1
+ %tmp21477 = getelementptr inbounds float, float* %tmp21476, i64 1
+ %tmp21478 = getelementptr inbounds float, float* %tmp21477, i64 1
+ %tmp21479 = getelementptr inbounds float, float* %tmp21478, i64 1
+ %tmp21480 = getelementptr inbounds float, float* %tmp21479, i64 1
+ %tmp21481 = getelementptr inbounds float, float* %tmp21480, i64 1
+ %tmp21482 = getelementptr inbounds float, float* %tmp21481, i64 1
+ %tmp21483 = getelementptr inbounds float, float* %tmp21482, i64 1
+ %tmp21484 = getelementptr inbounds float, float* %tmp21483, i64 1
+ %tmp21485 = getelementptr inbounds float, float* %tmp21484, i64 1
+ %tmp21486 = getelementptr inbounds float, float* %tmp21485, i64 1
+ %tmp21487 = getelementptr inbounds float, float* %tmp21486, i64 1
+ %tmp21488 = getelementptr inbounds float, float* %tmp21487, i64 1
+ %tmp21489 = getelementptr inbounds float, float* %tmp21488, i64 1
+ %tmp21490 = getelementptr inbounds float, float* %tmp21489, i64 1
+ %tmp21491 = getelementptr inbounds float, float* %tmp21490, i64 1
+ %tmp21492 = getelementptr inbounds float, float* %tmp21491, i64 1
+ %tmp21493 = getelementptr inbounds float, float* %tmp21492, i64 1
+ %tmp21494 = getelementptr inbounds float, float* %tmp21493, i64 1
+ %tmp21495 = getelementptr inbounds float, float* %tmp21494, i64 1
+ %tmp21496 = getelementptr inbounds float, float* %tmp21495, i64 1
+ %tmp21497 = getelementptr inbounds float, float* %tmp21496, i64 1
+ %tmp21498 = getelementptr inbounds float, float* %tmp21497, i64 1
+ %tmp21499 = getelementptr inbounds float, float* %tmp21498, i64 1
+ %tmp21500 = getelementptr inbounds float, float* %tmp21499, i64 1
+ %tmp21501 = getelementptr inbounds float, float* %tmp21500, i64 1
+ %tmp21502 = getelementptr inbounds float, float* %tmp21501, i64 1
+ %tmp21503 = getelementptr inbounds float, float* %tmp21502, i64 1
+ %tmp21504 = getelementptr inbounds float, float* %tmp21503, i64 1
+ %tmp21505 = getelementptr inbounds float, float* %tmp21504, i64 1
+ %tmp21506 = getelementptr inbounds float, float* %tmp21505, i64 1
+ %tmp21507 = getelementptr inbounds float, float* %tmp21506, i64 1
+ %tmp21508 = getelementptr inbounds float, float* %tmp21507, i64 1
+ %tmp21509 = getelementptr inbounds float, float* %tmp21508, i64 1
+ %tmp21510 = getelementptr inbounds float, float* %tmp21509, i64 1
+ %tmp21511 = getelementptr inbounds float, float* %tmp21510, i64 1
+ %tmp21512 = getelementptr inbounds float, float* %tmp21511, i64 1
+ %tmp21513 = getelementptr inbounds float, float* %tmp21512, i64 1
+ %tmp21514 = getelementptr inbounds float, float* %tmp21513, i64 1
+ %tmp21515 = getelementptr inbounds float, float* %tmp21514, i64 1
+ %tmp21516 = getelementptr inbounds float, float* %tmp21515, i64 1
+ %tmp21517 = getelementptr inbounds float, float* %tmp21516, i64 1
+ %tmp21518 = getelementptr inbounds float, float* %tmp21517, i64 1
+ %tmp21519 = getelementptr inbounds float, float* %tmp21518, i64 1
+ %tmp21520 = getelementptr inbounds float, float* %tmp21519, i64 1
+ %tmp21521 = getelementptr inbounds float, float* %tmp21520, i64 1
+ %tmp21522 = getelementptr inbounds float, float* %tmp21521, i64 1
+ %tmp21523 = getelementptr inbounds float, float* %tmp21522, i64 1
+ %tmp21524 = getelementptr inbounds float, float* %tmp21523, i64 1
+ %tmp21525 = getelementptr inbounds float, float* %tmp21524, i64 1
+ %tmp21526 = getelementptr inbounds float, float* %tmp21525, i64 1
+ %tmp21527 = getelementptr inbounds float, float* %tmp21526, i64 1
+ %tmp21528 = getelementptr inbounds float, float* %tmp21527, i64 1
+ %tmp21529 = getelementptr inbounds float, float* %tmp21528, i64 1
+ %tmp21530 = getelementptr inbounds float, float* %tmp21529, i64 1
+ %tmp21531 = getelementptr inbounds float, float* %tmp21530, i64 1
+ %tmp21532 = getelementptr inbounds float, float* %tmp21531, i64 1
+ %tmp21533 = getelementptr inbounds float, float* %tmp21532, i64 1
+ %tmp21534 = getelementptr inbounds float, float* %tmp21533, i64 1
+ %tmp21535 = getelementptr inbounds float, float* %tmp21534, i64 1
+ %tmp21536 = getelementptr inbounds float, float* %tmp21535, i64 1
+ %tmp21537 = getelementptr inbounds float, float* %tmp21536, i64 1
+ %tmp21538 = getelementptr inbounds float, float* %tmp21537, i64 1
+ %tmp21539 = getelementptr inbounds float, float* %tmp21538, i64 1
+ %tmp21540 = getelementptr inbounds float, float* %tmp21539, i64 1
+ %tmp21541 = getelementptr inbounds float, float* %tmp21540, i64 1
+ %tmp21542 = getelementptr inbounds float, float* %tmp21541, i64 1
+ %tmp21543 = getelementptr inbounds float, float* %tmp21542, i64 1
+ %tmp21544 = getelementptr inbounds float, float* %tmp21543, i64 1
+ %tmp21545 = getelementptr inbounds float, float* %tmp21544, i64 1
+ %tmp21546 = getelementptr inbounds float, float* %tmp21545, i64 1
+ %tmp21547 = getelementptr inbounds float, float* %tmp21546, i64 1
+ %tmp21548 = getelementptr inbounds float, float* %tmp21547, i64 1
+ %tmp21549 = getelementptr inbounds float, float* %tmp21548, i64 1
+ %tmp21550 = getelementptr inbounds float, float* %tmp21549, i64 1
+ %tmp21551 = getelementptr inbounds float, float* %tmp21550, i64 1
+ %tmp21552 = getelementptr inbounds float, float* %tmp21551, i64 1
+ %tmp21553 = getelementptr inbounds float, float* %tmp21552, i64 1
+ %tmp21554 = getelementptr inbounds float, float* %tmp21553, i64 1
+ %tmp21555 = getelementptr inbounds float, float* %tmp21554, i64 1
+ %tmp21556 = getelementptr inbounds float, float* %tmp21555, i64 1
+ %tmp21557 = getelementptr inbounds float, float* %tmp21556, i64 1
+ %tmp21558 = getelementptr inbounds float, float* %tmp21557, i64 1
+ %tmp21559 = getelementptr inbounds float, float* %tmp21558, i64 1
+ %tmp21560 = getelementptr inbounds float, float* %tmp21559, i64 1
+ %tmp21561 = getelementptr inbounds float, float* %tmp21560, i64 1
+ %tmp21562 = getelementptr inbounds float, float* %tmp21561, i64 1
+ %tmp21563 = getelementptr inbounds float, float* %tmp21562, i64 1
+ %tmp21564 = getelementptr inbounds float, float* %tmp21563, i64 1
+ %tmp21565 = getelementptr inbounds float, float* %tmp21564, i64 1
+ %tmp21566 = getelementptr inbounds float, float* %tmp21565, i64 1
+ %tmp21567 = getelementptr inbounds float, float* %tmp21566, i64 1
+ %tmp21568 = getelementptr inbounds float, float* %tmp21567, i64 1
+ %tmp21569 = getelementptr inbounds float, float* %tmp21568, i64 1
+ %tmp21570 = getelementptr inbounds float, float* %tmp21569, i64 1
+ %tmp21571 = getelementptr inbounds float, float* %tmp21570, i64 1
+ %tmp21572 = getelementptr inbounds float, float* %tmp21571, i64 1
+ %tmp21573 = getelementptr inbounds float, float* %tmp21572, i64 1
+ %tmp21574 = getelementptr inbounds float, float* %tmp21573, i64 1
+ %tmp21575 = getelementptr inbounds float, float* %tmp21574, i64 1
+ %tmp21576 = getelementptr inbounds float, float* %tmp21575, i64 1
+ %tmp21577 = getelementptr inbounds float, float* %tmp21576, i64 1
+ %tmp21578 = getelementptr inbounds float, float* %tmp21577, i64 1
+ %tmp21579 = getelementptr inbounds float, float* %tmp21578, i64 1
+ %tmp21580 = getelementptr inbounds float, float* %tmp21579, i64 1
+ %tmp21581 = getelementptr inbounds float, float* %tmp21580, i64 1
+ %tmp21582 = getelementptr inbounds float, float* %tmp21581, i64 1
+ %tmp21583 = getelementptr inbounds float, float* %tmp21582, i64 1
+ %tmp21584 = getelementptr inbounds float, float* %tmp21583, i64 1
+ %tmp21585 = getelementptr inbounds float, float* %tmp21584, i64 1
+ %tmp21586 = getelementptr inbounds float, float* %tmp21585, i64 1
+ %tmp21587 = getelementptr inbounds float, float* %tmp21586, i64 1
+ %tmp21588 = getelementptr inbounds float, float* %tmp21587, i64 1
+ %tmp21589 = getelementptr inbounds float, float* %tmp21588, i64 1
+ %tmp21590 = getelementptr inbounds float, float* %tmp21589, i64 1
+ %tmp21591 = getelementptr inbounds float, float* %tmp21590, i64 1
+ %tmp21592 = getelementptr inbounds float, float* %tmp21591, i64 1
+ %tmp21593 = getelementptr inbounds float, float* %tmp21592, i64 1
+ %tmp21594 = getelementptr inbounds float, float* %tmp21593, i64 1
+ %tmp21595 = getelementptr inbounds float, float* %tmp21594, i64 1
+ %tmp21596 = getelementptr inbounds float, float* %tmp21595, i64 1
+ %tmp21597 = getelementptr inbounds float, float* %tmp21596, i64 1
+ %tmp21598 = getelementptr inbounds float, float* %tmp21597, i64 1
+ %tmp21599 = getelementptr inbounds float, float* %tmp21598, i64 1
+ %tmp21600 = getelementptr inbounds float, float* %tmp21599, i64 1
+ %tmp21601 = getelementptr inbounds float, float* %tmp21600, i64 1
+ %tmp21602 = getelementptr inbounds float, float* %tmp21601, i64 1
+ %tmp21603 = getelementptr inbounds float, float* %tmp21602, i64 1
+ %tmp21604 = getelementptr inbounds float, float* %tmp21603, i64 1
+ %tmp21605 = getelementptr inbounds float, float* %tmp21604, i64 1
+ %tmp21606 = getelementptr inbounds float, float* %tmp21605, i64 1
+ %tmp21607 = getelementptr inbounds float, float* %tmp21606, i64 1
+ %tmp21608 = getelementptr inbounds float, float* %tmp21607, i64 1
+ %tmp21609 = getelementptr inbounds float, float* %tmp21608, i64 1
+ %tmp21610 = getelementptr inbounds float, float* %tmp21609, i64 1
+ %tmp21611 = getelementptr inbounds float, float* %tmp21610, i64 1
+ %tmp21612 = getelementptr inbounds float, float* %tmp21611, i64 1
+ %tmp21613 = getelementptr inbounds float, float* %tmp21612, i64 1
+ %tmp21614 = getelementptr inbounds float, float* %tmp21613, i64 1
+ %tmp21615 = getelementptr inbounds float, float* %tmp21614, i64 1
+ %tmp21616 = getelementptr inbounds float, float* %tmp21615, i64 1
+ %tmp21617 = getelementptr inbounds float, float* %tmp21616, i64 1
+ %tmp21618 = getelementptr inbounds float, float* %tmp21617, i64 1
+ %tmp21619 = getelementptr inbounds float, float* %tmp21618, i64 1
+ %tmp21620 = getelementptr inbounds float, float* %tmp21619, i64 1
+ %tmp21621 = getelementptr inbounds float, float* %tmp21620, i64 1
+ %tmp21622 = getelementptr inbounds float, float* %tmp21621, i64 1
+ %tmp21623 = getelementptr inbounds float, float* %tmp21622, i64 1
+ %tmp21624 = getelementptr inbounds float, float* %tmp21623, i64 1
+ %tmp21625 = getelementptr inbounds float, float* %tmp21624, i64 1
+ %tmp21626 = getelementptr inbounds float, float* %tmp21625, i64 1
+ %tmp21627 = getelementptr inbounds float, float* %tmp21626, i64 1
+ %tmp21628 = getelementptr inbounds float, float* %tmp21627, i64 1
+ %tmp21629 = getelementptr inbounds float, float* %tmp21628, i64 1
+ %tmp21630 = getelementptr inbounds float, float* %tmp21629, i64 1
+ %tmp21631 = getelementptr inbounds float, float* %tmp21630, i64 1
+ %tmp21632 = getelementptr inbounds float, float* %tmp21631, i64 1
+ %tmp21633 = getelementptr inbounds float, float* %tmp21632, i64 1
+ %tmp21634 = getelementptr inbounds float, float* %tmp21633, i64 1
+ %tmp21635 = getelementptr inbounds float, float* %tmp21634, i64 1
+ %tmp21636 = getelementptr inbounds float, float* %tmp21635, i64 1
+ %tmp21637 = getelementptr inbounds float, float* %tmp21636, i64 1
+ %tmp21638 = getelementptr inbounds float, float* %tmp21637, i64 1
+ %tmp21639 = getelementptr inbounds float, float* %tmp21638, i64 1
+ %tmp21640 = getelementptr inbounds float, float* %tmp21639, i64 1
+ %tmp21641 = getelementptr inbounds float, float* %tmp21640, i64 1
+ %tmp21642 = getelementptr inbounds float, float* %tmp21641, i64 1
+ %tmp21643 = getelementptr inbounds float, float* %tmp21642, i64 1
+ %tmp21644 = getelementptr inbounds float, float* %tmp21643, i64 1
+ %tmp21645 = getelementptr inbounds float, float* %tmp21644, i64 1
+ %tmp21646 = getelementptr inbounds float, float* %tmp21645, i64 1
+ %tmp21647 = getelementptr inbounds float, float* %tmp21646, i64 1
+ %tmp21648 = getelementptr inbounds float, float* %tmp21647, i64 1
+ %tmp21649 = getelementptr inbounds float, float* %tmp21648, i64 1
+ %tmp21650 = getelementptr inbounds float, float* %tmp21649, i64 1
+ %tmp21651 = getelementptr inbounds float, float* %tmp21650, i64 1
+ %tmp21652 = getelementptr inbounds float, float* %tmp21651, i64 1
+ %tmp21653 = getelementptr inbounds float, float* %tmp21652, i64 1
+ %tmp21654 = getelementptr inbounds float, float* %tmp21653, i64 1
+ %tmp21655 = getelementptr inbounds float, float* %tmp21654, i64 1
+ %tmp21656 = getelementptr inbounds float, float* %tmp21655, i64 1
+ %tmp21657 = getelementptr inbounds float, float* %tmp21656, i64 1
+ %tmp21658 = getelementptr inbounds float, float* %tmp21657, i64 1
+ %tmp21659 = getelementptr inbounds float, float* %tmp21658, i64 1
+ %tmp21660 = getelementptr inbounds float, float* %tmp21659, i64 1
+ %tmp21661 = getelementptr inbounds float, float* %tmp21660, i64 1
+ %tmp21662 = getelementptr inbounds float, float* %tmp21661, i64 1
+ %tmp21663 = getelementptr inbounds float, float* %tmp21662, i64 1
+ %tmp21664 = getelementptr inbounds float, float* %tmp21663, i64 1
+ %tmp21665 = getelementptr inbounds float, float* %tmp21664, i64 1
+ %tmp21666 = getelementptr inbounds float, float* %tmp21665, i64 1
+ %tmp21667 = getelementptr inbounds float, float* %tmp21666, i64 1
+ %tmp21668 = getelementptr inbounds float, float* %tmp21667, i64 1
+ %tmp21669 = getelementptr inbounds float, float* %tmp21668, i64 1
+ %tmp21670 = getelementptr inbounds float, float* %tmp21669, i64 1
+ %tmp21671 = getelementptr inbounds float, float* %tmp21670, i64 1
+ %tmp21672 = getelementptr inbounds float, float* %tmp21671, i64 1
+ %tmp21673 = getelementptr inbounds float, float* %tmp21672, i64 1
+ %tmp21674 = getelementptr inbounds float, float* %tmp21673, i64 1
+ %tmp21675 = getelementptr inbounds float, float* %tmp21674, i64 1
+ %tmp21676 = getelementptr inbounds float, float* %tmp21675, i64 1
+ %tmp21677 = getelementptr inbounds float, float* %tmp21676, i64 1
+ %tmp21678 = getelementptr inbounds float, float* %tmp21677, i64 1
+ %tmp21679 = getelementptr inbounds float, float* %tmp21678, i64 1
+ %tmp21680 = getelementptr inbounds float, float* %tmp21679, i64 1
+ %tmp21681 = getelementptr inbounds float, float* %tmp21680, i64 1
+ %tmp21682 = getelementptr inbounds float, float* %tmp21681, i64 1
+ %tmp21683 = getelementptr inbounds float, float* %tmp21682, i64 1
+ %tmp21684 = getelementptr inbounds float, float* %tmp21683, i64 1
+ %tmp21685 = getelementptr inbounds float, float* %tmp21684, i64 1
+ %tmp21686 = getelementptr inbounds float, float* %tmp21685, i64 1
+ %tmp21687 = getelementptr inbounds float, float* %tmp21686, i64 1
+ %tmp21688 = getelementptr inbounds float, float* %tmp21687, i64 1
+ %tmp21689 = getelementptr inbounds float, float* %tmp21688, i64 1
+ %tmp21690 = getelementptr inbounds float, float* %tmp21689, i64 1
+ %tmp21691 = getelementptr inbounds float, float* %tmp21690, i64 1
+ %tmp21692 = getelementptr inbounds float, float* %tmp21691, i64 1
+ %tmp21693 = getelementptr inbounds float, float* %tmp21692, i64 1
+ %tmp21694 = getelementptr inbounds float, float* %tmp21693, i64 1
+ %tmp21695 = getelementptr inbounds float, float* %tmp21694, i64 1
+ %tmp21696 = getelementptr inbounds float, float* %tmp21695, i64 1
+ %tmp21697 = getelementptr inbounds float, float* %tmp21696, i64 1
+ %tmp21698 = getelementptr inbounds float, float* %tmp21697, i64 1
+ %tmp21699 = getelementptr inbounds float, float* %tmp21698, i64 1
+ %tmp21700 = getelementptr inbounds float, float* %tmp21699, i64 1
+ %tmp21701 = getelementptr inbounds float, float* %tmp21700, i64 1
+ %tmp21702 = getelementptr inbounds float, float* %tmp21701, i64 1
+ %tmp21703 = getelementptr inbounds float, float* %tmp21702, i64 1
+ %tmp21704 = getelementptr inbounds float, float* %tmp21703, i64 1
+ %tmp21705 = getelementptr inbounds float, float* %tmp21704, i64 1
+ %tmp21706 = getelementptr inbounds float, float* %tmp21705, i64 1
+ %tmp21707 = getelementptr inbounds float, float* %tmp21706, i64 1
+ %tmp21708 = getelementptr inbounds float, float* %tmp21707, i64 1
+ %tmp21709 = getelementptr inbounds float, float* %tmp21708, i64 1
+ %tmp21710 = getelementptr inbounds float, float* %tmp21709, i64 1
+ %tmp21711 = getelementptr inbounds float, float* %tmp21710, i64 1
+ %tmp21712 = getelementptr inbounds float, float* %tmp21711, i64 1
+ %tmp21713 = getelementptr inbounds float, float* %tmp21712, i64 1
+ %tmp21714 = getelementptr inbounds float, float* %tmp21713, i64 1
+ %tmp21715 = getelementptr inbounds float, float* %tmp21714, i64 1
+ %tmp21716 = getelementptr inbounds float, float* %tmp21715, i64 1
+ %tmp21717 = getelementptr inbounds float, float* %tmp21716, i64 1
+ %tmp21718 = getelementptr inbounds float, float* %tmp21717, i64 1
+ %tmp21719 = getelementptr inbounds float, float* %tmp21718, i64 1
+ %tmp21720 = getelementptr inbounds float, float* %tmp21719, i64 1
+ %tmp21721 = getelementptr inbounds float, float* %tmp21720, i64 1
+ %tmp21722 = getelementptr inbounds float, float* %tmp21721, i64 1
+ %tmp21723 = getelementptr inbounds float, float* %tmp21722, i64 1
+ %tmp21724 = getelementptr inbounds float, float* %tmp21723, i64 1
+ %tmp21725 = getelementptr inbounds float, float* %tmp21724, i64 1
+ %tmp21726 = getelementptr inbounds float, float* %tmp21725, i64 1
+ %tmp21727 = getelementptr inbounds float, float* %tmp21726, i64 1
+ %tmp21728 = getelementptr inbounds float, float* %tmp21727, i64 1
+ %tmp21729 = getelementptr inbounds float, float* %tmp21728, i64 1
+ %tmp21730 = getelementptr inbounds float, float* %tmp21729, i64 1
+ %tmp21731 = getelementptr inbounds float, float* %tmp21730, i64 1
+ %tmp21732 = getelementptr inbounds float, float* %tmp21731, i64 1
+ %tmp21733 = getelementptr inbounds float, float* %tmp21732, i64 1
+ %tmp21734 = getelementptr inbounds float, float* %tmp21733, i64 1
+ %tmp21735 = getelementptr inbounds float, float* %tmp21734, i64 1
+ %tmp21736 = getelementptr inbounds float, float* %tmp21735, i64 1
+ %tmp21737 = getelementptr inbounds float, float* %tmp21736, i64 1
+ %tmp21738 = getelementptr inbounds float, float* %tmp21737, i64 1
+ %tmp21739 = getelementptr inbounds float, float* %tmp21738, i64 1
+ %tmp21740 = getelementptr inbounds float, float* %tmp21739, i64 1
+ %tmp21741 = getelementptr inbounds float, float* %tmp21740, i64 1
+ %tmp21742 = getelementptr inbounds float, float* %tmp21741, i64 1
+ %tmp21743 = getelementptr inbounds float, float* %tmp21742, i64 1
+ %tmp21744 = getelementptr inbounds float, float* %tmp21743, i64 1
+ %tmp21745 = getelementptr inbounds float, float* %tmp21744, i64 1
+ %tmp21746 = getelementptr inbounds float, float* %tmp21745, i64 1
+ %tmp21747 = getelementptr inbounds float, float* %tmp21746, i64 1
+ %tmp21748 = getelementptr inbounds float, float* %tmp21747, i64 1
+ %tmp21749 = getelementptr inbounds float, float* %tmp21748, i64 1
+ %tmp21750 = getelementptr inbounds float, float* %tmp21749, i64 1
+ %tmp21751 = getelementptr inbounds float, float* %tmp21750, i64 1
+ %tmp21752 = getelementptr inbounds float, float* %tmp21751, i64 1
+ %tmp21753 = getelementptr inbounds float, float* %tmp21752, i64 1
+ %tmp21754 = getelementptr inbounds float, float* %tmp21753, i64 1
+ %tmp21755 = getelementptr inbounds float, float* %tmp21754, i64 1
+ %tmp21756 = getelementptr inbounds float, float* %tmp21755, i64 1
+ %tmp21757 = getelementptr inbounds float, float* %tmp21756, i64 1
+ %tmp21758 = getelementptr inbounds float, float* %tmp21757, i64 1
+ %tmp21759 = getelementptr inbounds float, float* %tmp21758, i64 1
+ %tmp21760 = getelementptr inbounds float, float* %tmp21759, i64 1
+ %tmp21761 = getelementptr inbounds float, float* %tmp21760, i64 1
+ %tmp21762 = getelementptr inbounds float, float* %tmp21761, i64 1
+ %tmp21763 = getelementptr inbounds float, float* %tmp21762, i64 1
+ %tmp21764 = getelementptr inbounds float, float* %tmp21763, i64 1
+ %tmp21765 = getelementptr inbounds float, float* %tmp21764, i64 1
+ %tmp21766 = getelementptr inbounds float, float* %tmp21765, i64 1
+ %tmp21767 = getelementptr inbounds float, float* %tmp21766, i64 1
+ %tmp21768 = getelementptr inbounds float, float* %tmp21767, i64 1
+ %tmp21769 = getelementptr inbounds float, float* %tmp21768, i64 1
+ %tmp21770 = getelementptr inbounds float, float* %tmp21769, i64 1
+ %tmp21771 = getelementptr inbounds float, float* %tmp21770, i64 1
+ %tmp21772 = getelementptr inbounds float, float* %tmp21771, i64 1
+ %tmp21773 = getelementptr inbounds float, float* %tmp21772, i64 1
+ %tmp21774 = getelementptr inbounds float, float* %tmp21773, i64 1
+ %tmp21775 = getelementptr inbounds float, float* %tmp21774, i64 1
+ %tmp21776 = getelementptr inbounds float, float* %tmp21775, i64 1
+ %tmp21777 = getelementptr inbounds float, float* %tmp21776, i64 1
+ %tmp21778 = getelementptr inbounds float, float* %tmp21777, i64 1
+ %tmp21779 = getelementptr inbounds float, float* %tmp21778, i64 1
+ %tmp21780 = getelementptr inbounds float, float* %tmp21779, i64 1
+ %tmp21781 = getelementptr inbounds float, float* %tmp21780, i64 1
+ %tmp21782 = getelementptr inbounds float, float* %tmp21781, i64 1
+ %tmp21783 = getelementptr inbounds float, float* %tmp21782, i64 1
+ %tmp21784 = getelementptr inbounds float, float* %tmp21783, i64 1
+ %tmp21785 = getelementptr inbounds float, float* %tmp21784, i64 1
+ %tmp21786 = getelementptr inbounds float, float* %tmp21785, i64 1
+ %tmp21787 = getelementptr inbounds float, float* %tmp21786, i64 1
+ %tmp21788 = getelementptr inbounds float, float* %tmp21787, i64 1
+ %tmp21789 = getelementptr inbounds float, float* %tmp21788, i64 1
+ %tmp21790 = getelementptr inbounds float, float* %tmp21789, i64 1
+ %tmp21791 = getelementptr inbounds float, float* %tmp21790, i64 1
+ %tmp21792 = getelementptr inbounds float, float* %tmp21791, i64 1
+ %tmp21793 = getelementptr inbounds float, float* %tmp21792, i64 1
+ %tmp21794 = getelementptr inbounds float, float* %tmp21793, i64 1
+ %tmp21795 = getelementptr inbounds float, float* %tmp21794, i64 1
+ %tmp21796 = getelementptr inbounds float, float* %tmp21795, i64 1
+ %tmp21797 = getelementptr inbounds float, float* %tmp21796, i64 1
+ %tmp21798 = getelementptr inbounds float, float* %tmp21797, i64 1
+ %tmp21799 = getelementptr inbounds float, float* %tmp21798, i64 1
+ %tmp21800 = getelementptr inbounds float, float* %tmp21799, i64 1
+ %tmp21801 = getelementptr inbounds float, float* %tmp21800, i64 1
+ %tmp21802 = getelementptr inbounds float, float* %tmp21801, i64 1
+ %tmp21803 = getelementptr inbounds float, float* %tmp21802, i64 1
+ %tmp21804 = getelementptr inbounds float, float* %tmp21803, i64 1
+ %tmp21805 = getelementptr inbounds float, float* %tmp21804, i64 1
+ %tmp21806 = getelementptr inbounds float, float* %tmp21805, i64 1
+ %tmp21807 = getelementptr inbounds float, float* %tmp21806, i64 1
+ %tmp21808 = getelementptr inbounds float, float* %tmp21807, i64 1
+ %tmp21809 = getelementptr inbounds float, float* %tmp21808, i64 1
+ %tmp21810 = getelementptr inbounds float, float* %tmp21809, i64 1
+ %tmp21811 = getelementptr inbounds float, float* %tmp21810, i64 1
+ %tmp21812 = getelementptr inbounds float, float* %tmp21811, i64 1
+ %tmp21813 = getelementptr inbounds float, float* %tmp21812, i64 1
+ %tmp21814 = getelementptr inbounds float, float* %tmp21813, i64 1
+ %tmp21815 = getelementptr inbounds float, float* %tmp21814, i64 1
+ %tmp21816 = getelementptr inbounds float, float* %tmp21815, i64 1
+ %tmp21817 = getelementptr inbounds float, float* %tmp21816, i64 1
+ %tmp21818 = getelementptr inbounds float, float* %tmp21817, i64 1
+ %tmp21819 = getelementptr inbounds float, float* %tmp21818, i64 1
+ %tmp21820 = getelementptr inbounds float, float* %tmp21819, i64 1
+ %tmp21821 = getelementptr inbounds float, float* %tmp21820, i64 1
+ %tmp21822 = getelementptr inbounds float, float* %tmp21821, i64 1
+ %tmp21823 = getelementptr inbounds float, float* %tmp21822, i64 1
+ %tmp21824 = getelementptr inbounds float, float* %tmp21823, i64 1
+ %tmp21825 = getelementptr inbounds float, float* %tmp21824, i64 1
+ %tmp21826 = getelementptr inbounds float, float* %tmp21825, i64 1
+ %tmp21827 = getelementptr inbounds float, float* %tmp21826, i64 1
+ %tmp21828 = getelementptr inbounds float, float* %tmp21827, i64 1
+ %tmp21829 = getelementptr inbounds float, float* %tmp21828, i64 1
+ %tmp21830 = getelementptr inbounds float, float* %tmp21829, i64 1
+ %tmp21831 = getelementptr inbounds float, float* %tmp21830, i64 1
+ %tmp21832 = getelementptr inbounds float, float* %tmp21831, i64 1
+ %tmp21833 = getelementptr inbounds float, float* %tmp21832, i64 1
+ %tmp21834 = getelementptr inbounds float, float* %tmp21833, i64 1
+ %tmp21835 = getelementptr inbounds float, float* %tmp21834, i64 1
+ %tmp21836 = getelementptr inbounds float, float* %tmp21835, i64 1
+ %tmp21837 = getelementptr inbounds float, float* %tmp21836, i64 1
+ %tmp21838 = getelementptr inbounds float, float* %tmp21837, i64 1
+ %tmp21839 = getelementptr inbounds float, float* %tmp21838, i64 1
+ %tmp21840 = getelementptr inbounds float, float* %tmp21839, i64 1
+ %tmp21841 = getelementptr inbounds float, float* %tmp21840, i64 1
+ %tmp21842 = getelementptr inbounds float, float* %tmp21841, i64 1
+ %tmp21843 = getelementptr inbounds float, float* %tmp21842, i64 1
+ %tmp21844 = getelementptr inbounds float, float* %tmp21843, i64 1
+ %tmp21845 = getelementptr inbounds float, float* %tmp21844, i64 1
+ %tmp21846 = getelementptr inbounds float, float* %tmp21845, i64 1
+ %tmp21847 = getelementptr inbounds float, float* %tmp21846, i64 1
+ %tmp21848 = getelementptr inbounds float, float* %tmp21847, i64 1
+ %tmp21849 = getelementptr inbounds float, float* %tmp21848, i64 1
+ %tmp21850 = getelementptr inbounds float, float* %tmp21849, i64 1
+ %tmp21851 = getelementptr inbounds float, float* %tmp21850, i64 1
+ %tmp21852 = getelementptr inbounds float, float* %tmp21851, i64 1
+ %tmp21853 = getelementptr inbounds float, float* %tmp21852, i64 1
+ %tmp21854 = getelementptr inbounds float, float* %tmp21853, i64 1
+ %tmp21855 = getelementptr inbounds float, float* %tmp21854, i64 1
+ %tmp21856 = getelementptr inbounds float, float* %tmp21855, i64 1
+ %tmp21857 = getelementptr inbounds float, float* %tmp21856, i64 1
+ %tmp21858 = getelementptr inbounds float, float* %tmp21857, i64 1
+ %tmp21859 = getelementptr inbounds float, float* %tmp21858, i64 1
+ %tmp21860 = getelementptr inbounds float, float* %tmp21859, i64 1
+ %tmp21861 = getelementptr inbounds float, float* %tmp21860, i64 1
+ %tmp21862 = getelementptr inbounds float, float* %tmp21861, i64 1
+ %tmp21863 = getelementptr inbounds float, float* %tmp21862, i64 1
+ %tmp21864 = getelementptr inbounds float, float* %tmp21863, i64 1
+ %tmp21865 = getelementptr inbounds float, float* %tmp21864, i64 1
+ %tmp21866 = getelementptr inbounds float, float* %tmp21865, i64 1
+ %tmp21867 = getelementptr inbounds float, float* %tmp21866, i64 1
+ %tmp21868 = getelementptr inbounds float, float* %tmp21867, i64 1
+ %tmp21869 = getelementptr inbounds float, float* %tmp21868, i64 1
+ %tmp21870 = getelementptr inbounds float, float* %tmp21869, i64 1
+ %tmp21871 = getelementptr inbounds float, float* %tmp21870, i64 1
+ %tmp21872 = getelementptr inbounds float, float* %tmp21871, i64 1
+ %tmp21873 = getelementptr inbounds float, float* %tmp21872, i64 1
+ %tmp21874 = getelementptr inbounds float, float* %tmp21873, i64 1
+ %tmp21875 = getelementptr inbounds float, float* %tmp21874, i64 1
+ %tmp21876 = getelementptr inbounds float, float* %tmp21875, i64 1
+ %tmp21877 = getelementptr inbounds float, float* %tmp21876, i64 1
+ %tmp21878 = getelementptr inbounds float, float* %tmp21877, i64 1
+ %tmp21879 = getelementptr inbounds float, float* %tmp21878, i64 1
+ %tmp21880 = getelementptr inbounds float, float* %tmp21879, i64 1
+ %tmp21881 = getelementptr inbounds float, float* %tmp21880, i64 1
+ %tmp21882 = getelementptr inbounds float, float* %tmp21881, i64 1
+ %tmp21883 = getelementptr inbounds float, float* %tmp21882, i64 1
+ %tmp21884 = getelementptr inbounds float, float* %tmp21883, i64 1
+ %tmp21885 = getelementptr inbounds float, float* %tmp21884, i64 1
+ %tmp21886 = getelementptr inbounds float, float* %tmp21885, i64 1
+ %tmp21887 = getelementptr inbounds float, float* %tmp21886, i64 1
+ %tmp21888 = getelementptr inbounds float, float* %tmp21887, i64 1
+ %tmp21889 = getelementptr inbounds float, float* %tmp21888, i64 1
+ %tmp21890 = getelementptr inbounds float, float* %tmp21889, i64 1
+ %tmp21891 = getelementptr inbounds float, float* %tmp21890, i64 1
+ %tmp21892 = getelementptr inbounds float, float* %tmp21891, i64 1
+ %tmp21893 = getelementptr inbounds float, float* %tmp21892, i64 1
+ %tmp21894 = getelementptr inbounds float, float* %tmp21893, i64 1
+ %tmp21895 = getelementptr inbounds float, float* %tmp21894, i64 1
+ %tmp21896 = getelementptr inbounds float, float* %tmp21895, i64 1
+ %tmp21897 = getelementptr inbounds float, float* %tmp21896, i64 1
+ %tmp21898 = getelementptr inbounds float, float* %tmp21897, i64 1
+ %tmp21899 = getelementptr inbounds float, float* %tmp21898, i64 1
+ %tmp21900 = getelementptr inbounds float, float* %tmp21899, i64 1
+ %tmp21901 = getelementptr inbounds float, float* %tmp21900, i64 1
+ %tmp21902 = getelementptr inbounds float, float* %tmp21901, i64 1
+ %tmp21903 = getelementptr inbounds float, float* %tmp21902, i64 1
+ %tmp21904 = getelementptr inbounds float, float* %tmp21903, i64 1
+ %tmp21905 = getelementptr inbounds float, float* %tmp21904, i64 1
+ %tmp21906 = getelementptr inbounds float, float* %tmp21905, i64 1
+ %tmp21907 = getelementptr inbounds float, float* %tmp21906, i64 1
+ %tmp21908 = getelementptr inbounds float, float* %tmp21907, i64 1
+ %tmp21909 = getelementptr inbounds float, float* %tmp21908, i64 1
+ %tmp21910 = getelementptr inbounds float, float* %tmp21909, i64 1
+ %tmp21911 = getelementptr inbounds float, float* %tmp21910, i64 1
+ %tmp21912 = getelementptr inbounds float, float* %tmp21911, i64 1
+ %tmp21913 = getelementptr inbounds float, float* %tmp21912, i64 1
+ %tmp21914 = getelementptr inbounds float, float* %tmp21913, i64 1
+ %tmp21915 = getelementptr inbounds float, float* %tmp21914, i64 1
+ %tmp21916 = getelementptr inbounds float, float* %tmp21915, i64 1
+ %tmp21917 = getelementptr inbounds float, float* %tmp21916, i64 1
+ %tmp21918 = getelementptr inbounds float, float* %tmp21917, i64 1
+ %tmp21919 = getelementptr inbounds float, float* %tmp21918, i64 1
+ %tmp21920 = getelementptr inbounds float, float* %tmp21919, i64 1
+ %tmp21921 = getelementptr inbounds float, float* %tmp21920, i64 1
+ %tmp21922 = getelementptr inbounds float, float* %tmp21921, i64 1
+ %tmp21923 = getelementptr inbounds float, float* %tmp21922, i64 1
+ %tmp21924 = getelementptr inbounds float, float* %tmp21923, i64 1
+ %tmp21925 = getelementptr inbounds float, float* %tmp21924, i64 1
+ %tmp21926 = getelementptr inbounds float, float* %tmp21925, i64 1
+ %tmp21927 = getelementptr inbounds float, float* %tmp21926, i64 1
+ %tmp21928 = getelementptr inbounds float, float* %tmp21927, i64 1
+ %tmp21929 = getelementptr inbounds float, float* %tmp21928, i64 1
+ %tmp21930 = getelementptr inbounds float, float* %tmp21929, i64 1
+ %tmp21931 = getelementptr inbounds float, float* %tmp21930, i64 1
+ %tmp21932 = getelementptr inbounds float, float* %tmp21931, i64 1
+ %tmp21933 = getelementptr inbounds float, float* %tmp21932, i64 1
+ %tmp21934 = getelementptr inbounds float, float* %tmp21933, i64 1
+ %tmp21935 = getelementptr inbounds float, float* %tmp21934, i64 1
+ %tmp21936 = getelementptr inbounds float, float* %tmp21935, i64 1
+ %tmp21937 = getelementptr inbounds float, float* %tmp21936, i64 1
+ %tmp21938 = getelementptr inbounds float, float* %tmp21937, i64 1
+ %tmp21939 = getelementptr inbounds float, float* %tmp21938, i64 1
+ %tmp21940 = getelementptr inbounds float, float* %tmp21939, i64 1
+ %tmp21941 = getelementptr inbounds float, float* %tmp21940, i64 1
+ %tmp21942 = getelementptr inbounds float, float* %tmp21941, i64 1
+ %tmp21943 = getelementptr inbounds float, float* %tmp21942, i64 1
+ %tmp21944 = getelementptr inbounds float, float* %tmp21943, i64 1
+ %tmp21945 = getelementptr inbounds float, float* %tmp21944, i64 1
+ %tmp21946 = getelementptr inbounds float, float* %tmp21945, i64 1
+ %tmp21947 = getelementptr inbounds float, float* %tmp21946, i64 1
+ %tmp21948 = getelementptr inbounds float, float* %tmp21947, i64 1
+ %tmp21949 = getelementptr inbounds float, float* %tmp21948, i64 1
+ %tmp21950 = getelementptr inbounds float, float* %tmp21949, i64 1
+ %tmp21951 = getelementptr inbounds float, float* %tmp21950, i64 1
+ %tmp21952 = getelementptr inbounds float, float* %tmp21951, i64 1
+ %tmp21953 = getelementptr inbounds float, float* %tmp21952, i64 1
+ %tmp21954 = getelementptr inbounds float, float* %tmp21953, i64 1
+ %tmp21955 = getelementptr inbounds float, float* %tmp21954, i64 1
+ %tmp21956 = getelementptr inbounds float, float* %tmp21955, i64 1
+ %tmp21957 = getelementptr inbounds float, float* %tmp21956, i64 1
+ %tmp21958 = getelementptr inbounds float, float* %tmp21957, i64 1
+ %tmp21959 = getelementptr inbounds float, float* %tmp21958, i64 1
+ %tmp21960 = getelementptr inbounds float, float* %tmp21959, i64 1
+ %tmp21961 = getelementptr inbounds float, float* %tmp21960, i64 1
+ %tmp21962 = getelementptr inbounds float, float* %tmp21961, i64 1
+ %tmp21963 = getelementptr inbounds float, float* %tmp21962, i64 1
+ %tmp21964 = getelementptr inbounds float, float* %tmp21963, i64 1
+ %tmp21965 = getelementptr inbounds float, float* %tmp21964, i64 1
+ %tmp21966 = getelementptr inbounds float, float* %tmp21965, i64 1
+ %tmp21967 = getelementptr inbounds float, float* %tmp21966, i64 1
+ %tmp21968 = getelementptr inbounds float, float* %tmp21967, i64 1
+ %tmp21969 = getelementptr inbounds float, float* %tmp21968, i64 1
+ %tmp21970 = getelementptr inbounds float, float* %tmp21969, i64 1
+ %tmp21971 = getelementptr inbounds float, float* %tmp21970, i64 1
+ %tmp21972 = getelementptr inbounds float, float* %tmp21971, i64 1
+ %tmp21973 = getelementptr inbounds float, float* %tmp21972, i64 1
+ %tmp21974 = getelementptr inbounds float, float* %tmp21973, i64 1
+ %tmp21975 = getelementptr inbounds float, float* %tmp21974, i64 1
+ %tmp21976 = getelementptr inbounds float, float* %tmp21975, i64 1
+ %tmp21977 = getelementptr inbounds float, float* %tmp21976, i64 1
+ %tmp21978 = getelementptr inbounds float, float* %tmp21977, i64 1
+ %tmp21979 = getelementptr inbounds float, float* %tmp21978, i64 1
+ %tmp21980 = getelementptr inbounds float, float* %tmp21979, i64 1
+ %tmp21981 = getelementptr inbounds float, float* %tmp21980, i64 1
+ %tmp21982 = getelementptr inbounds float, float* %tmp21981, i64 1
+ %tmp21983 = getelementptr inbounds float, float* %tmp21982, i64 1
+ %tmp21984 = getelementptr inbounds float, float* %tmp21983, i64 1
+ %tmp21985 = getelementptr inbounds float, float* %tmp21984, i64 1
+ %tmp21986 = getelementptr inbounds float, float* %tmp21985, i64 1
+ %tmp21987 = getelementptr inbounds float, float* %tmp21986, i64 1
+ %tmp21988 = getelementptr inbounds float, float* %tmp21987, i64 1
+ %tmp21989 = getelementptr inbounds float, float* %tmp21988, i64 1
+ %tmp21990 = getelementptr inbounds float, float* %tmp21989, i64 1
+ %tmp21991 = getelementptr inbounds float, float* %tmp21990, i64 1
+ %tmp21992 = getelementptr inbounds float, float* %tmp21991, i64 1
+ %tmp21993 = getelementptr inbounds float, float* %tmp21992, i64 1
+ %tmp21994 = getelementptr inbounds float, float* %tmp21993, i64 1
+ %tmp21995 = getelementptr inbounds float, float* %tmp21994, i64 1
+ %tmp21996 = getelementptr inbounds float, float* %tmp21995, i64 1
+ %tmp21997 = getelementptr inbounds float, float* %tmp21996, i64 1
+ %tmp21998 = getelementptr inbounds float, float* %tmp21997, i64 1
+ %tmp21999 = getelementptr inbounds float, float* %tmp21998, i64 1
+ %tmp22000 = getelementptr inbounds float, float* %tmp21999, i64 1
+ %tmp22001 = getelementptr inbounds float, float* %tmp22000, i64 1
+ %tmp22002 = getelementptr inbounds float, float* %tmp22001, i64 1
+ %tmp22003 = getelementptr inbounds float, float* %tmp22002, i64 1
+ %tmp22004 = getelementptr inbounds float, float* %tmp22003, i64 1
+ %tmp22005 = getelementptr inbounds float, float* %tmp22004, i64 1
+ %tmp22006 = getelementptr inbounds float, float* %tmp22005, i64 1
+ %tmp22007 = getelementptr inbounds float, float* %tmp22006, i64 1
+ %tmp22008 = getelementptr inbounds float, float* %tmp22007, i64 1
+ %tmp22009 = getelementptr inbounds float, float* %tmp22008, i64 1
+ %tmp22010 = getelementptr inbounds float, float* %tmp22009, i64 1
+ %tmp22011 = getelementptr inbounds float, float* %tmp22010, i64 1
+ %tmp22012 = getelementptr inbounds float, float* %tmp22011, i64 1
+ %tmp22013 = getelementptr inbounds float, float* %tmp22012, i64 1
+ %tmp22014 = getelementptr inbounds float, float* %tmp22013, i64 1
+ %tmp22015 = getelementptr inbounds float, float* %tmp22014, i64 1
+ %tmp22016 = getelementptr inbounds float, float* %tmp22015, i64 1
+ %tmp22017 = getelementptr inbounds float, float* %tmp22016, i64 1
+ %tmp22018 = getelementptr inbounds float, float* %tmp22017, i64 1
+ %tmp22019 = getelementptr inbounds float, float* %tmp22018, i64 1
+ %tmp22020 = getelementptr inbounds float, float* %tmp22019, i64 1
+ %tmp22021 = getelementptr inbounds float, float* %tmp22020, i64 1
+ %tmp22022 = getelementptr inbounds float, float* %tmp22021, i64 1
+ %tmp22023 = getelementptr inbounds float, float* %tmp22022, i64 1
+ %tmp22024 = getelementptr inbounds float, float* %tmp22023, i64 1
+ %tmp22025 = getelementptr inbounds float, float* %tmp22024, i64 1
+ %tmp22026 = getelementptr inbounds float, float* %tmp22025, i64 1
+ %tmp22027 = getelementptr inbounds float, float* %tmp22026, i64 1
+ %tmp22028 = getelementptr inbounds float, float* %tmp22027, i64 1
+ %tmp22029 = getelementptr inbounds float, float* %tmp22028, i64 1
+ %tmp22030 = getelementptr inbounds float, float* %tmp22029, i64 1
+ %tmp22031 = getelementptr inbounds float, float* %tmp22030, i64 1
+ %tmp22032 = getelementptr inbounds float, float* %tmp22031, i64 1
+ %tmp22033 = getelementptr inbounds float, float* %tmp22032, i64 1
+ %tmp22034 = getelementptr inbounds float, float* %tmp22033, i64 1
+ %tmp22035 = getelementptr inbounds float, float* %tmp22034, i64 1
+ %tmp22036 = getelementptr inbounds float, float* %tmp22035, i64 1
+ %tmp22037 = getelementptr inbounds float, float* %tmp22036, i64 1
+ %tmp22038 = getelementptr inbounds float, float* %tmp22037, i64 1
+ %tmp22039 = getelementptr inbounds float, float* %tmp22038, i64 1
+ %tmp22040 = getelementptr inbounds float, float* %tmp22039, i64 1
+ %tmp22041 = getelementptr inbounds float, float* %tmp22040, i64 1
+ %tmp22042 = getelementptr inbounds float, float* %tmp22041, i64 1
+ %tmp22043 = getelementptr inbounds float, float* %tmp22042, i64 1
+ %tmp22044 = getelementptr inbounds float, float* %tmp22043, i64 1
+ %tmp22045 = getelementptr inbounds float, float* %tmp22044, i64 1
+ %tmp22046 = getelementptr inbounds float, float* %tmp22045, i64 1
+ %tmp22047 = getelementptr inbounds float, float* %tmp22046, i64 1
+ %tmp22048 = getelementptr inbounds float, float* %tmp22047, i64 1
+ %tmp22049 = getelementptr inbounds float, float* %tmp22048, i64 1
+ %tmp22050 = getelementptr inbounds float, float* %tmp22049, i64 1
+ %tmp22051 = getelementptr inbounds float, float* %tmp22050, i64 1
+ %tmp22052 = getelementptr inbounds float, float* %tmp22051, i64 1
+ %tmp22053 = getelementptr inbounds float, float* %tmp22052, i64 1
+ %tmp22054 = getelementptr inbounds float, float* %tmp22053, i64 1
+ %tmp22055 = getelementptr inbounds float, float* %tmp22054, i64 1
+ %tmp22056 = getelementptr inbounds float, float* %tmp22055, i64 1
+ %tmp22057 = getelementptr inbounds float, float* %tmp22056, i64 1
+ %tmp22058 = getelementptr inbounds float, float* %tmp22057, i64 1
+ %tmp22059 = getelementptr inbounds float, float* %tmp22058, i64 1
+ %tmp22060 = getelementptr inbounds float, float* %tmp22059, i64 1
+ %tmp22061 = getelementptr inbounds float, float* %tmp22060, i64 1
+ %tmp22062 = getelementptr inbounds float, float* %tmp22061, i64 1
+ %tmp22063 = getelementptr inbounds float, float* %tmp22062, i64 1
+ %tmp22064 = getelementptr inbounds float, float* %tmp22063, i64 1
+ %tmp22065 = getelementptr inbounds float, float* %tmp22064, i64 1
+ %tmp22066 = getelementptr inbounds float, float* %tmp22065, i64 1
+ %tmp22067 = getelementptr inbounds float, float* %tmp22066, i64 1
+ %tmp22068 = getelementptr inbounds float, float* %tmp22067, i64 1
+ %tmp22069 = getelementptr inbounds float, float* %tmp22068, i64 1
+ %tmp22070 = getelementptr inbounds float, float* %tmp22069, i64 1
+ %tmp22071 = getelementptr inbounds float, float* %tmp22070, i64 1
+ %tmp22072 = getelementptr inbounds float, float* %tmp22071, i64 1
+ %tmp22073 = getelementptr inbounds float, float* %tmp22072, i64 1
+ %tmp22074 = getelementptr inbounds float, float* %tmp22073, i64 1
+ %tmp22075 = getelementptr inbounds float, float* %tmp22074, i64 1
+ %tmp22076 = getelementptr inbounds float, float* %tmp22075, i64 1
+ %tmp22077 = getelementptr inbounds float, float* %tmp22076, i64 1
+ %tmp22078 = getelementptr inbounds float, float* %tmp22077, i64 1
+ %tmp22079 = getelementptr inbounds float, float* %tmp22078, i64 1
+ %tmp22080 = getelementptr inbounds float, float* %tmp22079, i64 1
+ %tmp22081 = getelementptr inbounds float, float* %tmp22080, i64 1
+ %tmp22082 = getelementptr inbounds float, float* %tmp22081, i64 1
+ %tmp22083 = getelementptr inbounds float, float* %tmp22082, i64 1
+ %tmp22084 = getelementptr inbounds float, float* %tmp22083, i64 1
+ %tmp22085 = getelementptr inbounds float, float* %tmp22084, i64 1
+ %tmp22086 = getelementptr inbounds float, float* %tmp22085, i64 1
+ %tmp22087 = getelementptr inbounds float, float* %tmp22086, i64 1
+ %tmp22088 = getelementptr inbounds float, float* %tmp22087, i64 1
+ %tmp22089 = getelementptr inbounds float, float* %tmp22088, i64 1
+ %tmp22090 = getelementptr inbounds float, float* %tmp22089, i64 1
+ %tmp22091 = getelementptr inbounds float, float* %tmp22090, i64 1
+ %tmp22092 = getelementptr inbounds float, float* %tmp22091, i64 1
+ %tmp22093 = getelementptr inbounds float, float* %tmp22092, i64 1
+ %tmp22094 = getelementptr inbounds float, float* %tmp22093, i64 1
+ %tmp22095 = getelementptr inbounds float, float* %tmp22094, i64 1
+ %tmp22096 = getelementptr inbounds float, float* %tmp22095, i64 1
+ %tmp22097 = getelementptr inbounds float, float* %tmp22096, i64 1
+ %tmp22098 = getelementptr inbounds float, float* %tmp22097, i64 1
+ %tmp22099 = getelementptr inbounds float, float* %tmp22098, i64 1
+ %tmp22100 = getelementptr inbounds float, float* %tmp22099, i64 1
+ %tmp22101 = getelementptr inbounds float, float* %tmp22100, i64 1
+ %tmp22102 = getelementptr inbounds float, float* %tmp22101, i64 1
+ %tmp22103 = getelementptr inbounds float, float* %tmp22102, i64 1
+ %tmp22104 = getelementptr inbounds float, float* %tmp22103, i64 1
+ %tmp22105 = getelementptr inbounds float, float* %tmp22104, i64 1
+ %tmp22106 = getelementptr inbounds float, float* %tmp22105, i64 1
+ %tmp22107 = getelementptr inbounds float, float* %tmp22106, i64 1
+ %tmp22108 = getelementptr inbounds float, float* %tmp22107, i64 1
+ %tmp22109 = getelementptr inbounds float, float* %tmp22108, i64 1
+ %tmp22110 = getelementptr inbounds float, float* %tmp22109, i64 1
+ %tmp22111 = getelementptr inbounds float, float* %tmp22110, i64 1
+ %tmp22112 = getelementptr inbounds float, float* %tmp22111, i64 1
+ %tmp22113 = getelementptr inbounds float, float* %tmp22112, i64 1
+ %tmp22114 = getelementptr inbounds float, float* %tmp22113, i64 1
+ %tmp22115 = getelementptr inbounds float, float* %tmp22114, i64 1
+ %tmp22116 = getelementptr inbounds float, float* %tmp22115, i64 1
+ %tmp22117 = getelementptr inbounds float, float* %tmp22116, i64 1
+ %tmp22118 = getelementptr inbounds float, float* %tmp22117, i64 1
+ %tmp22119 = getelementptr inbounds float, float* %tmp22118, i64 1
+ %tmp22120 = getelementptr inbounds float, float* %tmp22119, i64 1
+ %tmp22121 = getelementptr inbounds float, float* %tmp22120, i64 1
+ %tmp22122 = getelementptr inbounds float, float* %tmp22121, i64 1
+ %tmp22123 = getelementptr inbounds float, float* %tmp22122, i64 1
+ %tmp22124 = getelementptr inbounds float, float* %tmp22123, i64 1
+ %tmp22125 = getelementptr inbounds float, float* %tmp22124, i64 1
+ %tmp22126 = getelementptr inbounds float, float* %tmp22125, i64 1
+ %tmp22127 = getelementptr inbounds float, float* %tmp22126, i64 1
+ %tmp22128 = getelementptr inbounds float, float* %tmp22127, i64 1
+ %tmp22129 = getelementptr inbounds float, float* %tmp22128, i64 1
+ %tmp22130 = getelementptr inbounds float, float* %tmp22129, i64 1
+ %tmp22131 = getelementptr inbounds float, float* %tmp22130, i64 1
+ %tmp22132 = getelementptr inbounds float, float* %tmp22131, i64 1
+ %tmp22133 = getelementptr inbounds float, float* %tmp22132, i64 1
+ %tmp22134 = getelementptr inbounds float, float* %tmp22133, i64 1
+ %tmp22135 = getelementptr inbounds float, float* %tmp22134, i64 1
+ %tmp22136 = getelementptr inbounds float, float* %tmp22135, i64 1
+ %tmp22137 = getelementptr inbounds float, float* %tmp22136, i64 1
+ %tmp22138 = getelementptr inbounds float, float* %tmp22137, i64 1
+ %tmp22139 = getelementptr inbounds float, float* %tmp22138, i64 1
+ %tmp22140 = getelementptr inbounds float, float* %tmp22139, i64 1
+ %tmp22141 = getelementptr inbounds float, float* %tmp22140, i64 1
+ %tmp22142 = getelementptr inbounds float, float* %tmp22141, i64 1
+ %tmp22143 = getelementptr inbounds float, float* %tmp22142, i64 1
+ %tmp22144 = getelementptr inbounds float, float* %tmp22143, i64 1
+ %tmp22145 = getelementptr inbounds float, float* %tmp22144, i64 1
+ %tmp22146 = getelementptr inbounds float, float* %tmp22145, i64 1
+ %tmp22147 = getelementptr inbounds float, float* %tmp22146, i64 1
+ %tmp22148 = getelementptr inbounds float, float* %tmp22147, i64 1
+ %tmp22149 = getelementptr inbounds float, float* %tmp22148, i64 1
+ %tmp22150 = getelementptr inbounds float, float* %tmp22149, i64 1
+ %tmp22151 = getelementptr inbounds float, float* %tmp22150, i64 1
+ %tmp22152 = getelementptr inbounds float, float* %tmp22151, i64 1
+ %tmp22153 = getelementptr inbounds float, float* %tmp22152, i64 1
+ %tmp22154 = getelementptr inbounds float, float* %tmp22153, i64 1
+ %tmp22155 = getelementptr inbounds float, float* %tmp22154, i64 1
+ %tmp22156 = getelementptr inbounds float, float* %tmp22155, i64 1
+ %tmp22157 = getelementptr inbounds float, float* %tmp22156, i64 1
+ %tmp22158 = getelementptr inbounds float, float* %tmp22157, i64 1
+ %tmp22159 = getelementptr inbounds float, float* %tmp22158, i64 1
+ %tmp22160 = getelementptr inbounds float, float* %tmp22159, i64 1
+ %tmp22161 = getelementptr inbounds float, float* %tmp22160, i64 1
+ %tmp22162 = getelementptr inbounds float, float* %tmp22161, i64 1
+ %tmp22163 = getelementptr inbounds float, float* %tmp22162, i64 1
+ %tmp22164 = getelementptr inbounds float, float* %tmp22163, i64 1
+ %tmp22165 = getelementptr inbounds float, float* %tmp22164, i64 1
+ %tmp22166 = getelementptr inbounds float, float* %tmp22165, i64 1
+ %tmp22167 = getelementptr inbounds float, float* %tmp22166, i64 1
+ %tmp22168 = getelementptr inbounds float, float* %tmp22167, i64 1
+ %tmp22169 = getelementptr inbounds float, float* %tmp22168, i64 1
+ %tmp22170 = getelementptr inbounds float, float* %tmp22169, i64 1
+ %tmp22171 = getelementptr inbounds float, float* %tmp22170, i64 1
+ %tmp22172 = getelementptr inbounds float, float* %tmp22171, i64 1
+ %tmp22173 = getelementptr inbounds float, float* %tmp22172, i64 1
+ %tmp22174 = getelementptr inbounds float, float* %tmp22173, i64 1
+ %tmp22175 = getelementptr inbounds float, float* %tmp22174, i64 1
+ %tmp22176 = getelementptr inbounds float, float* %tmp22175, i64 1
+ %tmp22177 = getelementptr inbounds float, float* %tmp22176, i64 1
+ %tmp22178 = getelementptr inbounds float, float* %tmp22177, i64 1
+ %tmp22179 = getelementptr inbounds float, float* %tmp22178, i64 1
+ %tmp22180 = getelementptr inbounds float, float* %tmp22179, i64 1
+ %tmp22181 = getelementptr inbounds float, float* %tmp22180, i64 1
+ %tmp22182 = getelementptr inbounds float, float* %tmp22181, i64 1
+ %tmp22183 = getelementptr inbounds float, float* %tmp22182, i64 1
+ %tmp22184 = getelementptr inbounds float, float* %tmp22183, i64 1
+ %tmp22185 = getelementptr inbounds float, float* %tmp22184, i64 1
+ %tmp22186 = getelementptr inbounds float, float* %tmp22185, i64 1
+ %tmp22187 = getelementptr inbounds float, float* %tmp22186, i64 1
+ %tmp22188 = getelementptr inbounds float, float* %tmp22187, i64 1
+ %tmp22189 = getelementptr inbounds float, float* %tmp22188, i64 1
+ %tmp22190 = getelementptr inbounds float, float* %tmp22189, i64 1
+ %tmp22191 = getelementptr inbounds float, float* %tmp22190, i64 1
+ %tmp22192 = getelementptr inbounds float, float* %tmp22191, i64 1
+ %tmp22193 = getelementptr inbounds float, float* %tmp22192, i64 1
+ %tmp22194 = getelementptr inbounds float, float* %tmp22193, i64 1
+ %tmp22195 = getelementptr inbounds float, float* %tmp22194, i64 1
+ %tmp22196 = getelementptr inbounds float, float* %tmp22195, i64 1
+ %tmp22197 = getelementptr inbounds float, float* %tmp22196, i64 1
+ %tmp22198 = getelementptr inbounds float, float* %tmp22197, i64 1
+ %tmp22199 = getelementptr inbounds float, float* %tmp22198, i64 1
+ %tmp22200 = getelementptr inbounds float, float* %tmp22199, i64 1
+ %tmp22201 = getelementptr inbounds float, float* %tmp22200, i64 1
+ %tmp22202 = getelementptr inbounds float, float* %tmp22201, i64 1
+ %tmp22203 = getelementptr inbounds float, float* %tmp22202, i64 1
+ %tmp22204 = getelementptr inbounds float, float* %tmp22203, i64 1
+ %tmp22205 = getelementptr inbounds float, float* %tmp22204, i64 1
+ %tmp22206 = getelementptr inbounds float, float* %tmp22205, i64 1
+ %tmp22207 = getelementptr inbounds float, float* %tmp22206, i64 1
+ %tmp22208 = getelementptr inbounds float, float* %tmp22207, i64 1
+ %tmp22209 = getelementptr inbounds float, float* %tmp22208, i64 1
+ %tmp22210 = getelementptr inbounds float, float* %tmp22209, i64 1
+ %tmp22211 = getelementptr inbounds float, float* %tmp22210, i64 1
+ %tmp22212 = getelementptr inbounds float, float* %tmp22211, i64 1
+ %tmp22213 = getelementptr inbounds float, float* %tmp22212, i64 1
+ %tmp22214 = getelementptr inbounds float, float* %tmp22213, i64 1
+ %tmp22215 = getelementptr inbounds float, float* %tmp22214, i64 1
+ %tmp22216 = getelementptr inbounds float, float* %tmp22215, i64 1
+ %tmp22217 = getelementptr inbounds float, float* %tmp22216, i64 1
+ %tmp22218 = getelementptr inbounds float, float* %tmp22217, i64 1
+ %tmp22219 = getelementptr inbounds float, float* %tmp22218, i64 1
+ %tmp22220 = getelementptr inbounds float, float* %tmp22219, i64 1
+ %tmp22221 = getelementptr inbounds float, float* %tmp22220, i64 1
+ %tmp22222 = getelementptr inbounds float, float* %tmp22221, i64 1
+ %tmp22223 = getelementptr inbounds float, float* %tmp22222, i64 1
+ %tmp22224 = getelementptr inbounds float, float* %tmp22223, i64 1
+ %tmp22225 = getelementptr inbounds float, float* %tmp22224, i64 1
+ %tmp22226 = getelementptr inbounds float, float* %tmp22225, i64 1
+ %tmp22227 = getelementptr inbounds float, float* %tmp22226, i64 1
+ %tmp22228 = getelementptr inbounds float, float* %tmp22227, i64 1
+ %tmp22229 = getelementptr inbounds float, float* %tmp22228, i64 1
+ %tmp22230 = getelementptr inbounds float, float* %tmp22229, i64 1
+ %tmp22231 = getelementptr inbounds float, float* %tmp22230, i64 1
+ %tmp22232 = getelementptr inbounds float, float* %tmp22231, i64 1
+ %tmp22233 = getelementptr inbounds float, float* %tmp22232, i64 1
+ %tmp22234 = getelementptr inbounds float, float* %tmp22233, i64 1
+ %tmp22235 = getelementptr inbounds float, float* %tmp22234, i64 1
+ %tmp22236 = getelementptr inbounds float, float* %tmp22235, i64 1
+ %tmp22237 = getelementptr inbounds float, float* %tmp22236, i64 1
+ %tmp22238 = getelementptr inbounds float, float* %tmp22237, i64 1
+ %tmp22239 = getelementptr inbounds float, float* %tmp22238, i64 1
+ %tmp22240 = getelementptr inbounds float, float* %tmp22239, i64 1
+ %tmp22241 = getelementptr inbounds float, float* %tmp22240, i64 1
+ %tmp22242 = getelementptr inbounds float, float* %tmp22241, i64 1
+ %tmp22243 = getelementptr inbounds float, float* %tmp22242, i64 1
+ %tmp22244 = getelementptr inbounds float, float* %tmp22243, i64 1
+ %tmp22245 = getelementptr inbounds float, float* %tmp22244, i64 1
+ %tmp22246 = getelementptr inbounds float, float* %tmp22245, i64 1
+ %tmp22247 = getelementptr inbounds float, float* %tmp22246, i64 1
+ %tmp22248 = getelementptr inbounds float, float* %tmp22247, i64 1
+ %tmp22249 = getelementptr inbounds float, float* %tmp22248, i64 1
+ %tmp22250 = getelementptr inbounds float, float* %tmp22249, i64 1
+ %tmp22251 = getelementptr inbounds float, float* %tmp22250, i64 1
+ %tmp22252 = getelementptr inbounds float, float* %tmp22251, i64 1
+ %tmp22253 = getelementptr inbounds float, float* %tmp22252, i64 1
+ %tmp22254 = getelementptr inbounds float, float* %tmp22253, i64 1
+ %tmp22255 = getelementptr inbounds float, float* %tmp22254, i64 1
+ %tmp22256 = getelementptr inbounds float, float* %tmp22255, i64 1
+ %tmp22257 = getelementptr inbounds float, float* %tmp22256, i64 1
+ %tmp22258 = getelementptr inbounds float, float* %tmp22257, i64 1
+ %tmp22259 = getelementptr inbounds float, float* %tmp22258, i64 1
+ %tmp22260 = getelementptr inbounds float, float* %tmp22259, i64 1
+ %tmp22261 = getelementptr inbounds float, float* %tmp22260, i64 1
+ %tmp22262 = getelementptr inbounds float, float* %tmp22261, i64 1
+ %tmp22263 = getelementptr inbounds float, float* %tmp22262, i64 1
+ %tmp22264 = getelementptr inbounds float, float* %tmp22263, i64 1
+ %tmp22265 = getelementptr inbounds float, float* %tmp22264, i64 1
+ %tmp22266 = getelementptr inbounds float, float* %tmp22265, i64 1
+ %tmp22267 = getelementptr inbounds float, float* %tmp22266, i64 1
+ %tmp22268 = getelementptr inbounds float, float* %tmp22267, i64 1
+ %tmp22269 = getelementptr inbounds float, float* %tmp22268, i64 1
+ %tmp22270 = getelementptr inbounds float, float* %tmp22269, i64 1
+ %tmp22271 = getelementptr inbounds float, float* %tmp22270, i64 1
+ %tmp22272 = getelementptr inbounds float, float* %tmp22271, i64 1
+ %tmp22273 = getelementptr inbounds float, float* %tmp22272, i64 1
+ %tmp22274 = getelementptr inbounds float, float* %tmp22273, i64 1
+ %tmp22275 = getelementptr inbounds float, float* %tmp22274, i64 1
+ %tmp22276 = getelementptr inbounds float, float* %tmp22275, i64 1
+ %tmp22277 = getelementptr inbounds float, float* %tmp22276, i64 1
+ %tmp22278 = getelementptr inbounds float, float* %tmp22277, i64 1
+ %tmp22279 = getelementptr inbounds float, float* %tmp22278, i64 1
+ %tmp22280 = getelementptr inbounds float, float* %tmp22279, i64 1
+ %tmp22281 = getelementptr inbounds float, float* %tmp22280, i64 1
+ %tmp22282 = getelementptr inbounds float, float* %tmp22281, i64 1
+ %tmp22283 = getelementptr inbounds float, float* %tmp22282, i64 1
+ %tmp22284 = getelementptr inbounds float, float* %tmp22283, i64 1
+ %tmp22285 = getelementptr inbounds float, float* %tmp22284, i64 1
+ %tmp22286 = getelementptr inbounds float, float* %tmp22285, i64 1
+ %tmp22287 = getelementptr inbounds float, float* %tmp22286, i64 1
+ %tmp22288 = getelementptr inbounds float, float* %tmp22287, i64 1
+ %tmp22289 = getelementptr inbounds float, float* %tmp22288, i64 1
+ %tmp22290 = getelementptr inbounds float, float* %tmp22289, i64 1
+ %tmp22291 = getelementptr inbounds float, float* %tmp22290, i64 1
+ %tmp22292 = getelementptr inbounds float, float* %tmp22291, i64 1
+ %tmp22293 = getelementptr inbounds float, float* %tmp22292, i64 1
+ %tmp22294 = getelementptr inbounds float, float* %tmp22293, i64 1
+ %tmp22295 = getelementptr inbounds float, float* %tmp22294, i64 1
+ %tmp22296 = getelementptr inbounds float, float* %tmp22295, i64 1
+ %tmp22297 = getelementptr inbounds float, float* %tmp22296, i64 1
+ %tmp22298 = getelementptr inbounds float, float* %tmp22297, i64 1
+ %tmp22299 = getelementptr inbounds float, float* %tmp22298, i64 1
+ %tmp22300 = getelementptr inbounds float, float* %tmp22299, i64 1
+ %tmp22301 = getelementptr inbounds float, float* %tmp22300, i64 1
+ %tmp22302 = getelementptr inbounds float, float* %tmp22301, i64 1
+ %tmp22303 = getelementptr inbounds float, float* %tmp22302, i64 1
+ %tmp22304 = getelementptr inbounds float, float* %tmp22303, i64 1
+ %tmp22305 = getelementptr inbounds float, float* %tmp22304, i64 1
+ %tmp22306 = getelementptr inbounds float, float* %tmp22305, i64 1
+ %tmp22307 = getelementptr inbounds float, float* %tmp22306, i64 1
+ %tmp22308 = getelementptr inbounds float, float* %tmp22307, i64 1
+ %tmp22309 = getelementptr inbounds float, float* %tmp22308, i64 1
+ %tmp22310 = getelementptr inbounds float, float* %tmp22309, i64 1
+ %tmp22311 = getelementptr inbounds float, float* %tmp22310, i64 1
+ %tmp22312 = getelementptr inbounds float, float* %tmp22311, i64 1
+ %tmp22313 = getelementptr inbounds float, float* %tmp22312, i64 1
+ %tmp22314 = getelementptr inbounds float, float* %tmp22313, i64 1
+ %tmp22315 = getelementptr inbounds float, float* %tmp22314, i64 1
+ %tmp22316 = getelementptr inbounds float, float* %tmp22315, i64 1
+ %tmp22317 = getelementptr inbounds float, float* %tmp22316, i64 1
+ %tmp22318 = getelementptr inbounds float, float* %tmp22317, i64 1
+ %tmp22319 = getelementptr inbounds float, float* %tmp22318, i64 1
+ %tmp22320 = getelementptr inbounds float, float* %tmp22319, i64 1
+ %tmp22321 = getelementptr inbounds float, float* %tmp22320, i64 1
+ %tmp22322 = getelementptr inbounds float, float* %tmp22321, i64 1
+ %tmp22323 = getelementptr inbounds float, float* %tmp22322, i64 1
+ %tmp22324 = getelementptr inbounds float, float* %tmp22323, i64 1
+ %tmp22325 = getelementptr inbounds float, float* %tmp22324, i64 1
+ %tmp22326 = getelementptr inbounds float, float* %tmp22325, i64 1
+ %tmp22327 = getelementptr inbounds float, float* %tmp22326, i64 1
+ %tmp22328 = getelementptr inbounds float, float* %tmp22327, i64 1
+ %tmp22329 = getelementptr inbounds float, float* %tmp22328, i64 1
+ %tmp22330 = getelementptr inbounds float, float* %tmp22329, i64 1
+ %tmp22331 = getelementptr inbounds float, float* %tmp22330, i64 1
+ %tmp22332 = getelementptr inbounds float, float* %tmp22331, i64 1
+ %tmp22333 = getelementptr inbounds float, float* %tmp22332, i64 1
+ %tmp22334 = getelementptr inbounds float, float* %tmp22333, i64 1
+ %tmp22335 = getelementptr inbounds float, float* %tmp22334, i64 1
+ %tmp22336 = getelementptr inbounds float, float* %tmp22335, i64 1
+ %tmp22337 = getelementptr inbounds float, float* %tmp22336, i64 1
+ %tmp22338 = getelementptr inbounds float, float* %tmp22337, i64 1
+ %tmp22339 = getelementptr inbounds float, float* %tmp22338, i64 1
+ %tmp22340 = getelementptr inbounds float, float* %tmp22339, i64 1
+ %tmp22341 = getelementptr inbounds float, float* %tmp22340, i64 1
+ %tmp22342 = getelementptr inbounds float, float* %tmp22341, i64 1
+ %tmp22343 = getelementptr inbounds float, float* %tmp22342, i64 1
+ %tmp22344 = getelementptr inbounds float, float* %tmp22343, i64 1
+ %tmp22345 = getelementptr inbounds float, float* %tmp22344, i64 1
+ %tmp22346 = getelementptr inbounds float, float* %tmp22345, i64 1
+ %tmp22347 = getelementptr inbounds float, float* %tmp22346, i64 1
+ %tmp22348 = getelementptr inbounds float, float* %tmp22347, i64 1
+ %tmp22349 = getelementptr inbounds float, float* %tmp22348, i64 1
+ %tmp22350 = getelementptr inbounds float, float* %tmp22349, i64 1
+ %tmp22351 = getelementptr inbounds float, float* %tmp22350, i64 1
+ %tmp22352 = getelementptr inbounds float, float* %tmp22351, i64 1
+ %tmp22353 = getelementptr inbounds float, float* %tmp22352, i64 1
+ %tmp22354 = getelementptr inbounds float, float* %tmp22353, i64 1
+ %tmp22355 = getelementptr inbounds float, float* %tmp22354, i64 1
+ %tmp22356 = getelementptr inbounds float, float* %tmp22355, i64 1
+ %tmp22357 = getelementptr inbounds float, float* %tmp22356, i64 1
+ %tmp22358 = getelementptr inbounds float, float* %tmp22357, i64 1
+ %tmp22359 = getelementptr inbounds float, float* %tmp22358, i64 1
+ %tmp22360 = getelementptr inbounds float, float* %tmp22359, i64 1
+ %tmp22361 = getelementptr inbounds float, float* %tmp22360, i64 1
+ %tmp22362 = getelementptr inbounds float, float* %tmp22361, i64 1
+ %tmp22363 = getelementptr inbounds float, float* %tmp22362, i64 1
+ %tmp22364 = getelementptr inbounds float, float* %tmp22363, i64 1
+ %tmp22365 = getelementptr inbounds float, float* %tmp22364, i64 1
+ %tmp22366 = getelementptr inbounds float, float* %tmp22365, i64 1
+ %tmp22367 = getelementptr inbounds float, float* %tmp22366, i64 1
+ %tmp22368 = getelementptr inbounds float, float* %tmp22367, i64 1
+ %tmp22369 = getelementptr inbounds float, float* %tmp22368, i64 1
+ %tmp22370 = getelementptr inbounds float, float* %tmp22369, i64 1
+ %tmp22371 = getelementptr inbounds float, float* %tmp22370, i64 1
+ %tmp22372 = getelementptr inbounds float, float* %tmp22371, i64 1
+ %tmp22373 = getelementptr inbounds float, float* %tmp22372, i64 1
+ %tmp22374 = getelementptr inbounds float, float* %tmp22373, i64 1
+ %tmp22375 = getelementptr inbounds float, float* %tmp22374, i64 1
+ %tmp22376 = getelementptr inbounds float, float* %tmp22375, i64 1
+ %tmp22377 = getelementptr inbounds float, float* %tmp22376, i64 1
+ %tmp22378 = getelementptr inbounds float, float* %tmp22377, i64 1
+ %tmp22379 = getelementptr inbounds float, float* %tmp22378, i64 1
+ %tmp22380 = getelementptr inbounds float, float* %tmp22379, i64 1
+ %tmp22381 = getelementptr inbounds float, float* %tmp22380, i64 1
+ %tmp22382 = getelementptr inbounds float, float* %tmp22381, i64 1
+ %tmp22383 = getelementptr inbounds float, float* %tmp22382, i64 1
+ %tmp22384 = getelementptr inbounds float, float* %tmp22383, i64 1
+ %tmp22385 = getelementptr inbounds float, float* %tmp22384, i64 1
+ %tmp22386 = getelementptr inbounds float, float* %tmp22385, i64 1
+ %tmp22387 = getelementptr inbounds float, float* %tmp22386, i64 1
+ %tmp22388 = getelementptr inbounds float, float* %tmp22387, i64 1
+ %tmp22389 = getelementptr inbounds float, float* %tmp22388, i64 1
+ %tmp22390 = getelementptr inbounds float, float* %tmp22389, i64 1
+ %tmp22391 = getelementptr inbounds float, float* %tmp22390, i64 1
+ %tmp22392 = getelementptr inbounds float, float* %tmp22391, i64 1
+ %tmp22393 = getelementptr inbounds float, float* %tmp22392, i64 1
+ %tmp22394 = getelementptr inbounds float, float* %tmp22393, i64 1
+ %tmp22395 = getelementptr inbounds float, float* %tmp22394, i64 1
+ %tmp22396 = getelementptr inbounds float, float* %tmp22395, i64 1
+ %tmp22397 = getelementptr inbounds float, float* %tmp22396, i64 1
+ %tmp22398 = getelementptr inbounds float, float* %tmp22397, i64 1
+ %tmp22399 = getelementptr inbounds float, float* %tmp22398, i64 1
+ %tmp22400 = getelementptr inbounds float, float* %tmp22399, i64 1
+ %tmp22401 = getelementptr inbounds float, float* %tmp22400, i64 1
+ %tmp22402 = getelementptr inbounds float, float* %tmp22401, i64 1
+ %tmp22403 = getelementptr inbounds float, float* %tmp22402, i64 1
+ %tmp22404 = getelementptr inbounds float, float* %tmp22403, i64 1
+ %tmp22405 = getelementptr inbounds float, float* %tmp22404, i64 1
+ %tmp22406 = getelementptr inbounds float, float* %tmp22405, i64 1
+ %tmp22407 = getelementptr inbounds float, float* %tmp22406, i64 1
+ %tmp22408 = getelementptr inbounds float, float* %tmp22407, i64 1
+ %tmp22409 = getelementptr inbounds float, float* %tmp22408, i64 1
+ %tmp22410 = getelementptr inbounds float, float* %tmp22409, i64 1
+ %tmp22411 = getelementptr inbounds float, float* %tmp22410, i64 1
+ %tmp22412 = getelementptr inbounds float, float* %tmp22411, i64 1
+ %tmp22413 = getelementptr inbounds float, float* %tmp22412, i64 1
+ %tmp22414 = getelementptr inbounds float, float* %tmp22413, i64 1
+ %tmp22415 = getelementptr inbounds float, float* %tmp22414, i64 1
+ %tmp22416 = getelementptr inbounds float, float* %tmp22415, i64 1
+ %tmp22417 = getelementptr inbounds float, float* %tmp22416, i64 1
+ %tmp22418 = getelementptr inbounds float, float* %tmp22417, i64 1
+ %tmp22419 = getelementptr inbounds float, float* %tmp22418, i64 1
+ %tmp22420 = getelementptr inbounds float, float* %tmp22419, i64 1
+ %tmp22421 = getelementptr inbounds float, float* %tmp22420, i64 1
+ %tmp22422 = getelementptr inbounds float, float* %tmp22421, i64 1
+ %tmp22423 = getelementptr inbounds float, float* %tmp22422, i64 1
+ %tmp22424 = getelementptr inbounds float, float* %tmp22423, i64 1
+ %tmp22425 = getelementptr inbounds float, float* %tmp22424, i64 1
+ %tmp22426 = getelementptr inbounds float, float* %tmp22425, i64 1
+ %tmp22427 = getelementptr inbounds float, float* %tmp22426, i64 1
+ %tmp22428 = getelementptr inbounds float, float* %tmp22427, i64 1
+ %tmp22429 = getelementptr inbounds float, float* %tmp22428, i64 1
+ %tmp22430 = getelementptr inbounds float, float* %tmp22429, i64 1
+ %tmp22431 = getelementptr inbounds float, float* %tmp22430, i64 1
+ %tmp22432 = getelementptr inbounds float, float* %tmp22431, i64 1
+ %tmp22433 = getelementptr inbounds float, float* %tmp22432, i64 1
+ %tmp22434 = getelementptr inbounds float, float* %tmp22433, i64 1
+ %tmp22435 = getelementptr inbounds float, float* %tmp22434, i64 1
+ %tmp22436 = getelementptr inbounds float, float* %tmp22435, i64 1
+ %tmp22437 = getelementptr inbounds float, float* %tmp22436, i64 1
+ %tmp22438 = getelementptr inbounds float, float* %tmp22437, i64 1
+ %tmp22439 = getelementptr inbounds float, float* %tmp22438, i64 1
+ %tmp22440 = getelementptr inbounds float, float* %tmp22439, i64 1
+ %tmp22441 = getelementptr inbounds float, float* %tmp22440, i64 1
+ %tmp22442 = getelementptr inbounds float, float* %tmp22441, i64 1
+ %tmp22443 = getelementptr inbounds float, float* %tmp22442, i64 1
+ %tmp22444 = getelementptr inbounds float, float* %tmp22443, i64 1
+ %tmp22445 = getelementptr inbounds float, float* %tmp22444, i64 1
+ %tmp22446 = getelementptr inbounds float, float* %tmp22445, i64 1
+ %tmp22447 = getelementptr inbounds float, float* %tmp22446, i64 1
+ %tmp22448 = getelementptr inbounds float, float* %tmp22447, i64 1
+ %tmp22449 = getelementptr inbounds float, float* %tmp22448, i64 1
+ %tmp22450 = getelementptr inbounds float, float* %tmp22449, i64 1
+ %tmp22451 = getelementptr inbounds float, float* %tmp22450, i64 1
+ %tmp22452 = getelementptr inbounds float, float* %tmp22451, i64 1
+ %tmp22453 = getelementptr inbounds float, float* %tmp22452, i64 1
+ %tmp22454 = getelementptr inbounds float, float* %tmp22453, i64 1
+ %tmp22455 = getelementptr inbounds float, float* %tmp22454, i64 1
+ %tmp22456 = getelementptr inbounds float, float* %tmp22455, i64 1
+ %tmp22457 = getelementptr inbounds float, float* %tmp22456, i64 1
+ %tmp22458 = getelementptr inbounds float, float* %tmp22457, i64 1
+ %tmp22459 = getelementptr inbounds float, float* %tmp22458, i64 1
+ %tmp22460 = getelementptr inbounds float, float* %tmp22459, i64 1
+ %tmp22461 = getelementptr inbounds float, float* %tmp22460, i64 1
+ %tmp22462 = getelementptr inbounds float, float* %tmp22461, i64 1
+ %tmp22463 = getelementptr inbounds float, float* %tmp22462, i64 1
+ %tmp22464 = getelementptr inbounds float, float* %tmp22463, i64 1
+ %tmp22465 = getelementptr inbounds float, float* %tmp22464, i64 1
+ %tmp22466 = getelementptr inbounds float, float* %tmp22465, i64 1
+ %tmp22467 = getelementptr inbounds float, float* %tmp22466, i64 1
+ %tmp22468 = getelementptr inbounds float, float* %tmp22467, i64 1
+ %tmp22469 = getelementptr inbounds float, float* %tmp22468, i64 1
+ %tmp22470 = getelementptr inbounds float, float* %tmp22469, i64 1
+ %tmp22471 = getelementptr inbounds float, float* %tmp22470, i64 1
+ %tmp22472 = getelementptr inbounds float, float* %tmp22471, i64 1
+ %tmp22473 = getelementptr inbounds float, float* %tmp22472, i64 1
+ %tmp22474 = getelementptr inbounds float, float* %tmp22473, i64 1
+ %tmp22475 = getelementptr inbounds float, float* %tmp22474, i64 1
+ %tmp22476 = getelementptr inbounds float, float* %tmp22475, i64 1
+ %tmp22477 = getelementptr inbounds float, float* %tmp22476, i64 1
+ %tmp22478 = getelementptr inbounds float, float* %tmp22477, i64 1
+ %tmp22479 = getelementptr inbounds float, float* %tmp22478, i64 1
+ %tmp22480 = getelementptr inbounds float, float* %tmp22479, i64 1
+ %tmp22481 = getelementptr inbounds float, float* %tmp22480, i64 1
+ %tmp22482 = getelementptr inbounds float, float* %tmp22481, i64 1
+ %tmp22483 = getelementptr inbounds float, float* %tmp22482, i64 1
+ %tmp22484 = getelementptr inbounds float, float* %tmp22483, i64 1
+ %tmp22485 = getelementptr inbounds float, float* %tmp22484, i64 1
+ %tmp22486 = getelementptr inbounds float, float* %tmp22485, i64 1
+ %tmp22487 = getelementptr inbounds float, float* %tmp22486, i64 1
+ %tmp22488 = getelementptr inbounds float, float* %tmp22487, i64 1
+ %tmp22489 = getelementptr inbounds float, float* %tmp22488, i64 1
+ %tmp22490 = getelementptr inbounds float, float* %tmp22489, i64 1
+ %tmp22491 = getelementptr inbounds float, float* %tmp22490, i64 1
+ %tmp22492 = getelementptr inbounds float, float* %tmp22491, i64 1
+ %tmp22493 = getelementptr inbounds float, float* %tmp22492, i64 1
+ %tmp22494 = getelementptr inbounds float, float* %tmp22493, i64 1
+ %tmp22495 = getelementptr inbounds float, float* %tmp22494, i64 1
+ %tmp22496 = getelementptr inbounds float, float* %tmp22495, i64 1
+ %tmp22497 = getelementptr inbounds float, float* %tmp22496, i64 1
+ %tmp22498 = getelementptr inbounds float, float* %tmp22497, i64 1
+ %tmp22499 = getelementptr inbounds float, float* %tmp22498, i64 1
+ %tmp22500 = getelementptr inbounds float, float* %tmp22499, i64 1
+ %tmp22501 = getelementptr inbounds float, float* %tmp22500, i64 1
+ %tmp22502 = getelementptr inbounds float, float* %tmp22501, i64 1
+ %tmp22503 = getelementptr inbounds float, float* %tmp22502, i64 1
+ %tmp22504 = getelementptr inbounds float, float* %tmp22503, i64 1
+ %tmp22505 = getelementptr inbounds float, float* %tmp22504, i64 1
+ %tmp22506 = getelementptr inbounds float, float* %tmp22505, i64 1
+ %tmp22507 = getelementptr inbounds float, float* %tmp22506, i64 1
+ %tmp22508 = getelementptr inbounds float, float* %tmp22507, i64 1
+ %tmp22509 = getelementptr inbounds float, float* %tmp22508, i64 1
+ %tmp22510 = getelementptr inbounds float, float* %tmp22509, i64 1
+ %tmp22511 = getelementptr inbounds float, float* %tmp22510, i64 1
+ %tmp22512 = getelementptr inbounds float, float* %tmp22511, i64 1
+ %tmp22513 = getelementptr inbounds float, float* %tmp22512, i64 1
+ %tmp22514 = getelementptr inbounds float, float* %tmp22513, i64 1
+ %tmp22515 = getelementptr inbounds float, float* %tmp22514, i64 1
+ %tmp22516 = getelementptr inbounds float, float* %tmp22515, i64 1
+ %tmp22517 = getelementptr inbounds float, float* %tmp22516, i64 1
+ %tmp22518 = getelementptr inbounds float, float* %tmp22517, i64 1
+ %tmp22519 = getelementptr inbounds float, float* %tmp22518, i64 1
+ %tmp22520 = getelementptr inbounds float, float* %tmp22519, i64 1
+ %tmp22521 = getelementptr inbounds float, float* %tmp22520, i64 1
+ %tmp22522 = getelementptr inbounds float, float* %tmp22521, i64 1
+ %tmp22523 = getelementptr inbounds float, float* %tmp22522, i64 1
+ %tmp22524 = getelementptr inbounds float, float* %tmp22523, i64 1
+ %tmp22525 = getelementptr inbounds float, float* %tmp22524, i64 1
+ %tmp22526 = getelementptr inbounds float, float* %tmp22525, i64 1
+ %tmp22527 = getelementptr inbounds float, float* %tmp22526, i64 1
+ %tmp22528 = getelementptr inbounds float, float* %tmp22527, i64 1
+ %tmp22529 = getelementptr inbounds float, float* %tmp22528, i64 1
+ %tmp22530 = getelementptr inbounds float, float* %tmp22529, i64 1
+ %tmp22531 = getelementptr inbounds float, float* %tmp22530, i64 1
+ %tmp22532 = getelementptr inbounds float, float* %tmp22531, i64 1
+ %tmp22533 = getelementptr inbounds float, float* %tmp22532, i64 1
+ %tmp22534 = getelementptr inbounds float, float* %tmp22533, i64 1
+ %tmp22535 = getelementptr inbounds float, float* %tmp22534, i64 1
+ %tmp22536 = getelementptr inbounds float, float* %tmp22535, i64 1
+ %tmp22537 = getelementptr inbounds float, float* %tmp22536, i64 1
+ %tmp22538 = getelementptr inbounds float, float* %tmp22537, i64 1
+ %tmp22539 = getelementptr inbounds float, float* %tmp22538, i64 1
+ %tmp22540 = getelementptr inbounds float, float* %tmp22539, i64 1
+ %tmp22541 = getelementptr inbounds float, float* %tmp22540, i64 1
+ %tmp22542 = getelementptr inbounds float, float* %tmp22541, i64 1
+ %tmp22543 = getelementptr inbounds float, float* %tmp22542, i64 1
+ %tmp22544 = getelementptr inbounds float, float* %tmp22543, i64 1
+ %tmp22545 = getelementptr inbounds float, float* %tmp22544, i64 1
+ %tmp22546 = getelementptr inbounds float, float* %tmp22545, i64 1
+ %tmp22547 = getelementptr inbounds float, float* %tmp22546, i64 1
+ %tmp22548 = getelementptr inbounds float, float* %tmp22547, i64 1
+ %tmp22549 = getelementptr inbounds float, float* %tmp22548, i64 1
+ %tmp22550 = getelementptr inbounds float, float* %tmp22549, i64 1
+ %tmp22551 = getelementptr inbounds float, float* %tmp22550, i64 1
+ %tmp22552 = getelementptr inbounds float, float* %tmp22551, i64 1
+ %tmp22553 = getelementptr inbounds float, float* %tmp22552, i64 1
+ %tmp22554 = getelementptr inbounds float, float* %tmp22553, i64 1
+ %tmp22555 = getelementptr inbounds float, float* %tmp22554, i64 1
+ %tmp22556 = getelementptr inbounds float, float* %tmp22555, i64 1
+ %tmp22557 = getelementptr inbounds float, float* %tmp22556, i64 1
+ %tmp22558 = getelementptr inbounds float, float* %tmp22557, i64 1
+ %tmp22559 = getelementptr inbounds float, float* %tmp22558, i64 1
+ %tmp22560 = getelementptr inbounds float, float* %tmp22559, i64 1
+ %tmp22561 = getelementptr inbounds float, float* %tmp22560, i64 1
+ %tmp22562 = getelementptr inbounds float, float* %tmp22561, i64 1
+ %tmp22563 = getelementptr inbounds float, float* %tmp22562, i64 1
+ %tmp22564 = getelementptr inbounds float, float* %tmp22563, i64 1
+ %tmp22565 = getelementptr inbounds float, float* %tmp22564, i64 1
+ %tmp22566 = getelementptr inbounds float, float* %tmp22565, i64 1
+ %tmp22567 = getelementptr inbounds float, float* %tmp22566, i64 1
+ %tmp22568 = getelementptr inbounds float, float* %tmp22567, i64 1
+ %tmp22569 = getelementptr inbounds float, float* %tmp22568, i64 1
+ %tmp22570 = getelementptr inbounds float, float* %tmp22569, i64 1
+ %tmp22571 = getelementptr inbounds float, float* %tmp22570, i64 1
+ %tmp22572 = getelementptr inbounds float, float* %tmp22571, i64 1
+ %tmp22573 = getelementptr inbounds float, float* %tmp22572, i64 1
+ %tmp22574 = getelementptr inbounds float, float* %tmp22573, i64 1
+ %tmp22575 = getelementptr inbounds float, float* %tmp22574, i64 1
+ %tmp22576 = getelementptr inbounds float, float* %tmp22575, i64 1
+ %tmp22577 = getelementptr inbounds float, float* %tmp22576, i64 1
+ %tmp22578 = getelementptr inbounds float, float* %tmp22577, i64 1
+ %tmp22579 = getelementptr inbounds float, float* %tmp22578, i64 1
+ %tmp22580 = getelementptr inbounds float, float* %tmp22579, i64 1
+ %tmp22581 = getelementptr inbounds float, float* %tmp22580, i64 1
+ %tmp22582 = getelementptr inbounds float, float* %tmp22581, i64 1
+ %tmp22583 = getelementptr inbounds float, float* %tmp22582, i64 1
+ %tmp22584 = getelementptr inbounds float, float* %tmp22583, i64 1
+ %tmp22585 = getelementptr inbounds float, float* %tmp22584, i64 1
+ %tmp22586 = getelementptr inbounds float, float* %tmp22585, i64 1
+ %tmp22587 = getelementptr inbounds float, float* %tmp22586, i64 1
+ %tmp22588 = getelementptr inbounds float, float* %tmp22587, i64 1
+ %tmp22589 = getelementptr inbounds float, float* %tmp22588, i64 1
+ %tmp22590 = getelementptr inbounds float, float* %tmp22589, i64 1
+ %tmp22591 = getelementptr inbounds float, float* %tmp22590, i64 1
+ %tmp22592 = getelementptr inbounds float, float* %tmp22591, i64 1
+ %tmp22593 = getelementptr inbounds float, float* %tmp22592, i64 1
+ %tmp22594 = getelementptr inbounds float, float* %tmp22593, i64 1
+ %tmp22595 = getelementptr inbounds float, float* %tmp22594, i64 1
+ %tmp22596 = getelementptr inbounds float, float* %tmp22595, i64 1
+ %tmp22597 = getelementptr inbounds float, float* %tmp22596, i64 1
+ %tmp22598 = getelementptr inbounds float, float* %tmp22597, i64 1
+ %tmp22599 = getelementptr inbounds float, float* %tmp22598, i64 1
+ %tmp22600 = getelementptr inbounds float, float* %tmp22599, i64 1
+ %tmp22601 = getelementptr inbounds float, float* %tmp22600, i64 1
+ %tmp22602 = getelementptr inbounds float, float* %tmp22601, i64 1
+ %tmp22603 = getelementptr inbounds float, float* %tmp22602, i64 1
+ %tmp22604 = getelementptr inbounds float, float* %tmp22603, i64 1
+ %tmp22605 = getelementptr inbounds float, float* %tmp22604, i64 1
+ %tmp22606 = getelementptr inbounds float, float* %tmp22605, i64 1
+ %tmp22607 = getelementptr inbounds float, float* %tmp22606, i64 1
+ %tmp22608 = getelementptr inbounds float, float* %tmp22607, i64 1
+ %tmp22609 = getelementptr inbounds float, float* %tmp22608, i64 1
+ %tmp22610 = getelementptr inbounds float, float* %tmp22609, i64 1
+ %tmp22611 = getelementptr inbounds float, float* %tmp22610, i64 1
+ %tmp22612 = getelementptr inbounds float, float* %tmp22611, i64 1
+ %tmp22613 = getelementptr inbounds float, float* %tmp22612, i64 1
+ %tmp22614 = getelementptr inbounds float, float* %tmp22613, i64 1
+ %tmp22615 = getelementptr inbounds float, float* %tmp22614, i64 1
+ %tmp22616 = getelementptr inbounds float, float* %tmp22615, i64 1
+ %tmp22617 = getelementptr inbounds float, float* %tmp22616, i64 1
+ %tmp22618 = getelementptr inbounds float, float* %tmp22617, i64 1
+ %tmp22619 = getelementptr inbounds float, float* %tmp22618, i64 1
+ %tmp22620 = getelementptr inbounds float, float* %tmp22619, i64 1
+ %tmp22621 = getelementptr inbounds float, float* %tmp22620, i64 1
+ %tmp22622 = getelementptr inbounds float, float* %tmp22621, i64 1
+ %tmp22623 = getelementptr inbounds float, float* %tmp22622, i64 1
+ %tmp22624 = getelementptr inbounds float, float* %tmp22623, i64 1
+ %tmp22625 = getelementptr inbounds float, float* %tmp22624, i64 1
+ %tmp22626 = getelementptr inbounds float, float* %tmp22625, i64 1
+ %tmp22627 = getelementptr inbounds float, float* %tmp22626, i64 1
+ %tmp22628 = getelementptr inbounds float, float* %tmp22627, i64 1
+ %tmp22629 = getelementptr inbounds float, float* %tmp22628, i64 1
+ %tmp22630 = getelementptr inbounds float, float* %tmp22629, i64 1
+ %tmp22631 = getelementptr inbounds float, float* %tmp22630, i64 1
+ %tmp22632 = getelementptr inbounds float, float* %tmp22631, i64 1
+ %tmp22633 = getelementptr inbounds float, float* %tmp22632, i64 1
+ %tmp22634 = getelementptr inbounds float, float* %tmp22633, i64 1
+ %tmp22635 = getelementptr inbounds float, float* %tmp22634, i64 1
+ %tmp22636 = getelementptr inbounds float, float* %tmp22635, i64 1
+ %tmp22637 = getelementptr inbounds float, float* %tmp22636, i64 1
+ %tmp22638 = getelementptr inbounds float, float* %tmp22637, i64 1
+ %tmp22639 = getelementptr inbounds float, float* %tmp22638, i64 1
+ %tmp22640 = getelementptr inbounds float, float* %tmp22639, i64 1
+ %tmp22641 = getelementptr inbounds float, float* %tmp22640, i64 1
+ %tmp22642 = getelementptr inbounds float, float* %tmp22641, i64 1
+ %tmp22643 = getelementptr inbounds float, float* %tmp22642, i64 1
+ %tmp22644 = getelementptr inbounds float, float* %tmp22643, i64 1
+ %tmp22645 = getelementptr inbounds float, float* %tmp22644, i64 1
+ %tmp22646 = getelementptr inbounds float, float* %tmp22645, i64 1
+ %tmp22647 = getelementptr inbounds float, float* %tmp22646, i64 1
+ %tmp22648 = getelementptr inbounds float, float* %tmp22647, i64 1
+ %tmp22649 = getelementptr inbounds float, float* %tmp22648, i64 1
+ %tmp22650 = getelementptr inbounds float, float* %tmp22649, i64 1
+ %tmp22651 = getelementptr inbounds float, float* %tmp22650, i64 1
+ %tmp22652 = getelementptr inbounds float, float* %tmp22651, i64 1
+ %tmp22653 = getelementptr inbounds float, float* %tmp22652, i64 1
+ %tmp22654 = getelementptr inbounds float, float* %tmp22653, i64 1
+ %tmp22655 = getelementptr inbounds float, float* %tmp22654, i64 1
+ %tmp22656 = getelementptr inbounds float, float* %tmp22655, i64 1
+ %tmp22657 = getelementptr inbounds float, float* %tmp22656, i64 1
+ %tmp22658 = getelementptr inbounds float, float* %tmp22657, i64 1
+ %tmp22659 = getelementptr inbounds float, float* %tmp22658, i64 1
+ %tmp22660 = getelementptr inbounds float, float* %tmp22659, i64 1
+ %tmp22661 = getelementptr inbounds float, float* %tmp22660, i64 1
+ %tmp22662 = getelementptr inbounds float, float* %tmp22661, i64 1
+ %tmp22663 = getelementptr inbounds float, float* %tmp22662, i64 1
+ %tmp22664 = getelementptr inbounds float, float* %tmp22663, i64 1
+ %tmp22665 = getelementptr inbounds float, float* %tmp22664, i64 1
+ %tmp22666 = getelementptr inbounds float, float* %tmp22665, i64 1
+ %tmp22667 = getelementptr inbounds float, float* %tmp22666, i64 1
+ %tmp22668 = getelementptr inbounds float, float* %tmp22667, i64 1
+ %tmp22669 = getelementptr inbounds float, float* %tmp22668, i64 1
+ %tmp22670 = getelementptr inbounds float, float* %tmp22669, i64 1
+ %tmp22671 = getelementptr inbounds float, float* %tmp22670, i64 1
+ %tmp22672 = getelementptr inbounds float, float* %tmp22671, i64 1
+ %tmp22673 = getelementptr inbounds float, float* %tmp22672, i64 1
+ %tmp22674 = getelementptr inbounds float, float* %tmp22673, i64 1
+ %tmp22675 = getelementptr inbounds float, float* %tmp22674, i64 1
+ %tmp22676 = getelementptr inbounds float, float* %tmp22675, i64 1
+ %tmp22677 = getelementptr inbounds float, float* %tmp22676, i64 1
+ %tmp22678 = getelementptr inbounds float, float* %tmp22677, i64 1
+ %tmp22679 = getelementptr inbounds float, float* %tmp22678, i64 1
+ %tmp22680 = getelementptr inbounds float, float* %tmp22679, i64 1
+ %tmp22681 = getelementptr inbounds float, float* %tmp22680, i64 1
+ %tmp22682 = getelementptr inbounds float, float* %tmp22681, i64 1
+ %tmp22683 = getelementptr inbounds float, float* %tmp22682, i64 1
+ %tmp22684 = getelementptr inbounds float, float* %tmp22683, i64 1
+ %tmp22685 = getelementptr inbounds float, float* %tmp22684, i64 1
+ %tmp22686 = getelementptr inbounds float, float* %tmp22685, i64 1
+ %tmp22687 = getelementptr inbounds float, float* %tmp22686, i64 1
+ %tmp22688 = getelementptr inbounds float, float* %tmp22687, i64 1
+ %tmp22689 = getelementptr inbounds float, float* %tmp22688, i64 1
+ %tmp22690 = getelementptr inbounds float, float* %tmp22689, i64 1
+ %tmp22691 = getelementptr inbounds float, float* %tmp22690, i64 1
+ %tmp22692 = getelementptr inbounds float, float* %tmp22691, i64 1
+ %tmp22693 = getelementptr inbounds float, float* %tmp22692, i64 1
+ %tmp22694 = getelementptr inbounds float, float* %tmp22693, i64 1
+ %tmp22695 = getelementptr inbounds float, float* %tmp22694, i64 1
+ %tmp22696 = getelementptr inbounds float, float* %tmp22695, i64 1
+ %tmp22697 = getelementptr inbounds float, float* %tmp22696, i64 1
+ %tmp22698 = getelementptr inbounds float, float* %tmp22697, i64 1
+ %tmp22699 = getelementptr inbounds float, float* %tmp22698, i64 1
+ %tmp22700 = getelementptr inbounds float, float* %tmp22699, i64 1
+ %tmp22701 = getelementptr inbounds float, float* %tmp22700, i64 1
+ %tmp22702 = getelementptr inbounds float, float* %tmp22701, i64 1
+ %tmp22703 = getelementptr inbounds float, float* %tmp22702, i64 1
+ %tmp22704 = getelementptr inbounds float, float* %tmp22703, i64 1
+ %tmp22705 = getelementptr inbounds float, float* %tmp22704, i64 1
+ %tmp22706 = getelementptr inbounds float, float* %tmp22705, i64 1
+ %tmp22707 = getelementptr inbounds float, float* %tmp22706, i64 1
+ %tmp22708 = getelementptr inbounds float, float* %tmp22707, i64 1
+ %tmp22709 = getelementptr inbounds float, float* %tmp22708, i64 1
+ %tmp22710 = getelementptr inbounds float, float* %tmp22709, i64 1
+ %tmp22711 = getelementptr inbounds float, float* %tmp22710, i64 1
+ %tmp22712 = getelementptr inbounds float, float* %tmp22711, i64 1
+ %tmp22713 = getelementptr inbounds float, float* %tmp22712, i64 1
+ %tmp22714 = getelementptr inbounds float, float* %tmp22713, i64 1
+ %tmp22715 = getelementptr inbounds float, float* %tmp22714, i64 1
+ %tmp22716 = getelementptr inbounds float, float* %tmp22715, i64 1
+ %tmp22717 = getelementptr inbounds float, float* %tmp22716, i64 1
+ %tmp22718 = getelementptr inbounds float, float* %tmp22717, i64 1
+ %tmp22719 = getelementptr inbounds float, float* %tmp22718, i64 1
+ %tmp22720 = getelementptr inbounds float, float* %tmp22719, i64 1
+ %tmp22721 = getelementptr inbounds float, float* %tmp22720, i64 1
+ %tmp22722 = getelementptr inbounds float, float* %tmp22721, i64 1
+ %tmp22723 = getelementptr inbounds float, float* %tmp22722, i64 1
+ %tmp22724 = getelementptr inbounds float, float* %tmp22723, i64 1
+ %tmp22725 = getelementptr inbounds float, float* %tmp22724, i64 1
+ %tmp22726 = getelementptr inbounds float, float* %tmp22725, i64 1
+ %tmp22727 = getelementptr inbounds float, float* %tmp22726, i64 1
+ %tmp22728 = getelementptr inbounds float, float* %tmp22727, i64 1
+ %tmp22729 = getelementptr inbounds float, float* %tmp22728, i64 1
+ %tmp22730 = getelementptr inbounds float, float* %tmp22729, i64 1
+ %tmp22731 = getelementptr inbounds float, float* %tmp22730, i64 1
+ %tmp22732 = getelementptr inbounds float, float* %tmp22731, i64 1
+ %tmp22733 = getelementptr inbounds float, float* %tmp22732, i64 1
+ %tmp22734 = getelementptr inbounds float, float* %tmp22733, i64 1
+ %tmp22735 = getelementptr inbounds float, float* %tmp22734, i64 1
+ %tmp22736 = getelementptr inbounds float, float* %tmp22735, i64 1
+ %tmp22737 = getelementptr inbounds float, float* %tmp22736, i64 1
+ %tmp22738 = getelementptr inbounds float, float* %tmp22737, i64 1
+ %tmp22739 = getelementptr inbounds float, float* %tmp22738, i64 1
+ %tmp22740 = getelementptr inbounds float, float* %tmp22739, i64 1
+ %tmp22741 = getelementptr inbounds float, float* %tmp22740, i64 1
+ %tmp22742 = getelementptr inbounds float, float* %tmp22741, i64 1
+ %tmp22743 = getelementptr inbounds float, float* %tmp22742, i64 1
+ %tmp22744 = getelementptr inbounds float, float* %tmp22743, i64 1
+ %tmp22745 = getelementptr inbounds float, float* %tmp22744, i64 1
+ %tmp22746 = getelementptr inbounds float, float* %tmp22745, i64 1
+ %tmp22747 = getelementptr inbounds float, float* %tmp22746, i64 1
+ %tmp22748 = getelementptr inbounds float, float* %tmp22747, i64 1
+ %tmp22749 = getelementptr inbounds float, float* %tmp22748, i64 1
+ %tmp22750 = getelementptr inbounds float, float* %tmp22749, i64 1
+ %tmp22751 = getelementptr inbounds float, float* %tmp22750, i64 1
+ %tmp22752 = getelementptr inbounds float, float* %tmp22751, i64 1
+ %tmp22753 = getelementptr inbounds float, float* %tmp22752, i64 1
+ %tmp22754 = getelementptr inbounds float, float* %tmp22753, i64 1
+ %tmp22755 = getelementptr inbounds float, float* %tmp22754, i64 1
+ %tmp22756 = getelementptr inbounds float, float* %tmp22755, i64 1
+ %tmp22757 = getelementptr inbounds float, float* %tmp22756, i64 1
+ %tmp22758 = getelementptr inbounds float, float* %tmp22757, i64 1
+ %tmp22759 = getelementptr inbounds float, float* %tmp22758, i64 1
+ %tmp22760 = getelementptr inbounds float, float* %tmp22759, i64 1
+ %tmp22761 = getelementptr inbounds float, float* %tmp22760, i64 1
+ %tmp22762 = getelementptr inbounds float, float* %tmp22761, i64 1
+ %tmp22763 = getelementptr inbounds float, float* %tmp22762, i64 1
+ %tmp22764 = getelementptr inbounds float, float* %tmp22763, i64 1
+ %tmp22765 = getelementptr inbounds float, float* %tmp22764, i64 1
+ %tmp22766 = getelementptr inbounds float, float* %tmp22765, i64 1
+ %tmp22767 = getelementptr inbounds float, float* %tmp22766, i64 1
+ %tmp22768 = getelementptr inbounds float, float* %tmp22767, i64 1
+ %tmp22769 = getelementptr inbounds float, float* %tmp22768, i64 1
+ %tmp22770 = getelementptr inbounds float, float* %tmp22769, i64 1
+ %tmp22771 = getelementptr inbounds float, float* %tmp22770, i64 1
+ %tmp22772 = getelementptr inbounds float, float* %tmp22771, i64 1
+ %tmp22773 = getelementptr inbounds float, float* %tmp22772, i64 1
+ %tmp22774 = getelementptr inbounds float, float* %tmp22773, i64 1
+ %tmp22775 = getelementptr inbounds float, float* %tmp22774, i64 1
+ %tmp22776 = getelementptr inbounds float, float* %tmp22775, i64 1
+ %tmp22777 = getelementptr inbounds float, float* %tmp22776, i64 1
+ %tmp22778 = getelementptr inbounds float, float* %tmp22777, i64 1
+ %tmp22779 = getelementptr inbounds float, float* %tmp22778, i64 1
+ %tmp22780 = getelementptr inbounds float, float* %tmp22779, i64 1
+ %tmp22781 = getelementptr inbounds float, float* %tmp22780, i64 1
+ %tmp22782 = getelementptr inbounds float, float* %tmp22781, i64 1
+ %tmp22783 = getelementptr inbounds float, float* %tmp22782, i64 1
+ %tmp22784 = getelementptr inbounds float, float* %tmp22783, i64 1
+ %tmp22785 = getelementptr inbounds float, float* %tmp22784, i64 1
+ %tmp22786 = getelementptr inbounds float, float* %tmp22785, i64 1
+ %tmp22787 = getelementptr inbounds float, float* %tmp22786, i64 1
+ %tmp22788 = getelementptr inbounds float, float* %tmp22787, i64 1
+ %tmp22789 = getelementptr inbounds float, float* %tmp22788, i64 1
+ %tmp22790 = getelementptr inbounds float, float* %tmp22789, i64 1
+ %tmp22791 = getelementptr inbounds float, float* %tmp22790, i64 1
+ %tmp22792 = getelementptr inbounds float, float* %tmp22791, i64 1
+ %tmp22793 = getelementptr inbounds float, float* %tmp22792, i64 1
+ %tmp22794 = getelementptr inbounds float, float* %tmp22793, i64 1
+ %tmp22795 = getelementptr inbounds float, float* %tmp22794, i64 1
+ %tmp22796 = getelementptr inbounds float, float* %tmp22795, i64 1
+ %tmp22797 = getelementptr inbounds float, float* %tmp22796, i64 1
+ %tmp22798 = getelementptr inbounds float, float* %tmp22797, i64 1
+ %tmp22799 = getelementptr inbounds float, float* %tmp22798, i64 1
+ %tmp22800 = getelementptr inbounds float, float* %tmp22799, i64 1
+ %tmp22801 = getelementptr inbounds float, float* %tmp22800, i64 1
+ %tmp22802 = getelementptr inbounds float, float* %tmp22801, i64 1
+ %tmp22803 = getelementptr inbounds float, float* %tmp22802, i64 1
+ %tmp22804 = getelementptr inbounds float, float* %tmp22803, i64 1
+ %tmp22805 = getelementptr inbounds float, float* %tmp22804, i64 1
+ %tmp22806 = getelementptr inbounds float, float* %tmp22805, i64 1
+ %tmp22807 = getelementptr inbounds float, float* %tmp22806, i64 1
+ %tmp22808 = getelementptr inbounds float, float* %tmp22807, i64 1
+ %tmp22809 = getelementptr inbounds float, float* %tmp22808, i64 1
+ %tmp22810 = getelementptr inbounds float, float* %tmp22809, i64 1
+ %tmp22811 = getelementptr inbounds float, float* %tmp22810, i64 1
+ %tmp22812 = getelementptr inbounds float, float* %tmp22811, i64 1
+ %tmp22813 = getelementptr inbounds float, float* %tmp22812, i64 1
+ %tmp22814 = getelementptr inbounds float, float* %tmp22813, i64 1
+ %tmp22815 = getelementptr inbounds float, float* %tmp22814, i64 1
+ %tmp22816 = getelementptr inbounds float, float* %tmp22815, i64 1
+ %tmp22817 = getelementptr inbounds float, float* %tmp22816, i64 1
+ %tmp22818 = getelementptr inbounds float, float* %tmp22817, i64 1
+ %tmp22819 = getelementptr inbounds float, float* %tmp22818, i64 1
+ %tmp22820 = getelementptr inbounds float, float* %tmp22819, i64 1
+ %tmp22821 = getelementptr inbounds float, float* %tmp22820, i64 1
+ %tmp22822 = getelementptr inbounds float, float* %tmp22821, i64 1
+ %tmp22823 = getelementptr inbounds float, float* %tmp22822, i64 1
+ %tmp22824 = getelementptr inbounds float, float* %tmp22823, i64 1
+ %tmp22825 = getelementptr inbounds float, float* %tmp22824, i64 1
+ %tmp22826 = getelementptr inbounds float, float* %tmp22825, i64 1
+ %tmp22827 = getelementptr inbounds float, float* %tmp22826, i64 1
+ %tmp22828 = getelementptr inbounds float, float* %tmp22827, i64 1
+ %tmp22829 = getelementptr inbounds float, float* %tmp22828, i64 1
+ %tmp22830 = getelementptr inbounds float, float* %tmp22829, i64 1
+ %tmp22831 = getelementptr inbounds float, float* %tmp22830, i64 1
+ %tmp22832 = getelementptr inbounds float, float* %tmp22831, i64 1
+ %tmp22833 = getelementptr inbounds float, float* %tmp22832, i64 1
+ %tmp22834 = getelementptr inbounds float, float* %tmp22833, i64 1
+ %tmp22835 = getelementptr inbounds float, float* %tmp22834, i64 1
+ %tmp22836 = getelementptr inbounds float, float* %tmp22835, i64 1
+ %tmp22837 = getelementptr inbounds float, float* %tmp22836, i64 1
+ %tmp22838 = getelementptr inbounds float, float* %tmp22837, i64 1
+ %tmp22839 = getelementptr inbounds float, float* %tmp22838, i64 1
+ %tmp22840 = getelementptr inbounds float, float* %tmp22839, i64 1
+ %tmp22841 = getelementptr inbounds float, float* %tmp22840, i64 1
+ %tmp22842 = getelementptr inbounds float, float* %tmp22841, i64 1
+ %tmp22843 = getelementptr inbounds float, float* %tmp22842, i64 1
+ %tmp22844 = getelementptr inbounds float, float* %tmp22843, i64 1
+ %tmp22845 = getelementptr inbounds float, float* %tmp22844, i64 1
+ %tmp22846 = getelementptr inbounds float, float* %tmp22845, i64 1
+ %tmp22847 = getelementptr inbounds float, float* %tmp22846, i64 1
+ %tmp22848 = getelementptr inbounds float, float* %tmp22847, i64 1
+ %tmp22849 = getelementptr inbounds float, float* %tmp22848, i64 1
+ %tmp22850 = getelementptr inbounds float, float* %tmp22849, i64 1
+ %tmp22851 = getelementptr inbounds float, float* %tmp22850, i64 1
+ %tmp22852 = getelementptr inbounds float, float* %tmp22851, i64 1
+ %tmp22853 = getelementptr inbounds float, float* %tmp22852, i64 1
+ %tmp22854 = getelementptr inbounds float, float* %tmp22853, i64 1
+ %tmp22855 = getelementptr inbounds float, float* %tmp22854, i64 1
+ %tmp22856 = getelementptr inbounds float, float* %tmp22855, i64 1
+ %tmp22857 = getelementptr inbounds float, float* %tmp22856, i64 1
+ %tmp22858 = getelementptr inbounds float, float* %tmp22857, i64 1
+ %tmp22859 = getelementptr inbounds float, float* %tmp22858, i64 1
+ %tmp22860 = getelementptr inbounds float, float* %tmp22859, i64 1
+ %tmp22861 = getelementptr inbounds float, float* %tmp22860, i64 1
+ %tmp22862 = getelementptr inbounds float, float* %tmp22861, i64 1
+ %tmp22863 = getelementptr inbounds float, float* %tmp22862, i64 1
+ %tmp22864 = getelementptr inbounds float, float* %tmp22863, i64 1
+ %tmp22865 = getelementptr inbounds float, float* %tmp22864, i64 1
+ %tmp22866 = getelementptr inbounds float, float* %tmp22865, i64 1
+ %tmp22867 = getelementptr inbounds float, float* %tmp22866, i64 1
+ %tmp22868 = getelementptr inbounds float, float* %tmp22867, i64 1
+ %tmp22869 = getelementptr inbounds float, float* %tmp22868, i64 1
+ %tmp22870 = getelementptr inbounds float, float* %tmp22869, i64 1
+ %tmp22871 = getelementptr inbounds float, float* %tmp22870, i64 1
+ %tmp22872 = getelementptr inbounds float, float* %tmp22871, i64 1
+ %tmp22873 = getelementptr inbounds float, float* %tmp22872, i64 1
+ %tmp22874 = getelementptr inbounds float, float* %tmp22873, i64 1
+ %tmp22875 = getelementptr inbounds float, float* %tmp22874, i64 1
+ %tmp22876 = getelementptr inbounds float, float* %tmp22875, i64 1
+ %tmp22877 = getelementptr inbounds float, float* %tmp22876, i64 1
+ %tmp22878 = getelementptr inbounds float, float* %tmp22877, i64 1
+ %tmp22879 = getelementptr inbounds float, float* %tmp22878, i64 1
+ %tmp22880 = getelementptr inbounds float, float* %tmp22879, i64 1
+ %tmp22881 = getelementptr inbounds float, float* %tmp22880, i64 1
+ %tmp22882 = getelementptr inbounds float, float* %tmp22881, i64 1
+ %tmp22883 = getelementptr inbounds float, float* %tmp22882, i64 1
+ %tmp22884 = getelementptr inbounds float, float* %tmp22883, i64 1
+ %tmp22885 = getelementptr inbounds float, float* %tmp22884, i64 1
+ %tmp22886 = getelementptr inbounds float, float* %tmp22885, i64 1
+ %tmp22887 = getelementptr inbounds float, float* %tmp22886, i64 1
+ %tmp22888 = getelementptr inbounds float, float* %tmp22887, i64 1
+ %tmp22889 = getelementptr inbounds float, float* %tmp22888, i64 1
+ %tmp22890 = getelementptr inbounds float, float* %tmp22889, i64 1
+ %tmp22891 = getelementptr inbounds float, float* %tmp22890, i64 1
+ %tmp22892 = getelementptr inbounds float, float* %tmp22891, i64 1
+ %tmp22893 = getelementptr inbounds float, float* %tmp22892, i64 1
+ %tmp22894 = getelementptr inbounds float, float* %tmp22893, i64 1
+ %tmp22895 = getelementptr inbounds float, float* %tmp22894, i64 1
+ %tmp22896 = getelementptr inbounds float, float* %tmp22895, i64 1
+ %tmp22897 = getelementptr inbounds float, float* %tmp22896, i64 1
+ %tmp22898 = getelementptr inbounds float, float* %tmp22897, i64 1
+ %tmp22899 = getelementptr inbounds float, float* %tmp22898, i64 1
+ %tmp22900 = getelementptr inbounds float, float* %tmp22899, i64 1
+ %tmp22901 = getelementptr inbounds float, float* %tmp22900, i64 1
+ %tmp22902 = getelementptr inbounds float, float* %tmp22901, i64 1
+ %tmp22903 = getelementptr inbounds float, float* %tmp22902, i64 1
+ %tmp22904 = getelementptr inbounds float, float* %tmp22903, i64 1
+ %tmp22905 = getelementptr inbounds float, float* %tmp22904, i64 1
+ %tmp22906 = getelementptr inbounds float, float* %tmp22905, i64 1
+ %tmp22907 = getelementptr inbounds float, float* %tmp22906, i64 1
+ %tmp22908 = getelementptr inbounds float, float* %tmp22907, i64 1
+ %tmp22909 = getelementptr inbounds float, float* %tmp22908, i64 1
+ %tmp22910 = getelementptr inbounds float, float* %tmp22909, i64 1
+ %tmp22911 = getelementptr inbounds float, float* %tmp22910, i64 1
+ %tmp22912 = getelementptr inbounds float, float* %tmp22911, i64 1
+ %tmp22913 = getelementptr inbounds float, float* %tmp22912, i64 1
+ %tmp22914 = getelementptr inbounds float, float* %tmp22913, i64 1
+ %tmp22915 = getelementptr inbounds float, float* %tmp22914, i64 1
+ %tmp22916 = getelementptr inbounds float, float* %tmp22915, i64 1
+ %tmp22917 = getelementptr inbounds float, float* %tmp22916, i64 1
+ %tmp22918 = getelementptr inbounds float, float* %tmp22917, i64 1
+ %tmp22919 = getelementptr inbounds float, float* %tmp22918, i64 1
+ %tmp22920 = getelementptr inbounds float, float* %tmp22919, i64 1
+ %tmp22921 = getelementptr inbounds float, float* %tmp22920, i64 1
+ %tmp22922 = getelementptr inbounds float, float* %tmp22921, i64 1
+ %tmp22923 = getelementptr inbounds float, float* %tmp22922, i64 1
+ %tmp22924 = getelementptr inbounds float, float* %tmp22923, i64 1
+ %tmp22925 = getelementptr inbounds float, float* %tmp22924, i64 1
+ %tmp22926 = getelementptr inbounds float, float* %tmp22925, i64 1
+ %tmp22927 = getelementptr inbounds float, float* %tmp22926, i64 1
+ %tmp22928 = getelementptr inbounds float, float* %tmp22927, i64 1
+ %tmp22929 = getelementptr inbounds float, float* %tmp22928, i64 1
+ %tmp22930 = getelementptr inbounds float, float* %tmp22929, i64 1
+ %tmp22931 = getelementptr inbounds float, float* %tmp22930, i64 1
+ %tmp22932 = getelementptr inbounds float, float* %tmp22931, i64 1
+ %tmp22933 = getelementptr inbounds float, float* %tmp22932, i64 1
+ %tmp22934 = getelementptr inbounds float, float* %tmp22933, i64 1
+ %tmp22935 = getelementptr inbounds float, float* %tmp22934, i64 1
+ %tmp22936 = getelementptr inbounds float, float* %tmp22935, i64 1
+ %tmp22937 = getelementptr inbounds float, float* %tmp22936, i64 1
+ %tmp22938 = getelementptr inbounds float, float* %tmp22937, i64 1
+ %tmp22939 = getelementptr inbounds float, float* %tmp22938, i64 1
+ %tmp22940 = getelementptr inbounds float, float* %tmp22939, i64 1
+ %tmp22941 = getelementptr inbounds float, float* %tmp22940, i64 1
+ %tmp22942 = getelementptr inbounds float, float* %tmp22941, i64 1
+ %tmp22943 = getelementptr inbounds float, float* %tmp22942, i64 1
+ %tmp22944 = getelementptr inbounds float, float* %tmp22943, i64 1
+ %tmp22945 = getelementptr inbounds float, float* %tmp22944, i64 1
+ %tmp22946 = getelementptr inbounds float, float* %tmp22945, i64 1
+ %tmp22947 = getelementptr inbounds float, float* %tmp22946, i64 1
+ %tmp22948 = getelementptr inbounds float, float* %tmp22947, i64 1
+ %tmp22949 = getelementptr inbounds float, float* %tmp22948, i64 1
+ %tmp22950 = getelementptr inbounds float, float* %tmp22949, i64 1
+ %tmp22951 = getelementptr inbounds float, float* %tmp22950, i64 1
+ %tmp22952 = getelementptr inbounds float, float* %tmp22951, i64 1
+ %tmp22953 = getelementptr inbounds float, float* %tmp22952, i64 1
+ %tmp22954 = getelementptr inbounds float, float* %tmp22953, i64 1
+ %tmp22955 = getelementptr inbounds float, float* %tmp22954, i64 1
+ %tmp22956 = getelementptr inbounds float, float* %tmp22955, i64 1
+ %tmp22957 = getelementptr inbounds float, float* %tmp22956, i64 1
+ %tmp22958 = getelementptr inbounds float, float* %tmp22957, i64 1
+ %tmp22959 = getelementptr inbounds float, float* %tmp22958, i64 1
+ %tmp22960 = getelementptr inbounds float, float* %tmp22959, i64 1
+ %tmp22961 = getelementptr inbounds float, float* %tmp22960, i64 1
+ %tmp22962 = getelementptr inbounds float, float* %tmp22961, i64 1
+ %tmp22963 = getelementptr inbounds float, float* %tmp22962, i64 1
+ %tmp22964 = getelementptr inbounds float, float* %tmp22963, i64 1
+ %tmp22965 = getelementptr inbounds float, float* %tmp22964, i64 1
+ %tmp22966 = getelementptr inbounds float, float* %tmp22965, i64 1
+ %tmp22967 = getelementptr inbounds float, float* %tmp22966, i64 1
+ %tmp22968 = getelementptr inbounds float, float* %tmp22967, i64 1
+ %tmp22969 = getelementptr inbounds float, float* %tmp22968, i64 1
+ %tmp22970 = getelementptr inbounds float, float* %tmp22969, i64 1
+ %tmp22971 = getelementptr inbounds float, float* %tmp22970, i64 1
+ %tmp22972 = getelementptr inbounds float, float* %tmp22971, i64 1
+ %tmp22973 = getelementptr inbounds float, float* %tmp22972, i64 1
+ %tmp22974 = getelementptr inbounds float, float* %tmp22973, i64 1
+ %tmp22975 = getelementptr inbounds float, float* %tmp22974, i64 1
+ %tmp22976 = getelementptr inbounds float, float* %tmp22975, i64 1
+ %tmp22977 = getelementptr inbounds float, float* %tmp22976, i64 1
+ %tmp22978 = getelementptr inbounds float, float* %tmp22977, i64 1
+ %tmp22979 = getelementptr inbounds float, float* %tmp22978, i64 1
+ %tmp22980 = getelementptr inbounds float, float* %tmp22979, i64 1
+ %tmp22981 = getelementptr inbounds float, float* %tmp22980, i64 1
+ %tmp22982 = getelementptr inbounds float, float* %tmp22981, i64 1
+ %tmp22983 = getelementptr inbounds float, float* %tmp22982, i64 1
+ %tmp22984 = getelementptr inbounds float, float* %tmp22983, i64 1
+ %tmp22985 = getelementptr inbounds float, float* %tmp22984, i64 1
+ %tmp22986 = getelementptr inbounds float, float* %tmp22985, i64 1
+ %tmp22987 = getelementptr inbounds float, float* %tmp22986, i64 1
+ %tmp22988 = getelementptr inbounds float, float* %tmp22987, i64 1
+ %tmp22989 = getelementptr inbounds float, float* %tmp22988, i64 1
+ %tmp22990 = getelementptr inbounds float, float* %tmp22989, i64 1
+ %tmp22991 = getelementptr inbounds float, float* %tmp22990, i64 1
+ %tmp22992 = getelementptr inbounds float, float* %tmp22991, i64 1
+ %tmp22993 = getelementptr inbounds float, float* %tmp22992, i64 1
+ %tmp22994 = getelementptr inbounds float, float* %tmp22993, i64 1
+ %tmp22995 = getelementptr inbounds float, float* %tmp22994, i64 1
+ %tmp22996 = getelementptr inbounds float, float* %tmp22995, i64 1
+ %tmp22997 = getelementptr inbounds float, float* %tmp22996, i64 1
+ %tmp22998 = getelementptr inbounds float, float* %tmp22997, i64 1
+ %tmp22999 = getelementptr inbounds float, float* %tmp22998, i64 1
+ %tmp23000 = getelementptr inbounds float, float* %tmp22999, i64 1
+ %tmp23001 = getelementptr inbounds float, float* %tmp23000, i64 1
+ %tmp23002 = getelementptr inbounds float, float* %tmp23001, i64 1
+ %tmp23003 = getelementptr inbounds float, float* %tmp23002, i64 1
+ %tmp23004 = getelementptr inbounds float, float* %tmp23003, i64 1
+ %tmp23005 = getelementptr inbounds float, float* %tmp23004, i64 1
+ %tmp23006 = getelementptr inbounds float, float* %tmp23005, i64 1
+ %tmp23007 = getelementptr inbounds float, float* %tmp23006, i64 1
+ %tmp23008 = getelementptr inbounds float, float* %tmp23007, i64 1
+ %tmp23009 = getelementptr inbounds float, float* %tmp23008, i64 1
+ %tmp23010 = getelementptr inbounds float, float* %tmp23009, i64 1
+ %tmp23011 = getelementptr inbounds float, float* %tmp23010, i64 1
+ %tmp23012 = getelementptr inbounds float, float* %tmp23011, i64 1
+ %tmp23013 = getelementptr inbounds float, float* %tmp23012, i64 1
+ %tmp23014 = getelementptr inbounds float, float* %tmp23013, i64 1
+ %tmp23015 = getelementptr inbounds float, float* %tmp23014, i64 1
+ %tmp23016 = getelementptr inbounds float, float* %tmp23015, i64 1
+ %tmp23017 = getelementptr inbounds float, float* %tmp23016, i64 1
+ %tmp23018 = getelementptr inbounds float, float* %tmp23017, i64 1
+ %tmp23019 = getelementptr inbounds float, float* %tmp23018, i64 1
+ %tmp23020 = getelementptr inbounds float, float* %tmp23019, i64 1
+ %tmp23021 = getelementptr inbounds float, float* %tmp23020, i64 1
+ %tmp23022 = getelementptr inbounds float, float* %tmp23021, i64 1
+ %tmp23023 = getelementptr inbounds float, float* %tmp23022, i64 1
+ %tmp23024 = getelementptr inbounds float, float* %tmp23023, i64 1
+ %tmp23025 = getelementptr inbounds float, float* %tmp23024, i64 1
+ %tmp23026 = getelementptr inbounds float, float* %tmp23025, i64 1
+ %tmp23027 = getelementptr inbounds float, float* %tmp23026, i64 1
+ %tmp23028 = getelementptr inbounds float, float* %tmp23027, i64 1
+ %tmp23029 = getelementptr inbounds float, float* %tmp23028, i64 1
+ %tmp23030 = getelementptr inbounds float, float* %tmp23029, i64 1
+ %tmp23031 = getelementptr inbounds float, float* %tmp23030, i64 1
+ %tmp23032 = getelementptr inbounds float, float* %tmp23031, i64 1
+ %tmp23033 = getelementptr inbounds float, float* %tmp23032, i64 1
+ %tmp23034 = getelementptr inbounds float, float* %tmp23033, i64 1
+ %tmp23035 = getelementptr inbounds float, float* %tmp23034, i64 1
+ %tmp23036 = getelementptr inbounds float, float* %tmp23035, i64 1
+ %tmp23037 = getelementptr inbounds float, float* %tmp23036, i64 1
+ %tmp23038 = getelementptr inbounds float, float* %tmp23037, i64 1
+ %tmp23039 = getelementptr inbounds float, float* %tmp23038, i64 1
+ %tmp23040 = getelementptr inbounds float, float* %tmp23039, i64 1
+ %tmp23041 = getelementptr inbounds float, float* %tmp23040, i64 1
+ %tmp23042 = getelementptr inbounds float, float* %tmp23041, i64 1
+ %tmp23043 = getelementptr inbounds float, float* %tmp23042, i64 1
+ %tmp23044 = getelementptr inbounds float, float* %tmp23043, i64 1
+ %tmp23045 = getelementptr inbounds float, float* %tmp23044, i64 1
+ %tmp23046 = getelementptr inbounds float, float* %tmp23045, i64 1
+ %tmp23047 = getelementptr inbounds float, float* %tmp23046, i64 1
+ %tmp23048 = getelementptr inbounds float, float* %tmp23047, i64 1
+ %tmp23049 = getelementptr inbounds float, float* %tmp23048, i64 1
+ %tmp23050 = getelementptr inbounds float, float* %tmp23049, i64 1
+ %tmp23051 = getelementptr inbounds float, float* %tmp23050, i64 1
+ %tmp23052 = getelementptr inbounds float, float* %tmp23051, i64 1
+ %tmp23053 = getelementptr inbounds float, float* %tmp23052, i64 1
+ %tmp23054 = getelementptr inbounds float, float* %tmp23053, i64 1
+ %tmp23055 = getelementptr inbounds float, float* %tmp23054, i64 1
+ %tmp23056 = getelementptr inbounds float, float* %tmp23055, i64 1
+ %tmp23057 = getelementptr inbounds float, float* %tmp23056, i64 1
+ %tmp23058 = getelementptr inbounds float, float* %tmp23057, i64 1
+ %tmp23059 = getelementptr inbounds float, float* %tmp23058, i64 1
+ %tmp23060 = getelementptr inbounds float, float* %tmp23059, i64 1
+ %tmp23061 = getelementptr inbounds float, float* %tmp23060, i64 1
+ %tmp23062 = getelementptr inbounds float, float* %tmp23061, i64 1
+ %tmp23063 = getelementptr inbounds float, float* %tmp23062, i64 1
+ %tmp23064 = getelementptr inbounds float, float* %tmp23063, i64 1
+ %tmp23065 = getelementptr inbounds float, float* %tmp23064, i64 1
+ %tmp23066 = getelementptr inbounds float, float* %tmp23065, i64 1
+ %tmp23067 = getelementptr inbounds float, float* %tmp23066, i64 1
+ %tmp23068 = getelementptr inbounds float, float* %tmp23067, i64 1
+ %tmp23069 = getelementptr inbounds float, float* %tmp23068, i64 1
+ %tmp23070 = getelementptr inbounds float, float* %tmp23069, i64 1
+ %tmp23071 = getelementptr inbounds float, float* %tmp23070, i64 1
+ %tmp23072 = getelementptr inbounds float, float* %tmp23071, i64 1
+ %tmp23073 = getelementptr inbounds float, float* %tmp23072, i64 1
+ %tmp23074 = getelementptr inbounds float, float* %tmp23073, i64 1
+ %tmp23075 = getelementptr inbounds float, float* %tmp23074, i64 1
+ %tmp23076 = getelementptr inbounds float, float* %tmp23075, i64 1
+ %tmp23077 = getelementptr inbounds float, float* %tmp23076, i64 1
+ %tmp23078 = getelementptr inbounds float, float* %tmp23077, i64 1
+ %tmp23079 = getelementptr inbounds float, float* %tmp23078, i64 1
+ %tmp23080 = getelementptr inbounds float, float* %tmp23079, i64 1
+ %tmp23081 = getelementptr inbounds float, float* %tmp23080, i64 1
+ %tmp23082 = getelementptr inbounds float, float* %tmp23081, i64 1
+ %tmp23083 = getelementptr inbounds float, float* %tmp23082, i64 1
+ %tmp23084 = getelementptr inbounds float, float* %tmp23083, i64 1
+ %tmp23085 = getelementptr inbounds float, float* %tmp23084, i64 1
+ %tmp23086 = getelementptr inbounds float, float* %tmp23085, i64 1
+ %tmp23087 = getelementptr inbounds float, float* %tmp23086, i64 1
+ %tmp23088 = getelementptr inbounds float, float* %tmp23087, i64 1
+ %tmp23089 = getelementptr inbounds float, float* %tmp23088, i64 1
+ %tmp23090 = getelementptr inbounds float, float* %tmp23089, i64 1
+ %tmp23091 = getelementptr inbounds float, float* %tmp23090, i64 1
+ %tmp23092 = getelementptr inbounds float, float* %tmp23091, i64 1
+ %tmp23093 = getelementptr inbounds float, float* %tmp23092, i64 1
+ %tmp23094 = getelementptr inbounds float, float* %tmp23093, i64 1
+ %tmp23095 = getelementptr inbounds float, float* %tmp23094, i64 1
+ %tmp23096 = getelementptr inbounds float, float* %tmp23095, i64 1
+ %tmp23097 = getelementptr inbounds float, float* %tmp23096, i64 1
+ %tmp23098 = getelementptr inbounds float, float* %tmp23097, i64 1
+ %tmp23099 = getelementptr inbounds float, float* %tmp23098, i64 1
+ %tmp23100 = getelementptr inbounds float, float* %tmp23099, i64 1
+ %tmp23101 = getelementptr inbounds float, float* %tmp23100, i64 1
+ %tmp23102 = getelementptr inbounds float, float* %tmp23101, i64 1
+ %tmp23103 = getelementptr inbounds float, float* %tmp23102, i64 1
+ %tmp23104 = getelementptr inbounds float, float* %tmp23103, i64 1
+ %tmp23105 = getelementptr inbounds float, float* %tmp23104, i64 1
+ %tmp23106 = getelementptr inbounds float, float* %tmp23105, i64 1
+ %tmp23107 = getelementptr inbounds float, float* %tmp23106, i64 1
+ %tmp23108 = getelementptr inbounds float, float* %tmp23107, i64 1
+ %tmp23109 = getelementptr inbounds float, float* %tmp23108, i64 1
+ %tmp23110 = getelementptr inbounds float, float* %tmp23109, i64 1
+ %tmp23111 = getelementptr inbounds float, float* %tmp23110, i64 1
+ %tmp23112 = getelementptr inbounds float, float* %tmp23111, i64 1
+ %tmp23113 = getelementptr inbounds float, float* %tmp23112, i64 1
+ %tmp23114 = getelementptr inbounds float, float* %tmp23113, i64 1
+ %tmp23115 = getelementptr inbounds float, float* %tmp23114, i64 1
+ %tmp23116 = getelementptr inbounds float, float* %tmp23115, i64 1
+ %tmp23117 = getelementptr inbounds float, float* %tmp23116, i64 1
+ %tmp23118 = getelementptr inbounds float, float* %tmp23117, i64 1
+ %tmp23119 = getelementptr inbounds float, float* %tmp23118, i64 1
+ %tmp23120 = getelementptr inbounds float, float* %tmp23119, i64 1
+ %tmp23121 = getelementptr inbounds float, float* %tmp23120, i64 1
+ %tmp23122 = getelementptr inbounds float, float* %tmp23121, i64 1
+ %tmp23123 = getelementptr inbounds float, float* %tmp23122, i64 1
+ %tmp23124 = getelementptr inbounds float, float* %tmp23123, i64 1
+ %tmp23125 = getelementptr inbounds float, float* %tmp23124, i64 1
+ %tmp23126 = getelementptr inbounds float, float* %tmp23125, i64 1
+ %tmp23127 = getelementptr inbounds float, float* %tmp23126, i64 1
+ %tmp23128 = getelementptr inbounds float, float* %tmp23127, i64 1
+ %tmp23129 = getelementptr inbounds float, float* %tmp23128, i64 1
+ %tmp23130 = getelementptr inbounds float, float* %tmp23129, i64 1
+ %tmp23131 = getelementptr inbounds float, float* %tmp23130, i64 1
+ %tmp23132 = getelementptr inbounds float, float* %tmp23131, i64 1
+ %tmp23133 = getelementptr inbounds float, float* %tmp23132, i64 1
+ %tmp23134 = getelementptr inbounds float, float* %tmp23133, i64 1
+ %tmp23135 = getelementptr inbounds float, float* %tmp23134, i64 1
+ %tmp23136 = getelementptr inbounds float, float* %tmp23135, i64 1
+ %tmp23137 = getelementptr inbounds float, float* %tmp23136, i64 1
+ %tmp23138 = getelementptr inbounds float, float* %tmp23137, i64 1
+ %tmp23139 = getelementptr inbounds float, float* %tmp23138, i64 1
+ %tmp23140 = getelementptr inbounds float, float* %tmp23139, i64 1
+ %tmp23141 = getelementptr inbounds float, float* %tmp23140, i64 1
+ %tmp23142 = getelementptr inbounds float, float* %tmp23141, i64 1
+ %tmp23143 = getelementptr inbounds float, float* %tmp23142, i64 1
+ %tmp23144 = getelementptr inbounds float, float* %tmp23143, i64 1
+ %tmp23145 = getelementptr inbounds float, float* %tmp23144, i64 1
+ %tmp23146 = getelementptr inbounds float, float* %tmp23145, i64 1
+ %tmp23147 = getelementptr inbounds float, float* %tmp23146, i64 1
+ %tmp23148 = getelementptr inbounds float, float* %tmp23147, i64 1
+ %tmp23149 = getelementptr inbounds float, float* %tmp23148, i64 1
+ %tmp23150 = getelementptr inbounds float, float* %tmp23149, i64 1
+ %tmp23151 = getelementptr inbounds float, float* %tmp23150, i64 1
+ %tmp23152 = getelementptr inbounds float, float* %tmp23151, i64 1
+ %tmp23153 = getelementptr inbounds float, float* %tmp23152, i64 1
+ %tmp23154 = getelementptr inbounds float, float* %tmp23153, i64 1
+ %tmp23155 = getelementptr inbounds float, float* %tmp23154, i64 1
+ %tmp23156 = getelementptr inbounds float, float* %tmp23155, i64 1
+ %tmp23157 = getelementptr inbounds float, float* %tmp23156, i64 1
+ %tmp23158 = getelementptr inbounds float, float* %tmp23157, i64 1
+ %tmp23159 = getelementptr inbounds float, float* %tmp23158, i64 1
+ %tmp23160 = getelementptr inbounds float, float* %tmp23159, i64 1
+ %tmp23161 = getelementptr inbounds float, float* %tmp23160, i64 1
+ %tmp23162 = getelementptr inbounds float, float* %tmp23161, i64 1
+ %tmp23163 = getelementptr inbounds float, float* %tmp23162, i64 1
+ %tmp23164 = getelementptr inbounds float, float* %tmp23163, i64 1
+ %tmp23165 = getelementptr inbounds float, float* %tmp23164, i64 1
+ %tmp23166 = getelementptr inbounds float, float* %tmp23165, i64 1
+ %tmp23167 = getelementptr inbounds float, float* %tmp23166, i64 1
+ %tmp23168 = getelementptr inbounds float, float* %tmp23167, i64 1
+ %tmp23169 = getelementptr inbounds float, float* %tmp23168, i64 1
+ %tmp23170 = getelementptr inbounds float, float* %tmp23169, i64 1
+ %tmp23171 = getelementptr inbounds float, float* %tmp23170, i64 1
+ %tmp23172 = getelementptr inbounds float, float* %tmp23171, i64 1
+ %tmp23173 = getelementptr inbounds float, float* %tmp23172, i64 1
+ %tmp23174 = getelementptr inbounds float, float* %tmp23173, i64 1
+ %tmp23175 = getelementptr inbounds float, float* %tmp23174, i64 1
+ %tmp23176 = getelementptr inbounds float, float* %tmp23175, i64 1
+ %tmp23177 = getelementptr inbounds float, float* %tmp23176, i64 1
+ %tmp23178 = getelementptr inbounds float, float* %tmp23177, i64 1
+ %tmp23179 = getelementptr inbounds float, float* %tmp23178, i64 1
+ %tmp23180 = getelementptr inbounds float, float* %tmp23179, i64 1
+ %tmp23181 = getelementptr inbounds float, float* %tmp23180, i64 1
+ %tmp23182 = getelementptr inbounds float, float* %tmp23181, i64 1
+ %tmp23183 = getelementptr inbounds float, float* %tmp23182, i64 1
+ %tmp23184 = getelementptr inbounds float, float* %tmp23183, i64 1
+ %tmp23185 = getelementptr inbounds float, float* %tmp23184, i64 1
+ %tmp23186 = getelementptr inbounds float, float* %tmp23185, i64 1
+ %tmp23187 = getelementptr inbounds float, float* %tmp23186, i64 1
+ %tmp23188 = getelementptr inbounds float, float* %tmp23187, i64 1
+ %tmp23189 = getelementptr inbounds float, float* %tmp23188, i64 1
+ %tmp23190 = getelementptr inbounds float, float* %tmp23189, i64 1
+ %tmp23191 = getelementptr inbounds float, float* %tmp23190, i64 1
+ %tmp23192 = getelementptr inbounds float, float* %tmp23191, i64 1
+ %tmp23193 = getelementptr inbounds float, float* %tmp23192, i64 1
+ %tmp23194 = getelementptr inbounds float, float* %tmp23193, i64 1
+ %tmp23195 = getelementptr inbounds float, float* %tmp23194, i64 1
+ %tmp23196 = getelementptr inbounds float, float* %tmp23195, i64 1
+ %tmp23197 = getelementptr inbounds float, float* %tmp23196, i64 1
+ %tmp23198 = getelementptr inbounds float, float* %tmp23197, i64 1
+ %tmp23199 = getelementptr inbounds float, float* %tmp23198, i64 1
+ %tmp23200 = getelementptr inbounds float, float* %tmp23199, i64 1
+ %tmp23201 = getelementptr inbounds float, float* %tmp23200, i64 1
+ %tmp23202 = getelementptr inbounds float, float* %tmp23201, i64 1
+ %tmp23203 = getelementptr inbounds float, float* %tmp23202, i64 1
+ %tmp23204 = getelementptr inbounds float, float* %tmp23203, i64 1
+ %tmp23205 = getelementptr inbounds float, float* %tmp23204, i64 1
+ %tmp23206 = getelementptr inbounds float, float* %tmp23205, i64 1
+ %tmp23207 = getelementptr inbounds float, float* %tmp23206, i64 1
+ %tmp23208 = getelementptr inbounds float, float* %tmp23207, i64 1
+ %tmp23209 = getelementptr inbounds float, float* %tmp23208, i64 1
+ %tmp23210 = getelementptr inbounds float, float* %tmp23209, i64 1
+ %tmp23211 = getelementptr inbounds float, float* %tmp23210, i64 1
+ %tmp23212 = getelementptr inbounds float, float* %tmp23211, i64 1
+ %tmp23213 = getelementptr inbounds float, float* %tmp23212, i64 1
+ %tmp23214 = getelementptr inbounds float, float* %tmp23213, i64 1
+ %tmp23215 = getelementptr inbounds float, float* %tmp23214, i64 1
+ %tmp23216 = getelementptr inbounds float, float* %tmp23215, i64 1
+ %tmp23217 = getelementptr inbounds float, float* %tmp23216, i64 1
+ %tmp23218 = getelementptr inbounds float, float* %tmp23217, i64 1
+ %tmp23219 = getelementptr inbounds float, float* %tmp23218, i64 1
+ %tmp23220 = getelementptr inbounds float, float* %tmp23219, i64 1
+ %tmp23221 = getelementptr inbounds float, float* %tmp23220, i64 1
+ %tmp23222 = getelementptr inbounds float, float* %tmp23221, i64 1
+ %tmp23223 = getelementptr inbounds float, float* %tmp23222, i64 1
+ %tmp23224 = getelementptr inbounds float, float* %tmp23223, i64 1
+ %tmp23225 = getelementptr inbounds float, float* %tmp23224, i64 1
+ %tmp23226 = getelementptr inbounds float, float* %tmp23225, i64 1
+ %tmp23227 = getelementptr inbounds float, float* %tmp23226, i64 1
+ %tmp23228 = getelementptr inbounds float, float* %tmp23227, i64 1
+ %tmp23229 = getelementptr inbounds float, float* %tmp23228, i64 1
+ %tmp23230 = getelementptr inbounds float, float* %tmp23229, i64 1
+ %tmp23231 = getelementptr inbounds float, float* %tmp23230, i64 1
+ %tmp23232 = getelementptr inbounds float, float* %tmp23231, i64 1
+ %tmp23233 = getelementptr inbounds float, float* %tmp23232, i64 1
+ %tmp23234 = getelementptr inbounds float, float* %tmp23233, i64 1
+ %tmp23235 = getelementptr inbounds float, float* %tmp23234, i64 1
+ %tmp23236 = getelementptr inbounds float, float* %tmp23235, i64 1
+ %tmp23237 = getelementptr inbounds float, float* %tmp23236, i64 1
+ %tmp23238 = getelementptr inbounds float, float* %tmp23237, i64 1
+ %tmp23239 = getelementptr inbounds float, float* %tmp23238, i64 1
+ %tmp23240 = getelementptr inbounds float, float* %tmp23239, i64 1
+ %tmp23241 = getelementptr inbounds float, float* %tmp23240, i64 1
+ %tmp23242 = getelementptr inbounds float, float* %tmp23241, i64 1
+ %tmp23243 = getelementptr inbounds float, float* %tmp23242, i64 1
+ %tmp23244 = getelementptr inbounds float, float* %tmp23243, i64 1
+ %tmp23245 = getelementptr inbounds float, float* %tmp23244, i64 1
+ %tmp23246 = getelementptr inbounds float, float* %tmp23245, i64 1
+ %tmp23247 = getelementptr inbounds float, float* %tmp23246, i64 1
+ %tmp23248 = getelementptr inbounds float, float* %tmp23247, i64 1
+ %tmp23249 = getelementptr inbounds float, float* %tmp23248, i64 1
+ %tmp23250 = getelementptr inbounds float, float* %tmp23249, i64 1
+ %tmp23251 = getelementptr inbounds float, float* %tmp23250, i64 1
+ %tmp23252 = getelementptr inbounds float, float* %tmp23251, i64 1
+ %tmp23253 = getelementptr inbounds float, float* %tmp23252, i64 1
+ %tmp23254 = getelementptr inbounds float, float* %tmp23253, i64 1
+ %tmp23255 = getelementptr inbounds float, float* %tmp23254, i64 1
+ %tmp23256 = getelementptr inbounds float, float* %tmp23255, i64 1
+ %tmp23257 = getelementptr inbounds float, float* %tmp23256, i64 1
+ %tmp23258 = getelementptr inbounds float, float* %tmp23257, i64 1
+ %tmp23259 = getelementptr inbounds float, float* %tmp23258, i64 1
+ %tmp23260 = getelementptr inbounds float, float* %tmp23259, i64 1
+ %tmp23261 = getelementptr inbounds float, float* %tmp23260, i64 1
+ %tmp23262 = getelementptr inbounds float, float* %tmp23261, i64 1
+ %tmp23263 = getelementptr inbounds float, float* %tmp23262, i64 1
+ %tmp23264 = getelementptr inbounds float, float* %tmp23263, i64 1
+ %tmp23265 = getelementptr inbounds float, float* %tmp23264, i64 1
+ %tmp23266 = getelementptr inbounds float, float* %tmp23265, i64 1
+ %tmp23267 = getelementptr inbounds float, float* %tmp23266, i64 1
+ %tmp23268 = getelementptr inbounds float, float* %tmp23267, i64 1
+ %tmp23269 = getelementptr inbounds float, float* %tmp23268, i64 1
+ %tmp23270 = getelementptr inbounds float, float* %tmp23269, i64 1
+ %tmp23271 = getelementptr inbounds float, float* %tmp23270, i64 1
+ %tmp23272 = getelementptr inbounds float, float* %tmp23271, i64 1
+ %tmp23273 = getelementptr inbounds float, float* %tmp23272, i64 1
+ %tmp23274 = getelementptr inbounds float, float* %tmp23273, i64 1
+ %tmp23275 = getelementptr inbounds float, float* %tmp23274, i64 1
+ %tmp23276 = getelementptr inbounds float, float* %tmp23275, i64 1
+ %tmp23277 = getelementptr inbounds float, float* %tmp23276, i64 1
+ %tmp23278 = getelementptr inbounds float, float* %tmp23277, i64 1
+ %tmp23279 = getelementptr inbounds float, float* %tmp23278, i64 1
+ %tmp23280 = getelementptr inbounds float, float* %tmp23279, i64 1
+ %tmp23281 = getelementptr inbounds float, float* %tmp23280, i64 1
+ %tmp23282 = getelementptr inbounds float, float* %tmp23281, i64 1
+ %tmp23283 = getelementptr inbounds float, float* %tmp23282, i64 1
+ %tmp23284 = getelementptr inbounds float, float* %tmp23283, i64 1
+ %tmp23285 = getelementptr inbounds float, float* %tmp23284, i64 1
+ %tmp23286 = getelementptr inbounds float, float* %tmp23285, i64 1
+ %tmp23287 = getelementptr inbounds float, float* %tmp23286, i64 1
+ %tmp23288 = getelementptr inbounds float, float* %tmp23287, i64 1
+ %tmp23289 = getelementptr inbounds float, float* %tmp23288, i64 1
+ %tmp23290 = getelementptr inbounds float, float* %tmp23289, i64 1
+ %tmp23291 = getelementptr inbounds float, float* %tmp23290, i64 1
+ %tmp23292 = getelementptr inbounds float, float* %tmp23291, i64 1
+ %tmp23293 = getelementptr inbounds float, float* %tmp23292, i64 1
+ %tmp23294 = getelementptr inbounds float, float* %tmp23293, i64 1
+ %tmp23295 = getelementptr inbounds float, float* %tmp23294, i64 1
+ %tmp23296 = getelementptr inbounds float, float* %tmp23295, i64 1
+ %tmp23297 = getelementptr inbounds float, float* %tmp23296, i64 1
+ %tmp23298 = getelementptr inbounds float, float* %tmp23297, i64 1
+ %tmp23299 = getelementptr inbounds float, float* %tmp23298, i64 1
+ %tmp23300 = getelementptr inbounds float, float* %tmp23299, i64 1
+ %tmp23301 = getelementptr inbounds float, float* %tmp23300, i64 1
+ %tmp23302 = getelementptr inbounds float, float* %tmp23301, i64 1
+ %tmp23303 = getelementptr inbounds float, float* %tmp23302, i64 1
+ %tmp23304 = getelementptr inbounds float, float* %tmp23303, i64 1
+ %tmp23305 = getelementptr inbounds float, float* %tmp23304, i64 1
+ %tmp23306 = getelementptr inbounds float, float* %tmp23305, i64 1
+ %tmp23307 = getelementptr inbounds float, float* %tmp23306, i64 1
+ %tmp23308 = getelementptr inbounds float, float* %tmp23307, i64 1
+ %tmp23309 = getelementptr inbounds float, float* %tmp23308, i64 1
+ %tmp23310 = getelementptr inbounds float, float* %tmp23309, i64 1
+ %tmp23311 = getelementptr inbounds float, float* %tmp23310, i64 1
+ %tmp23312 = getelementptr inbounds float, float* %tmp23311, i64 1
+ %tmp23313 = getelementptr inbounds float, float* %tmp23312, i64 1
+ %tmp23314 = getelementptr inbounds float, float* %tmp23313, i64 1
+ %tmp23315 = getelementptr inbounds float, float* %tmp23314, i64 1
+ %tmp23316 = getelementptr inbounds float, float* %tmp23315, i64 1
+ %tmp23317 = getelementptr inbounds float, float* %tmp23316, i64 1
+ %tmp23318 = getelementptr inbounds float, float* %tmp23317, i64 1
+ %tmp23319 = getelementptr inbounds float, float* %tmp23318, i64 1
+ %tmp23320 = getelementptr inbounds float, float* %tmp23319, i64 1
+ %tmp23321 = getelementptr inbounds float, float* %tmp23320, i64 1
+ %tmp23322 = getelementptr inbounds float, float* %tmp23321, i64 1
+ %tmp23323 = getelementptr inbounds float, float* %tmp23322, i64 1
+ %tmp23324 = getelementptr inbounds float, float* %tmp23323, i64 1
+ %tmp23325 = getelementptr inbounds float, float* %tmp23324, i64 1
+ %tmp23326 = getelementptr inbounds float, float* %tmp23325, i64 1
+ %tmp23327 = getelementptr inbounds float, float* %tmp23326, i64 1
+ %tmp23328 = getelementptr inbounds float, float* %tmp23327, i64 1
+ %tmp23329 = getelementptr inbounds float, float* %tmp23328, i64 1
+ %tmp23330 = getelementptr inbounds float, float* %tmp23329, i64 1
+ %tmp23331 = getelementptr inbounds float, float* %tmp23330, i64 1
+ %tmp23332 = getelementptr inbounds float, float* %tmp23331, i64 1
+ %tmp23333 = getelementptr inbounds float, float* %tmp23332, i64 1
+ %tmp23334 = getelementptr inbounds float, float* %tmp23333, i64 1
+ %tmp23335 = getelementptr inbounds float, float* %tmp23334, i64 1
+ %tmp23336 = getelementptr inbounds float, float* %tmp23335, i64 1
+ %tmp23337 = getelementptr inbounds float, float* %tmp23336, i64 1
+ %tmp23338 = getelementptr inbounds float, float* %tmp23337, i64 1
+ %tmp23339 = getelementptr inbounds float, float* %tmp23338, i64 1
+ %tmp23340 = getelementptr inbounds float, float* %tmp23339, i64 1
+ %tmp23341 = getelementptr inbounds float, float* %tmp23340, i64 1
+ %tmp23342 = getelementptr inbounds float, float* %tmp23341, i64 1
+ %tmp23343 = getelementptr inbounds float, float* %tmp23342, i64 1
+ %tmp23344 = getelementptr inbounds float, float* %tmp23343, i64 1
+ %tmp23345 = getelementptr inbounds float, float* %tmp23344, i64 1
+ %tmp23346 = getelementptr inbounds float, float* %tmp23345, i64 1
+ %tmp23347 = getelementptr inbounds float, float* %tmp23346, i64 1
+ %tmp23348 = getelementptr inbounds float, float* %tmp23347, i64 1
+ %tmp23349 = getelementptr inbounds float, float* %tmp23348, i64 1
+ %tmp23350 = getelementptr inbounds float, float* %tmp23349, i64 1
+ %tmp23351 = getelementptr inbounds float, float* %tmp23350, i64 1
+ %tmp23352 = getelementptr inbounds float, float* %tmp23351, i64 1
+ %tmp23353 = getelementptr inbounds float, float* %tmp23352, i64 1
+ %tmp23354 = getelementptr inbounds float, float* %tmp23353, i64 1
+ %tmp23355 = getelementptr inbounds float, float* %tmp23354, i64 1
+ %tmp23356 = getelementptr inbounds float, float* %tmp23355, i64 1
+ %tmp23357 = getelementptr inbounds float, float* %tmp23356, i64 1
+ %tmp23358 = getelementptr inbounds float, float* %tmp23357, i64 1
+ %tmp23359 = getelementptr inbounds float, float* %tmp23358, i64 1
+ %tmp23360 = getelementptr inbounds float, float* %tmp23359, i64 1
+ %tmp23361 = getelementptr inbounds float, float* %tmp23360, i64 1
+ %tmp23362 = getelementptr inbounds float, float* %tmp23361, i64 1
+ %tmp23363 = getelementptr inbounds float, float* %tmp23362, i64 1
+ %tmp23364 = getelementptr inbounds float, float* %tmp23363, i64 1
+ %tmp23365 = getelementptr inbounds float, float* %tmp23364, i64 1
+ %tmp23366 = getelementptr inbounds float, float* %tmp23365, i64 1
+ %tmp23367 = getelementptr inbounds float, float* %tmp23366, i64 1
+ %tmp23368 = getelementptr inbounds float, float* %tmp23367, i64 1
+ %tmp23369 = getelementptr inbounds float, float* %tmp23368, i64 1
+ %tmp23370 = getelementptr inbounds float, float* %tmp23369, i64 1
+ %tmp23371 = getelementptr inbounds float, float* %tmp23370, i64 1
+ %tmp23372 = getelementptr inbounds float, float* %tmp23371, i64 1
+ %tmp23373 = getelementptr inbounds float, float* %tmp23372, i64 1
+ %tmp23374 = getelementptr inbounds float, float* %tmp23373, i64 1
+ %tmp23375 = getelementptr inbounds float, float* %tmp23374, i64 1
+ %tmp23376 = getelementptr inbounds float, float* %tmp23375, i64 1
+ %tmp23377 = getelementptr inbounds float, float* %tmp23376, i64 1
+ %tmp23378 = getelementptr inbounds float, float* %tmp23377, i64 1
+ %tmp23379 = getelementptr inbounds float, float* %tmp23378, i64 1
+ %tmp23380 = getelementptr inbounds float, float* %tmp23379, i64 1
+ %tmp23381 = getelementptr inbounds float, float* %tmp23380, i64 1
+ %tmp23382 = getelementptr inbounds float, float* %tmp23381, i64 1
+ %tmp23383 = getelementptr inbounds float, float* %tmp23382, i64 1
+ %tmp23384 = getelementptr inbounds float, float* %tmp23383, i64 1
+ %tmp23385 = getelementptr inbounds float, float* %tmp23384, i64 1
+ %tmp23386 = getelementptr inbounds float, float* %tmp23385, i64 1
+ %tmp23387 = getelementptr inbounds float, float* %tmp23386, i64 1
+ %tmp23388 = getelementptr inbounds float, float* %tmp23387, i64 1
+ %tmp23389 = getelementptr inbounds float, float* %tmp23388, i64 1
+ %tmp23390 = getelementptr inbounds float, float* %tmp23389, i64 1
+ %tmp23391 = getelementptr inbounds float, float* %tmp23390, i64 1
+ %tmp23392 = getelementptr inbounds float, float* %tmp23391, i64 1
+ %tmp23393 = getelementptr inbounds float, float* %tmp23392, i64 1
+ %tmp23394 = getelementptr inbounds float, float* %tmp23393, i64 1
+ %tmp23395 = getelementptr inbounds float, float* %tmp23394, i64 1
+ %tmp23396 = getelementptr inbounds float, float* %tmp23395, i64 1
+ %tmp23397 = getelementptr inbounds float, float* %tmp23396, i64 1
+ %tmp23398 = getelementptr inbounds float, float* %tmp23397, i64 1
+ %tmp23399 = getelementptr inbounds float, float* %tmp23398, i64 1
+ %tmp23400 = getelementptr inbounds float, float* %tmp23399, i64 1
+ %tmp23401 = getelementptr inbounds float, float* %tmp23400, i64 1
+ %tmp23402 = getelementptr inbounds float, float* %tmp23401, i64 1
+ %tmp23403 = getelementptr inbounds float, float* %tmp23402, i64 1
+ %tmp23404 = getelementptr inbounds float, float* %tmp23403, i64 1
+ %tmp23405 = getelementptr inbounds float, float* %tmp23404, i64 1
+ %tmp23406 = getelementptr inbounds float, float* %tmp23405, i64 1
+ %tmp23407 = getelementptr inbounds float, float* %tmp23406, i64 1
+ %tmp23408 = getelementptr inbounds float, float* %tmp23407, i64 1
+ %tmp23409 = getelementptr inbounds float, float* %tmp23408, i64 1
+ %tmp23410 = getelementptr inbounds float, float* %tmp23409, i64 1
+ %tmp23411 = getelementptr inbounds float, float* %tmp23410, i64 1
+ %tmp23412 = getelementptr inbounds float, float* %tmp23411, i64 1
+ %tmp23413 = getelementptr inbounds float, float* %tmp23412, i64 1
+ %tmp23414 = getelementptr inbounds float, float* %tmp23413, i64 1
+ %tmp23415 = getelementptr inbounds float, float* %tmp23414, i64 1
+ %tmp23416 = getelementptr inbounds float, float* %tmp23415, i64 1
+ %tmp23417 = getelementptr inbounds float, float* %tmp23416, i64 1
+ %tmp23418 = getelementptr inbounds float, float* %tmp23417, i64 1
+ %tmp23419 = getelementptr inbounds float, float* %tmp23418, i64 1
+ %tmp23420 = getelementptr inbounds float, float* %tmp23419, i64 1
+ %tmp23421 = getelementptr inbounds float, float* %tmp23420, i64 1
+ %tmp23422 = getelementptr inbounds float, float* %tmp23421, i64 1
+ %tmp23423 = getelementptr inbounds float, float* %tmp23422, i64 1
+ %tmp23424 = getelementptr inbounds float, float* %tmp23423, i64 1
+ %tmp23425 = getelementptr inbounds float, float* %tmp23424, i64 1
+ %tmp23426 = getelementptr inbounds float, float* %tmp23425, i64 1
+ %tmp23427 = getelementptr inbounds float, float* %tmp23426, i64 1
+ %tmp23428 = getelementptr inbounds float, float* %tmp23427, i64 1
+ %tmp23429 = getelementptr inbounds float, float* %tmp23428, i64 1
+ %tmp23430 = getelementptr inbounds float, float* %tmp23429, i64 1
+ %tmp23431 = getelementptr inbounds float, float* %tmp23430, i64 1
+ %tmp23432 = getelementptr inbounds float, float* %tmp23431, i64 1
+ %tmp23433 = getelementptr inbounds float, float* %tmp23432, i64 1
+ %tmp23434 = getelementptr inbounds float, float* %tmp23433, i64 1
+ %tmp23435 = getelementptr inbounds float, float* %tmp23434, i64 1
+ %tmp23436 = getelementptr inbounds float, float* %tmp23435, i64 1
+ %tmp23437 = getelementptr inbounds float, float* %tmp23436, i64 1
+ %tmp23438 = getelementptr inbounds float, float* %tmp23437, i64 1
+ %tmp23439 = getelementptr inbounds float, float* %tmp23438, i64 1
+ %tmp23440 = getelementptr inbounds float, float* %tmp23439, i64 1
+ %tmp23441 = getelementptr inbounds float, float* %tmp23440, i64 1
+ %tmp23442 = getelementptr inbounds float, float* %tmp23441, i64 1
+ %tmp23443 = getelementptr inbounds float, float* %tmp23442, i64 1
+ %tmp23444 = getelementptr inbounds float, float* %tmp23443, i64 1
+ %tmp23445 = getelementptr inbounds float, float* %tmp23444, i64 1
+ %tmp23446 = getelementptr inbounds float, float* %tmp23445, i64 1
+ %tmp23447 = getelementptr inbounds float, float* %tmp23446, i64 1
+ %tmp23448 = getelementptr inbounds float, float* %tmp23447, i64 1
+ %tmp23449 = getelementptr inbounds float, float* %tmp23448, i64 1
+ %tmp23450 = getelementptr inbounds float, float* %tmp23449, i64 1
+ %tmp23451 = getelementptr inbounds float, float* %tmp23450, i64 1
+ %tmp23452 = getelementptr inbounds float, float* %tmp23451, i64 1
+ %tmp23453 = getelementptr inbounds float, float* %tmp23452, i64 1
+ %tmp23454 = getelementptr inbounds float, float* %tmp23453, i64 1
+ %tmp23455 = getelementptr inbounds float, float* %tmp23454, i64 1
+ %tmp23456 = getelementptr inbounds float, float* %tmp23455, i64 1
+ %tmp23457 = getelementptr inbounds float, float* %tmp23456, i64 1
+ %tmp23458 = getelementptr inbounds float, float* %tmp23457, i64 1
+ %tmp23459 = getelementptr inbounds float, float* %tmp23458, i64 1
+ %tmp23460 = getelementptr inbounds float, float* %tmp23459, i64 1
+ %tmp23461 = getelementptr inbounds float, float* %tmp23460, i64 1
+ %tmp23462 = getelementptr inbounds float, float* %tmp23461, i64 1
+ %tmp23463 = getelementptr inbounds float, float* %tmp23462, i64 1
+ %tmp23464 = getelementptr inbounds float, float* %tmp23463, i64 1
+ %tmp23465 = getelementptr inbounds float, float* %tmp23464, i64 1
+ %tmp23466 = getelementptr inbounds float, float* %tmp23465, i64 1
+ %tmp23467 = getelementptr inbounds float, float* %tmp23466, i64 1
+ %tmp23468 = getelementptr inbounds float, float* %tmp23467, i64 1
+ %tmp23469 = getelementptr inbounds float, float* %tmp23468, i64 1
+ %tmp23470 = getelementptr inbounds float, float* %tmp23469, i64 1
+ %tmp23471 = getelementptr inbounds float, float* %tmp23470, i64 1
+ %tmp23472 = getelementptr inbounds float, float* %tmp23471, i64 1
+ %tmp23473 = getelementptr inbounds float, float* %tmp23472, i64 1
+ %tmp23474 = getelementptr inbounds float, float* %tmp23473, i64 1
+ %tmp23475 = getelementptr inbounds float, float* %tmp23474, i64 1
+ %tmp23476 = getelementptr inbounds float, float* %tmp23475, i64 1
+ %tmp23477 = getelementptr inbounds float, float* %tmp23476, i64 1
+ %tmp23478 = getelementptr inbounds float, float* %tmp23477, i64 1
+ %tmp23479 = getelementptr inbounds float, float* %tmp23478, i64 1
+ %tmp23480 = getelementptr inbounds float, float* %tmp23479, i64 1
+ %tmp23481 = getelementptr inbounds float, float* %tmp23480, i64 1
+ %tmp23482 = getelementptr inbounds float, float* %tmp23481, i64 1
+ %tmp23483 = getelementptr inbounds float, float* %tmp23482, i64 1
+ %tmp23484 = getelementptr inbounds float, float* %tmp23483, i64 1
+ %tmp23485 = getelementptr inbounds float, float* %tmp23484, i64 1
+ %tmp23486 = getelementptr inbounds float, float* %tmp23485, i64 1
+ %tmp23487 = getelementptr inbounds float, float* %tmp23486, i64 1
+ %tmp23488 = getelementptr inbounds float, float* %tmp23487, i64 1
+ %tmp23489 = getelementptr inbounds float, float* %tmp23488, i64 1
+ %tmp23490 = getelementptr inbounds float, float* %tmp23489, i64 1
+ %tmp23491 = getelementptr inbounds float, float* %tmp23490, i64 1
+ %tmp23492 = getelementptr inbounds float, float* %tmp23491, i64 1
+ %tmp23493 = getelementptr inbounds float, float* %tmp23492, i64 1
+ %tmp23494 = getelementptr inbounds float, float* %tmp23493, i64 1
+ %tmp23495 = getelementptr inbounds float, float* %tmp23494, i64 1
+ %tmp23496 = getelementptr inbounds float, float* %tmp23495, i64 1
+ %tmp23497 = getelementptr inbounds float, float* %tmp23496, i64 1
+ %tmp23498 = getelementptr inbounds float, float* %tmp23497, i64 1
+ %tmp23499 = getelementptr inbounds float, float* %tmp23498, i64 1
+ %tmp23500 = getelementptr inbounds float, float* %tmp23499, i64 1
+ %tmp23501 = getelementptr inbounds float, float* %tmp23500, i64 1
+ %tmp23502 = getelementptr inbounds float, float* %tmp23501, i64 1
+ %tmp23503 = getelementptr inbounds float, float* %tmp23502, i64 1
+ %tmp23504 = getelementptr inbounds float, float* %tmp23503, i64 1
+ %tmp23505 = getelementptr inbounds float, float* %tmp23504, i64 1
+ %tmp23506 = getelementptr inbounds float, float* %tmp23505, i64 1
+ %tmp23507 = getelementptr inbounds float, float* %tmp23506, i64 1
+ %tmp23508 = getelementptr inbounds float, float* %tmp23507, i64 1
+ %tmp23509 = getelementptr inbounds float, float* %tmp23508, i64 1
+ %tmp23510 = getelementptr inbounds float, float* %tmp23509, i64 1
+ %tmp23511 = getelementptr inbounds float, float* %tmp23510, i64 1
+ %tmp23512 = getelementptr inbounds float, float* %tmp23511, i64 1
+ %tmp23513 = getelementptr inbounds float, float* %tmp23512, i64 1
+ %tmp23514 = getelementptr inbounds float, float* %tmp23513, i64 1
+ %tmp23515 = getelementptr inbounds float, float* %tmp23514, i64 1
+ %tmp23516 = getelementptr inbounds float, float* %tmp23515, i64 1
+ %tmp23517 = getelementptr inbounds float, float* %tmp23516, i64 1
+ %tmp23518 = getelementptr inbounds float, float* %tmp23517, i64 1
+ %tmp23519 = getelementptr inbounds float, float* %tmp23518, i64 1
+ %tmp23520 = getelementptr inbounds float, float* %tmp23519, i64 1
+ %tmp23521 = getelementptr inbounds float, float* %tmp23520, i64 1
+ %tmp23522 = getelementptr inbounds float, float* %tmp23521, i64 1
+ %tmp23523 = getelementptr inbounds float, float* %tmp23522, i64 1
+ %tmp23524 = getelementptr inbounds float, float* %tmp23523, i64 1
+ %tmp23525 = getelementptr inbounds float, float* %tmp23524, i64 1
+ %tmp23526 = getelementptr inbounds float, float* %tmp23525, i64 1
+ %tmp23527 = getelementptr inbounds float, float* %tmp23526, i64 1
+ %tmp23528 = getelementptr inbounds float, float* %tmp23527, i64 1
+ %tmp23529 = getelementptr inbounds float, float* %tmp23528, i64 1
+ %tmp23530 = getelementptr inbounds float, float* %tmp23529, i64 1
+ %tmp23531 = getelementptr inbounds float, float* %tmp23530, i64 1
+ %tmp23532 = getelementptr inbounds float, float* %tmp23531, i64 1
+ %tmp23533 = getelementptr inbounds float, float* %tmp23532, i64 1
+ %tmp23534 = getelementptr inbounds float, float* %tmp23533, i64 1
+ %tmp23535 = getelementptr inbounds float, float* %tmp23534, i64 1
+ %tmp23536 = getelementptr inbounds float, float* %tmp23535, i64 1
+ %tmp23537 = getelementptr inbounds float, float* %tmp23536, i64 1
+ %tmp23538 = getelementptr inbounds float, float* %tmp23537, i64 1
+ %tmp23539 = getelementptr inbounds float, float* %tmp23538, i64 1
+ %tmp23540 = getelementptr inbounds float, float* %tmp23539, i64 1
+ %tmp23541 = getelementptr inbounds float, float* %tmp23540, i64 1
+ %tmp23542 = getelementptr inbounds float, float* %tmp23541, i64 1
+ %tmp23543 = getelementptr inbounds float, float* %tmp23542, i64 1
+ %tmp23544 = getelementptr inbounds float, float* %tmp23543, i64 1
+ %tmp23545 = getelementptr inbounds float, float* %tmp23544, i64 1
+ %tmp23546 = getelementptr inbounds float, float* %tmp23545, i64 1
+ %tmp23547 = getelementptr inbounds float, float* %tmp23546, i64 1
+ %tmp23548 = getelementptr inbounds float, float* %tmp23547, i64 1
+ %tmp23549 = getelementptr inbounds float, float* %tmp23548, i64 1
+ %tmp23550 = getelementptr inbounds float, float* %tmp23549, i64 1
+ %tmp23551 = getelementptr inbounds float, float* %tmp23550, i64 1
+ %tmp23552 = getelementptr inbounds float, float* %tmp23551, i64 1
+ %tmp23553 = getelementptr inbounds float, float* %tmp23552, i64 1
+ %tmp23554 = getelementptr inbounds float, float* %tmp23553, i64 1
+ %tmp23555 = getelementptr inbounds float, float* %tmp23554, i64 1
+ %tmp23556 = getelementptr inbounds float, float* %tmp23555, i64 1
+ %tmp23557 = getelementptr inbounds float, float* %tmp23556, i64 1
+ %tmp23558 = getelementptr inbounds float, float* %tmp23557, i64 1
+ %tmp23559 = getelementptr inbounds float, float* %tmp23558, i64 1
+ %tmp23560 = getelementptr inbounds float, float* %tmp23559, i64 1
+ %tmp23561 = getelementptr inbounds float, float* %tmp23560, i64 1
+ %tmp23562 = getelementptr inbounds float, float* %tmp23561, i64 1
+ %tmp23563 = getelementptr inbounds float, float* %tmp23562, i64 1
+ %tmp23564 = getelementptr inbounds float, float* %tmp23563, i64 1
+ %tmp23565 = getelementptr inbounds float, float* %tmp23564, i64 1
+ %tmp23566 = getelementptr inbounds float, float* %tmp23565, i64 1
+ %tmp23567 = getelementptr inbounds float, float* %tmp23566, i64 1
+ %tmp23568 = getelementptr inbounds float, float* %tmp23567, i64 1
+ %tmp23569 = getelementptr inbounds float, float* %tmp23568, i64 1
+ %tmp23570 = getelementptr inbounds float, float* %tmp23569, i64 1
+ %tmp23571 = getelementptr inbounds float, float* %tmp23570, i64 1
+ %tmp23572 = getelementptr inbounds float, float* %tmp23571, i64 1
+ %tmp23573 = getelementptr inbounds float, float* %tmp23572, i64 1
+ %tmp23574 = getelementptr inbounds float, float* %tmp23573, i64 1
+ %tmp23575 = getelementptr inbounds float, float* %tmp23574, i64 1
+ %tmp23576 = getelementptr inbounds float, float* %tmp23575, i64 1
+ %tmp23577 = getelementptr inbounds float, float* %tmp23576, i64 1
+ %tmp23578 = getelementptr inbounds float, float* %tmp23577, i64 1
+ %tmp23579 = getelementptr inbounds float, float* %tmp23578, i64 1
+ %tmp23580 = getelementptr inbounds float, float* %tmp23579, i64 1
+ %tmp23581 = getelementptr inbounds float, float* %tmp23580, i64 1
+ %tmp23582 = getelementptr inbounds float, float* %tmp23581, i64 1
+ %tmp23583 = getelementptr inbounds float, float* %tmp23582, i64 1
+ %tmp23584 = getelementptr inbounds float, float* %tmp23583, i64 1
+ %tmp23585 = getelementptr inbounds float, float* %tmp23584, i64 1
+ %tmp23586 = getelementptr inbounds float, float* %tmp23585, i64 1
+ %tmp23587 = getelementptr inbounds float, float* %tmp23586, i64 1
+ %tmp23588 = getelementptr inbounds float, float* %tmp23587, i64 1
+ %tmp23589 = getelementptr inbounds float, float* %tmp23588, i64 1
+ %tmp23590 = getelementptr inbounds float, float* %tmp23589, i64 1
+ %tmp23591 = getelementptr inbounds float, float* %tmp23590, i64 1
+ %tmp23592 = getelementptr inbounds float, float* %tmp23591, i64 1
+ %tmp23593 = getelementptr inbounds float, float* %tmp23592, i64 1
+ %tmp23594 = getelementptr inbounds float, float* %tmp23593, i64 1
+ %tmp23595 = getelementptr inbounds float, float* %tmp23594, i64 1
+ %tmp23596 = getelementptr inbounds float, float* %tmp23595, i64 1
+ %tmp23597 = getelementptr inbounds float, float* %tmp23596, i64 1
+ %tmp23598 = getelementptr inbounds float, float* %tmp23597, i64 1
+ %tmp23599 = getelementptr inbounds float, float* %tmp23598, i64 1
+ %tmp23600 = getelementptr inbounds float, float* %tmp23599, i64 1
+ %tmp23601 = getelementptr inbounds float, float* %tmp23600, i64 1
+ %tmp23602 = getelementptr inbounds float, float* %tmp23601, i64 1
+ %tmp23603 = getelementptr inbounds float, float* %tmp23602, i64 1
+ %tmp23604 = getelementptr inbounds float, float* %tmp23603, i64 1
+ %tmp23605 = getelementptr inbounds float, float* %tmp23604, i64 1
+ %tmp23606 = getelementptr inbounds float, float* %tmp23605, i64 1
+ %tmp23607 = getelementptr inbounds float, float* %tmp23606, i64 1
+ %tmp23608 = getelementptr inbounds float, float* %tmp23607, i64 1
+ %tmp23609 = getelementptr inbounds float, float* %tmp23608, i64 1
+ %tmp23610 = getelementptr inbounds float, float* %tmp23609, i64 1
+ %tmp23611 = getelementptr inbounds float, float* %tmp23610, i64 1
+ %tmp23612 = getelementptr inbounds float, float* %tmp23611, i64 1
+ %tmp23613 = getelementptr inbounds float, float* %tmp23612, i64 1
+ %tmp23614 = getelementptr inbounds float, float* %tmp23613, i64 1
+ %tmp23615 = getelementptr inbounds float, float* %tmp23614, i64 1
+ %tmp23616 = getelementptr inbounds float, float* %tmp23615, i64 1
+ %tmp23617 = getelementptr inbounds float, float* %tmp23616, i64 1
+ %tmp23618 = getelementptr inbounds float, float* %tmp23617, i64 1
+ %tmp23619 = getelementptr inbounds float, float* %tmp23618, i64 1
+ %tmp23620 = getelementptr inbounds float, float* %tmp23619, i64 1
+ %tmp23621 = getelementptr inbounds float, float* %tmp23620, i64 1
+ %tmp23622 = getelementptr inbounds float, float* %tmp23621, i64 1
+ %tmp23623 = getelementptr inbounds float, float* %tmp23622, i64 1
+ %tmp23624 = getelementptr inbounds float, float* %tmp23623, i64 1
+ %tmp23625 = getelementptr inbounds float, float* %tmp23624, i64 1
+ %tmp23626 = getelementptr inbounds float, float* %tmp23625, i64 1
+ %tmp23627 = getelementptr inbounds float, float* %tmp23626, i64 1
+ %tmp23628 = getelementptr inbounds float, float* %tmp23627, i64 1
+ %tmp23629 = getelementptr inbounds float, float* %tmp23628, i64 1
+ %tmp23630 = getelementptr inbounds float, float* %tmp23629, i64 1
+ %tmp23631 = getelementptr inbounds float, float* %tmp23630, i64 1
+ %tmp23632 = getelementptr inbounds float, float* %tmp23631, i64 1
+ %tmp23633 = getelementptr inbounds float, float* %tmp23632, i64 1
+ %tmp23634 = getelementptr inbounds float, float* %tmp23633, i64 1
+ %tmp23635 = getelementptr inbounds float, float* %tmp23634, i64 1
+ %tmp23636 = getelementptr inbounds float, float* %tmp23635, i64 1
+ %tmp23637 = getelementptr inbounds float, float* %tmp23636, i64 1
+ %tmp23638 = getelementptr inbounds float, float* %tmp23637, i64 1
+ %tmp23639 = getelementptr inbounds float, float* %tmp23638, i64 1
+ %tmp23640 = getelementptr inbounds float, float* %tmp23639, i64 1
+ %tmp23641 = getelementptr inbounds float, float* %tmp23640, i64 1
+ %tmp23642 = getelementptr inbounds float, float* %tmp23641, i64 1
+ %tmp23643 = getelementptr inbounds float, float* %tmp23642, i64 1
+ %tmp23644 = getelementptr inbounds float, float* %tmp23643, i64 1
+ %tmp23645 = getelementptr inbounds float, float* %tmp23644, i64 1
+ %tmp23646 = getelementptr inbounds float, float* %tmp23645, i64 1
+ %tmp23647 = getelementptr inbounds float, float* %tmp23646, i64 1
+ %tmp23648 = getelementptr inbounds float, float* %tmp23647, i64 1
+ %tmp23649 = getelementptr inbounds float, float* %tmp23648, i64 1
+ %tmp23650 = getelementptr inbounds float, float* %tmp23649, i64 1
+ %tmp23651 = getelementptr inbounds float, float* %tmp23650, i64 1
+ %tmp23652 = getelementptr inbounds float, float* %tmp23651, i64 1
+ %tmp23653 = getelementptr inbounds float, float* %tmp23652, i64 1
+ %tmp23654 = getelementptr inbounds float, float* %tmp23653, i64 1
+ %tmp23655 = getelementptr inbounds float, float* %tmp23654, i64 1
+ %tmp23656 = getelementptr inbounds float, float* %tmp23655, i64 1
+ %tmp23657 = getelementptr inbounds float, float* %tmp23656, i64 1
+ %tmp23658 = getelementptr inbounds float, float* %tmp23657, i64 1
+ %tmp23659 = getelementptr inbounds float, float* %tmp23658, i64 1
+ %tmp23660 = getelementptr inbounds float, float* %tmp23659, i64 1
+ %tmp23661 = getelementptr inbounds float, float* %tmp23660, i64 1
+ %tmp23662 = getelementptr inbounds float, float* %tmp23661, i64 1
+ %tmp23663 = getelementptr inbounds float, float* %tmp23662, i64 1
+ %tmp23664 = getelementptr inbounds float, float* %tmp23663, i64 1
+ %tmp23665 = getelementptr inbounds float, float* %tmp23664, i64 1
+ %tmp23666 = getelementptr inbounds float, float* %tmp23665, i64 1
+ %tmp23667 = getelementptr inbounds float, float* %tmp23666, i64 1
+ %tmp23668 = getelementptr inbounds float, float* %tmp23667, i64 1
+ %tmp23669 = getelementptr inbounds float, float* %tmp23668, i64 1
+ %tmp23670 = getelementptr inbounds float, float* %tmp23669, i64 1
+ %tmp23671 = getelementptr inbounds float, float* %tmp23670, i64 1
+ %tmp23672 = getelementptr inbounds float, float* %tmp23671, i64 1
+ %tmp23673 = getelementptr inbounds float, float* %tmp23672, i64 1
+ %tmp23674 = getelementptr inbounds float, float* %tmp23673, i64 1
+ %tmp23675 = getelementptr inbounds float, float* %tmp23674, i64 1
+ %tmp23676 = getelementptr inbounds float, float* %tmp23675, i64 1
+ %tmp23677 = getelementptr inbounds float, float* %tmp23676, i64 1
+ %tmp23678 = getelementptr inbounds float, float* %tmp23677, i64 1
+ %tmp23679 = getelementptr inbounds float, float* %tmp23678, i64 1
+ %tmp23680 = getelementptr inbounds float, float* %tmp23679, i64 1
+ %tmp23681 = getelementptr inbounds float, float* %tmp23680, i64 1
+ %tmp23682 = getelementptr inbounds float, float* %tmp23681, i64 1
+ %tmp23683 = getelementptr inbounds float, float* %tmp23682, i64 1
+ %tmp23684 = getelementptr inbounds float, float* %tmp23683, i64 1
+ %tmp23685 = getelementptr inbounds float, float* %tmp23684, i64 1
+ %tmp23686 = getelementptr inbounds float, float* %tmp23685, i64 1
+ %tmp23687 = getelementptr inbounds float, float* %tmp23686, i64 1
+ %tmp23688 = getelementptr inbounds float, float* %tmp23687, i64 1
+ %tmp23689 = getelementptr inbounds float, float* %tmp23688, i64 1
+ %tmp23690 = getelementptr inbounds float, float* %tmp23689, i64 1
+ %tmp23691 = getelementptr inbounds float, float* %tmp23690, i64 1
+ %tmp23692 = getelementptr inbounds float, float* %tmp23691, i64 1
+ %tmp23693 = getelementptr inbounds float, float* %tmp23692, i64 1
+ %tmp23694 = getelementptr inbounds float, float* %tmp23693, i64 1
+ %tmp23695 = getelementptr inbounds float, float* %tmp23694, i64 1
+ %tmp23696 = getelementptr inbounds float, float* %tmp23695, i64 1
+ %tmp23697 = getelementptr inbounds float, float* %tmp23696, i64 1
+ %tmp23698 = getelementptr inbounds float, float* %tmp23697, i64 1
+ %tmp23699 = getelementptr inbounds float, float* %tmp23698, i64 1
+ %tmp23700 = getelementptr inbounds float, float* %tmp23699, i64 1
+ %tmp23701 = getelementptr inbounds float, float* %tmp23700, i64 1
+ %tmp23702 = getelementptr inbounds float, float* %tmp23701, i64 1
+ %tmp23703 = getelementptr inbounds float, float* %tmp23702, i64 1
+ %tmp23704 = getelementptr inbounds float, float* %tmp23703, i64 1
+ %tmp23705 = getelementptr inbounds float, float* %tmp23704, i64 1
+ %tmp23706 = getelementptr inbounds float, float* %tmp23705, i64 1
+ %tmp23707 = getelementptr inbounds float, float* %tmp23706, i64 1
+ %tmp23708 = getelementptr inbounds float, float* %tmp23707, i64 1
+ %tmp23709 = getelementptr inbounds float, float* %tmp23708, i64 1
+ %tmp23710 = getelementptr inbounds float, float* %tmp23709, i64 1
+ %tmp23711 = getelementptr inbounds float, float* %tmp23710, i64 1
+ %tmp23712 = getelementptr inbounds float, float* %tmp23711, i64 1
+ %tmp23713 = getelementptr inbounds float, float* %tmp23712, i64 1
+ %tmp23714 = getelementptr inbounds float, float* %tmp23713, i64 1
+ %tmp23715 = getelementptr inbounds float, float* %tmp23714, i64 1
+ %tmp23716 = getelementptr inbounds float, float* %tmp23715, i64 1
+ %tmp23717 = getelementptr inbounds float, float* %tmp23716, i64 1
+ %tmp23718 = getelementptr inbounds float, float* %tmp23717, i64 1
+ %tmp23719 = getelementptr inbounds float, float* %tmp23718, i64 1
+ %tmp23720 = getelementptr inbounds float, float* %tmp23719, i64 1
+ %tmp23721 = getelementptr inbounds float, float* %tmp23720, i64 1
+ %tmp23722 = getelementptr inbounds float, float* %tmp23721, i64 1
+ %tmp23723 = getelementptr inbounds float, float* %tmp23722, i64 1
+ %tmp23724 = getelementptr inbounds float, float* %tmp23723, i64 1
+ %tmp23725 = getelementptr inbounds float, float* %tmp23724, i64 1
+ %tmp23726 = getelementptr inbounds float, float* %tmp23725, i64 1
+ %tmp23727 = getelementptr inbounds float, float* %tmp23726, i64 1
+ %tmp23728 = getelementptr inbounds float, float* %tmp23727, i64 1
+ %tmp23729 = getelementptr inbounds float, float* %tmp23728, i64 1
+ %tmp23730 = getelementptr inbounds float, float* %tmp23729, i64 1
+ %tmp23731 = getelementptr inbounds float, float* %tmp23730, i64 1
+ %tmp23732 = getelementptr inbounds float, float* %tmp23731, i64 1
+ %tmp23733 = getelementptr inbounds float, float* %tmp23732, i64 1
+ %tmp23734 = getelementptr inbounds float, float* %tmp23733, i64 1
+ %tmp23735 = getelementptr inbounds float, float* %tmp23734, i64 1
+ %tmp23736 = getelementptr inbounds float, float* %tmp23735, i64 1
+ %tmp23737 = getelementptr inbounds float, float* %tmp23736, i64 1
+ %tmp23738 = getelementptr inbounds float, float* %tmp23737, i64 1
+ %tmp23739 = getelementptr inbounds float, float* %tmp23738, i64 1
+ %tmp23740 = getelementptr inbounds float, float* %tmp23739, i64 1
+ %tmp23741 = getelementptr inbounds float, float* %tmp23740, i64 1
+ %tmp23742 = getelementptr inbounds float, float* %tmp23741, i64 1
+ %tmp23743 = getelementptr inbounds float, float* %tmp23742, i64 1
+ %tmp23744 = getelementptr inbounds float, float* %tmp23743, i64 1
+ %tmp23745 = getelementptr inbounds float, float* %tmp23744, i64 1
+ %tmp23746 = getelementptr inbounds float, float* %tmp23745, i64 1
+ %tmp23747 = getelementptr inbounds float, float* %tmp23746, i64 1
+ %tmp23748 = getelementptr inbounds float, float* %tmp23747, i64 1
+ %tmp23749 = getelementptr inbounds float, float* %tmp23748, i64 1
+ %tmp23750 = getelementptr inbounds float, float* %tmp23749, i64 1
+ %tmp23751 = getelementptr inbounds float, float* %tmp23750, i64 1
+ %tmp23752 = getelementptr inbounds float, float* %tmp23751, i64 1
+ %tmp23753 = getelementptr inbounds float, float* %tmp23752, i64 1
+ %tmp23754 = getelementptr inbounds float, float* %tmp23753, i64 1
+ %tmp23755 = getelementptr inbounds float, float* %tmp23754, i64 1
+ %tmp23756 = getelementptr inbounds float, float* %tmp23755, i64 1
+ %tmp23757 = getelementptr inbounds float, float* %tmp23756, i64 1
+ %tmp23758 = getelementptr inbounds float, float* %tmp23757, i64 1
+ %tmp23759 = getelementptr inbounds float, float* %tmp23758, i64 1
+ %tmp23760 = getelementptr inbounds float, float* %tmp23759, i64 1
+ %tmp23761 = getelementptr inbounds float, float* %tmp23760, i64 1
+ %tmp23762 = getelementptr inbounds float, float* %tmp23761, i64 1
+ %tmp23763 = getelementptr inbounds float, float* %tmp23762, i64 1
+ %tmp23764 = getelementptr inbounds float, float* %tmp23763, i64 1
+ %tmp23765 = getelementptr inbounds float, float* %tmp23764, i64 1
+ %tmp23766 = getelementptr inbounds float, float* %tmp23765, i64 1
+ %tmp23767 = getelementptr inbounds float, float* %tmp23766, i64 1
+ %tmp23768 = getelementptr inbounds float, float* %tmp23767, i64 1
+ %tmp23769 = getelementptr inbounds float, float* %tmp23768, i64 1
+ %tmp23770 = getelementptr inbounds float, float* %tmp23769, i64 1
+ %tmp23771 = getelementptr inbounds float, float* %tmp23770, i64 1
+ %tmp23772 = getelementptr inbounds float, float* %tmp23771, i64 1
+ %tmp23773 = getelementptr inbounds float, float* %tmp23772, i64 1
+ %tmp23774 = getelementptr inbounds float, float* %tmp23773, i64 1
+ %tmp23775 = getelementptr inbounds float, float* %tmp23774, i64 1
+ %tmp23776 = getelementptr inbounds float, float* %tmp23775, i64 1
+ %tmp23777 = getelementptr inbounds float, float* %tmp23776, i64 1
+ %tmp23778 = getelementptr inbounds float, float* %tmp23777, i64 1
+ %tmp23779 = getelementptr inbounds float, float* %tmp23778, i64 1
+ %tmp23780 = getelementptr inbounds float, float* %tmp23779, i64 1
+ %tmp23781 = getelementptr inbounds float, float* %tmp23780, i64 1
+ %tmp23782 = getelementptr inbounds float, float* %tmp23781, i64 1
+ %tmp23783 = getelementptr inbounds float, float* %tmp23782, i64 1
+ %tmp23784 = getelementptr inbounds float, float* %tmp23783, i64 1
+ %tmp23785 = getelementptr inbounds float, float* %tmp23784, i64 1
+ %tmp23786 = getelementptr inbounds float, float* %tmp23785, i64 1
+ %tmp23787 = getelementptr inbounds float, float* %tmp23786, i64 1
+ %tmp23788 = getelementptr inbounds float, float* %tmp23787, i64 1
+ %tmp23789 = getelementptr inbounds float, float* %tmp23788, i64 1
+ %tmp23790 = getelementptr inbounds float, float* %tmp23789, i64 1
+ %tmp23791 = getelementptr inbounds float, float* %tmp23790, i64 1
+ %tmp23792 = getelementptr inbounds float, float* %tmp23791, i64 1
+ %tmp23793 = getelementptr inbounds float, float* %tmp23792, i64 1
+ %tmp23794 = getelementptr inbounds float, float* %tmp23793, i64 1
+ %tmp23795 = getelementptr inbounds float, float* %tmp23794, i64 1
+ %tmp23796 = getelementptr inbounds float, float* %tmp23795, i64 1
+ %tmp23797 = getelementptr inbounds float, float* %tmp23796, i64 1
+ %tmp23798 = getelementptr inbounds float, float* %tmp23797, i64 1
+ %tmp23799 = getelementptr inbounds float, float* %tmp23798, i64 1
+ %tmp23800 = getelementptr inbounds float, float* %tmp23799, i64 1
+ %tmp23801 = getelementptr inbounds float, float* %tmp23800, i64 1
+ %tmp23802 = getelementptr inbounds float, float* %tmp23801, i64 1
+ %tmp23803 = getelementptr inbounds float, float* %tmp23802, i64 1
+ %tmp23804 = getelementptr inbounds float, float* %tmp23803, i64 1
+ %tmp23805 = getelementptr inbounds float, float* %tmp23804, i64 1
+ %tmp23806 = getelementptr inbounds float, float* %tmp23805, i64 1
+ %tmp23807 = getelementptr inbounds float, float* %tmp23806, i64 1
+ %tmp23808 = getelementptr inbounds float, float* %tmp23807, i64 1
+ %tmp23809 = getelementptr inbounds float, float* %tmp23808, i64 1
+ %tmp23810 = getelementptr inbounds float, float* %tmp23809, i64 1
+ %tmp23811 = getelementptr inbounds float, float* %tmp23810, i64 1
+ %tmp23812 = getelementptr inbounds float, float* %tmp23811, i64 1
+ %tmp23813 = getelementptr inbounds float, float* %tmp23812, i64 1
+ %tmp23814 = getelementptr inbounds float, float* %tmp23813, i64 1
+ %tmp23815 = getelementptr inbounds float, float* %tmp23814, i64 1
+ %tmp23816 = getelementptr inbounds float, float* %tmp23815, i64 1
+ %tmp23817 = getelementptr inbounds float, float* %tmp23816, i64 1
+ %tmp23818 = getelementptr inbounds float, float* %tmp23817, i64 1
+ %tmp23819 = getelementptr inbounds float, float* %tmp23818, i64 1
+ %tmp23820 = getelementptr inbounds float, float* %tmp23819, i64 1
+ %tmp23821 = getelementptr inbounds float, float* %tmp23820, i64 1
+ %tmp23822 = getelementptr inbounds float, float* %tmp23821, i64 1
+ %tmp23823 = getelementptr inbounds float, float* %tmp23822, i64 1
+ %tmp23824 = getelementptr inbounds float, float* %tmp23823, i64 1
+ %tmp23825 = getelementptr inbounds float, float* %tmp23824, i64 1
+ %tmp23826 = getelementptr inbounds float, float* %tmp23825, i64 1
+ %tmp23827 = getelementptr inbounds float, float* %tmp23826, i64 1
+ %tmp23828 = getelementptr inbounds float, float* %tmp23827, i64 1
+ %tmp23829 = getelementptr inbounds float, float* %tmp23828, i64 1
+ %tmp23830 = getelementptr inbounds float, float* %tmp23829, i64 1
+ %tmp23831 = getelementptr inbounds float, float* %tmp23830, i64 1
+ %tmp23832 = getelementptr inbounds float, float* %tmp23831, i64 1
+ %tmp23833 = getelementptr inbounds float, float* %tmp23832, i64 1
+ %tmp23834 = getelementptr inbounds float, float* %tmp23833, i64 1
+ %tmp23835 = getelementptr inbounds float, float* %tmp23834, i64 1
+ %tmp23836 = getelementptr inbounds float, float* %tmp23835, i64 1
+ %tmp23837 = getelementptr inbounds float, float* %tmp23836, i64 1
+ %tmp23838 = getelementptr inbounds float, float* %tmp23837, i64 1
+ %tmp23839 = getelementptr inbounds float, float* %tmp23838, i64 1
+ %tmp23840 = getelementptr inbounds float, float* %tmp23839, i64 1
+ %tmp23841 = getelementptr inbounds float, float* %tmp23840, i64 1
+ %tmp23842 = getelementptr inbounds float, float* %tmp23841, i64 1
+ %tmp23843 = getelementptr inbounds float, float* %tmp23842, i64 1
+ %tmp23844 = getelementptr inbounds float, float* %tmp23843, i64 1
+ %tmp23845 = getelementptr inbounds float, float* %tmp23844, i64 1
+ %tmp23846 = getelementptr inbounds float, float* %tmp23845, i64 1
+ %tmp23847 = getelementptr inbounds float, float* %tmp23846, i64 1
+ %tmp23848 = getelementptr inbounds float, float* %tmp23847, i64 1
+ %tmp23849 = getelementptr inbounds float, float* %tmp23848, i64 1
+ %tmp23850 = getelementptr inbounds float, float* %tmp23849, i64 1
+ %tmp23851 = getelementptr inbounds float, float* %tmp23850, i64 1
+ %tmp23852 = getelementptr inbounds float, float* %tmp23851, i64 1
+ %tmp23853 = getelementptr inbounds float, float* %tmp23852, i64 1
+ %tmp23854 = getelementptr inbounds float, float* %tmp23853, i64 1
+ %tmp23855 = getelementptr inbounds float, float* %tmp23854, i64 1
+ %tmp23856 = getelementptr inbounds float, float* %tmp23855, i64 1
+ %tmp23857 = getelementptr inbounds float, float* %tmp23856, i64 1
+ %tmp23858 = getelementptr inbounds float, float* %tmp23857, i64 1
+ %tmp23859 = getelementptr inbounds float, float* %tmp23858, i64 1
+ %tmp23860 = getelementptr inbounds float, float* %tmp23859, i64 1
+ %tmp23861 = getelementptr inbounds float, float* %tmp23860, i64 1
+ %tmp23862 = getelementptr inbounds float, float* %tmp23861, i64 1
+ %tmp23863 = getelementptr inbounds float, float* %tmp23862, i64 1
+ %tmp23864 = getelementptr inbounds float, float* %tmp23863, i64 1
+ %tmp23865 = getelementptr inbounds float, float* %tmp23864, i64 1
+ %tmp23866 = getelementptr inbounds float, float* %tmp23865, i64 1
+ %tmp23867 = getelementptr inbounds float, float* %tmp23866, i64 1
+ %tmp23868 = getelementptr inbounds float, float* %tmp23867, i64 1
+ %tmp23869 = getelementptr inbounds float, float* %tmp23868, i64 1
+ %tmp23870 = getelementptr inbounds float, float* %tmp23869, i64 1
+ %tmp23871 = getelementptr inbounds float, float* %tmp23870, i64 1
+ %tmp23872 = getelementptr inbounds float, float* %tmp23871, i64 1
+ %tmp23873 = getelementptr inbounds float, float* %tmp23872, i64 1
+ %tmp23874 = getelementptr inbounds float, float* %tmp23873, i64 1
+ %tmp23875 = getelementptr inbounds float, float* %tmp23874, i64 1
+ %tmp23876 = getelementptr inbounds float, float* %tmp23875, i64 1
+ %tmp23877 = getelementptr inbounds float, float* %tmp23876, i64 1
+ %tmp23878 = getelementptr inbounds float, float* %tmp23877, i64 1
+ %tmp23879 = getelementptr inbounds float, float* %tmp23878, i64 1
+ %tmp23880 = getelementptr inbounds float, float* %tmp23879, i64 1
+ %tmp23881 = getelementptr inbounds float, float* %tmp23880, i64 1
+ %tmp23882 = getelementptr inbounds float, float* %tmp23881, i64 1
+ %tmp23883 = getelementptr inbounds float, float* %tmp23882, i64 1
+ %tmp23884 = getelementptr inbounds float, float* %tmp23883, i64 1
+ %tmp23885 = getelementptr inbounds float, float* %tmp23884, i64 1
+ %tmp23886 = getelementptr inbounds float, float* %tmp23885, i64 1
+ %tmp23887 = getelementptr inbounds float, float* %tmp23886, i64 1
+ %tmp23888 = getelementptr inbounds float, float* %tmp23887, i64 1
+ %tmp23889 = getelementptr inbounds float, float* %tmp23888, i64 1
+ %tmp23890 = getelementptr inbounds float, float* %tmp23889, i64 1
+ %tmp23891 = getelementptr inbounds float, float* %tmp23890, i64 1
+ %tmp23892 = getelementptr inbounds float, float* %tmp23891, i64 1
+ %tmp23893 = getelementptr inbounds float, float* %tmp23892, i64 1
+ %tmp23894 = getelementptr inbounds float, float* %tmp23893, i64 1
+ %tmp23895 = getelementptr inbounds float, float* %tmp23894, i64 1
+ %tmp23896 = getelementptr inbounds float, float* %tmp23895, i64 1
+ %tmp23897 = getelementptr inbounds float, float* %tmp23896, i64 1
+ %tmp23898 = getelementptr inbounds float, float* %tmp23897, i64 1
+ %tmp23899 = getelementptr inbounds float, float* %tmp23898, i64 1
+ %tmp23900 = getelementptr inbounds float, float* %tmp23899, i64 1
+ %tmp23901 = getelementptr inbounds float, float* %tmp23900, i64 1
+ %tmp23902 = getelementptr inbounds float, float* %tmp23901, i64 1
+ %tmp23903 = getelementptr inbounds float, float* %tmp23902, i64 1
+ %tmp23904 = getelementptr inbounds float, float* %tmp23903, i64 1
+ %tmp23905 = getelementptr inbounds float, float* %tmp23904, i64 1
+ %tmp23906 = getelementptr inbounds float, float* %tmp23905, i64 1
+ %tmp23907 = getelementptr inbounds float, float* %tmp23906, i64 1
+ %tmp23908 = getelementptr inbounds float, float* %tmp23907, i64 1
+ %tmp23909 = getelementptr inbounds float, float* %tmp23908, i64 1
+ %tmp23910 = getelementptr inbounds float, float* %tmp23909, i64 1
+ %tmp23911 = getelementptr inbounds float, float* %tmp23910, i64 1
+ %tmp23912 = getelementptr inbounds float, float* %tmp23911, i64 1
+ %tmp23913 = getelementptr inbounds float, float* %tmp23912, i64 1
+ %tmp23914 = getelementptr inbounds float, float* %tmp23913, i64 1
+ %tmp23915 = getelementptr inbounds float, float* %tmp23914, i64 1
+ %tmp23916 = getelementptr inbounds float, float* %tmp23915, i64 1
+ %tmp23917 = getelementptr inbounds float, float* %tmp23916, i64 1
+ %tmp23918 = getelementptr inbounds float, float* %tmp23917, i64 1
+ %tmp23919 = getelementptr inbounds float, float* %tmp23918, i64 1
+ %tmp23920 = getelementptr inbounds float, float* %tmp23919, i64 1
+ %tmp23921 = getelementptr inbounds float, float* %tmp23920, i64 1
+ %tmp23922 = getelementptr inbounds float, float* %tmp23921, i64 1
+ %tmp23923 = getelementptr inbounds float, float* %tmp23922, i64 1
+ %tmp23924 = getelementptr inbounds float, float* %tmp23923, i64 1
+ %tmp23925 = getelementptr inbounds float, float* %tmp23924, i64 1
+ %tmp23926 = getelementptr inbounds float, float* %tmp23925, i64 1
+ %tmp23927 = getelementptr inbounds float, float* %tmp23926, i64 1
+ %tmp23928 = getelementptr inbounds float, float* %tmp23927, i64 1
+ %tmp23929 = getelementptr inbounds float, float* %tmp23928, i64 1
+ %tmp23930 = getelementptr inbounds float, float* %tmp23929, i64 1
+ %tmp23931 = getelementptr inbounds float, float* %tmp23930, i64 1
+ %tmp23932 = getelementptr inbounds float, float* %tmp23931, i64 1
+ %tmp23933 = getelementptr inbounds float, float* %tmp23932, i64 1
+ %tmp23934 = getelementptr inbounds float, float* %tmp23933, i64 1
+ %tmp23935 = getelementptr inbounds float, float* %tmp23934, i64 1
+ %tmp23936 = getelementptr inbounds float, float* %tmp23935, i64 1
+ %tmp23937 = getelementptr inbounds float, float* %tmp23936, i64 1
+ %tmp23938 = getelementptr inbounds float, float* %tmp23937, i64 1
+ %tmp23939 = getelementptr inbounds float, float* %tmp23938, i64 1
+ %tmp23940 = getelementptr inbounds float, float* %tmp23939, i64 1
+ %tmp23941 = getelementptr inbounds float, float* %tmp23940, i64 1
+ %tmp23942 = getelementptr inbounds float, float* %tmp23941, i64 1
+ %tmp23943 = getelementptr inbounds float, float* %tmp23942, i64 1
+ %tmp23944 = getelementptr inbounds float, float* %tmp23943, i64 1
+ %tmp23945 = getelementptr inbounds float, float* %tmp23944, i64 1
+ %tmp23946 = getelementptr inbounds float, float* %tmp23945, i64 1
+ %tmp23947 = getelementptr inbounds float, float* %tmp23946, i64 1
+ %tmp23948 = getelementptr inbounds float, float* %tmp23947, i64 1
+ %tmp23949 = getelementptr inbounds float, float* %tmp23948, i64 1
+ %tmp23950 = getelementptr inbounds float, float* %tmp23949, i64 1
+ %tmp23951 = getelementptr inbounds float, float* %tmp23950, i64 1
+ %tmp23952 = getelementptr inbounds float, float* %tmp23951, i64 1
+ %tmp23953 = getelementptr inbounds float, float* %tmp23952, i64 1
+ %tmp23954 = getelementptr inbounds float, float* %tmp23953, i64 1
+ %tmp23955 = getelementptr inbounds float, float* %tmp23954, i64 1
+ %tmp23956 = getelementptr inbounds float, float* %tmp23955, i64 1
+ %tmp23957 = getelementptr inbounds float, float* %tmp23956, i64 1
+ %tmp23958 = getelementptr inbounds float, float* %tmp23957, i64 1
+ %tmp23959 = getelementptr inbounds float, float* %tmp23958, i64 1
+ %tmp23960 = getelementptr inbounds float, float* %tmp23959, i64 1
+ %tmp23961 = getelementptr inbounds float, float* %tmp23960, i64 1
+ %tmp23962 = getelementptr inbounds float, float* %tmp23961, i64 1
+ %tmp23963 = getelementptr inbounds float, float* %tmp23962, i64 1
+ %tmp23964 = getelementptr inbounds float, float* %tmp23963, i64 1
+ %tmp23965 = getelementptr inbounds float, float* %tmp23964, i64 1
+ %tmp23966 = getelementptr inbounds float, float* %tmp23965, i64 1
+ %tmp23967 = getelementptr inbounds float, float* %tmp23966, i64 1
+ %tmp23968 = getelementptr inbounds float, float* %tmp23967, i64 1
+ %tmp23969 = getelementptr inbounds float, float* %tmp23968, i64 1
+ %tmp23970 = getelementptr inbounds float, float* %tmp23969, i64 1
+ %tmp23971 = getelementptr inbounds float, float* %tmp23970, i64 1
+ %tmp23972 = getelementptr inbounds float, float* %tmp23971, i64 1
+ %tmp23973 = getelementptr inbounds float, float* %tmp23972, i64 1
+ %tmp23974 = getelementptr inbounds float, float* %tmp23973, i64 1
+ %tmp23975 = getelementptr inbounds float, float* %tmp23974, i64 1
+ %tmp23976 = getelementptr inbounds float, float* %tmp23975, i64 1
+ %tmp23977 = getelementptr inbounds float, float* %tmp23976, i64 1
+ %tmp23978 = getelementptr inbounds float, float* %tmp23977, i64 1
+ %tmp23979 = getelementptr inbounds float, float* %tmp23978, i64 1
+ %tmp23980 = getelementptr inbounds float, float* %tmp23979, i64 1
+ %tmp23981 = getelementptr inbounds float, float* %tmp23980, i64 1
+ %tmp23982 = getelementptr inbounds float, float* %tmp23981, i64 1
+ %tmp23983 = getelementptr inbounds float, float* %tmp23982, i64 1
+ %tmp23984 = getelementptr inbounds float, float* %tmp23983, i64 1
+ %tmp23985 = getelementptr inbounds float, float* %tmp23984, i64 1
+ %tmp23986 = getelementptr inbounds float, float* %tmp23985, i64 1
+ %tmp23987 = getelementptr inbounds float, float* %tmp23986, i64 1
+ %tmp23988 = getelementptr inbounds float, float* %tmp23987, i64 1
+ %tmp23989 = getelementptr inbounds float, float* %tmp23988, i64 1
+ %tmp23990 = getelementptr inbounds float, float* %tmp23989, i64 1
+ %tmp23991 = getelementptr inbounds float, float* %tmp23990, i64 1
+ %tmp23992 = getelementptr inbounds float, float* %tmp23991, i64 1
+ %tmp23993 = getelementptr inbounds float, float* %tmp23992, i64 1
+ %tmp23994 = getelementptr inbounds float, float* %tmp23993, i64 1
+ %tmp23995 = getelementptr inbounds float, float* %tmp23994, i64 1
+ %tmp23996 = getelementptr inbounds float, float* %tmp23995, i64 1
+ %tmp23997 = getelementptr inbounds float, float* %tmp23996, i64 1
+ %tmp23998 = getelementptr inbounds float, float* %tmp23997, i64 1
+ %tmp23999 = getelementptr inbounds float, float* %tmp23998, i64 1
+ %tmp24000 = getelementptr inbounds float, float* %tmp23999, i64 1
+ %tmp24001 = getelementptr inbounds float, float* %tmp24000, i64 1
+ %tmp24002 = getelementptr inbounds float, float* %tmp24001, i64 1
+ %tmp24003 = getelementptr inbounds float, float* %tmp24002, i64 1
+ %tmp24004 = getelementptr inbounds float, float* %tmp24003, i64 1
+ %tmp24005 = getelementptr inbounds float, float* %tmp24004, i64 1
+ %tmp24006 = getelementptr inbounds float, float* %tmp24005, i64 1
+ %tmp24007 = getelementptr inbounds float, float* %tmp24006, i64 1
+ %tmp24008 = getelementptr inbounds float, float* %tmp24007, i64 1
+ %tmp24009 = getelementptr inbounds float, float* %tmp24008, i64 1
+ %tmp24010 = getelementptr inbounds float, float* %tmp24009, i64 1
+ %tmp24011 = getelementptr inbounds float, float* %tmp24010, i64 1
+ %tmp24012 = getelementptr inbounds float, float* %tmp24011, i64 1
+ %tmp24013 = getelementptr inbounds float, float* %tmp24012, i64 1
+ %tmp24014 = getelementptr inbounds float, float* %tmp24013, i64 1
+ %tmp24015 = getelementptr inbounds float, float* %tmp24014, i64 1
+ %tmp24016 = getelementptr inbounds float, float* %tmp24015, i64 1
+ %tmp24017 = getelementptr inbounds float, float* %tmp24016, i64 1
+ %tmp24018 = getelementptr inbounds float, float* %tmp24017, i64 1
+ %tmp24019 = getelementptr inbounds float, float* %tmp24018, i64 1
+ %tmp24020 = getelementptr inbounds float, float* %tmp24019, i64 1
+ %tmp24021 = getelementptr inbounds float, float* %tmp24020, i64 1
+ %tmp24022 = getelementptr inbounds float, float* %tmp24021, i64 1
+ %tmp24023 = getelementptr inbounds float, float* %tmp24022, i64 1
+ %tmp24024 = getelementptr inbounds float, float* %tmp24023, i64 1
+ %tmp24025 = getelementptr inbounds float, float* %tmp24024, i64 1
+ %tmp24026 = getelementptr inbounds float, float* %tmp24025, i64 1
+ %tmp24027 = getelementptr inbounds float, float* %tmp24026, i64 1
+ %tmp24028 = getelementptr inbounds float, float* %tmp24027, i64 1
+ %tmp24029 = getelementptr inbounds float, float* %tmp24028, i64 1
+ %tmp24030 = getelementptr inbounds float, float* %tmp24029, i64 1
+ %tmp24031 = getelementptr inbounds float, float* %tmp24030, i64 1
+ %tmp24032 = getelementptr inbounds float, float* %tmp24031, i64 1
+ %tmp24033 = getelementptr inbounds float, float* %tmp24032, i64 1
+ %tmp24034 = getelementptr inbounds float, float* %tmp24033, i64 1
+ %tmp24035 = getelementptr inbounds float, float* %tmp24034, i64 1
+ %tmp24036 = getelementptr inbounds float, float* %tmp24035, i64 1
+ %tmp24037 = getelementptr inbounds float, float* %tmp24036, i64 1
+ %tmp24038 = getelementptr inbounds float, float* %tmp24037, i64 1
+ %tmp24039 = getelementptr inbounds float, float* %tmp24038, i64 1
+ %tmp24040 = getelementptr inbounds float, float* %tmp24039, i64 1
+ %tmp24041 = getelementptr inbounds float, float* %tmp24040, i64 1
+ %tmp24042 = getelementptr inbounds float, float* %tmp24041, i64 1
+ %tmp24043 = getelementptr inbounds float, float* %tmp24042, i64 1
+ %tmp24044 = getelementptr inbounds float, float* %tmp24043, i64 1
+ %tmp24045 = getelementptr inbounds float, float* %tmp24044, i64 1
+ %tmp24046 = getelementptr inbounds float, float* %tmp24045, i64 1
+ %tmp24047 = getelementptr inbounds float, float* %tmp24046, i64 1
+ %tmp24048 = getelementptr inbounds float, float* %tmp24047, i64 1
+ %tmp24049 = getelementptr inbounds float, float* %tmp24048, i64 1
+ %tmp24050 = getelementptr inbounds float, float* %tmp24049, i64 1
+ %tmp24051 = getelementptr inbounds float, float* %tmp24050, i64 1
+ %tmp24052 = getelementptr inbounds float, float* %tmp24051, i64 1
+ %tmp24053 = getelementptr inbounds float, float* %tmp24052, i64 1
+ %tmp24054 = getelementptr inbounds float, float* %tmp24053, i64 1
+ %tmp24055 = getelementptr inbounds float, float* %tmp24054, i64 1
+ %tmp24056 = getelementptr inbounds float, float* %tmp24055, i64 1
+ %tmp24057 = getelementptr inbounds float, float* %tmp24056, i64 1
+ %tmp24058 = getelementptr inbounds float, float* %tmp24057, i64 1
+ %tmp24059 = getelementptr inbounds float, float* %tmp24058, i64 1
+ %tmp24060 = getelementptr inbounds float, float* %tmp24059, i64 1
+ %tmp24061 = getelementptr inbounds float, float* %tmp24060, i64 1
+ %tmp24062 = getelementptr inbounds float, float* %tmp24061, i64 1
+ %tmp24063 = getelementptr inbounds float, float* %tmp24062, i64 1
+ %tmp24064 = getelementptr inbounds float, float* %tmp24063, i64 1
+ %tmp24065 = getelementptr inbounds float, float* %tmp24064, i64 1
+ %tmp24066 = getelementptr inbounds float, float* %tmp24065, i64 1
+ %tmp24067 = getelementptr inbounds float, float* %tmp24066, i64 1
+ %tmp24068 = getelementptr inbounds float, float* %tmp24067, i64 1
+ %tmp24069 = getelementptr inbounds float, float* %tmp24068, i64 1
+ %tmp24070 = getelementptr inbounds float, float* %tmp24069, i64 1
+ %tmp24071 = getelementptr inbounds float, float* %tmp24070, i64 1
+ %tmp24072 = getelementptr inbounds float, float* %tmp24071, i64 1
+ %tmp24073 = getelementptr inbounds float, float* %tmp24072, i64 1
+ %tmp24074 = getelementptr inbounds float, float* %tmp24073, i64 1
+ %tmp24075 = getelementptr inbounds float, float* %tmp24074, i64 1
+ %tmp24076 = getelementptr inbounds float, float* %tmp24075, i64 1
+ %tmp24077 = getelementptr inbounds float, float* %tmp24076, i64 1
+ %tmp24078 = getelementptr inbounds float, float* %tmp24077, i64 1
+ %tmp24079 = getelementptr inbounds float, float* %tmp24078, i64 1
+ %tmp24080 = getelementptr inbounds float, float* %tmp24079, i64 1
+ %tmp24081 = getelementptr inbounds float, float* %tmp24080, i64 1
+ %tmp24082 = getelementptr inbounds float, float* %tmp24081, i64 1
+ %tmp24083 = getelementptr inbounds float, float* %tmp24082, i64 1
+ %tmp24084 = getelementptr inbounds float, float* %tmp24083, i64 1
+ %tmp24085 = getelementptr inbounds float, float* %tmp24084, i64 1
+ %tmp24086 = getelementptr inbounds float, float* %tmp24085, i64 1
+ %tmp24087 = getelementptr inbounds float, float* %tmp24086, i64 1
+ %tmp24088 = getelementptr inbounds float, float* %tmp24087, i64 1
+ %tmp24089 = getelementptr inbounds float, float* %tmp24088, i64 1
+ %tmp24090 = getelementptr inbounds float, float* %tmp24089, i64 1
+ %tmp24091 = getelementptr inbounds float, float* %tmp24090, i64 1
+ %tmp24092 = getelementptr inbounds float, float* %tmp24091, i64 1
+ %tmp24093 = getelementptr inbounds float, float* %tmp24092, i64 1
+ %tmp24094 = getelementptr inbounds float, float* %tmp24093, i64 1
+ %tmp24095 = getelementptr inbounds float, float* %tmp24094, i64 1
+ %tmp24096 = getelementptr inbounds float, float* %tmp24095, i64 1
+ %tmp24097 = getelementptr inbounds float, float* %tmp24096, i64 1
+ %tmp24098 = getelementptr inbounds float, float* %tmp24097, i64 1
+ %tmp24099 = getelementptr inbounds float, float* %tmp24098, i64 1
+ %tmp24100 = getelementptr inbounds float, float* %tmp24099, i64 1
+ %tmp24101 = getelementptr inbounds float, float* %tmp24100, i64 1
+ %tmp24102 = getelementptr inbounds float, float* %tmp24101, i64 1
+ %tmp24103 = getelementptr inbounds float, float* %tmp24102, i64 1
+ %tmp24104 = getelementptr inbounds float, float* %tmp24103, i64 1
+ %tmp24105 = getelementptr inbounds float, float* %tmp24104, i64 1
+ %tmp24106 = getelementptr inbounds float, float* %tmp24105, i64 1
+ %tmp24107 = getelementptr inbounds float, float* %tmp24106, i64 1
+ %tmp24108 = getelementptr inbounds float, float* %tmp24107, i64 1
+ %tmp24109 = getelementptr inbounds float, float* %tmp24108, i64 1
+ %tmp24110 = getelementptr inbounds float, float* %tmp24109, i64 1
+ %tmp24111 = getelementptr inbounds float, float* %tmp24110, i64 1
+ %tmp24112 = getelementptr inbounds float, float* %tmp24111, i64 1
+ %tmp24113 = getelementptr inbounds float, float* %tmp24112, i64 1
+ %tmp24114 = getelementptr inbounds float, float* %tmp24113, i64 1
+ %tmp24115 = getelementptr inbounds float, float* %tmp24114, i64 1
+ %tmp24116 = getelementptr inbounds float, float* %tmp24115, i64 1
+ %tmp24117 = getelementptr inbounds float, float* %tmp24116, i64 1
+ %tmp24118 = getelementptr inbounds float, float* %tmp24117, i64 1
+ %tmp24119 = getelementptr inbounds float, float* %tmp24118, i64 1
+ %tmp24120 = getelementptr inbounds float, float* %tmp24119, i64 1
+ %tmp24121 = getelementptr inbounds float, float* %tmp24120, i64 1
+ %tmp24122 = getelementptr inbounds float, float* %tmp24121, i64 1
+ %tmp24123 = getelementptr inbounds float, float* %tmp24122, i64 1
+ %tmp24124 = getelementptr inbounds float, float* %tmp24123, i64 1
+ %tmp24125 = getelementptr inbounds float, float* %tmp24124, i64 1
+ %tmp24126 = getelementptr inbounds float, float* %tmp24125, i64 1
+ %tmp24127 = getelementptr inbounds float, float* %tmp24126, i64 1
+ %tmp24128 = getelementptr inbounds float, float* %tmp24127, i64 1
+ %tmp24129 = getelementptr inbounds float, float* %tmp24128, i64 1
+ %tmp24130 = getelementptr inbounds float, float* %tmp24129, i64 1
+ %tmp24131 = getelementptr inbounds float, float* %tmp24130, i64 1
+ %tmp24132 = getelementptr inbounds float, float* %tmp24131, i64 1
+ %tmp24133 = getelementptr inbounds float, float* %tmp24132, i64 1
+ %tmp24134 = getelementptr inbounds float, float* %tmp24133, i64 1
+ %tmp24135 = getelementptr inbounds float, float* %tmp24134, i64 1
+ %tmp24136 = getelementptr inbounds float, float* %tmp24135, i64 1
+ %tmp24137 = getelementptr inbounds float, float* %tmp24136, i64 1
+ %tmp24138 = getelementptr inbounds float, float* %tmp24137, i64 1
+ %tmp24139 = getelementptr inbounds float, float* %tmp24138, i64 1
+ %tmp24140 = getelementptr inbounds float, float* %tmp24139, i64 1
+ %tmp24141 = getelementptr inbounds float, float* %tmp24140, i64 1
+ %tmp24142 = getelementptr inbounds float, float* %tmp24141, i64 1
+ %tmp24143 = getelementptr inbounds float, float* %tmp24142, i64 1
+ %tmp24144 = getelementptr inbounds float, float* %tmp24143, i64 1
+ %tmp24145 = getelementptr inbounds float, float* %tmp24144, i64 1
+ %tmp24146 = getelementptr inbounds float, float* %tmp24145, i64 1
+ %tmp24147 = getelementptr inbounds float, float* %tmp24146, i64 1
+ %tmp24148 = getelementptr inbounds float, float* %tmp24147, i64 1
+ %tmp24149 = getelementptr inbounds float, float* %tmp24148, i64 1
+ %tmp24150 = getelementptr inbounds float, float* %tmp24149, i64 1
+ %tmp24151 = getelementptr inbounds float, float* %tmp24150, i64 1
+ %tmp24152 = getelementptr inbounds float, float* %tmp24151, i64 1
+ %tmp24153 = getelementptr inbounds float, float* %tmp24152, i64 1
+ %tmp24154 = getelementptr inbounds float, float* %tmp24153, i64 1
+ %tmp24155 = getelementptr inbounds float, float* %tmp24154, i64 1
+ %tmp24156 = getelementptr inbounds float, float* %tmp24155, i64 1
+ %tmp24157 = getelementptr inbounds float, float* %tmp24156, i64 1
+ %tmp24158 = getelementptr inbounds float, float* %tmp24157, i64 1
+ %tmp24159 = getelementptr inbounds float, float* %tmp24158, i64 1
+ %tmp24160 = getelementptr inbounds float, float* %tmp24159, i64 1
+ %tmp24161 = getelementptr inbounds float, float* %tmp24160, i64 1
+ %tmp24162 = getelementptr inbounds float, float* %tmp24161, i64 1
+ %tmp24163 = getelementptr inbounds float, float* %tmp24162, i64 1
+ %tmp24164 = getelementptr inbounds float, float* %tmp24163, i64 1
+ %tmp24165 = getelementptr inbounds float, float* %tmp24164, i64 1
+ %tmp24166 = getelementptr inbounds float, float* %tmp24165, i64 1
+ %tmp24167 = getelementptr inbounds float, float* %tmp24166, i64 1
+ %tmp24168 = getelementptr inbounds float, float* %tmp24167, i64 1
+ %tmp24169 = getelementptr inbounds float, float* %tmp24168, i64 1
+ %tmp24170 = getelementptr inbounds float, float* %tmp24169, i64 1
+ %tmp24171 = getelementptr inbounds float, float* %tmp24170, i64 1
+ %tmp24172 = getelementptr inbounds float, float* %tmp24171, i64 1
+ %tmp24173 = getelementptr inbounds float, float* %tmp24172, i64 1
+ %tmp24174 = getelementptr inbounds float, float* %tmp24173, i64 1
+ %tmp24175 = getelementptr inbounds float, float* %tmp24174, i64 1
+ %tmp24176 = getelementptr inbounds float, float* %tmp24175, i64 1
+ %tmp24177 = getelementptr inbounds float, float* %tmp24176, i64 1
+ %tmp24178 = getelementptr inbounds float, float* %tmp24177, i64 1
+ %tmp24179 = getelementptr inbounds float, float* %tmp24178, i64 1
+ %tmp24180 = getelementptr inbounds float, float* %tmp24179, i64 1
+ %tmp24181 = getelementptr inbounds float, float* %tmp24180, i64 1
+ %tmp24182 = getelementptr inbounds float, float* %tmp24181, i64 1
+ %tmp24183 = getelementptr inbounds float, float* %tmp24182, i64 1
+ %tmp24184 = getelementptr inbounds float, float* %tmp24183, i64 1
+ %tmp24185 = getelementptr inbounds float, float* %tmp24184, i64 1
+ %tmp24186 = getelementptr inbounds float, float* %tmp24185, i64 1
+ %tmp24187 = getelementptr inbounds float, float* %tmp24186, i64 1
+ %tmp24188 = getelementptr inbounds float, float* %tmp24187, i64 1
+ %tmp24189 = getelementptr inbounds float, float* %tmp24188, i64 1
+ %tmp24190 = getelementptr inbounds float, float* %tmp24189, i64 1
+ %tmp24191 = getelementptr inbounds float, float* %tmp24190, i64 1
+ %tmp24192 = getelementptr inbounds float, float* %tmp24191, i64 1
+ %tmp24193 = getelementptr inbounds float, float* %tmp24192, i64 1
+ %tmp24194 = getelementptr inbounds float, float* %tmp24193, i64 1
+ %tmp24195 = getelementptr inbounds float, float* %tmp24194, i64 1
+ %tmp24196 = getelementptr inbounds float, float* %tmp24195, i64 1
+ %tmp24197 = getelementptr inbounds float, float* %tmp24196, i64 1
+ %tmp24198 = getelementptr inbounds float, float* %tmp24197, i64 1
+ %tmp24199 = getelementptr inbounds float, float* %tmp24198, i64 1
+ %tmp24200 = getelementptr inbounds float, float* %tmp24199, i64 1
+ %tmp24201 = getelementptr inbounds float, float* %tmp24200, i64 1
+ %tmp24202 = getelementptr inbounds float, float* %tmp24201, i64 1
+ %tmp24203 = getelementptr inbounds float, float* %tmp24202, i64 1
+ %tmp24204 = getelementptr inbounds float, float* %tmp24203, i64 1
+ %tmp24205 = getelementptr inbounds float, float* %tmp24204, i64 1
+ %tmp24206 = getelementptr inbounds float, float* %tmp24205, i64 1
+ %tmp24207 = getelementptr inbounds float, float* %tmp24206, i64 1
+ %tmp24208 = getelementptr inbounds float, float* %tmp24207, i64 1
+ %tmp24209 = getelementptr inbounds float, float* %tmp24208, i64 1
+ %tmp24210 = getelementptr inbounds float, float* %tmp24209, i64 1
+ %tmp24211 = getelementptr inbounds float, float* %tmp24210, i64 1
+ %tmp24212 = getelementptr inbounds float, float* %tmp24211, i64 1
+ %tmp24213 = getelementptr inbounds float, float* %tmp24212, i64 1
+ %tmp24214 = getelementptr inbounds float, float* %tmp24213, i64 1
+ %tmp24215 = getelementptr inbounds float, float* %tmp24214, i64 1
+ %tmp24216 = getelementptr inbounds float, float* %tmp24215, i64 1
+ %tmp24217 = getelementptr inbounds float, float* %tmp24216, i64 1
+ %tmp24218 = getelementptr inbounds float, float* %tmp24217, i64 1
+ %tmp24219 = getelementptr inbounds float, float* %tmp24218, i64 1
+ %tmp24220 = getelementptr inbounds float, float* %tmp24219, i64 1
+ %tmp24221 = getelementptr inbounds float, float* %tmp24220, i64 1
+ %tmp24222 = getelementptr inbounds float, float* %tmp24221, i64 1
+ %tmp24223 = getelementptr inbounds float, float* %tmp24222, i64 1
+ %tmp24224 = getelementptr inbounds float, float* %tmp24223, i64 1
+ %tmp24225 = getelementptr inbounds float, float* %tmp24224, i64 1
+ %tmp24226 = getelementptr inbounds float, float* %tmp24225, i64 1
+ %tmp24227 = getelementptr inbounds float, float* %tmp24226, i64 1
+ %tmp24228 = getelementptr inbounds float, float* %tmp24227, i64 1
+ %tmp24229 = getelementptr inbounds float, float* %tmp24228, i64 1
+ %tmp24230 = getelementptr inbounds float, float* %tmp24229, i64 1
+ %tmp24231 = getelementptr inbounds float, float* %tmp24230, i64 1
+ %tmp24232 = getelementptr inbounds float, float* %tmp24231, i64 1
+ %tmp24233 = getelementptr inbounds float, float* %tmp24232, i64 1
+ %tmp24234 = getelementptr inbounds float, float* %tmp24233, i64 1
+ %tmp24235 = getelementptr inbounds float, float* %tmp24234, i64 1
+ %tmp24236 = getelementptr inbounds float, float* %tmp24235, i64 1
+ %tmp24237 = getelementptr inbounds float, float* %tmp24236, i64 1
+ %tmp24238 = getelementptr inbounds float, float* %tmp24237, i64 1
+ %tmp24239 = getelementptr inbounds float, float* %tmp24238, i64 1
+ %tmp24240 = getelementptr inbounds float, float* %tmp24239, i64 1
+ %tmp24241 = getelementptr inbounds float, float* %tmp24240, i64 1
+ %tmp24242 = getelementptr inbounds float, float* %tmp24241, i64 1
+ %tmp24243 = getelementptr inbounds float, float* %tmp24242, i64 1
+ %tmp24244 = getelementptr inbounds float, float* %tmp24243, i64 1
+ %tmp24245 = getelementptr inbounds float, float* %tmp24244, i64 1
+ %tmp24246 = getelementptr inbounds float, float* %tmp24245, i64 1
+ %tmp24247 = getelementptr inbounds float, float* %tmp24246, i64 1
+ %tmp24248 = getelementptr inbounds float, float* %tmp24247, i64 1
+ %tmp24249 = getelementptr inbounds float, float* %tmp24248, i64 1
+ %tmp24250 = getelementptr inbounds float, float* %tmp24249, i64 1
+ %tmp24251 = getelementptr inbounds float, float* %tmp24250, i64 1
+ %tmp24252 = getelementptr inbounds float, float* %tmp24251, i64 1
+ %tmp24253 = getelementptr inbounds float, float* %tmp24252, i64 1
+ %tmp24254 = getelementptr inbounds float, float* %tmp24253, i64 1
+ %tmp24255 = getelementptr inbounds float, float* %tmp24254, i64 1
+ %tmp24256 = getelementptr inbounds float, float* %tmp24255, i64 1
+ %tmp24257 = getelementptr inbounds float, float* %tmp24256, i64 1
+ %tmp24258 = getelementptr inbounds float, float* %tmp24257, i64 1
+ %tmp24259 = getelementptr inbounds float, float* %tmp24258, i64 1
+ %tmp24260 = getelementptr inbounds float, float* %tmp24259, i64 1
+ %tmp24261 = getelementptr inbounds float, float* %tmp24260, i64 1
+ %tmp24262 = getelementptr inbounds float, float* %tmp24261, i64 1
+ %tmp24263 = getelementptr inbounds float, float* %tmp24262, i64 1
+ %tmp24264 = getelementptr inbounds float, float* %tmp24263, i64 1
+ %tmp24265 = getelementptr inbounds float, float* %tmp24264, i64 1
+ %tmp24266 = getelementptr inbounds float, float* %tmp24265, i64 1
+ %tmp24267 = getelementptr inbounds float, float* %tmp24266, i64 1
+ %tmp24268 = getelementptr inbounds float, float* %tmp24267, i64 1
+ %tmp24269 = getelementptr inbounds float, float* %tmp24268, i64 1
+ %tmp24270 = getelementptr inbounds float, float* %tmp24269, i64 1
+ %tmp24271 = getelementptr inbounds float, float* %tmp24270, i64 1
+ %tmp24272 = getelementptr inbounds float, float* %tmp24271, i64 1
+ %tmp24273 = getelementptr inbounds float, float* %tmp24272, i64 1
+ %tmp24274 = getelementptr inbounds float, float* %tmp24273, i64 1
+ %tmp24275 = getelementptr inbounds float, float* %tmp24274, i64 1
+ %tmp24276 = getelementptr inbounds float, float* %tmp24275, i64 1
+ %tmp24277 = getelementptr inbounds float, float* %tmp24276, i64 1
+ %tmp24278 = getelementptr inbounds float, float* %tmp24277, i64 1
+ %tmp24279 = getelementptr inbounds float, float* %tmp24278, i64 1
+ %tmp24280 = getelementptr inbounds float, float* %tmp24279, i64 1
+ %tmp24281 = getelementptr inbounds float, float* %tmp24280, i64 1
+ %tmp24282 = getelementptr inbounds float, float* %tmp24281, i64 1
+ %tmp24283 = getelementptr inbounds float, float* %tmp24282, i64 1
+ %tmp24284 = getelementptr inbounds float, float* %tmp24283, i64 1
+ %tmp24285 = getelementptr inbounds float, float* %tmp24284, i64 1
+ %tmp24286 = getelementptr inbounds float, float* %tmp24285, i64 1
+ %tmp24287 = getelementptr inbounds float, float* %tmp24286, i64 1
+ %tmp24288 = getelementptr inbounds float, float* %tmp24287, i64 1
+ %tmp24289 = getelementptr inbounds float, float* %tmp24288, i64 1
+ %tmp24290 = getelementptr inbounds float, float* %tmp24289, i64 1
+ %tmp24291 = getelementptr inbounds float, float* %tmp24290, i64 1
+ %tmp24292 = getelementptr inbounds float, float* %tmp24291, i64 1
+ %tmp24293 = getelementptr inbounds float, float* %tmp24292, i64 1
+ %tmp24294 = getelementptr inbounds float, float* %tmp24293, i64 1
+ %tmp24295 = getelementptr inbounds float, float* %tmp24294, i64 1
+ %tmp24296 = getelementptr inbounds float, float* %tmp24295, i64 1
+ %tmp24297 = getelementptr inbounds float, float* %tmp24296, i64 1
+ %tmp24298 = getelementptr inbounds float, float* %tmp24297, i64 1
+ %tmp24299 = getelementptr inbounds float, float* %tmp24298, i64 1
+ %tmp24300 = getelementptr inbounds float, float* %tmp24299, i64 1
+ %tmp24301 = getelementptr inbounds float, float* %tmp24300, i64 1
+ %tmp24302 = getelementptr inbounds float, float* %tmp24301, i64 1
+ %tmp24303 = getelementptr inbounds float, float* %tmp24302, i64 1
+ %tmp24304 = getelementptr inbounds float, float* %tmp24303, i64 1
+ %tmp24305 = getelementptr inbounds float, float* %tmp24304, i64 1
+ %tmp24306 = getelementptr inbounds float, float* %tmp24305, i64 1
+ %tmp24307 = getelementptr inbounds float, float* %tmp24306, i64 1
+ %tmp24308 = getelementptr inbounds float, float* %tmp24307, i64 1
+ %tmp24309 = getelementptr inbounds float, float* %tmp24308, i64 1
+ %tmp24310 = getelementptr inbounds float, float* %tmp24309, i64 1
+ %tmp24311 = getelementptr inbounds float, float* %tmp24310, i64 1
+ %tmp24312 = getelementptr inbounds float, float* %tmp24311, i64 1
+ %tmp24313 = getelementptr inbounds float, float* %tmp24312, i64 1
+ %tmp24314 = getelementptr inbounds float, float* %tmp24313, i64 1
+ %tmp24315 = getelementptr inbounds float, float* %tmp24314, i64 1
+ %tmp24316 = getelementptr inbounds float, float* %tmp24315, i64 1
+ %tmp24317 = getelementptr inbounds float, float* %tmp24316, i64 1
+ %tmp24318 = getelementptr inbounds float, float* %tmp24317, i64 1
+ %tmp24319 = getelementptr inbounds float, float* %tmp24318, i64 1
+ %tmp24320 = getelementptr inbounds float, float* %tmp24319, i64 1
+ %tmp24321 = getelementptr inbounds float, float* %tmp24320, i64 1
+ %tmp24322 = getelementptr inbounds float, float* %tmp24321, i64 1
+ %tmp24323 = getelementptr inbounds float, float* %tmp24322, i64 1
+ %tmp24324 = getelementptr inbounds float, float* %tmp24323, i64 1
+ %tmp24325 = getelementptr inbounds float, float* %tmp24324, i64 1
+ %tmp24326 = getelementptr inbounds float, float* %tmp24325, i64 1
+ %tmp24327 = getelementptr inbounds float, float* %tmp24326, i64 1
+ %tmp24328 = getelementptr inbounds float, float* %tmp24327, i64 1
+ %tmp24329 = getelementptr inbounds float, float* %tmp24328, i64 1
+ %tmp24330 = getelementptr inbounds float, float* %tmp24329, i64 1
+ %tmp24331 = getelementptr inbounds float, float* %tmp24330, i64 1
+ %tmp24332 = getelementptr inbounds float, float* %tmp24331, i64 1
+ %tmp24333 = getelementptr inbounds float, float* %tmp24332, i64 1
+ %tmp24334 = getelementptr inbounds float, float* %tmp24333, i64 1
+ %tmp24335 = getelementptr inbounds float, float* %tmp24334, i64 1
+ %tmp24336 = getelementptr inbounds float, float* %tmp24335, i64 1
+ %tmp24337 = getelementptr inbounds float, float* %tmp24336, i64 1
+ %tmp24338 = getelementptr inbounds float, float* %tmp24337, i64 1
+ %tmp24339 = getelementptr inbounds float, float* %tmp24338, i64 1
+ %tmp24340 = getelementptr inbounds float, float* %tmp24339, i64 1
+ %tmp24341 = getelementptr inbounds float, float* %tmp24340, i64 1
+ %tmp24342 = getelementptr inbounds float, float* %tmp24341, i64 1
+ %tmp24343 = getelementptr inbounds float, float* %tmp24342, i64 1
+ %tmp24344 = getelementptr inbounds float, float* %tmp24343, i64 1
+ %tmp24345 = getelementptr inbounds float, float* %tmp24344, i64 1
+ %tmp24346 = getelementptr inbounds float, float* %tmp24345, i64 1
+ %tmp24347 = getelementptr inbounds float, float* %tmp24346, i64 1
+ %tmp24348 = getelementptr inbounds float, float* %tmp24347, i64 1
+ %tmp24349 = getelementptr inbounds float, float* %tmp24348, i64 1
+ %tmp24350 = getelementptr inbounds float, float* %tmp24349, i64 1
+ %tmp24351 = getelementptr inbounds float, float* %tmp24350, i64 1
+ %tmp24352 = getelementptr inbounds float, float* %tmp24351, i64 1
+ %tmp24353 = getelementptr inbounds float, float* %tmp24352, i64 1
+ %tmp24354 = getelementptr inbounds float, float* %tmp24353, i64 1
+ %tmp24355 = getelementptr inbounds float, float* %tmp24354, i64 1
+ %tmp24356 = getelementptr inbounds float, float* %tmp24355, i64 1
+ %tmp24357 = getelementptr inbounds float, float* %tmp24356, i64 1
+ %tmp24358 = getelementptr inbounds float, float* %tmp24357, i64 1
+ %tmp24359 = getelementptr inbounds float, float* %tmp24358, i64 1
+ %tmp24360 = getelementptr inbounds float, float* %tmp24359, i64 1
+ %tmp24361 = getelementptr inbounds float, float* %tmp24360, i64 1
+ %tmp24362 = getelementptr inbounds float, float* %tmp24361, i64 1
+ %tmp24363 = getelementptr inbounds float, float* %tmp24362, i64 1
+ %tmp24364 = getelementptr inbounds float, float* %tmp24363, i64 1
+ %tmp24365 = getelementptr inbounds float, float* %tmp24364, i64 1
+ %tmp24366 = getelementptr inbounds float, float* %tmp24365, i64 1
+ %tmp24367 = getelementptr inbounds float, float* %tmp24366, i64 1
+ %tmp24368 = getelementptr inbounds float, float* %tmp24367, i64 1
+ %tmp24369 = getelementptr inbounds float, float* %tmp24368, i64 1
+ %tmp24370 = getelementptr inbounds float, float* %tmp24369, i64 1
+ %tmp24371 = getelementptr inbounds float, float* %tmp24370, i64 1
+ %tmp24372 = getelementptr inbounds float, float* %tmp24371, i64 1
+ %tmp24373 = getelementptr inbounds float, float* %tmp24372, i64 1
+ %tmp24374 = getelementptr inbounds float, float* %tmp24373, i64 1
+ %tmp24375 = getelementptr inbounds float, float* %tmp24374, i64 1
+ %tmp24376 = getelementptr inbounds float, float* %tmp24375, i64 1
+ %tmp24377 = getelementptr inbounds float, float* %tmp24376, i64 1
+ %tmp24378 = getelementptr inbounds float, float* %tmp24377, i64 1
+ %tmp24379 = getelementptr inbounds float, float* %tmp24378, i64 1
+ %tmp24380 = getelementptr inbounds float, float* %tmp24379, i64 1
+ %tmp24381 = getelementptr inbounds float, float* %tmp24380, i64 1
+ %tmp24382 = getelementptr inbounds float, float* %tmp24381, i64 1
+ %tmp24383 = getelementptr inbounds float, float* %tmp24382, i64 1
+ %tmp24384 = getelementptr inbounds float, float* %tmp24383, i64 1
+ %tmp24385 = getelementptr inbounds float, float* %tmp24384, i64 1
+ %tmp24386 = getelementptr inbounds float, float* %tmp24385, i64 1
+ %tmp24387 = getelementptr inbounds float, float* %tmp24386, i64 1
+ %tmp24388 = getelementptr inbounds float, float* %tmp24387, i64 1
+ %tmp24389 = getelementptr inbounds float, float* %tmp24388, i64 1
+ %tmp24390 = getelementptr inbounds float, float* %tmp24389, i64 1
+ %tmp24391 = getelementptr inbounds float, float* %tmp24390, i64 1
+ %tmp24392 = getelementptr inbounds float, float* %tmp24391, i64 1
+ %tmp24393 = getelementptr inbounds float, float* %tmp24392, i64 1
+ %tmp24394 = getelementptr inbounds float, float* %tmp24393, i64 1
+ %tmp24395 = getelementptr inbounds float, float* %tmp24394, i64 1
+ %tmp24396 = getelementptr inbounds float, float* %tmp24395, i64 1
+ %tmp24397 = getelementptr inbounds float, float* %tmp24396, i64 1
+ %tmp24398 = getelementptr inbounds float, float* %tmp24397, i64 1
+ %tmp24399 = getelementptr inbounds float, float* %tmp24398, i64 1
+ %tmp24400 = getelementptr inbounds float, float* %tmp24399, i64 1
+ %tmp24401 = getelementptr inbounds float, float* %tmp24400, i64 1
+ %tmp24402 = getelementptr inbounds float, float* %tmp24401, i64 1
+ %tmp24403 = getelementptr inbounds float, float* %tmp24402, i64 1
+ %tmp24404 = getelementptr inbounds float, float* %tmp24403, i64 1
+ %tmp24405 = getelementptr inbounds float, float* %tmp24404, i64 1
+ %tmp24406 = getelementptr inbounds float, float* %tmp24405, i64 1
+ %tmp24407 = getelementptr inbounds float, float* %tmp24406, i64 1
+ %tmp24408 = getelementptr inbounds float, float* %tmp24407, i64 1
+ %tmp24409 = getelementptr inbounds float, float* %tmp24408, i64 1
+ %tmp24410 = getelementptr inbounds float, float* %tmp24409, i64 1
+ %tmp24411 = getelementptr inbounds float, float* %tmp24410, i64 1
+ %tmp24412 = getelementptr inbounds float, float* %tmp24411, i64 1
+ %tmp24413 = getelementptr inbounds float, float* %tmp24412, i64 1
+ %tmp24414 = getelementptr inbounds float, float* %tmp24413, i64 1
+ %tmp24415 = getelementptr inbounds float, float* %tmp24414, i64 1
+ %tmp24416 = getelementptr inbounds float, float* %tmp24415, i64 1
+ %tmp24417 = getelementptr inbounds float, float* %tmp24416, i64 1
+ %tmp24418 = getelementptr inbounds float, float* %tmp24417, i64 1
+ %tmp24419 = getelementptr inbounds float, float* %tmp24418, i64 1
+ %tmp24420 = getelementptr inbounds float, float* %tmp24419, i64 1
+ %tmp24421 = getelementptr inbounds float, float* %tmp24420, i64 1
+ %tmp24422 = getelementptr inbounds float, float* %tmp24421, i64 1
+ %tmp24423 = getelementptr inbounds float, float* %tmp24422, i64 1
+ %tmp24424 = getelementptr inbounds float, float* %tmp24423, i64 1
+ %tmp24425 = getelementptr inbounds float, float* %tmp24424, i64 1
+ %tmp24426 = getelementptr inbounds float, float* %tmp24425, i64 1
+ %tmp24427 = getelementptr inbounds float, float* %tmp24426, i64 1
+ %tmp24428 = getelementptr inbounds float, float* %tmp24427, i64 1
+ %tmp24429 = getelementptr inbounds float, float* %tmp24428, i64 1
+ %tmp24430 = getelementptr inbounds float, float* %tmp24429, i64 1
+ %tmp24431 = getelementptr inbounds float, float* %tmp24430, i64 1
+ %tmp24432 = getelementptr inbounds float, float* %tmp24431, i64 1
+ %tmp24433 = getelementptr inbounds float, float* %tmp24432, i64 1
+ %tmp24434 = getelementptr inbounds float, float* %tmp24433, i64 1
+ %tmp24435 = getelementptr inbounds float, float* %tmp24434, i64 1
+ %tmp24436 = getelementptr inbounds float, float* %tmp24435, i64 1
+ %tmp24437 = getelementptr inbounds float, float* %tmp24436, i64 1
+ %tmp24438 = getelementptr inbounds float, float* %tmp24437, i64 1
+ %tmp24439 = getelementptr inbounds float, float* %tmp24438, i64 1
+ %tmp24440 = getelementptr inbounds float, float* %tmp24439, i64 1
+ %tmp24441 = getelementptr inbounds float, float* %tmp24440, i64 1
+ %tmp24442 = getelementptr inbounds float, float* %tmp24441, i64 1
+ %tmp24443 = getelementptr inbounds float, float* %tmp24442, i64 1
+ %tmp24444 = getelementptr inbounds float, float* %tmp24443, i64 1
+ %tmp24445 = getelementptr inbounds float, float* %tmp24444, i64 1
+ %tmp24446 = getelementptr inbounds float, float* %tmp24445, i64 1
+ %tmp24447 = getelementptr inbounds float, float* %tmp24446, i64 1
+ %tmp24448 = getelementptr inbounds float, float* %tmp24447, i64 1
+ %tmp24449 = getelementptr inbounds float, float* %tmp24448, i64 1
+ %tmp24450 = getelementptr inbounds float, float* %tmp24449, i64 1
+ %tmp24451 = getelementptr inbounds float, float* %tmp24450, i64 1
+ %tmp24452 = getelementptr inbounds float, float* %tmp24451, i64 1
+ %tmp24453 = getelementptr inbounds float, float* %tmp24452, i64 1
+ %tmp24454 = getelementptr inbounds float, float* %tmp24453, i64 1
+ %tmp24455 = getelementptr inbounds float, float* %tmp24454, i64 1
+ %tmp24456 = getelementptr inbounds float, float* %tmp24455, i64 1
+ %tmp24457 = getelementptr inbounds float, float* %tmp24456, i64 1
+ %tmp24458 = getelementptr inbounds float, float* %tmp24457, i64 1
+ %tmp24459 = getelementptr inbounds float, float* %tmp24458, i64 1
+ %tmp24460 = getelementptr inbounds float, float* %tmp24459, i64 1
+ %tmp24461 = getelementptr inbounds float, float* %tmp24460, i64 1
+ %tmp24462 = getelementptr inbounds float, float* %tmp24461, i64 1
+ %tmp24463 = getelementptr inbounds float, float* %tmp24462, i64 1
+ %tmp24464 = getelementptr inbounds float, float* %tmp24463, i64 1
+ %tmp24465 = getelementptr inbounds float, float* %tmp24464, i64 1
+ %tmp24466 = getelementptr inbounds float, float* %tmp24465, i64 1
+ %tmp24467 = getelementptr inbounds float, float* %tmp24466, i64 1
+ %tmp24468 = getelementptr inbounds float, float* %tmp24467, i64 1
+ %tmp24469 = getelementptr inbounds float, float* %tmp24468, i64 1
+ %tmp24470 = getelementptr inbounds float, float* %tmp24469, i64 1
+ %tmp24471 = getelementptr inbounds float, float* %tmp24470, i64 1
+ %tmp24472 = getelementptr inbounds float, float* %tmp24471, i64 1
+ %tmp24473 = getelementptr inbounds float, float* %tmp24472, i64 1
+ %tmp24474 = getelementptr inbounds float, float* %tmp24473, i64 1
+ %tmp24475 = getelementptr inbounds float, float* %tmp24474, i64 1
+ %tmp24476 = getelementptr inbounds float, float* %tmp24475, i64 1
+ %tmp24477 = getelementptr inbounds float, float* %tmp24476, i64 1
+ %tmp24478 = getelementptr inbounds float, float* %tmp24477, i64 1
+ %tmp24479 = getelementptr inbounds float, float* %tmp24478, i64 1
+ %tmp24480 = getelementptr inbounds float, float* %tmp24479, i64 1
+ %tmp24481 = getelementptr inbounds float, float* %tmp24480, i64 1
+ %tmp24482 = getelementptr inbounds float, float* %tmp24481, i64 1
+ %tmp24483 = getelementptr inbounds float, float* %tmp24482, i64 1
+ %tmp24484 = getelementptr inbounds float, float* %tmp24483, i64 1
+ %tmp24485 = getelementptr inbounds float, float* %tmp24484, i64 1
+ %tmp24486 = getelementptr inbounds float, float* %tmp24485, i64 1
+ %tmp24487 = getelementptr inbounds float, float* %tmp24486, i64 1
+ %tmp24488 = getelementptr inbounds float, float* %tmp24487, i64 1
+ %tmp24489 = getelementptr inbounds float, float* %tmp24488, i64 1
+ %tmp24490 = getelementptr inbounds float, float* %tmp24489, i64 1
+ %tmp24491 = getelementptr inbounds float, float* %tmp24490, i64 1
+ %tmp24492 = getelementptr inbounds float, float* %tmp24491, i64 1
+ %tmp24493 = getelementptr inbounds float, float* %tmp24492, i64 1
+ %tmp24494 = getelementptr inbounds float, float* %tmp24493, i64 1
+ %tmp24495 = getelementptr inbounds float, float* %tmp24494, i64 1
+ %tmp24496 = getelementptr inbounds float, float* %tmp24495, i64 1
+ %tmp24497 = getelementptr inbounds float, float* %tmp24496, i64 1
+ %tmp24498 = getelementptr inbounds float, float* %tmp24497, i64 1
+ %tmp24499 = getelementptr inbounds float, float* %tmp24498, i64 1
+ %tmp24500 = getelementptr inbounds float, float* %tmp24499, i64 1
+ %tmp24501 = getelementptr inbounds float, float* %tmp24500, i64 1
+ %tmp24502 = getelementptr inbounds float, float* %tmp24501, i64 1
+ %tmp24503 = getelementptr inbounds float, float* %tmp24502, i64 1
+ %tmp24504 = getelementptr inbounds float, float* %tmp24503, i64 1
+ %tmp24505 = getelementptr inbounds float, float* %tmp24504, i64 1
+ %tmp24506 = getelementptr inbounds float, float* %tmp24505, i64 1
+ %tmp24507 = getelementptr inbounds float, float* %tmp24506, i64 1
+ %tmp24508 = getelementptr inbounds float, float* %tmp24507, i64 1
+ %tmp24509 = getelementptr inbounds float, float* %tmp24508, i64 1
+ %tmp24510 = getelementptr inbounds float, float* %tmp24509, i64 1
+ %tmp24511 = getelementptr inbounds float, float* %tmp24510, i64 1
+ %tmp24512 = getelementptr inbounds float, float* %tmp24511, i64 1
+ %tmp24513 = getelementptr inbounds float, float* %tmp24512, i64 1
+ %tmp24514 = getelementptr inbounds float, float* %tmp24513, i64 1
+ %tmp24515 = getelementptr inbounds float, float* %tmp24514, i64 1
+ %tmp24516 = getelementptr inbounds float, float* %tmp24515, i64 1
+ %tmp24517 = getelementptr inbounds float, float* %tmp24516, i64 1
+ %tmp24518 = getelementptr inbounds float, float* %tmp24517, i64 1
+ %tmp24519 = getelementptr inbounds float, float* %tmp24518, i64 1
+ %tmp24520 = getelementptr inbounds float, float* %tmp24519, i64 1
+ %tmp24521 = getelementptr inbounds float, float* %tmp24520, i64 1
+ %tmp24522 = getelementptr inbounds float, float* %tmp24521, i64 1
+ %tmp24523 = getelementptr inbounds float, float* %tmp24522, i64 1
+ %tmp24524 = getelementptr inbounds float, float* %tmp24523, i64 1
+ %tmp24525 = getelementptr inbounds float, float* %tmp24524, i64 1
+ %tmp24526 = getelementptr inbounds float, float* %tmp24525, i64 1
+ %tmp24527 = getelementptr inbounds float, float* %tmp24526, i64 1
+ %tmp24528 = getelementptr inbounds float, float* %tmp24527, i64 1
+ %tmp24529 = getelementptr inbounds float, float* %tmp24528, i64 1
+ %tmp24530 = getelementptr inbounds float, float* %tmp24529, i64 1
+ %tmp24531 = getelementptr inbounds float, float* %tmp24530, i64 1
+ %tmp24532 = getelementptr inbounds float, float* %tmp24531, i64 1
+ %tmp24533 = getelementptr inbounds float, float* %tmp24532, i64 1
+ %tmp24534 = getelementptr inbounds float, float* %tmp24533, i64 1
+ %tmp24535 = getelementptr inbounds float, float* %tmp24534, i64 1
+ %tmp24536 = getelementptr inbounds float, float* %tmp24535, i64 1
+ %tmp24537 = getelementptr inbounds float, float* %tmp24536, i64 1
+ %tmp24538 = getelementptr inbounds float, float* %tmp24537, i64 1
+ %tmp24539 = getelementptr inbounds float, float* %tmp24538, i64 1
+ %tmp24540 = getelementptr inbounds float, float* %tmp24539, i64 1
+ %tmp24541 = getelementptr inbounds float, float* %tmp24540, i64 1
+ %tmp24542 = getelementptr inbounds float, float* %tmp24541, i64 1
+ %tmp24543 = getelementptr inbounds float, float* %tmp24542, i64 1
+ %tmp24544 = getelementptr inbounds float, float* %tmp24543, i64 1
+ %tmp24545 = getelementptr inbounds float, float* %tmp24544, i64 1
+ %tmp24546 = getelementptr inbounds float, float* %tmp24545, i64 1
+ %tmp24547 = getelementptr inbounds float, float* %tmp24546, i64 1
+ %tmp24548 = getelementptr inbounds float, float* %tmp24547, i64 1
+ %tmp24549 = getelementptr inbounds float, float* %tmp24548, i64 1
+ %tmp24550 = getelementptr inbounds float, float* %tmp24549, i64 1
+ %tmp24551 = getelementptr inbounds float, float* %tmp24550, i64 1
+ %tmp24552 = getelementptr inbounds float, float* %tmp24551, i64 1
+ %tmp24553 = getelementptr inbounds float, float* %tmp24552, i64 1
+ %tmp24554 = getelementptr inbounds float, float* %tmp24553, i64 1
+ %tmp24555 = getelementptr inbounds float, float* %tmp24554, i64 1
+ %tmp24556 = getelementptr inbounds float, float* %tmp24555, i64 1
+ %tmp24557 = getelementptr inbounds float, float* %tmp24556, i64 1
+ %tmp24558 = getelementptr inbounds float, float* %tmp24557, i64 1
+ %tmp24559 = getelementptr inbounds float, float* %tmp24558, i64 1
+ %tmp24560 = getelementptr inbounds float, float* %tmp24559, i64 1
+ %tmp24561 = getelementptr inbounds float, float* %tmp24560, i64 1
+ %tmp24562 = getelementptr inbounds float, float* %tmp24561, i64 1
+ %tmp24563 = getelementptr inbounds float, float* %tmp24562, i64 1
+ %tmp24564 = getelementptr inbounds float, float* %tmp24563, i64 1
+ %tmp24565 = getelementptr inbounds float, float* %tmp24564, i64 1
+ %tmp24566 = getelementptr inbounds float, float* %tmp24565, i64 1
+ %tmp24567 = getelementptr inbounds float, float* %tmp24566, i64 1
+ %tmp24568 = getelementptr inbounds float, float* %tmp24567, i64 1
+ %tmp24569 = getelementptr inbounds float, float* %tmp24568, i64 1
+ %tmp24570 = getelementptr inbounds float, float* %tmp24569, i64 1
+ %tmp24571 = getelementptr inbounds float, float* %tmp24570, i64 1
+ %tmp24572 = getelementptr inbounds float, float* %tmp24571, i64 1
+ %tmp24573 = getelementptr inbounds float, float* %tmp24572, i64 1
+ %tmp24574 = getelementptr inbounds float, float* %tmp24573, i64 1
+ %tmp24575 = getelementptr inbounds float, float* %tmp24574, i64 1
+ %tmp24576 = getelementptr inbounds float, float* %tmp24575, i64 1
+ %tmp24577 = getelementptr inbounds float, float* %tmp24576, i64 1
+ %tmp24578 = getelementptr inbounds float, float* %tmp24577, i64 1
+ %tmp24579 = getelementptr inbounds float, float* %tmp24578, i64 1
+ %tmp24580 = getelementptr inbounds float, float* %tmp24579, i64 1
+ %tmp24581 = getelementptr inbounds float, float* %tmp24580, i64 1
+ %tmp24582 = getelementptr inbounds float, float* %tmp24581, i64 1
+ %tmp24583 = getelementptr inbounds float, float* %tmp24582, i64 1
+ %tmp24584 = getelementptr inbounds float, float* %tmp24583, i64 1
+ %tmp24585 = getelementptr inbounds float, float* %tmp24584, i64 1
+ %tmp24586 = getelementptr inbounds float, float* %tmp24585, i64 1
+ %tmp24587 = getelementptr inbounds float, float* %tmp24586, i64 1
+ %tmp24588 = getelementptr inbounds float, float* %tmp24587, i64 1
+ %tmp24589 = getelementptr inbounds float, float* %tmp24588, i64 1
+ %tmp24590 = getelementptr inbounds float, float* %tmp24589, i64 1
+ %tmp24591 = getelementptr inbounds float, float* %tmp24590, i64 1
+ %tmp24592 = getelementptr inbounds float, float* %tmp24591, i64 1
+ %tmp24593 = getelementptr inbounds float, float* %tmp24592, i64 1
+ %tmp24594 = getelementptr inbounds float, float* %tmp24593, i64 1
+ %tmp24595 = getelementptr inbounds float, float* %tmp24594, i64 1
+ %tmp24596 = getelementptr inbounds float, float* %tmp24595, i64 1
+ %tmp24597 = getelementptr inbounds float, float* %tmp24596, i64 1
+ %tmp24598 = getelementptr inbounds float, float* %tmp24597, i64 1
+ %tmp24599 = getelementptr inbounds float, float* %tmp24598, i64 1
+ %tmp24600 = getelementptr inbounds float, float* %tmp24599, i64 1
+ %tmp24601 = getelementptr inbounds float, float* %tmp24600, i64 1
+ %tmp24602 = getelementptr inbounds float, float* %tmp24601, i64 1
+ %tmp24603 = getelementptr inbounds float, float* %tmp24602, i64 1
+ %tmp24604 = getelementptr inbounds float, float* %tmp24603, i64 1
+ %tmp24605 = getelementptr inbounds float, float* %tmp24604, i64 1
+ %tmp24606 = getelementptr inbounds float, float* %tmp24605, i64 1
+ %tmp24607 = getelementptr inbounds float, float* %tmp24606, i64 1
+ %tmp24608 = getelementptr inbounds float, float* %tmp24607, i64 1
+ %tmp24609 = getelementptr inbounds float, float* %tmp24608, i64 1
+ %tmp24610 = getelementptr inbounds float, float* %tmp24609, i64 1
+ %tmp24611 = getelementptr inbounds float, float* %tmp24610, i64 1
+ %tmp24612 = getelementptr inbounds float, float* %tmp24611, i64 1
+ %tmp24613 = getelementptr inbounds float, float* %tmp24612, i64 1
+ %tmp24614 = getelementptr inbounds float, float* %tmp24613, i64 1
+ %tmp24615 = getelementptr inbounds float, float* %tmp24614, i64 1
+ %tmp24616 = getelementptr inbounds float, float* %tmp24615, i64 1
+ %tmp24617 = getelementptr inbounds float, float* %tmp24616, i64 1
+ %tmp24618 = getelementptr inbounds float, float* %tmp24617, i64 1
+ %tmp24619 = getelementptr inbounds float, float* %tmp24618, i64 1
+ %tmp24620 = getelementptr inbounds float, float* %tmp24619, i64 1
+ %tmp24621 = getelementptr inbounds float, float* %tmp24620, i64 1
+ %tmp24622 = getelementptr inbounds float, float* %tmp24621, i64 1
+ %tmp24623 = getelementptr inbounds float, float* %tmp24622, i64 1
+ %tmp24624 = getelementptr inbounds float, float* %tmp24623, i64 1
+ %tmp24625 = getelementptr inbounds float, float* %tmp24624, i64 1
+ %tmp24626 = getelementptr inbounds float, float* %tmp24625, i64 1
+ %tmp24627 = getelementptr inbounds float, float* %tmp24626, i64 1
+ %tmp24628 = getelementptr inbounds float, float* %tmp24627, i64 1
+ %tmp24629 = getelementptr inbounds float, float* %tmp24628, i64 1
+ %tmp24630 = getelementptr inbounds float, float* %tmp24629, i64 1
+ %tmp24631 = getelementptr inbounds float, float* %tmp24630, i64 1
+ %tmp24632 = getelementptr inbounds float, float* %tmp24631, i64 1
+ %tmp24633 = getelementptr inbounds float, float* %tmp24632, i64 1
+ %tmp24634 = getelementptr inbounds float, float* %tmp24633, i64 1
+ %tmp24635 = getelementptr inbounds float, float* %tmp24634, i64 1
+ %tmp24636 = getelementptr inbounds float, float* %tmp24635, i64 1
+ %tmp24637 = getelementptr inbounds float, float* %tmp24636, i64 1
+ %tmp24638 = getelementptr inbounds float, float* %tmp24637, i64 1
+ %tmp24639 = getelementptr inbounds float, float* %tmp24638, i64 1
+ %tmp24640 = getelementptr inbounds float, float* %tmp24639, i64 1
+ %tmp24641 = getelementptr inbounds float, float* %tmp24640, i64 1
+ %tmp24642 = getelementptr inbounds float, float* %tmp24641, i64 1
+ %tmp24643 = getelementptr inbounds float, float* %tmp24642, i64 1
+ %tmp24644 = getelementptr inbounds float, float* %tmp24643, i64 1
+ %tmp24645 = getelementptr inbounds float, float* %tmp24644, i64 1
+ %tmp24646 = getelementptr inbounds float, float* %tmp24645, i64 1
+ %tmp24647 = getelementptr inbounds float, float* %tmp24646, i64 1
+ %tmp24648 = getelementptr inbounds float, float* %tmp24647, i64 1
+ %tmp24649 = getelementptr inbounds float, float* %tmp24648, i64 1
+ %tmp24650 = getelementptr inbounds float, float* %tmp24649, i64 1
+ %tmp24651 = getelementptr inbounds float, float* %tmp24650, i64 1
+ %tmp24652 = getelementptr inbounds float, float* %tmp24651, i64 1
+ %tmp24653 = getelementptr inbounds float, float* %tmp24652, i64 1
+ %tmp24654 = getelementptr inbounds float, float* %tmp24653, i64 1
+ %tmp24655 = getelementptr inbounds float, float* %tmp24654, i64 1
+ %tmp24656 = getelementptr inbounds float, float* %tmp24655, i64 1
+ %tmp24657 = getelementptr inbounds float, float* %tmp24656, i64 1
+ %tmp24658 = getelementptr inbounds float, float* %tmp24657, i64 1
+ %tmp24659 = getelementptr inbounds float, float* %tmp24658, i64 1
+ %tmp24660 = getelementptr inbounds float, float* %tmp24659, i64 1
+ %tmp24661 = getelementptr inbounds float, float* %tmp24660, i64 1
+ %tmp24662 = getelementptr inbounds float, float* %tmp24661, i64 1
+ %tmp24663 = getelementptr inbounds float, float* %tmp24662, i64 1
+ %tmp24664 = getelementptr inbounds float, float* %tmp24663, i64 1
+ %tmp24665 = getelementptr inbounds float, float* %tmp24664, i64 1
+ %tmp24666 = getelementptr inbounds float, float* %tmp24665, i64 1
+ %tmp24667 = getelementptr inbounds float, float* %tmp24666, i64 1
+ %tmp24668 = getelementptr inbounds float, float* %tmp24667, i64 1
+ %tmp24669 = getelementptr inbounds float, float* %tmp24668, i64 1
+ %tmp24670 = getelementptr inbounds float, float* %tmp24669, i64 1
+ %tmp24671 = getelementptr inbounds float, float* %tmp24670, i64 1
+ %tmp24672 = getelementptr inbounds float, float* %tmp24671, i64 1
+ %tmp24673 = getelementptr inbounds float, float* %tmp24672, i64 1
+ %tmp24674 = getelementptr inbounds float, float* %tmp24673, i64 1
+ %tmp24675 = getelementptr inbounds float, float* %tmp24674, i64 1
+ %tmp24676 = getelementptr inbounds float, float* %tmp24675, i64 1
+ %tmp24677 = getelementptr inbounds float, float* %tmp24676, i64 1
+ %tmp24678 = getelementptr inbounds float, float* %tmp24677, i64 1
+ %tmp24679 = getelementptr inbounds float, float* %tmp24678, i64 1
+ %tmp24680 = getelementptr inbounds float, float* %tmp24679, i64 1
+ %tmp24681 = getelementptr inbounds float, float* %tmp24680, i64 1
+ %tmp24682 = getelementptr inbounds float, float* %tmp24681, i64 1
+ %tmp24683 = getelementptr inbounds float, float* %tmp24682, i64 1
+ %tmp24684 = getelementptr inbounds float, float* %tmp24683, i64 1
+ %tmp24685 = getelementptr inbounds float, float* %tmp24684, i64 1
+ %tmp24686 = getelementptr inbounds float, float* %tmp24685, i64 1
+ %tmp24687 = getelementptr inbounds float, float* %tmp24686, i64 1
+ %tmp24688 = getelementptr inbounds float, float* %tmp24687, i64 1
+ %tmp24689 = getelementptr inbounds float, float* %tmp24688, i64 1
+ %tmp24690 = getelementptr inbounds float, float* %tmp24689, i64 1
+ %tmp24691 = getelementptr inbounds float, float* %tmp24690, i64 1
+ %tmp24692 = getelementptr inbounds float, float* %tmp24691, i64 1
+ %tmp24693 = getelementptr inbounds float, float* %tmp24692, i64 1
+ %tmp24694 = getelementptr inbounds float, float* %tmp24693, i64 1
+ %tmp24695 = getelementptr inbounds float, float* %tmp24694, i64 1
+ %tmp24696 = getelementptr inbounds float, float* %tmp24695, i64 1
+ %tmp24697 = getelementptr inbounds float, float* %tmp24696, i64 1
+ %tmp24698 = getelementptr inbounds float, float* %tmp24697, i64 1
+ %tmp24699 = getelementptr inbounds float, float* %tmp24698, i64 1
+ %tmp24700 = getelementptr inbounds float, float* %tmp24699, i64 1
+ %tmp24701 = getelementptr inbounds float, float* %tmp24700, i64 1
+ %tmp24702 = getelementptr inbounds float, float* %tmp24701, i64 1
+ %tmp24703 = getelementptr inbounds float, float* %tmp24702, i64 1
+ %tmp24704 = getelementptr inbounds float, float* %tmp24703, i64 1
+ %tmp24705 = getelementptr inbounds float, float* %tmp24704, i64 1
+ %tmp24706 = getelementptr inbounds float, float* %tmp24705, i64 1
+ %tmp24707 = getelementptr inbounds float, float* %tmp24706, i64 1
+ %tmp24708 = getelementptr inbounds float, float* %tmp24707, i64 1
+ %tmp24709 = getelementptr inbounds float, float* %tmp24708, i64 1
+ %tmp24710 = getelementptr inbounds float, float* %tmp24709, i64 1
+ %tmp24711 = getelementptr inbounds float, float* %tmp24710, i64 1
+ %tmp24712 = getelementptr inbounds float, float* %tmp24711, i64 1
+ %tmp24713 = getelementptr inbounds float, float* %tmp24712, i64 1
+ %tmp24714 = getelementptr inbounds float, float* %tmp24713, i64 1
+ %tmp24715 = getelementptr inbounds float, float* %tmp24714, i64 1
+ %tmp24716 = getelementptr inbounds float, float* %tmp24715, i64 1
+ %tmp24717 = getelementptr inbounds float, float* %tmp24716, i64 1
+ %tmp24718 = getelementptr inbounds float, float* %tmp24717, i64 1
+ %tmp24719 = getelementptr inbounds float, float* %tmp24718, i64 1
+ %tmp24720 = getelementptr inbounds float, float* %tmp24719, i64 1
+ %tmp24721 = getelementptr inbounds float, float* %tmp24720, i64 1
+ %tmp24722 = getelementptr inbounds float, float* %tmp24721, i64 1
+ %tmp24723 = getelementptr inbounds float, float* %tmp24722, i64 1
+ %tmp24724 = getelementptr inbounds float, float* %tmp24723, i64 1
+ %tmp24725 = getelementptr inbounds float, float* %tmp24724, i64 1
+ %tmp24726 = getelementptr inbounds float, float* %tmp24725, i64 1
+ %tmp24727 = getelementptr inbounds float, float* %tmp24726, i64 1
+ %tmp24728 = getelementptr inbounds float, float* %tmp24727, i64 1
+ %tmp24729 = getelementptr inbounds float, float* %tmp24728, i64 1
+ %tmp24730 = getelementptr inbounds float, float* %tmp24729, i64 1
+ %tmp24731 = getelementptr inbounds float, float* %tmp24730, i64 1
+ %tmp24732 = getelementptr inbounds float, float* %tmp24731, i64 1
+ %tmp24733 = getelementptr inbounds float, float* %tmp24732, i64 1
+ %tmp24734 = getelementptr inbounds float, float* %tmp24733, i64 1
+ %tmp24735 = getelementptr inbounds float, float* %tmp24734, i64 1
+ %tmp24736 = getelementptr inbounds float, float* %tmp24735, i64 1
+ %tmp24737 = getelementptr inbounds float, float* %tmp24736, i64 1
+ %tmp24738 = getelementptr inbounds float, float* %tmp24737, i64 1
+ %tmp24739 = getelementptr inbounds float, float* %tmp24738, i64 1
+ %tmp24740 = getelementptr inbounds float, float* %tmp24739, i64 1
+ %tmp24741 = getelementptr inbounds float, float* %tmp24740, i64 1
+ %tmp24742 = getelementptr inbounds float, float* %tmp24741, i64 1
+ %tmp24743 = getelementptr inbounds float, float* %tmp24742, i64 1
+ %tmp24744 = getelementptr inbounds float, float* %tmp24743, i64 1
+ %tmp24745 = getelementptr inbounds float, float* %tmp24744, i64 1
+ %tmp24746 = getelementptr inbounds float, float* %tmp24745, i64 1
+ %tmp24747 = getelementptr inbounds float, float* %tmp24746, i64 1
+ %tmp24748 = getelementptr inbounds float, float* %tmp24747, i64 1
+ %tmp24749 = getelementptr inbounds float, float* %tmp24748, i64 1
+ %tmp24750 = getelementptr inbounds float, float* %tmp24749, i64 1
+ %tmp24751 = getelementptr inbounds float, float* %tmp24750, i64 1
+ %tmp24752 = getelementptr inbounds float, float* %tmp24751, i64 1
+ %tmp24753 = getelementptr inbounds float, float* %tmp24752, i64 1
+ %tmp24754 = getelementptr inbounds float, float* %tmp24753, i64 1
+ %tmp24755 = getelementptr inbounds float, float* %tmp24754, i64 1
+ %tmp24756 = getelementptr inbounds float, float* %tmp24755, i64 1
+ %tmp24757 = getelementptr inbounds float, float* %tmp24756, i64 1
+ %tmp24758 = getelementptr inbounds float, float* %tmp24757, i64 1
+ %tmp24759 = getelementptr inbounds float, float* %tmp24758, i64 1
+ %tmp24760 = getelementptr inbounds float, float* %tmp24759, i64 1
+ %tmp24761 = getelementptr inbounds float, float* %tmp24760, i64 1
+ %tmp24762 = getelementptr inbounds float, float* %tmp24761, i64 1
+ %tmp24763 = getelementptr inbounds float, float* %tmp24762, i64 1
+ %tmp24764 = getelementptr inbounds float, float* %tmp24763, i64 1
+ %tmp24765 = getelementptr inbounds float, float* %tmp24764, i64 1
+ %tmp24766 = getelementptr inbounds float, float* %tmp24765, i64 1
+ %tmp24767 = getelementptr inbounds float, float* %tmp24766, i64 1
+ %tmp24768 = getelementptr inbounds float, float* %tmp24767, i64 1
+ %tmp24769 = getelementptr inbounds float, float* %tmp24768, i64 1
+ %tmp24770 = getelementptr inbounds float, float* %tmp24769, i64 1
+ %tmp24771 = getelementptr inbounds float, float* %tmp24770, i64 1
+ %tmp24772 = getelementptr inbounds float, float* %tmp24771, i64 1
+ %tmp24773 = getelementptr inbounds float, float* %tmp24772, i64 1
+ %tmp24774 = getelementptr inbounds float, float* %tmp24773, i64 1
+ %tmp24775 = getelementptr inbounds float, float* %tmp24774, i64 1
+ %tmp24776 = getelementptr inbounds float, float* %tmp24775, i64 1
+ %tmp24777 = getelementptr inbounds float, float* %tmp24776, i64 1
+ %tmp24778 = getelementptr inbounds float, float* %tmp24777, i64 1
+ %tmp24779 = getelementptr inbounds float, float* %tmp24778, i64 1
+ %tmp24780 = getelementptr inbounds float, float* %tmp24779, i64 1
+ %tmp24781 = getelementptr inbounds float, float* %tmp24780, i64 1
+ %tmp24782 = getelementptr inbounds float, float* %tmp24781, i64 1
+ %tmp24783 = getelementptr inbounds float, float* %tmp24782, i64 1
+ %tmp24784 = getelementptr inbounds float, float* %tmp24783, i64 1
+ %tmp24785 = getelementptr inbounds float, float* %tmp24784, i64 1
+ %tmp24786 = getelementptr inbounds float, float* %tmp24785, i64 1
+ %tmp24787 = getelementptr inbounds float, float* %tmp24786, i64 1
+ %tmp24788 = getelementptr inbounds float, float* %tmp24787, i64 1
+ %tmp24789 = getelementptr inbounds float, float* %tmp24788, i64 1
+ %tmp24790 = getelementptr inbounds float, float* %tmp24789, i64 1
+ %tmp24791 = getelementptr inbounds float, float* %tmp24790, i64 1
+ %tmp24792 = getelementptr inbounds float, float* %tmp24791, i64 1
+ %tmp24793 = getelementptr inbounds float, float* %tmp24792, i64 1
+ %tmp24794 = getelementptr inbounds float, float* %tmp24793, i64 1
+ %tmp24795 = getelementptr inbounds float, float* %tmp24794, i64 1
+ %tmp24796 = getelementptr inbounds float, float* %tmp24795, i64 1
+ %tmp24797 = getelementptr inbounds float, float* %tmp24796, i64 1
+ %tmp24798 = getelementptr inbounds float, float* %tmp24797, i64 1
+ %tmp24799 = getelementptr inbounds float, float* %tmp24798, i64 1
+ %tmp24800 = getelementptr inbounds float, float* %tmp24799, i64 1
+ %tmp24801 = getelementptr inbounds float, float* %tmp24800, i64 1
+ %tmp24802 = getelementptr inbounds float, float* %tmp24801, i64 1
+ %tmp24803 = getelementptr inbounds float, float* %tmp24802, i64 1
+ %tmp24804 = getelementptr inbounds float, float* %tmp24803, i64 1
+ %tmp24805 = getelementptr inbounds float, float* %tmp24804, i64 1
+ %tmp24806 = getelementptr inbounds float, float* %tmp24805, i64 1
+ %tmp24807 = getelementptr inbounds float, float* %tmp24806, i64 1
+ %tmp24808 = getelementptr inbounds float, float* %tmp24807, i64 1
+ %tmp24809 = getelementptr inbounds float, float* %tmp24808, i64 1
+ %tmp24810 = getelementptr inbounds float, float* %tmp24809, i64 1
+ %tmp24811 = getelementptr inbounds float, float* %tmp24810, i64 1
+ %tmp24812 = getelementptr inbounds float, float* %tmp24811, i64 1
+ %tmp24813 = getelementptr inbounds float, float* %tmp24812, i64 1
+ %tmp24814 = getelementptr inbounds float, float* %tmp24813, i64 1
+ %tmp24815 = getelementptr inbounds float, float* %tmp24814, i64 1
+ %tmp24816 = getelementptr inbounds float, float* %tmp24815, i64 1
+ %tmp24817 = getelementptr inbounds float, float* %tmp24816, i64 1
+ %tmp24818 = getelementptr inbounds float, float* %tmp24817, i64 1
+ %tmp24819 = getelementptr inbounds float, float* %tmp24818, i64 1
+ %tmp24820 = getelementptr inbounds float, float* %tmp24819, i64 1
+ %tmp24821 = getelementptr inbounds float, float* %tmp24820, i64 1
+ %tmp24822 = getelementptr inbounds float, float* %tmp24821, i64 1
+ %tmp24823 = getelementptr inbounds float, float* %tmp24822, i64 1
+ %tmp24824 = getelementptr inbounds float, float* %tmp24823, i64 1
+ %tmp24825 = getelementptr inbounds float, float* %tmp24824, i64 1
+ %tmp24826 = getelementptr inbounds float, float* %tmp24825, i64 1
+ %tmp24827 = getelementptr inbounds float, float* %tmp24826, i64 1
+ %tmp24828 = getelementptr inbounds float, float* %tmp24827, i64 1
+ %tmp24829 = getelementptr inbounds float, float* %tmp24828, i64 1
+ %tmp24830 = getelementptr inbounds float, float* %tmp24829, i64 1
+ %tmp24831 = getelementptr inbounds float, float* %tmp24830, i64 1
+ %tmp24832 = getelementptr inbounds float, float* %tmp24831, i64 1
+ %tmp24833 = getelementptr inbounds float, float* %tmp24832, i64 1
+ %tmp24834 = getelementptr inbounds float, float* %tmp24833, i64 1
+ %tmp24835 = getelementptr inbounds float, float* %tmp24834, i64 1
+ %tmp24836 = getelementptr inbounds float, float* %tmp24835, i64 1
+ %tmp24837 = getelementptr inbounds float, float* %tmp24836, i64 1
+ %tmp24838 = getelementptr inbounds float, float* %tmp24837, i64 1
+ %tmp24839 = getelementptr inbounds float, float* %tmp24838, i64 1
+ %tmp24840 = getelementptr inbounds float, float* %tmp24839, i64 1
+ %tmp24841 = getelementptr inbounds float, float* %tmp24840, i64 1
+ %tmp24842 = getelementptr inbounds float, float* %tmp24841, i64 1
+ %tmp24843 = getelementptr inbounds float, float* %tmp24842, i64 1
+ %tmp24844 = getelementptr inbounds float, float* %tmp24843, i64 1
+ %tmp24845 = getelementptr inbounds float, float* %tmp24844, i64 1
+ %tmp24846 = getelementptr inbounds float, float* %tmp24845, i64 1
+ %tmp24847 = getelementptr inbounds float, float* %tmp24846, i64 1
+ %tmp24848 = getelementptr inbounds float, float* %tmp24847, i64 1
+ %tmp24849 = getelementptr inbounds float, float* %tmp24848, i64 1
+ %tmp24850 = getelementptr inbounds float, float* %tmp24849, i64 1
+ %tmp24851 = getelementptr inbounds float, float* %tmp24850, i64 1
+ %tmp24852 = getelementptr inbounds float, float* %tmp24851, i64 1
+ %tmp24853 = getelementptr inbounds float, float* %tmp24852, i64 1
+ %tmp24854 = getelementptr inbounds float, float* %tmp24853, i64 1
+ %tmp24855 = getelementptr inbounds float, float* %tmp24854, i64 1
+ %tmp24856 = getelementptr inbounds float, float* %tmp24855, i64 1
+ %tmp24857 = getelementptr inbounds float, float* %tmp24856, i64 1
+ %tmp24858 = getelementptr inbounds float, float* %tmp24857, i64 1
+ %tmp24859 = getelementptr inbounds float, float* %tmp24858, i64 1
+ %tmp24860 = getelementptr inbounds float, float* %tmp24859, i64 1
+ %tmp24861 = getelementptr inbounds float, float* %tmp24860, i64 1
+ %tmp24862 = getelementptr inbounds float, float* %tmp24861, i64 1
+ %tmp24863 = getelementptr inbounds float, float* %tmp24862, i64 1
+ %tmp24864 = getelementptr inbounds float, float* %tmp24863, i64 1
+ %tmp24865 = getelementptr inbounds float, float* %tmp24864, i64 1
+ %tmp24866 = getelementptr inbounds float, float* %tmp24865, i64 1
+ %tmp24867 = getelementptr inbounds float, float* %tmp24866, i64 1
+ %tmp24868 = getelementptr inbounds float, float* %tmp24867, i64 1
+ %tmp24869 = getelementptr inbounds float, float* %tmp24868, i64 1
+ %tmp24870 = getelementptr inbounds float, float* %tmp24869, i64 1
+ %tmp24871 = getelementptr inbounds float, float* %tmp24870, i64 1
+ %tmp24872 = getelementptr inbounds float, float* %tmp24871, i64 1
+ %tmp24873 = getelementptr inbounds float, float* %tmp24872, i64 1
+ %tmp24874 = getelementptr inbounds float, float* %tmp24873, i64 1
+ %tmp24875 = getelementptr inbounds float, float* %tmp24874, i64 1
+ %tmp24876 = getelementptr inbounds float, float* %tmp24875, i64 1
+ %tmp24877 = getelementptr inbounds float, float* %tmp24876, i64 1
+ %tmp24878 = getelementptr inbounds float, float* %tmp24877, i64 1
+ %tmp24879 = getelementptr inbounds float, float* %tmp24878, i64 1
+ %tmp24880 = getelementptr inbounds float, float* %tmp24879, i64 1
+ %tmp24881 = getelementptr inbounds float, float* %tmp24880, i64 1
+ %tmp24882 = getelementptr inbounds float, float* %tmp24881, i64 1
+ %tmp24883 = getelementptr inbounds float, float* %tmp24882, i64 1
+ %tmp24884 = getelementptr inbounds float, float* %tmp24883, i64 1
+ %tmp24885 = getelementptr inbounds float, float* %tmp24884, i64 1
+ %tmp24886 = getelementptr inbounds float, float* %tmp24885, i64 1
+ %tmp24887 = getelementptr inbounds float, float* %tmp24886, i64 1
+ %tmp24888 = getelementptr inbounds float, float* %tmp24887, i64 1
+ %tmp24889 = getelementptr inbounds float, float* %tmp24888, i64 1
+ %tmp24890 = getelementptr inbounds float, float* %tmp24889, i64 1
+ %tmp24891 = getelementptr inbounds float, float* %tmp24890, i64 1
+ %tmp24892 = getelementptr inbounds float, float* %tmp24891, i64 1
+ %tmp24893 = getelementptr inbounds float, float* %tmp24892, i64 1
+ %tmp24894 = getelementptr inbounds float, float* %tmp24893, i64 1
+ %tmp24895 = getelementptr inbounds float, float* %tmp24894, i64 1
+ %tmp24896 = getelementptr inbounds float, float* %tmp24895, i64 1
+ %tmp24897 = getelementptr inbounds float, float* %tmp24896, i64 1
+ %tmp24898 = getelementptr inbounds float, float* %tmp24897, i64 1
+ %tmp24899 = getelementptr inbounds float, float* %tmp24898, i64 1
+ %tmp24900 = getelementptr inbounds float, float* %tmp24899, i64 1
+ %tmp24901 = getelementptr inbounds float, float* %tmp24900, i64 1
+ %tmp24902 = getelementptr inbounds float, float* %tmp24901, i64 1
+ %tmp24903 = getelementptr inbounds float, float* %tmp24902, i64 1
+ %tmp24904 = getelementptr inbounds float, float* %tmp24903, i64 1
+ %tmp24905 = getelementptr inbounds float, float* %tmp24904, i64 1
+ %tmp24906 = getelementptr inbounds float, float* %tmp24905, i64 1
+ %tmp24907 = getelementptr inbounds float, float* %tmp24906, i64 1
+ %tmp24908 = getelementptr inbounds float, float* %tmp24907, i64 1
+ %tmp24909 = getelementptr inbounds float, float* %tmp24908, i64 1
+ %tmp24910 = getelementptr inbounds float, float* %tmp24909, i64 1
+ %tmp24911 = getelementptr inbounds float, float* %tmp24910, i64 1
+ %tmp24912 = getelementptr inbounds float, float* %tmp24911, i64 1
+ %tmp24913 = getelementptr inbounds float, float* %tmp24912, i64 1
+ %tmp24914 = getelementptr inbounds float, float* %tmp24913, i64 1
+ %tmp24915 = getelementptr inbounds float, float* %tmp24914, i64 1
+ %tmp24916 = getelementptr inbounds float, float* %tmp24915, i64 1
+ %tmp24917 = getelementptr inbounds float, float* %tmp24916, i64 1
+ %tmp24918 = getelementptr inbounds float, float* %tmp24917, i64 1
+ %tmp24919 = getelementptr inbounds float, float* %tmp24918, i64 1
+ %tmp24920 = getelementptr inbounds float, float* %tmp24919, i64 1
+ %tmp24921 = getelementptr inbounds float, float* %tmp24920, i64 1
+ %tmp24922 = getelementptr inbounds float, float* %tmp24921, i64 1
+ %tmp24923 = getelementptr inbounds float, float* %tmp24922, i64 1
+ %tmp24924 = getelementptr inbounds float, float* %tmp24923, i64 1
+ %tmp24925 = getelementptr inbounds float, float* %tmp24924, i64 1
+ %tmp24926 = getelementptr inbounds float, float* %tmp24925, i64 1
+ %tmp24927 = getelementptr inbounds float, float* %tmp24926, i64 1
+ %tmp24928 = getelementptr inbounds float, float* %tmp24927, i64 1
+ %tmp24929 = getelementptr inbounds float, float* %tmp24928, i64 1
+ %tmp24930 = getelementptr inbounds float, float* %tmp24929, i64 1
+ %tmp24931 = getelementptr inbounds float, float* %tmp24930, i64 1
+ %tmp24932 = getelementptr inbounds float, float* %tmp24931, i64 1
+ %tmp24933 = getelementptr inbounds float, float* %tmp24932, i64 1
+ %tmp24934 = getelementptr inbounds float, float* %tmp24933, i64 1
+ %tmp24935 = getelementptr inbounds float, float* %tmp24934, i64 1
+ %tmp24936 = getelementptr inbounds float, float* %tmp24935, i64 1
+ %tmp24937 = getelementptr inbounds float, float* %tmp24936, i64 1
+ %tmp24938 = getelementptr inbounds float, float* %tmp24937, i64 1
+ %tmp24939 = getelementptr inbounds float, float* %tmp24938, i64 1
+ %tmp24940 = getelementptr inbounds float, float* %tmp24939, i64 1
+ %tmp24941 = getelementptr inbounds float, float* %tmp24940, i64 1
+ %tmp24942 = getelementptr inbounds float, float* %tmp24941, i64 1
+ %tmp24943 = getelementptr inbounds float, float* %tmp24942, i64 1
+ %tmp24944 = getelementptr inbounds float, float* %tmp24943, i64 1
+ %tmp24945 = getelementptr inbounds float, float* %tmp24944, i64 1
+ %tmp24946 = getelementptr inbounds float, float* %tmp24945, i64 1
store float 0x3F43FD0D00000000, float* %tmp24946
- %tmp24947 = getelementptr inbounds float* undef, i64 1
- %tmp24948 = getelementptr inbounds float* undef, i64 1
- %tmp24949 = getelementptr inbounds float* undef, i64 1
- %tmp24950 = getelementptr inbounds float* undef, i64 1
- %tmp24951 = getelementptr inbounds float* %tmp24950, i64 1
- %tmp24952 = getelementptr inbounds float* undef, i64 1
- %tmp24953 = getelementptr inbounds float* undef, i64 1
- %tmp24954 = getelementptr inbounds float* undef, i64 1
- %tmp24955 = getelementptr inbounds float* undef, i64 1
- %tmp24956 = getelementptr inbounds float* undef, i64 1
- %tmp24957 = getelementptr inbounds float* undef, i64 1
- %tmp24958 = getelementptr inbounds float* %tmp24957, i64 1
- %tmp24959 = getelementptr inbounds float* undef, i64 1
- %tmp24960 = getelementptr inbounds float* undef, i64 1
- %tmp24961 = getelementptr inbounds float* undef, i64 1
- %tmp24962 = getelementptr inbounds float* undef, i64 1
- %tmp24963 = getelementptr inbounds float* undef, i64 1
- %tmp24964 = getelementptr inbounds float* undef, i64 1
- %tmp24965 = getelementptr inbounds float* undef, i64 1
- %tmp24966 = getelementptr inbounds float* %tmp24965, i64 1
- %tmp24967 = getelementptr inbounds float* undef, i64 1
- %tmp24968 = getelementptr inbounds float* undef, i64 1
- %tmp24969 = getelementptr inbounds float* undef, i64 1
- %tmp24970 = getelementptr inbounds float* undef, i64 1
- %tmp24971 = getelementptr inbounds float* %tmp24970, i64 1
- %tmp24972 = getelementptr inbounds float* %tmp24971, i64 1
- %tmp24973 = getelementptr inbounds float* %tmp24972, i64 1
- %tmp24974 = getelementptr inbounds float* undef, i64 1
- %tmp24975 = getelementptr inbounds float* undef, i64 1
- %tmp24976 = getelementptr inbounds float* %tmp24975, i64 1
- %tmp24977 = getelementptr inbounds float* undef, i64 1
- %tmp24978 = getelementptr inbounds float* undef, i64 1
- %tmp24979 = getelementptr inbounds float* undef, i64 1
- %tmp24980 = getelementptr inbounds float* undef, i64 1
- %tmp24981 = getelementptr inbounds float* undef, i64 1
- %tmp24982 = getelementptr inbounds float* undef, i64 1
- %tmp24983 = getelementptr inbounds float* %tmp24982, i64 1
- %tmp24984 = getelementptr inbounds float* undef, i64 1
- %tmp24985 = getelementptr inbounds float* %tmp24984, i64 1
- %tmp24986 = getelementptr inbounds float* undef, i64 1
- %tmp24987 = getelementptr inbounds float* %tmp24986, i64 1
- %tmp24988 = getelementptr inbounds float* %tmp24987, i64 1
- %tmp24989 = getelementptr inbounds float* undef, i64 1
- %tmp24990 = getelementptr inbounds float* undef, i64 1
- %tmp24991 = getelementptr inbounds float* %tmp24990, i64 1
- %tmp24992 = getelementptr inbounds float* undef, i64 1
- %tmp24993 = getelementptr inbounds float* %tmp24992, i64 1
- %tmp24994 = getelementptr inbounds float* %tmp24993, i64 1
- %tmp24995 = getelementptr inbounds float* undef, i64 1
- %tmp24996 = getelementptr inbounds float* undef, i64 1
- %tmp24997 = getelementptr inbounds float* undef, i64 1
- %tmp24998 = getelementptr inbounds float* undef, i64 1
- %tmp24999 = getelementptr inbounds float* undef, i64 1
- %tmp25000 = getelementptr inbounds float* undef, i64 1
- %tmp25001 = getelementptr inbounds float* undef, i64 1
- %tmp25002 = getelementptr inbounds float* undef, i64 1
- %tmp25003 = getelementptr inbounds float* undef, i64 1
- %tmp25004 = getelementptr inbounds float* undef, i64 1
- %tmp25005 = getelementptr inbounds float* undef, i64 1
- %tmp25006 = getelementptr inbounds float* undef, i64 1
- %tmp25007 = getelementptr inbounds float* undef, i64 1
- %tmp25008 = getelementptr inbounds float* undef, i64 1
- %tmp25009 = getelementptr inbounds float* undef, i64 1
- %tmp25010 = getelementptr inbounds float* undef, i64 1
- %tmp25011 = getelementptr inbounds float* undef, i64 1
- %tmp25012 = getelementptr inbounds float* %tmp25011, i64 1
- %tmp25013 = getelementptr inbounds float* undef, i64 1
- %tmp25014 = getelementptr inbounds float* undef, i64 1
- %tmp25015 = getelementptr inbounds float* undef, i64 1
- %tmp25016 = getelementptr inbounds float* undef, i64 1
- %tmp25017 = getelementptr inbounds float* %tmp25016, i64 1
- %tmp25018 = getelementptr inbounds float* undef, i64 1
- %tmp25019 = getelementptr inbounds float* undef, i64 1
- %tmp25020 = getelementptr inbounds float* undef, i64 1
- %tmp25021 = getelementptr inbounds float* undef, i64 1
- %tmp25022 = getelementptr inbounds float* undef, i64 1
- %tmp25023 = getelementptr inbounds float* %tmp25022, i64 1
- %tmp25024 = getelementptr inbounds float* %tmp25023, i64 1
- %tmp25025 = getelementptr inbounds float* undef, i64 1
- %tmp25026 = getelementptr inbounds float* undef, i64 1
- %tmp25027 = getelementptr inbounds float* undef, i64 1
- %tmp25028 = getelementptr inbounds float* undef, i64 1
- %tmp25029 = getelementptr inbounds float* undef, i64 1
- %tmp25030 = getelementptr inbounds float* undef, i64 1
- %tmp25031 = getelementptr inbounds float* undef, i64 1
- %tmp25032 = getelementptr inbounds float* undef, i64 1
- %tmp25033 = getelementptr inbounds float* undef, i64 1
- %tmp25034 = getelementptr inbounds float* undef, i64 1
- %tmp25035 = getelementptr inbounds float* %tmp25034, i64 1
- %tmp25036 = getelementptr inbounds float* undef, i64 1
- %tmp25037 = getelementptr inbounds float* undef, i64 1
- %tmp25038 = getelementptr inbounds float* %tmp25037, i64 1
- %tmp25039 = getelementptr inbounds float* undef, i64 1
- %tmp25040 = getelementptr inbounds float* undef, i64 1
- %tmp25041 = getelementptr inbounds float* undef, i64 1
- %tmp25042 = getelementptr inbounds float* undef, i64 1
- %tmp25043 = getelementptr inbounds float* undef, i64 1
- %tmp25044 = getelementptr inbounds float* undef, i64 1
- %tmp25045 = getelementptr inbounds float* %tmp25044, i64 1
- %tmp25046 = getelementptr inbounds float* undef, i64 1
- %tmp25047 = getelementptr inbounds float* %tmp25046, i64 1
- %tmp25048 = getelementptr inbounds float* undef, i64 1
- %tmp25049 = getelementptr inbounds float* %tmp25048, i64 1
- %tmp25050 = getelementptr inbounds float* %tmp25049, i64 1
- %tmp25051 = getelementptr inbounds float* undef, i64 1
- %tmp25052 = getelementptr inbounds float* undef, i64 1
- %tmp25053 = getelementptr inbounds float* undef, i64 1
- %tmp25054 = getelementptr inbounds float* undef, i64 1
- %tmp25055 = getelementptr inbounds float* undef, i64 1
- %tmp25056 = getelementptr inbounds float* undef, i64 1
- %tmp25057 = getelementptr inbounds float* undef, i64 1
- %tmp25058 = getelementptr inbounds float* undef, i64 1
- %tmp25059 = getelementptr inbounds float* undef, i64 1
- %tmp25060 = getelementptr inbounds float* undef, i64 1
- %tmp25061 = getelementptr inbounds float* undef, i64 1
- %tmp25062 = getelementptr inbounds float* undef, i64 1
- %tmp25063 = getelementptr inbounds float* undef, i64 1
- %tmp25064 = getelementptr inbounds float* undef, i64 1
- %tmp25065 = getelementptr inbounds float* undef, i64 1
- %tmp25066 = getelementptr inbounds float* undef, i64 1
- %tmp25067 = getelementptr inbounds float* %tmp25066, i64 1
- %tmp25068 = getelementptr inbounds float* undef, i64 1
- %tmp25069 = getelementptr inbounds float* %tmp25068, i64 1
- %tmp25070 = getelementptr inbounds float* undef, i64 1
- %tmp25071 = getelementptr inbounds float* undef, i64 1
- %tmp25072 = getelementptr inbounds float* undef, i64 1
- %tmp25073 = getelementptr inbounds float* undef, i64 1
- %tmp25074 = getelementptr inbounds float* undef, i64 1
- %tmp25075 = getelementptr inbounds float* %tmp25074, i64 1
- %tmp25076 = getelementptr inbounds float* undef, i64 1
- %tmp25077 = getelementptr inbounds float* undef, i64 1
- %tmp25078 = getelementptr inbounds float* undef, i64 1
- %tmp25079 = getelementptr inbounds float* undef, i64 1
- %tmp25080 = getelementptr inbounds float* undef, i64 1
- %tmp25081 = getelementptr inbounds float* undef, i64 1
- %tmp25082 = getelementptr inbounds float* undef, i64 1
- %tmp25083 = getelementptr inbounds float* undef, i64 1
- %tmp25084 = getelementptr inbounds float* undef, i64 1
- %tmp25085 = getelementptr inbounds float* undef, i64 1
- %tmp25086 = getelementptr inbounds float* undef, i64 1
- %tmp25087 = getelementptr inbounds float* undef, i64 1
- %tmp25088 = getelementptr inbounds float* undef, i64 1
- %tmp25089 = getelementptr inbounds float* undef, i64 1
- %tmp25090 = getelementptr inbounds float* undef, i64 1
- %tmp25091 = getelementptr inbounds float* undef, i64 1
- %tmp25092 = getelementptr inbounds float* undef, i64 1
- %tmp25093 = getelementptr inbounds float* undef, i64 1
- %tmp25094 = getelementptr inbounds float* undef, i64 1
- %tmp25095 = getelementptr inbounds float* %tmp25094, i64 1
- %tmp25096 = getelementptr inbounds float* undef, i64 1
- %tmp25097 = getelementptr inbounds float* %tmp25096, i64 1
- %tmp25098 = getelementptr inbounds float* %tmp25097, i64 1
- %tmp25099 = getelementptr inbounds float* undef, i64 1
- %tmp25100 = getelementptr inbounds float* undef, i64 1
- %tmp25101 = getelementptr inbounds float* undef, i64 1
- %tmp25102 = getelementptr inbounds float* undef, i64 1
- %tmp25103 = getelementptr inbounds float* undef, i64 1
- %tmp25104 = getelementptr inbounds float* undef, i64 1
- %tmp25105 = getelementptr inbounds float* undef, i64 1
- %tmp25106 = getelementptr inbounds float* undef, i64 1
- %tmp25107 = getelementptr inbounds float* %tmp25106, i64 1
- %tmp25108 = getelementptr inbounds float* undef, i64 1
- %tmp25109 = getelementptr inbounds float* undef, i64 1
- %tmp25110 = getelementptr inbounds float* undef, i64 1
- %tmp25111 = getelementptr inbounds float* undef, i64 1
- %tmp25112 = getelementptr inbounds float* undef, i64 1
- %tmp25113 = getelementptr inbounds float* undef, i64 1
- %tmp25114 = getelementptr inbounds float* undef, i64 1
- %tmp25115 = getelementptr inbounds float* undef, i64 1
- %tmp25116 = getelementptr inbounds float* undef, i64 1
- %tmp25117 = getelementptr inbounds float* undef, i64 1
- %tmp25118 = getelementptr inbounds float* undef, i64 1
- %tmp25119 = getelementptr inbounds float* undef, i64 1
- %tmp25120 = getelementptr inbounds float* undef, i64 1
- %tmp25121 = getelementptr inbounds float* undef, i64 1
- %tmp25122 = getelementptr inbounds float* %tmp25121, i64 1
- %tmp25123 = getelementptr inbounds float* undef, i64 1
- %tmp25124 = getelementptr inbounds float* undef, i64 1
- %tmp25125 = getelementptr inbounds float* undef, i64 1
- %tmp25126 = getelementptr inbounds float* undef, i64 1
- %tmp25127 = getelementptr inbounds float* undef, i64 1
- %tmp25128 = getelementptr inbounds float* undef, i64 1
- %tmp25129 = getelementptr inbounds float* undef, i64 1
- %tmp25130 = getelementptr inbounds float* undef, i64 1
- %tmp25131 = getelementptr inbounds float* undef, i64 1
- %tmp25132 = getelementptr inbounds float* undef, i64 1
- %tmp25133 = getelementptr inbounds float* undef, i64 1
- %tmp25134 = getelementptr inbounds float* undef, i64 1
- %tmp25135 = getelementptr inbounds float* undef, i64 1
- %tmp25136 = getelementptr inbounds float* undef, i64 1
- %tmp25137 = getelementptr inbounds float* undef, i64 1
- %tmp25138 = getelementptr inbounds float* undef, i64 1
- %tmp25139 = getelementptr inbounds float* undef, i64 1
- %tmp25140 = getelementptr inbounds float* undef, i64 1
- %tmp25141 = getelementptr inbounds float* undef, i64 1
- %tmp25142 = getelementptr inbounds float* undef, i64 1
- %tmp25143 = getelementptr inbounds float* undef, i64 1
- %tmp25144 = getelementptr inbounds float* undef, i64 1
- %tmp25145 = getelementptr inbounds float* undef, i64 1
- %tmp25146 = getelementptr inbounds float* %tmp25145, i64 1
- %tmp25147 = getelementptr inbounds float* undef, i64 1
- %tmp25148 = getelementptr inbounds float* %tmp25147, i64 1
- %tmp25149 = getelementptr inbounds float* undef, i64 1
- %tmp25150 = getelementptr inbounds float* undef, i64 1
- %tmp25151 = getelementptr inbounds float* undef, i64 1
- %tmp25152 = getelementptr inbounds float* undef, i64 1
- %tmp25153 = getelementptr inbounds float* %tmp25152, i64 1
- %tmp25154 = getelementptr inbounds float* undef, i64 1
- %tmp25155 = getelementptr inbounds float* undef, i64 1
- %tmp25156 = getelementptr inbounds float* undef, i64 1
- %tmp25157 = getelementptr inbounds float* undef, i64 1
- %tmp25158 = getelementptr inbounds float* undef, i64 1
- %tmp25159 = getelementptr inbounds float* undef, i64 1
- %tmp25160 = getelementptr inbounds float* undef, i64 1
- %tmp25161 = getelementptr inbounds float* undef, i64 1
- %tmp25162 = getelementptr inbounds float* %tmp25161, i64 1
- %tmp25163 = getelementptr inbounds float* undef, i64 1
- %tmp25164 = getelementptr inbounds float* undef, i64 1
- %tmp25165 = getelementptr inbounds float* undef, i64 1
- %tmp25166 = getelementptr inbounds float* undef, i64 1
- %tmp25167 = getelementptr inbounds float* undef, i64 1
- %tmp25168 = getelementptr inbounds float* undef, i64 1
- %tmp25169 = getelementptr inbounds float* undef, i64 1
- %tmp25170 = getelementptr inbounds float* %tmp25169, i64 1
- %tmp25171 = getelementptr inbounds float* undef, i64 1
- %tmp25172 = getelementptr inbounds float* undef, i64 1
- %tmp25173 = getelementptr inbounds float* undef, i64 1
- %tmp25174 = getelementptr inbounds float* undef, i64 1
- %tmp25175 = getelementptr inbounds float* %tmp25174, i64 1
- %tmp25176 = getelementptr inbounds float* undef, i64 1
- %tmp25177 = getelementptr inbounds float* undef, i64 1
- %tmp25178 = getelementptr inbounds float* %tmp25177, i64 1
- %tmp25179 = getelementptr inbounds float* undef, i64 1
- %tmp25180 = getelementptr inbounds float* undef, i64 1
- %tmp25181 = getelementptr inbounds float* undef, i64 1
- %tmp25182 = getelementptr inbounds float* undef, i64 1
- %tmp25183 = getelementptr inbounds float* undef, i64 1
- %tmp25184 = getelementptr inbounds float* undef, i64 1
- %tmp25185 = getelementptr inbounds float* undef, i64 1
- %tmp25186 = getelementptr inbounds float* undef, i64 1
- %tmp25187 = getelementptr inbounds float* %tmp25186, i64 1
- %tmp25188 = getelementptr inbounds float* %tmp25187, i64 1
- %tmp25189 = getelementptr inbounds float* undef, i64 1
- %tmp25190 = getelementptr inbounds float* undef, i64 1
- %tmp25191 = getelementptr inbounds float* undef, i64 1
- %tmp25192 = getelementptr inbounds float* %tmp25191, i64 1
- %tmp25193 = getelementptr inbounds float* undef, i64 1
- %tmp25194 = getelementptr inbounds float* undef, i64 1
- %tmp25195 = getelementptr inbounds float* undef, i64 1
- %tmp25196 = getelementptr inbounds float* undef, i64 1
- %tmp25197 = getelementptr inbounds float* undef, i64 1
- %tmp25198 = getelementptr inbounds float* undef, i64 1
- %tmp25199 = getelementptr inbounds float* undef, i64 1
- %tmp25200 = getelementptr inbounds float* undef, i64 1
- %tmp25201 = getelementptr inbounds float* %tmp25200, i64 1
- %tmp25202 = getelementptr inbounds float* undef, i64 1
- %tmp25203 = getelementptr inbounds float* undef, i64 1
- %tmp25204 = getelementptr inbounds float* undef, i64 1
- %tmp25205 = getelementptr inbounds float* undef, i64 1
- %tmp25206 = getelementptr inbounds float* undef, i64 1
- %tmp25207 = getelementptr inbounds float* undef, i64 1
- %tmp25208 = getelementptr inbounds float* undef, i64 1
- %tmp25209 = getelementptr inbounds float* undef, i64 1
- %tmp25210 = getelementptr inbounds float* undef, i64 1
- %tmp25211 = getelementptr inbounds float* undef, i64 1
- %tmp25212 = getelementptr inbounds float* undef, i64 1
- %tmp25213 = getelementptr inbounds float* undef, i64 1
- %tmp25214 = getelementptr inbounds float* undef, i64 1
- %tmp25215 = getelementptr inbounds float* undef, i64 1
- %tmp25216 = getelementptr inbounds float* undef, i64 1
- %tmp25217 = getelementptr inbounds float* undef, i64 1
- %tmp25218 = getelementptr inbounds float* undef, i64 1
- %tmp25219 = getelementptr inbounds float* undef, i64 1
- %tmp25220 = getelementptr inbounds float* undef, i64 1
- %tmp25221 = getelementptr inbounds float* undef, i64 1
- %tmp25222 = getelementptr inbounds float* undef, i64 1
- %tmp25223 = getelementptr inbounds float* undef, i64 1
- %tmp25224 = getelementptr inbounds float* undef, i64 1
- %tmp25225 = getelementptr inbounds float* undef, i64 1
- %tmp25226 = getelementptr inbounds float* undef, i64 1
- %tmp25227 = getelementptr inbounds float* undef, i64 1
- %tmp25228 = getelementptr inbounds float* undef, i64 1
- %tmp25229 = getelementptr inbounds float* undef, i64 1
- %tmp25230 = getelementptr inbounds float* %tmp25229, i64 1
- %tmp25231 = getelementptr inbounds float* undef, i64 1
- %tmp25232 = getelementptr inbounds float* undef, i64 1
- %tmp25233 = getelementptr inbounds float* undef, i64 1
- %tmp25234 = getelementptr inbounds float* undef, i64 1
- %tmp25235 = getelementptr inbounds float* %tmp25234, i64 1
- %tmp25236 = getelementptr inbounds float* undef, i64 1
- %tmp25237 = getelementptr inbounds float* %tmp25236, i64 1
- %tmp25238 = getelementptr inbounds float* undef, i64 1
- %tmp25239 = getelementptr inbounds float* undef, i64 1
- %tmp25240 = getelementptr inbounds float* undef, i64 1
- %tmp25241 = getelementptr inbounds float* undef, i64 1
- %tmp25242 = getelementptr inbounds float* undef, i64 1
- %tmp25243 = getelementptr inbounds float* undef, i64 1
- %tmp25244 = getelementptr inbounds float* undef, i64 1
- %tmp25245 = getelementptr inbounds float* undef, i64 1
- %tmp25246 = getelementptr inbounds float* undef, i64 1
- %tmp25247 = getelementptr inbounds float* undef, i64 1
- %tmp25248 = getelementptr inbounds float* %tmp25247, i64 1
- %tmp25249 = getelementptr inbounds float* undef, i64 1
- %tmp25250 = getelementptr inbounds float* undef, i64 1
- %tmp25251 = getelementptr inbounds float* undef, i64 1
- %tmp25252 = getelementptr inbounds float* undef, i64 1
- %tmp25253 = getelementptr inbounds float* undef, i64 1
- %tmp25254 = getelementptr inbounds float* undef, i64 1
- %tmp25255 = getelementptr inbounds float* undef, i64 1
- %tmp25256 = getelementptr inbounds float* undef, i64 1
- %tmp25257 = getelementptr inbounds float* undef, i64 1
- %tmp25258 = getelementptr inbounds float* undef, i64 1
- %tmp25259 = getelementptr inbounds float* undef, i64 1
- %tmp25260 = getelementptr inbounds float* undef, i64 1
- %tmp25261 = getelementptr inbounds float* undef, i64 1
- %tmp25262 = getelementptr inbounds float* undef, i64 1
- %tmp25263 = getelementptr inbounds float* undef, i64 1
- %tmp25264 = getelementptr inbounds float* undef, i64 1
- %tmp25265 = getelementptr inbounds float* undef, i64 1
- %tmp25266 = getelementptr inbounds float* undef, i64 1
- %tmp25267 = getelementptr inbounds float* undef, i64 1
- %tmp25268 = getelementptr inbounds float* undef, i64 1
- %tmp25269 = getelementptr inbounds float* undef, i64 1
+ %tmp24947 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24948 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24949 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24950 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24951 = getelementptr inbounds float, float* %tmp24950, i64 1
+ %tmp24952 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24953 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24954 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24955 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24956 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24957 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24958 = getelementptr inbounds float, float* %tmp24957, i64 1
+ %tmp24959 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24960 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24961 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24962 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24963 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24964 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24965 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24966 = getelementptr inbounds float, float* %tmp24965, i64 1
+ %tmp24967 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24968 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24969 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24970 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24971 = getelementptr inbounds float, float* %tmp24970, i64 1
+ %tmp24972 = getelementptr inbounds float, float* %tmp24971, i64 1
+ %tmp24973 = getelementptr inbounds float, float* %tmp24972, i64 1
+ %tmp24974 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24975 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24976 = getelementptr inbounds float, float* %tmp24975, i64 1
+ %tmp24977 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24978 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24979 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24980 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24981 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24982 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24983 = getelementptr inbounds float, float* %tmp24982, i64 1
+ %tmp24984 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24985 = getelementptr inbounds float, float* %tmp24984, i64 1
+ %tmp24986 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24987 = getelementptr inbounds float, float* %tmp24986, i64 1
+ %tmp24988 = getelementptr inbounds float, float* %tmp24987, i64 1
+ %tmp24989 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24990 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24991 = getelementptr inbounds float, float* %tmp24990, i64 1
+ %tmp24992 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24993 = getelementptr inbounds float, float* %tmp24992, i64 1
+ %tmp24994 = getelementptr inbounds float, float* %tmp24993, i64 1
+ %tmp24995 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24996 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24997 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24998 = getelementptr inbounds float, float* undef, i64 1
+ %tmp24999 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25000 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25001 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25002 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25003 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25004 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25005 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25006 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25007 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25008 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25009 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25010 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25011 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25012 = getelementptr inbounds float, float* %tmp25011, i64 1
+ %tmp25013 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25014 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25015 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25016 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25017 = getelementptr inbounds float, float* %tmp25016, i64 1
+ %tmp25018 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25019 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25020 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25021 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25022 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25023 = getelementptr inbounds float, float* %tmp25022, i64 1
+ %tmp25024 = getelementptr inbounds float, float* %tmp25023, i64 1
+ %tmp25025 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25026 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25027 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25028 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25029 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25030 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25031 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25032 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25033 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25034 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25035 = getelementptr inbounds float, float* %tmp25034, i64 1
+ %tmp25036 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25037 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25038 = getelementptr inbounds float, float* %tmp25037, i64 1
+ %tmp25039 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25040 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25041 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25042 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25043 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25044 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25045 = getelementptr inbounds float, float* %tmp25044, i64 1
+ %tmp25046 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25047 = getelementptr inbounds float, float* %tmp25046, i64 1
+ %tmp25048 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25049 = getelementptr inbounds float, float* %tmp25048, i64 1
+ %tmp25050 = getelementptr inbounds float, float* %tmp25049, i64 1
+ %tmp25051 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25052 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25053 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25054 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25055 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25056 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25057 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25058 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25059 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25060 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25061 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25062 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25063 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25064 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25065 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25066 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25067 = getelementptr inbounds float, float* %tmp25066, i64 1
+ %tmp25068 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25069 = getelementptr inbounds float, float* %tmp25068, i64 1
+ %tmp25070 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25071 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25072 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25073 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25074 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25075 = getelementptr inbounds float, float* %tmp25074, i64 1
+ %tmp25076 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25077 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25078 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25079 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25080 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25081 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25082 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25083 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25084 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25085 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25086 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25087 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25088 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25089 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25090 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25091 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25092 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25093 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25094 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25095 = getelementptr inbounds float, float* %tmp25094, i64 1
+ %tmp25096 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25097 = getelementptr inbounds float, float* %tmp25096, i64 1
+ %tmp25098 = getelementptr inbounds float, float* %tmp25097, i64 1
+ %tmp25099 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25100 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25101 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25102 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25103 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25104 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25105 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25106 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25107 = getelementptr inbounds float, float* %tmp25106, i64 1
+ %tmp25108 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25109 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25110 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25111 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25112 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25113 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25114 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25115 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25116 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25117 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25118 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25119 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25120 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25121 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25122 = getelementptr inbounds float, float* %tmp25121, i64 1
+ %tmp25123 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25124 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25125 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25126 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25127 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25128 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25129 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25130 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25131 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25132 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25133 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25134 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25135 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25136 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25137 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25138 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25139 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25140 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25141 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25142 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25143 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25144 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25145 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25146 = getelementptr inbounds float, float* %tmp25145, i64 1
+ %tmp25147 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25148 = getelementptr inbounds float, float* %tmp25147, i64 1
+ %tmp25149 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25150 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25151 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25152 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25153 = getelementptr inbounds float, float* %tmp25152, i64 1
+ %tmp25154 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25155 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25156 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25157 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25158 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25159 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25160 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25161 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25162 = getelementptr inbounds float, float* %tmp25161, i64 1
+ %tmp25163 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25164 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25165 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25166 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25167 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25168 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25169 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25170 = getelementptr inbounds float, float* %tmp25169, i64 1
+ %tmp25171 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25172 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25173 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25174 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25175 = getelementptr inbounds float, float* %tmp25174, i64 1
+ %tmp25176 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25177 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25178 = getelementptr inbounds float, float* %tmp25177, i64 1
+ %tmp25179 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25180 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25181 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25182 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25183 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25184 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25185 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25186 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25187 = getelementptr inbounds float, float* %tmp25186, i64 1
+ %tmp25188 = getelementptr inbounds float, float* %tmp25187, i64 1
+ %tmp25189 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25190 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25191 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25192 = getelementptr inbounds float, float* %tmp25191, i64 1
+ %tmp25193 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25194 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25195 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25196 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25197 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25198 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25199 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25200 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25201 = getelementptr inbounds float, float* %tmp25200, i64 1
+ %tmp25202 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25203 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25204 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25205 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25206 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25207 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25208 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25209 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25210 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25211 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25212 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25213 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25214 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25215 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25216 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25217 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25218 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25219 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25220 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25221 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25222 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25223 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25224 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25225 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25226 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25227 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25228 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25229 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25230 = getelementptr inbounds float, float* %tmp25229, i64 1
+ %tmp25231 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25232 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25233 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25234 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25235 = getelementptr inbounds float, float* %tmp25234, i64 1
+ %tmp25236 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25237 = getelementptr inbounds float, float* %tmp25236, i64 1
+ %tmp25238 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25239 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25240 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25241 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25242 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25243 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25244 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25245 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25246 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25247 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25248 = getelementptr inbounds float, float* %tmp25247, i64 1
+ %tmp25249 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25250 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25251 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25252 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25253 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25254 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25255 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25256 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25257 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25258 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25259 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25260 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25261 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25262 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25263 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25264 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25265 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25266 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25267 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25268 = getelementptr inbounds float, float* undef, i64 1
+ %tmp25269 = getelementptr inbounds float, float* undef, i64 1
br i1 undef, label %bb25270, label %bb25271
bb25270: ; preds = %bb2
; CHECK: shll $2, %edx
define fastcc i32* @_ada_smkr([2147483647 x i32]* %u, i32 %t) nounwind {
- %x = getelementptr [2147483647 x i32]* %u, i32 %t, i32 0
+ %x = getelementptr [2147483647 x i32], [2147483647 x i32]* %u, i32 %t, i32 0
ret i32* %x
}
while.cond: ; preds = %while.cond, %entry
%d.addr.0 = phi i32 [ %d, %entry ], [ %inc, %while.cond ]
- %arrayidx = getelementptr inbounds [8 x i32]* %a, i32 0, i32 %d.addr.0
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %a, i32 0, i32 %d.addr.0
; CHECK: leaq -40(%rsp,%r{{[^,]*}},4), %rax
; X32: leal -40(%rsp,%r{{[^,]*}},4), %eax
while.cond: ; preds = %while.cond, %entry
%d.addr.0 = phi i32 [ %d, %entry ], [ %inc, %while.cond ]
- %arrayidx = getelementptr inbounds [8 x i32]* %a, i32 0, i32 %d.addr.0
+ %arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %a, i32 0, i32 %d.addr.0
; CHECK: leaq (%rsp,%r{{[^,]*}},4), %rax
; X32: leal (%rsp,%r{{[^,]*}},4), %eax
unreachable
bb92: ; preds = %bb71
- %1 = getelementptr inbounds i8* undef, i32 %.sum745
+ %1 = getelementptr inbounds i8, i8* undef, i32 %.sum745
unreachable
bb348: ; preds = %bb27
br i1 %cmp, label %while.cond.preheader, label %bb.nph53
while.cond.preheader: ; preds = %entry
- %arrayidx = getelementptr inbounds i8** %argv, i64 1 ; <i8**> [#uses=1]
+ %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1 ; <i8**> [#uses=1]
%tmp2 = load i8** %arrayidx ; <i8*> [#uses=1]
%call = tail call i32 @atoi(i8* %tmp2) nounwind ; <i32> [#uses=2]
%tobool51 = icmp eq i32 %call, 0 ; <i1> [#uses=1]
for.body: ; preds = %for.body, %bb.nph
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ] ; <i64> [#uses=2]
%tmp = add i64 %indvar, 2 ; <i64> [#uses=1]
- %arrayidx10 = getelementptr [8193 x i8]* @main.flags, i64 0, i64 %tmp ; <i8*> [#uses=1]
+ %arrayidx10 = getelementptr [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %tmp ; <i8*> [#uses=1]
store i8 1, i8* %arrayidx10
%indvar.next = add i64 %indvar, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %indvar.next, 8191 ; <i1> [#uses=1]
%tmp71 = add i64 %tmp70, 6 ; <i64> [#uses=1]
%tmp73 = shl i64 %indvar57, 1 ; <i64> [#uses=1]
%add = add i64 %tmp73, 4 ; <i64> [#uses=2]
- %arrayidx17 = getelementptr [8193 x i8]* @main.flags, i64 0, i64 %tmp68 ; <i8*> [#uses=1]
+ %arrayidx17 = getelementptr [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %tmp68 ; <i8*> [#uses=1]
%tmp18 = load i8* %arrayidx17 ; <i8> [#uses=1]
%tobool19 = icmp eq i8 %tmp18, 0 ; <i1> [#uses=1]
br i1 %tobool19, label %for.inc35, label %if.then
%indvar55 = phi i64 [ %indvar.next56, %for.body25 ], [ 0, %if.then ] ; <i64> [#uses=2]
%tmp60 = mul i64 %tmp68, %indvar55 ; <i64> [#uses=2]
%tmp75 = add i64 %add, %tmp60 ; <i64> [#uses=1]
- %arrayidx27 = getelementptr [8193 x i8]* @main.flags, i64 0, i64 %tmp75 ; <i8*> [#uses=1]
+ %arrayidx27 = getelementptr [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %tmp75 ; <i8*> [#uses=1]
store i8 0, i8* %arrayidx27
%add31 = add i64 %tmp71, %tmp60 ; <i64> [#uses=1]
%cmp24 = icmp slt i64 %add31, 8193 ; <i1> [#uses=1]
bb8: ; preds = %.loopexit
%tmp9 = sext i32 %.04 to i64 ; <i64> [#uses=1]
- %tmp10 = getelementptr inbounds %0* %arg, i64 0, i32 11, i64 %tmp9 ; <i8*> [#uses=1]
+ %tmp10 = getelementptr inbounds %0, %0* %arg, i64 0, i32 11, i64 %tmp9 ; <i8*> [#uses=1]
store i8 0, i8* %tmp10, align 1
ret void
; REGULAR-NEXT: vmovq [[RES_Vec]], ([[BASE]])
define void @t1(%class.Complex* nocapture %out, i64 %out_start) {
entry:
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
+ %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%tmp = bitcast %class.Complex* %arrayidx to i64*
%tmp1 = load i64* %tmp, align 8
%t0.sroa.0.0.extract.trunc = trunc i64 %tmp1 to i32
%t0.sroa.2.0.extract.trunc = trunc i64 %t0.sroa.2.0.extract.shift to i32
%tmp3 = bitcast i32 %t0.sroa.2.0.extract.trunc to float
%add = add i64 %out_start, 8
- %arrayidx2 = getelementptr inbounds %class.Complex* %out, i64 %add
- %i.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 0
+ %arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
+ %i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
%tmp4 = load float* %i.i, align 4
%add.i = fadd float %tmp4, %tmp2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
- %r.i = getelementptr inbounds %class.Complex* %arrayidx2, i64 0, i32 1
+ %r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
%tmp5 = load float* %r.i, align 4
%add5.i = fadd float %tmp5, %tmp3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
; REGULAR-LABEL: t2:
; REGULAR: shrq $48
define i32 @t2(%class.Complex* nocapture %out, i64 %out_start) {
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
+ %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%bitcast = bitcast %class.Complex* %arrayidx to i64*
%chunk64 = load i64* %bitcast, align 8
%slice32_low = trunc i64 %chunk64 to i32
; REGULAR: shrq $48
; REGULAR: shrq $32
define i32 @t3(%class.Complex* nocapture %out, i64 %out_start) {
- %arrayidx = getelementptr inbounds %class.Complex* %out, i64 %out_start
+ %arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%bitcast = bitcast %class.Complex* %arrayidx to i64*
%chunk64 = load i64* %bitcast, align 8
%slice32_low = trunc i64 %chunk64 to i32
cond_true: ; preds = %cond_true, %entry
%indvar = phi i32 [ %x, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
%i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %tmp = getelementptr [0 x i32]* @Arr, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
+ %tmp = getelementptr [0 x i32], [0 x i32]* @Arr, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
store i32 %i.0.0, i32* %tmp
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
%i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
%tmp2 = add i32 %i.0.0, 1 ; <i32> [#uses=1]
- %tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
+ %tmp = getelementptr [16 x [16 x i32]], [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
store i32 4, i32* %tmp
%tmp5.upgrd.1 = add i32 %i.0.0, 2 ; <i32> [#uses=1]
- %tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [16 x [16 x i32]], [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
store i32 5, i32* %tmp7
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
%i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
%tmp2 = add i32 %i.0.0, 1 ; <i32> [#uses=1]
- %tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
+ %tmp = getelementptr [16 x [16 x i32]], [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
store i32 4, i32* %tmp
%tmp5.upgrd.1 = add i32 %i.0.0, 2 ; <i32> [#uses=1]
- %tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [16 x [16 x i32]], [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
store i32 5, i32* %tmp7
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %cond_true ] ; <i32> [#uses=2]
%i.0.0 = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
%tmp2 = add i32 %i.0.0, 1 ; <i32> [#uses=1]
- %tmp = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
+ %tmp = getelementptr [16 x [16 x i32]], [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp2 ; <i32*> [#uses=1]
store i32 4, i32* %tmp
%tmp5.upgrd.1 = add i32 %i.0.0, 2 ; <i32> [#uses=1]
- %tmp7 = getelementptr [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [16 x [16 x i32]], [16 x [16 x i32]]* @A, i32 0, i32 %row, i32 %tmp5.upgrd.1 ; <i32*> [#uses=1]
store i32 5, i32* %tmp7
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %N ; <i1> [#uses=1]
%tmp.16 = add i32 %tmp.15, %tmp. ; <i32> [#uses=2]
%k_addr.0.0 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp.16 to i64 ; <i64> [#uses=1]
- %tmp = getelementptr [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
+ %tmp = getelementptr [8193 x i8], [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
store i8 0, i8* %tmp
%k_addr.0 = add i32 %k_addr.0.0, %i ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp sgt i32 %k_addr.0, 8192 ; <i1> [#uses=1]
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%t.063.0 = phi i32 [ 0, %entry ], [ %tmp47, %bb ] ; <i32> [#uses=1]
%j.065.0 = shl i32 %indvar, 2 ; <i32> [#uses=4]
- %tmp3 = getelementptr [0 x i32]* @state, i32 0, i32 %j.065.0 ; <i32*> [#uses=2]
+ %tmp3 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %j.065.0 ; <i32*> [#uses=2]
%tmp4 = load i32* %tmp3, align 4 ; <i32> [#uses=1]
- %tmp6 = getelementptr [0 x i32]* @S, i32 0, i32 %t.063.0 ; <i32*> [#uses=1]
+ %tmp6 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %t.063.0 ; <i32*> [#uses=1]
%tmp7 = load i32* %tmp6, align 4 ; <i32> [#uses=1]
%tmp8 = xor i32 %tmp7, %tmp4 ; <i32> [#uses=2]
store i32 %tmp8, i32* %tmp3, align 4
%tmp1378 = or i32 %j.065.0, 1 ; <i32> [#uses=1]
- %tmp16 = getelementptr [0 x i32]* @state, i32 0, i32 %tmp1378 ; <i32*> [#uses=2]
+ %tmp16 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp1378 ; <i32*> [#uses=2]
%tmp17 = load i32* %tmp16, align 4 ; <i32> [#uses=1]
- %tmp19 = getelementptr [0 x i32]* @S, i32 0, i32 %tmp8 ; <i32*> [#uses=1]
+ %tmp19 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp8 ; <i32*> [#uses=1]
%tmp20 = load i32* %tmp19, align 4 ; <i32> [#uses=1]
%tmp21 = xor i32 %tmp20, %tmp17 ; <i32> [#uses=2]
store i32 %tmp21, i32* %tmp16, align 4
%tmp2680 = or i32 %j.065.0, 2 ; <i32> [#uses=1]
- %tmp29 = getelementptr [0 x i32]* @state, i32 0, i32 %tmp2680 ; <i32*> [#uses=2]
+ %tmp29 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp2680 ; <i32*> [#uses=2]
%tmp30 = load i32* %tmp29, align 4 ; <i32> [#uses=1]
- %tmp32 = getelementptr [0 x i32]* @S, i32 0, i32 %tmp21 ; <i32*> [#uses=1]
+ %tmp32 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp21 ; <i32*> [#uses=1]
%tmp33 = load i32* %tmp32, align 4 ; <i32> [#uses=1]
%tmp34 = xor i32 %tmp33, %tmp30 ; <i32> [#uses=2]
store i32 %tmp34, i32* %tmp29, align 4
%tmp3982 = or i32 %j.065.0, 3 ; <i32> [#uses=1]
- %tmp42 = getelementptr [0 x i32]* @state, i32 0, i32 %tmp3982 ; <i32*> [#uses=2]
+ %tmp42 = getelementptr [0 x i32], [0 x i32]* @state, i32 0, i32 %tmp3982 ; <i32*> [#uses=2]
%tmp43 = load i32* %tmp42, align 4 ; <i32> [#uses=1]
- %tmp45 = getelementptr [0 x i32]* @S, i32 0, i32 %tmp34 ; <i32*> [#uses=1]
+ %tmp45 = getelementptr [0 x i32], [0 x i32]* @S, i32 0, i32 %tmp34 ; <i32*> [#uses=1]
%tmp46 = load i32* %tmp45, align 4 ; <i32> [#uses=1]
%tmp47 = xor i32 %tmp46, %tmp43 ; <i32> [#uses=3]
store i32 %tmp47, i32* %tmp42, align 4
bb29.i38: ; preds = %bb33.i47, %bb28.i37
%indvar32.i = phi i32 [ %indvar.next33.i, %bb33.i47 ], [ 0, %bb28.i37 ] ; <i32> [#uses=2]
%sfb.314.i = add i32 %indvar32.i, 0 ; <i32> [#uses=3]
- %1 = getelementptr [4 x [21 x double]]* null, i32 0, i32 %0, i32 %sfb.314.i ; <double*> [#uses=1]
+ %1 = getelementptr [4 x [21 x double]], [4 x [21 x double]]* null, i32 0, i32 %0, i32 %sfb.314.i ; <double*> [#uses=1]
%2 = load double* %1, align 8 ; <double> [#uses=0]
br i1 false, label %bb30.i41, label %bb33.i47
bb30.i41: ; preds = %bb29.i38
- %3 = getelementptr %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.314.i, i32 %i.1.reg2mem.0.i ; <i32*> [#uses=1]
+ %3 = getelementptr %struct.III_scalefac_t, %struct.III_scalefac_t* null, i32 0, i32 1, i32 %sfb.314.i, i32 %i.1.reg2mem.0.i ; <i32*> [#uses=1]
store i32 0, i32* %3, align 4
br label %bb33.i47
%p1 = bitcast i8** %p to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %p1)
%0 = call fastcc %struct.tree_node* @make_node(i32 %code) nounwind ; <%struct.tree_node*> [#uses=2]
- %1 = getelementptr [256 x i32]* @tree_code_length, i32 0, i32 %code ; <i32*> [#uses=1]
+ %1 = getelementptr [256 x i32], [256 x i32]* @tree_code_length, i32 0, i32 %code ; <i32*> [#uses=1]
%2 = load i32* %1, align 4 ; <i32> [#uses=2]
%3 = load i32* @lineno, align 4 ; <i32> [#uses=1]
%4 = bitcast %struct.tree_node* %0 to %struct.tree_exp* ; <%struct.tree_exp*> [#uses=2]
- %5 = getelementptr %struct.tree_exp* %4, i32 0, i32 1 ; <i32*> [#uses=1]
+ %5 = getelementptr %struct.tree_exp, %struct.tree_exp* %4, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %3, i32* %5, align 4
%6 = icmp sgt i32 %2, 0 ; <i1> [#uses=1]
br i1 %6, label %bb, label %bb3
bb: ; preds = %bb, %entry
%i.01 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ] ; <i32> [#uses=2]
%7 = load i8** %p, align 4 ; <i8*> [#uses=2]
- %8 = getelementptr i8* %7, i32 4 ; <i8*> [#uses=1]
+ %8 = getelementptr i8, i8* %7, i32 4 ; <i8*> [#uses=1]
store i8* %8, i8** %p, align 4
%9 = bitcast i8* %7 to %struct.tree_node** ; <%struct.tree_node**> [#uses=1]
%10 = load %struct.tree_node** %9, align 4 ; <%struct.tree_node*> [#uses=1]
- %11 = getelementptr %struct.tree_exp* %4, i32 0, i32 2, i32 %i.01 ; <%struct.tree_node**> [#uses=1]
+ %11 = getelementptr %struct.tree_exp, %struct.tree_exp* %4, i32 0, i32 2, i32 %i.01 ; <%struct.tree_node**> [#uses=1]
store %struct.tree_node* %10, %struct.tree_node** %11, align 4
%indvar.next = add i32 %i.01, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %2 ; <i1> [#uses=1]
%add106 = trunc i64 %tmp43 to i32 ; <i32> [#uses=1]
%add112 = trunc i64 %tmp45 to i32 ; <i32> [#uses=1]
%add118 = trunc i64 %tmp47 to i32 ; <i32> [#uses=1]
- %tmp10 = getelementptr %struct.Bu* %bu, i64 %indvar, i32 2 ; <i32*> [#uses=1]
+ %tmp10 = getelementptr %struct.Bu, %struct.Bu* %bu, i64 %indvar, i32 2 ; <i32*> [#uses=1]
%tmp11 = load i32* %tmp10 ; <i32> [#uses=0]
tail call void undef(i32 %add22)
tail call void undef(i32 %add28)
bb2: ; preds = %bb2, %bb2.preheader
%indvar = phi i32 [ 0, %bb1 ], [ %indvar.next, %bb2 ] ; <i32> [#uses=2]
%tmp19 = add i32 %tmp18, %indvar ; <i32> [#uses=1]
- %scevgep = getelementptr %struct.anon* @mp2grad_, i32 0, i32 0, i32 %tmp19 ; <i32*> [#uses=1]
+ %scevgep = getelementptr %struct.anon, %struct.anon* @mp2grad_, i32 0, i32 0, i32 %tmp19 ; <i32*> [#uses=1]
store i32 0, i32* %scevgep
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
%c = icmp ne i32 %indvar.next, %m
bb11: ; preds = %bb10, %bb11
%tmp12 = phi i64 [ %tmp14, %bb11 ], [ 2, %bb10 ] ; <i64> [#uses=2]
- %tmp13 = getelementptr inbounds [8192 x i8]* @flags, i64 0, i64 %tmp12 ; <i8*> [#uses=1]
+ %tmp13 = getelementptr inbounds [8192 x i8], [8192 x i8]* @flags, i64 0, i64 %tmp12 ; <i8*> [#uses=1]
store i8 0, i8* %tmp13, align 1
%tmp14 = add nsw i64 %tmp12, %tmp8 ; <i64> [#uses=2]
%tmp15 = icmp slt i64 %tmp14, 8192 ; <i1> [#uses=1]
define void @t(i8* nocapture %in, i8* nocapture %out, i32* nocapture %rk, i32 %r) nounwind {
entry:
%0 = load i32* %rk, align 4 ; <i32> [#uses=1]
- %1 = getelementptr i32* %rk, i64 1 ; <i32*> [#uses=1]
+ %1 = getelementptr i32, i32* %rk, i64 1 ; <i32*> [#uses=1]
%2 = load i32* %1, align 4 ; <i32> [#uses=1]
%tmp15 = add i32 %r, -1 ; <i32> [#uses=1]
%tmp.16 = zext i32 %tmp15 to i64 ; <i64> [#uses=2]
%rk26 = bitcast i32* %rk to i8* ; <i8*> [#uses=6]
%3 = lshr i32 %s0.0, 24 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %5 = getelementptr [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
+ %5 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %4 ; <i32*> [#uses=1]
%6 = load i32* %5, align 4 ; <i32> [#uses=1]
%7 = lshr i32 %s1.0, 16 ; <i32> [#uses=1]
%8 = and i32 %7, 255 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
+ %10 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %9 ; <i32*> [#uses=1]
%11 = load i32* %10, align 4 ; <i32> [#uses=1]
%ctg2.sum2728 = or i64 %tmp18, 8 ; <i64> [#uses=1]
- %12 = getelementptr i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
+ %12 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2728 ; <i8*> [#uses=1]
%13 = bitcast i8* %12 to i32* ; <i32*> [#uses=1]
%14 = load i32* %13, align 4 ; <i32> [#uses=1]
%15 = xor i32 %11, %6 ; <i32> [#uses=1]
%16 = xor i32 %15, %14 ; <i32> [#uses=3]
%17 = lshr i32 %s1.0, 24 ; <i32> [#uses=1]
%18 = zext i32 %17 to i64 ; <i64> [#uses=1]
- %19 = getelementptr [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
+ %19 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %18 ; <i32*> [#uses=1]
%20 = load i32* %19, align 4 ; <i32> [#uses=1]
%21 = and i32 %s0.0, 255 ; <i32> [#uses=1]
%22 = zext i32 %21 to i64 ; <i64> [#uses=1]
- %23 = getelementptr [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
+ %23 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %22 ; <i32*> [#uses=1]
%24 = load i32* %23, align 4 ; <i32> [#uses=1]
%ctg2.sum2930 = or i64 %tmp18, 12 ; <i64> [#uses=1]
- %25 = getelementptr i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
+ %25 = getelementptr i8, i8* %rk26, i64 %ctg2.sum2930 ; <i8*> [#uses=1]
%26 = bitcast i8* %25 to i32* ; <i32*> [#uses=1]
%27 = load i32* %26, align 4 ; <i32> [#uses=1]
%28 = xor i32 %24, %20 ; <i32> [#uses=1]
%29 = xor i32 %28, %27 ; <i32> [#uses=4]
%30 = lshr i32 %16, 24 ; <i32> [#uses=1]
%31 = zext i32 %30 to i64 ; <i64> [#uses=1]
- %32 = getelementptr [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
+ %32 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %31 ; <i32*> [#uses=1]
%33 = load i32* %32, align 4 ; <i32> [#uses=2]
%exitcond = icmp eq i64 %indvar, %tmp.16 ; <i1> [#uses=1]
br i1 %exitcond, label %bb2, label %bb1
bb1: ; preds = %bb
%ctg2.sum31 = add i64 %tmp18, 16 ; <i64> [#uses=1]
- %34 = getelementptr i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
+ %34 = getelementptr i8, i8* %rk26, i64 %ctg2.sum31 ; <i8*> [#uses=1]
%35 = bitcast i8* %34 to i32* ; <i32*> [#uses=1]
%36 = lshr i32 %29, 16 ; <i32> [#uses=1]
%37 = and i32 %36, 255 ; <i32> [#uses=1]
%38 = zext i32 %37 to i64 ; <i64> [#uses=1]
- %39 = getelementptr [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
+ %39 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %38 ; <i32*> [#uses=1]
%40 = load i32* %39, align 4 ; <i32> [#uses=1]
%41 = load i32* %35, align 4 ; <i32> [#uses=1]
%42 = xor i32 %40, %33 ; <i32> [#uses=1]
%43 = xor i32 %42, %41 ; <i32> [#uses=1]
%44 = lshr i32 %29, 24 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
+ %46 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %45 ; <i32*> [#uses=1]
%47 = load i32* %46, align 4 ; <i32> [#uses=1]
%48 = and i32 %16, 255 ; <i32> [#uses=1]
%49 = zext i32 %48 to i64 ; <i64> [#uses=1]
- %50 = getelementptr [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
+ %50 = getelementptr [256 x i32], [256 x i32]* @Te3, i64 0, i64 %49 ; <i32*> [#uses=1]
%51 = load i32* %50, align 4 ; <i32> [#uses=1]
%ctg2.sum32 = add i64 %tmp18, 20 ; <i64> [#uses=1]
- %52 = getelementptr i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
+ %52 = getelementptr i8, i8* %rk26, i64 %ctg2.sum32 ; <i8*> [#uses=1]
%53 = bitcast i8* %52 to i32* ; <i32*> [#uses=1]
%54 = load i32* %53, align 4 ; <i32> [#uses=1]
%55 = xor i32 %51, %47 ; <i32> [#uses=1]
bb2: ; preds = %bb
%tmp10 = shl i64 %tmp.16, 4 ; <i64> [#uses=2]
%ctg2.sum = add i64 %tmp10, 16 ; <i64> [#uses=1]
- %tmp1213 = getelementptr i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
+ %tmp1213 = getelementptr i8, i8* %rk26, i64 %ctg2.sum ; <i8*> [#uses=1]
%57 = bitcast i8* %tmp1213 to i32* ; <i32*> [#uses=1]
%58 = and i32 %33, -16777216 ; <i32> [#uses=1]
%59 = lshr i32 %29, 16 ; <i32> [#uses=1]
%60 = and i32 %59, 255 ; <i32> [#uses=1]
%61 = zext i32 %60 to i64 ; <i64> [#uses=1]
- %62 = getelementptr [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
+ %62 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %61 ; <i32*> [#uses=1]
%63 = load i32* %62, align 4 ; <i32> [#uses=1]
%64 = and i32 %63, 16711680 ; <i32> [#uses=1]
%65 = or i32 %64, %58 ; <i32> [#uses=1]
%67 = xor i32 %65, %66 ; <i32> [#uses=2]
%68 = lshr i32 %29, 8 ; <i32> [#uses=1]
%69 = zext i32 %68 to i64 ; <i64> [#uses=1]
- %70 = getelementptr [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
+ %70 = getelementptr [256 x i32], [256 x i32]* @Te0, i64 0, i64 %69 ; <i32*> [#uses=1]
%71 = load i32* %70, align 4 ; <i32> [#uses=1]
%72 = and i32 %71, -16777216 ; <i32> [#uses=1]
%73 = and i32 %16, 255 ; <i32> [#uses=1]
%74 = zext i32 %73 to i64 ; <i64> [#uses=1]
- %75 = getelementptr [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
+ %75 = getelementptr [256 x i32], [256 x i32]* @Te1, i64 0, i64 %74 ; <i32*> [#uses=1]
%76 = load i32* %75, align 4 ; <i32> [#uses=1]
%77 = and i32 %76, 16711680 ; <i32> [#uses=1]
%78 = or i32 %77, %72 ; <i32> [#uses=1]
%ctg2.sum25 = add i64 %tmp10, 20 ; <i64> [#uses=1]
- %79 = getelementptr i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
+ %79 = getelementptr i8, i8* %rk26, i64 %ctg2.sum25 ; <i8*> [#uses=1]
%80 = bitcast i8* %79 to i32* ; <i32*> [#uses=1]
%81 = load i32* %80, align 4 ; <i32> [#uses=1]
%82 = xor i32 %78, %81 ; <i32> [#uses=2]
store i8 %84, i8* %out, align 1
%85 = lshr i32 %67, 16 ; <i32> [#uses=1]
%86 = trunc i32 %85 to i8 ; <i8> [#uses=1]
- %87 = getelementptr i8* %out, i64 1 ; <i8*> [#uses=1]
+ %87 = getelementptr i8, i8* %out, i64 1 ; <i8*> [#uses=1]
store i8 %86, i8* %87, align 1
- %88 = getelementptr i8* %out, i64 4 ; <i8*> [#uses=1]
+ %88 = getelementptr i8, i8* %out, i64 4 ; <i8*> [#uses=1]
%89 = lshr i32 %82, 24 ; <i32> [#uses=1]
%90 = trunc i32 %89 to i8 ; <i8> [#uses=1]
store i8 %90, i8* %88, align 1
%91 = lshr i32 %82, 16 ; <i32> [#uses=1]
%92 = trunc i32 %91 to i8 ; <i8> [#uses=1]
- %93 = getelementptr i8* %out, i64 5 ; <i8*> [#uses=1]
+ %93 = getelementptr i8, i8* %out, i64 5 ; <i8*> [#uses=1]
store i8 %92, i8* %93, align 1
ret void
}
%indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%bi.06 = phi i32 [ 0, %for.body.lr.ph ], [ %i.addr.0.bi.0, %for.body ]
%b.05 = phi i32 [ 0, %for.body.lr.ph ], [ %.b.0, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx, align 4
%cmp1 = icmp ugt i32 %1, %b.05
%.b.0 = select i1 %cmp1, i32 %1, i32 %b.05
%tmp = alloca %0, align 8 ; <%0*> [#uses=11]
%tmp2 = bitcast %0* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i64(i8* %tmp2, i8 0, i64 16, i32 8, i1 false) nounwind
- %tmp3 = getelementptr inbounds %0* %tmp, i64 0, i32 0 ; <%0**> [#uses=3]
+ %tmp3 = getelementptr inbounds %0, %0* %tmp, i64 0, i32 0 ; <%0**> [#uses=3]
store %0* %tmp, %0** %tmp3
- %tmp4 = getelementptr inbounds %0* %tmp, i64 0, i32 1 ; <%0**> [#uses=1]
+ %tmp4 = getelementptr inbounds %0, %0* %tmp, i64 0, i32 1 ; <%0**> [#uses=1]
store %0* %tmp, %0** %tmp4
%tmp5 = call noalias i8* @_Znwm(i64 24) nounwind ; <i8*> [#uses=2]
- %tmp6 = getelementptr inbounds i8* %tmp5, i64 16 ; <i8*> [#uses=2]
+ %tmp6 = getelementptr inbounds i8, i8* %tmp5, i64 16 ; <i8*> [#uses=2]
%tmp7 = icmp eq i8* %tmp6, null ; <i1> [#uses=1]
br i1 %tmp7, label %bb10, label %bb8
bb16: ; preds = %bb16, %bb10
%tmp17 = phi i64 [ %tmp22, %bb16 ], [ 0, %bb10 ] ; <i64> [#uses=1]
%tmp18 = phi %0* [ %tmp20, %bb16 ], [ %tmp12, %bb10 ] ; <%0*> [#uses=1]
- %tmp19 = getelementptr inbounds %0* %tmp18, i64 0, i32 0 ; <%0**> [#uses=1]
+ %tmp19 = getelementptr inbounds %0, %0* %tmp18, i64 0, i32 0 ; <%0**> [#uses=1]
%tmp20 = load %0** %tmp19 ; <%0*> [#uses=2]
%tmp21 = icmp eq %0* %tmp20, %tmp ; <i1> [#uses=1]
%tmp22 = add i64 %tmp17, 1 ; <i64> [#uses=2]
bb25: ; preds = %bb25, %bb23
%tmp26 = phi i64 [ %tmp31, %bb25 ], [ 0, %bb23 ] ; <i64> [#uses=1]
%tmp27 = phi %0* [ %tmp29, %bb25 ], [ %tmp12, %bb23 ] ; <%0*> [#uses=1]
- %tmp28 = getelementptr inbounds %0* %tmp27, i64 0, i32 0 ; <%0**> [#uses=1]
+ %tmp28 = getelementptr inbounds %0, %0* %tmp27, i64 0, i32 0 ; <%0**> [#uses=1]
%tmp29 = load %0** %tmp28 ; <%0*> [#uses=2]
%tmp30 = icmp eq %0* %tmp29, %tmp ; <i1> [#uses=1]
%tmp31 = add i64 %tmp26, 1 ; <i64> [#uses=2]
bb38: ; preds = %bb38, %bb35
%tmp39 = phi %0* [ %tmp41, %bb38 ], [ %tmp36, %bb35 ] ; <%0*> [#uses=2]
- %tmp40 = getelementptr inbounds %0* %tmp39, i64 0, i32 0 ; <%0**> [#uses=1]
+ %tmp40 = getelementptr inbounds %0, %0* %tmp39, i64 0, i32 0 ; <%0**> [#uses=1]
%tmp41 = load %0** %tmp40 ; <%0*> [#uses=2]
%tmp42 = bitcast %0* %tmp39 to i8* ; <i8*> [#uses=1]
call void @_ZdlPv(i8* %tmp42) nounwind
%z5 = add nsw i32 %z4, %z2
%z6 = trunc i32 %z5 to i16
call fastcc void @dw210x_op_rw(i16 zeroext %z6)
- %z7 = getelementptr i8* null, i64 %z
+ %z7 = getelementptr i8, i8* null, i64 %z
store i8 undef, i8* %z7, align 1
%z8 = add nsw i32 %z2, 1
br label %bb
bb38: ; preds = %bb200, %bb
%tmp39 = phi i64 [ %tmp201, %bb200 ], [ 0, %bb ]
%tmp40 = sub i64 0, %tmp39
- %tmp47 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 0
+ %tmp47 = getelementptr [5 x %0], [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 0
%tmp34 = load i32* %tmp47, align 16
%tmp203 = icmp slt i32 %tmp34, 12
br i1 %tmp203, label %bb215, label %bb200
br label %bb38
bb215: ; preds = %bb38
- %tmp50 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 2
- %tmp49 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 1
- %tmp48 = getelementptr [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 0
+ %tmp50 = getelementptr [5 x %0], [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 2
+ %tmp49 = getelementptr [5 x %0], [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 1
+ %tmp48 = getelementptr [5 x %0], [5 x %0]* @pgm, i64 0, i64 %tmp40, i32 1, i64 0
%tmp216 = add nsw i32 %tmp34, 1
store i32 %tmp216, i32* %tmp47, align 16
%tmp217 = sext i32 %tmp216 to i64
- %tmp218 = getelementptr inbounds [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 0
+ %tmp218 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 0
%tmp219 = load i32* %tmp218, align 8
store i32 %tmp219, i32* %tmp48, align 4
- %tmp220 = getelementptr inbounds [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 1
+ %tmp220 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 1
%tmp221 = load i32* %tmp220, align 4
store i32 %tmp221, i32* %tmp49, align 4
- %tmp222 = getelementptr inbounds [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 2
+ %tmp222 = getelementptr inbounds [13 x %1], [13 x %1]* @isa, i64 0, i64 %tmp217, i32 3, i64 2
%tmp223 = load i32* %tmp222, align 8
store i32 %tmp223, i32* %tmp50, align 4
ret void
bb:
%indvar = phi i64 [ %indvar.next, %bb ], [ 0, %entry ]
%tmp = shl i64 %indvar, 2
- %scevgep = getelementptr float* %y, i64 %tmp
+ %scevgep = getelementptr float, float* %y, i64 %tmp
%scevgep9 = bitcast float* %scevgep to <4 x float>*
- %scevgep10 = getelementptr float* %x, i64 %tmp
+ %scevgep10 = getelementptr float, float* %x, i64 %tmp
%scevgep1011 = bitcast float* %scevgep10 to <4 x float>*
%2 = load <4 x float>* %scevgep1011, align 16
%3 = bitcast <4 x float> %2 to <4 x i32>
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
+ %Ai = getelementptr inbounds double, double* %A, i64 %i
+ %Bi = getelementptr inbounds double, double* %B, i64 %i
+ %Ci = getelementptr inbounds double, double* %C, i64 %i
%t1 = load double* %Bi
%t2 = load double* %Ci
%m = fmul double %t1, %t2
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
+ %Ai = getelementptr inbounds double, double* %A, i64 %i
+ %Bi = getelementptr inbounds double, double* %B, i64 %i
+ %Ci = getelementptr inbounds double, double* %C, i64 %i
%t1 = load double* %Bi
%t2 = load double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%j = add i64 %i, 256
- %Aj = getelementptr inbounds double* %A, i64 %j
- %Bj = getelementptr inbounds double* %B, i64 %j
- %Cj = getelementptr inbounds double* %C, i64 %j
+ %Aj = getelementptr inbounds double, double* %A, i64 %j
+ %Bj = getelementptr inbounds double, double* %B, i64 %j
+ %Cj = getelementptr inbounds double, double* %C, i64 %j
%t3 = load double* %Bj
%t4 = load double* %Cj
%o = fdiv double %t3, %t4
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
+ %Ai = getelementptr inbounds double, double* %A, i64 %i
+ %Bi = getelementptr inbounds double, double* %B, i64 %i
+ %Ci = getelementptr inbounds double, double* %C, i64 %i
%t1 = load double* %Bi
%t2 = load double* %Ci
%m = fmul double %t1, %t2
store double %m, double* %Ai
%j = sub i64 %i, 256
- %Aj = getelementptr inbounds double* %A, i64 %j
- %Bj = getelementptr inbounds double* %B, i64 %j
- %Cj = getelementptr inbounds double* %C, i64 %j
+ %Aj = getelementptr inbounds double, double* %A, i64 %j
+ %Bj = getelementptr inbounds double, double* %B, i64 %j
+ %Cj = getelementptr inbounds double, double* %C, i64 %j
%t3 = load double* %Bj
%t4 = load double* %Cj
%o = fdiv double %t3, %t4
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
%k = add i64 %i, 256
- %Ak = getelementptr inbounds double* %A, i64 %k
- %Bk = getelementptr inbounds double* %B, i64 %k
- %Ck = getelementptr inbounds double* %C, i64 %k
+ %Ak = getelementptr inbounds double, double* %A, i64 %k
+ %Bk = getelementptr inbounds double, double* %B, i64 %k
+ %Ck = getelementptr inbounds double, double* %C, i64 %k
%t1 = load double* %Bk
%t2 = load double* %Ck
%m = fmul double %t1, %t2
store double %m, double* %Ak
%j = sub i64 %i, 256
- %Aj = getelementptr inbounds double* %A, i64 %j
- %Bj = getelementptr inbounds double* %B, i64 %j
- %Cj = getelementptr inbounds double* %C, i64 %j
+ %Aj = getelementptr inbounds double, double* %A, i64 %j
+ %Bj = getelementptr inbounds double, double* %B, i64 %j
+ %Cj = getelementptr inbounds double, double* %C, i64 %j
%t3 = load double* %Bj
%t4 = load double* %Cj
%o = fdiv double %t3, %t4
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
+ %Ai = getelementptr inbounds double, double* %A, i64 %i
+ %Bi = getelementptr inbounds double, double* %B, i64 %i
+ %Ci = getelementptr inbounds double, double* %C, i64 %i
%t1 = load double* %Bi
%t2 = load double* %Ci
%m = fmul double %t1, %t2
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
+ %Ai = getelementptr inbounds double, double* %A, i64 %i
+ %Bi = getelementptr inbounds double, double* %B, i64 %i
+ %Ci = getelementptr inbounds double, double* %C, i64 %i
%t1 = load double* %Bi
%t2 = load double* %Ci
%m = fmul double %t1, %t2
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i5 = add i64 %i, 5
- %Ai = getelementptr double* %A, i64 %i5
+ %Ai = getelementptr double, double* %A, i64 %i5
%t2 = load double* %Ai
- %Bi = getelementptr double* %B, i64 %i5
+ %Bi = getelementptr double, double* %B, i64 %i5
%t4 = load double* %Bi
%t5 = fadd double %t2, %t4
- %Ci = getelementptr double* %C, i64 %i5
+ %Ci = getelementptr double, double* %C, i64 %i5
store double %t5, double* %Ci
%i10 = add i64 %i, 10
- %Ai10 = getelementptr double* %A, i64 %i10
+ %Ai10 = getelementptr double, double* %A, i64 %i10
%t9 = load double* %Ai10
- %Bi10 = getelementptr double* %B, i64 %i10
+ %Bi10 = getelementptr double, double* %B, i64 %i10
%t11 = load double* %Bi10
%t12 = fsub double %t9, %t11
- %Ci10 = getelementptr double* %C, i64 %i10
+ %Ci10 = getelementptr double, double* %C, i64 %i10
store double %t12, double* %Ci10
%i.next = add i64 %i, 1
%exitcond = icmp eq i64 %i.next, 5000
loop:
%i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
%i5 = add i64 %i, 5
- %Ai = getelementptr double* %A, i64 %i5
+ %Ai = getelementptr double, double* %A, i64 %i5
%t2 = load double* %Ai
- %Bi = getelementptr double* %B, i64 %i5
+ %Bi = getelementptr double, double* %B, i64 %i5
%t4 = load double* %Bi
%t5 = fadd double %t2, %t4
- %Ci = getelementptr double* %C, i64 %i5
+ %Ci = getelementptr double, double* %C, i64 %i5
store double %t5, double* %Ci
%i10 = add i64 %i, 10
- %Ai10 = getelementptr double* %A, i64 %i10
+ %Ai10 = getelementptr double, double* %A, i64 %i10
%t9 = load double* %Ai10
- %Bi10 = getelementptr double* %B, i64 %i10
+ %Bi10 = getelementptr double, double* %B, i64 %i10
%t11 = load double* %Bi10
%t12 = fsub double %t9, %t11
- %Ci10 = getelementptr double* %C, i64 %i10
+ %Ci10 = getelementptr double, double* %C, i64 %i10
store double %t12, double* %Ci10
%i.next = add i64 %i, 1
%exitcond = icmp eq i64 %i.next, %n
loop:
%i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
call void @use(i64 %i)
- %Ai = getelementptr inbounds double* %A, i64 %i
- %Bi = getelementptr inbounds double* %B, i64 %i
- %Ci = getelementptr inbounds double* %C, i64 %i
+ %Ai = getelementptr inbounds double, double* %A, i64 %i
+ %Bi = getelementptr inbounds double, double* %B, i64 %i
+ %Ci = getelementptr inbounds double, double* %C, i64 %i
%t1 = load double* %Bi
%t2 = load double* %Ci
%m = fmul double %t1, %t2
bb: ; preds = %bb3, %bb.nph14
%indvar16 = phi i64 [ 0, %bb.nph14 ], [ %indvar.next17, %bb3 ] ; <i64> [#uses=3]
%s.113 = phi i32 [ 0, %bb.nph14 ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=2]
- %scevgep2526 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
+ %scevgep2526 = getelementptr [123123 x %struct.anon], [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
%1 = load i32* %scevgep2526, align 4 ; <i32> [#uses=2]
%2 = icmp sgt i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %bb.nph, label %bb3
bb1: ; preds = %bb.nph, %bb1
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp19, %bb1 ] ; <i64> [#uses=2]
%s.07 = phi i32 [ %s.113, %bb.nph ], [ %4, %bb1 ] ; <i32> [#uses=1]
- %c.08 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
+ %c.08 = getelementptr [123123 x %struct.anon], [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
%3 = load i32* %c.08, align 4 ; <i32> [#uses=1]
%4 = add nsw i32 %3, %s.07 ; <i32> [#uses=2]
%tmp19 = add i64 %indvar, 1 ; <i64> [#uses=2]
%t22 = phi i64 [ %t36, %bb32 ], [ 0, %bb19 ] ; <i64> [#uses=21]
%t23 = phi float [ %t35, %bb32 ], [ %t20, %bb19 ] ; <float> [#uses=6]
%t24 = sub i64 %arg6, %t22 ; <i64> [#uses=4]
- %t25 = getelementptr float* %arg4, i64 %t22 ; <float*> [#uses=4]
- %t26 = getelementptr float* %arg, i64 %t22 ; <float*> [#uses=3]
+ %t25 = getelementptr float, float* %arg4, i64 %t22 ; <float*> [#uses=4]
+ %t26 = getelementptr float, float* %arg, i64 %t22 ; <float*> [#uses=3]
%t27 = icmp sgt i64 %t24, 0 ; <i1> [#uses=1]
br i1 %t27, label %bb28, label %bb37
br i1 %t56, label %bb61, label %bb112
bb58: ; preds = %bb68
- %t59 = getelementptr float* %arg, i64 %t78 ; <float*> [#uses=1]
- %t60 = getelementptr float* %arg4, i64 %t78 ; <float*> [#uses=1]
+ %t59 = getelementptr float, float* %arg, i64 %t78 ; <float*> [#uses=1]
+ %t60 = getelementptr float, float* %arg4, i64 %t78 ; <float*> [#uses=1]
br label %bb112
bb61: ; preds = %bb57
%t73 = phi <4 x float> [ %t52, %bb61 ], [ %t109, %bb68 ] ; <<4 x float>> [#uses=2]
%t74 = shl i64 %t69, 4 ; <i64> [#uses=5]
%t75 = add i64 %t22, %t74 ; <i64> [#uses=2]
- %t76 = getelementptr float* %arg, i64 %t75 ; <float*> [#uses=1]
+ %t76 = getelementptr float, float* %arg, i64 %t75 ; <float*> [#uses=1]
%t77 = bitcast float* %t76 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t78 = add i64 %t62, %t74 ; <i64> [#uses=2]
%t79 = add i64 %t63, %t74 ; <i64> [#uses=2]
- %t80 = getelementptr float* %arg, i64 %t79 ; <float*> [#uses=1]
+ %t80 = getelementptr float, float* %arg, i64 %t79 ; <float*> [#uses=1]
%t81 = bitcast float* %t80 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t82 = add i64 %t64, %t74 ; <i64> [#uses=2]
- %t83 = getelementptr float* %arg, i64 %t82 ; <float*> [#uses=1]
+ %t83 = getelementptr float, float* %arg, i64 %t82 ; <float*> [#uses=1]
%t84 = bitcast float* %t83 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t85 = add i64 %t65, %t74 ; <i64> [#uses=2]
- %t86 = getelementptr float* %arg, i64 %t85 ; <float*> [#uses=1]
+ %t86 = getelementptr float, float* %arg, i64 %t85 ; <float*> [#uses=1]
%t87 = bitcast float* %t86 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %t88 = getelementptr float* %arg4, i64 %t75 ; <float*> [#uses=1]
+ %t88 = getelementptr float, float* %arg4, i64 %t75 ; <float*> [#uses=1]
%t89 = bitcast float* %t88 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %t90 = getelementptr float* %arg4, i64 %t79 ; <float*> [#uses=1]
+ %t90 = getelementptr float, float* %arg4, i64 %t79 ; <float*> [#uses=1]
%t91 = bitcast float* %t90 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %t92 = getelementptr float* %arg4, i64 %t82 ; <float*> [#uses=1]
+ %t92 = getelementptr float, float* %arg4, i64 %t82 ; <float*> [#uses=1]
%t93 = bitcast float* %t92 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %t94 = getelementptr float* %arg4, i64 %t85 ; <float*> [#uses=1]
+ %t94 = getelementptr float, float* %arg4, i64 %t85 ; <float*> [#uses=1]
%t95 = bitcast float* %t94 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t96 = mul i64 %t69, -16 ; <i64> [#uses=1]
%t97 = add i64 %t67, %t96 ; <i64> [#uses=2]
br i1 %t56, label %bb122, label %bb194
bb119: ; preds = %bb137
- %t120 = getelementptr float* %arg, i64 %t145 ; <float*> [#uses=1]
- %t121 = getelementptr float* %arg4, i64 %t145 ; <float*> [#uses=1]
+ %t120 = getelementptr float, float* %arg, i64 %t145 ; <float*> [#uses=1]
+ %t121 = getelementptr float, float* %arg4, i64 %t145 ; <float*> [#uses=1]
br label %bb194
bb122: ; preds = %bb118
%t123 = add i64 %t22, -1 ; <i64> [#uses=1]
- %t124 = getelementptr inbounds float* %arg, i64 %t123 ; <float*> [#uses=1]
+ %t124 = getelementptr inbounds float, float* %arg, i64 %t123 ; <float*> [#uses=1]
%t125 = bitcast float* %t124 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t126 = load <4 x float>* %t125 ; <<4 x float>> [#uses=1]
%t127 = add i64 %t22, 16 ; <i64> [#uses=1]
%t144 = shl i64 %t138, 4 ; <i64> [#uses=9]
%t145 = add i64 %t127, %t144 ; <i64> [#uses=2]
%t146 = add i64 %t128, %t144 ; <i64> [#uses=1]
- %t147 = getelementptr float* %arg, i64 %t146 ; <float*> [#uses=1]
+ %t147 = getelementptr float, float* %arg, i64 %t146 ; <float*> [#uses=1]
%t148 = bitcast float* %t147 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t149 = add i64 %t129, %t144 ; <i64> [#uses=1]
- %t150 = getelementptr float* %arg, i64 %t149 ; <float*> [#uses=1]
+ %t150 = getelementptr float, float* %arg, i64 %t149 ; <float*> [#uses=1]
%t151 = bitcast float* %t150 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t152 = add i64 %t130, %t144 ; <i64> [#uses=1]
- %t153 = getelementptr float* %arg, i64 %t152 ; <float*> [#uses=1]
+ %t153 = getelementptr float, float* %arg, i64 %t152 ; <float*> [#uses=1]
%t154 = bitcast float* %t153 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t155 = add i64 %t131, %t144 ; <i64> [#uses=1]
- %t156 = getelementptr float* %arg, i64 %t155 ; <float*> [#uses=1]
+ %t156 = getelementptr float, float* %arg, i64 %t155 ; <float*> [#uses=1]
%t157 = bitcast float* %t156 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t158 = add i64 %t22, %t144 ; <i64> [#uses=1]
- %t159 = getelementptr float* %arg4, i64 %t158 ; <float*> [#uses=1]
+ %t159 = getelementptr float, float* %arg4, i64 %t158 ; <float*> [#uses=1]
%t160 = bitcast float* %t159 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t161 = add i64 %t132, %t144 ; <i64> [#uses=1]
- %t162 = getelementptr float* %arg4, i64 %t161 ; <float*> [#uses=1]
+ %t162 = getelementptr float, float* %arg4, i64 %t161 ; <float*> [#uses=1]
%t163 = bitcast float* %t162 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t164 = add i64 %t133, %t144 ; <i64> [#uses=1]
- %t165 = getelementptr float* %arg4, i64 %t164 ; <float*> [#uses=1]
+ %t165 = getelementptr float, float* %arg4, i64 %t164 ; <float*> [#uses=1]
%t166 = bitcast float* %t165 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t167 = add i64 %t134, %t144 ; <i64> [#uses=1]
- %t168 = getelementptr float* %arg4, i64 %t167 ; <float*> [#uses=1]
+ %t168 = getelementptr float, float* %arg4, i64 %t167 ; <float*> [#uses=1]
%t169 = bitcast float* %t168 to <4 x float>* ; <<4 x float>*> [#uses=1]
%t170 = mul i64 %t138, -16 ; <i64> [#uses=1]
%t171 = add i64 %t136, %t170 ; <i64> [#uses=2]
bb201: ; preds = %bb201, %bb194
%t202 = phi i64 [ %t209, %bb201 ], [ 0, %bb194 ] ; <i64> [#uses=3]
%t203 = phi float [ %t208, %bb201 ], [ %t199, %bb194 ] ; <float> [#uses=2]
- %t204 = getelementptr float* %t198, i64 %t202 ; <float*> [#uses=1]
- %t205 = getelementptr float* %t197, i64 %t202 ; <float*> [#uses=1]
+ %t204 = getelementptr float, float* %t198, i64 %t202 ; <float*> [#uses=1]
+ %t205 = getelementptr float, float* %t197, i64 %t202 ; <float*> [#uses=1]
%t206 = load float* %t204 ; <float> [#uses=1]
%t207 = fmul float %t203, %t206 ; <float> [#uses=1]
store float %t207, float* %t205
for.body:
%i.06 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr [0 x double]* @A, i64 0, i64 %i.06
+ %arrayidx = getelementptr [0 x double], [0 x double]* @A, i64 0, i64 %i.06
%tmp3 = load double* %arrayidx, align 8
%mul = fmul double %tmp3, 2.300000e+00
store double %mul, double* %arrayidx, align 8
; CHECK-LABEL: t:
; CHECK: leaq (%rax,%rax,4)
%0 = zext i32 %base to i64
- %1 = getelementptr inbounds %struct.s2* null, i64 %0
+ %1 = getelementptr inbounds %struct.s2, %struct.s2* null, i64 %0
br i1 undef, label %bb1, label %bb2
bb1:
; CHECK-NOT: shlq $9
; CHECK-NOT: leaq
; CHECK: call
- %2 = getelementptr inbounds %struct.s2* null, i64 %0, i32 0
+ %2 = getelementptr inbounds %struct.s2, %struct.s2* null, i64 %0, i32 0
call void @bar(i32* %2) nounwind
unreachable
br i1 %cmp3, label %return, label %do.cond
do.cond:
- %incdec.ptr = getelementptr inbounds i8* %p.0, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %p.0, i64 1
%dec = add i64 %n.addr.0, -1
%cmp6 = icmp eq i64 %dec, 0
br i1 %cmp6, label %return, label %do.body
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fdiv double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
loop:
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
%indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
%s0 = shl i64 %indvar, 8
%indvar.i8 = ashr i64 %s0, 8
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%s1 = shl i64 %indvar, 24
%indvar.i24 = ashr i64 %s1, 24
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
ret i32 0
spill_vectors:
- %vp1 = getelementptr <4 x i32>* %vp0, i32 1
+ %vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
%v0 = load <4 x i32>* %vp0
%v1 = load <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
ret i32 0
spill_vectors:
- %vp1 = getelementptr <4 x i32>* %vp0, i32 1
+ %vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
%v0 = load <4 x i32>* %vp0
%v1 = load <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
ret i32 0
spill_vectors:
- %vp1 = getelementptr <4 x i32>* %vp0, i32 1
+ %vp1 = getelementptr <4 x i32>, <4 x i32>* %vp0, i32 1
%v0 = load <4 x i32>* %vp0
%v1 = load <4 x i32>* %vp1
%vicmp = icmp slt <4 x i32> %v0, %v1
define void @t() nounwind ssp {
entry:
%buf = alloca [512 x i8], align 1
- %ptr = getelementptr inbounds [512 x i8]* %buf, i32 0, i32 0
+ %ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 undef, i32 512, i32 1, i1 false)
unreachable
}
define void @t() nounwind {
entry:
%up_mvd = alloca [8 x %struct.x] ; <[8 x %struct.x]*> [#uses=2]
- %up_mvd116 = getelementptr [8 x %struct.x]* %up_mvd, i32 0, i32 0 ; <%struct.x*> [#uses=1]
+ %up_mvd116 = getelementptr [8 x %struct.x], [8 x %struct.x]* %up_mvd, i32 0, i32 0 ; <%struct.x*> [#uses=1]
%tmp110117 = bitcast [8 x %struct.x]* %up_mvd to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i64(i8* %tmp110117, i8 0, i64 32, i32 8, i1 false)
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 1, i32* %arrayidx, align 4
%0 = or i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %0
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %0
store i32 1, i32* %arrayidx2, align 4
%1 = or i64 %indvars.iv, 2
- %arrayidx5 = getelementptr inbounds i32* %a, i64 %1
+ %arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %1
store i32 1, i32* %arrayidx5, align 4
%2 = or i64 %indvars.iv, 3
- %arrayidx8 = getelementptr inbounds i32* %a, i64 %2
+ %arrayidx8 = getelementptr inbounds i32, i32* %a, i64 %2
store i32 1, i32* %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
%3 = trunc i64 %indvars.iv.next to i32
; ELF: calll _alloca
; ELF: movl 8028(%esp), %eax
%A2 = alloca [2000 x i32], align 16 ; <[2000 x i32]*> [#uses=1]
- %A2.sub = getelementptr [2000 x i32]* %A2, i32 0, i32 0 ; <i32*> [#uses=1]
+ %A2.sub = getelementptr [2000 x i32], [2000 x i32]* %A2, i32 0, i32 0 ; <i32*> [#uses=1]
call void @bar2( i32* %A2.sub, i32 %N )
ret void
}
%ref.tmp.i = alloca %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", align 8
%Op.i = alloca %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", align 8
%0 = bitcast %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i to i8*
- %retval.sroa.0.0.idx.i36 = getelementptr inbounds %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i, i64 0, i32 1, i32 0, i32 0
+ %retval.sroa.0.0.idx.i36 = getelementptr inbounds %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199", %"struct.std::pair.112.119.719.1079.2039.2159.2399.4199"* %ref.tmp.i, i64 0, i32 1, i32 0, i32 0
%retval.sroa.0.0.copyload.i37 = load i32* %retval.sroa.0.0.idx.i36, align 8
call void @llvm.lifetime.end(i64 24, i8* %0) #1
%agg.tmp8.sroa.2.0.copyload = load i32* undef, align 8
%1 = bitcast %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i to i8*
call void @llvm.lifetime.start(i64 16, i8* %1) #1
- %2 = getelementptr %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i, i64 0, i32 1
+ %2 = getelementptr %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083", %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* %Op.i, i64 0, i32 1
store i32 %agg.tmp8.sroa.2.0.copyload, i32* %2, align 8
; CHECK: movl (%rax), %eax
br i1 undef, label %if.else56, label %cond.end.i
cond.end.i:
- %significand.i18.i = getelementptr inbounds %c1* %temp_rhs, i64 0, i32 1
- %exponent.i = getelementptr inbounds %c1* %temp_rhs, i64 0, i32 2
+ %significand.i18.i = getelementptr inbounds %c1, %c1* %temp_rhs, i64 0, i32 1
+ %exponent.i = getelementptr inbounds %c1, %c1* %temp_rhs, i64 0, i32 2
%0 = load i16* %exponent.i, align 8
%sub.i = add i16 %0, -1
store i16 %sub.i, i16* %exponent.i, align 8
for.body:
%indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ]
%tmp57 = load i32* %tmp56, align 4
- %arrayidx12.us.i61 = getelementptr inbounds i32* %pre, i64 %indvars.iv42.i
+ %arrayidx12.us.i61 = getelementptr inbounds i32, i32* %pre, i64 %indvars.iv42.i
%tmp58 = load i32* %arrayidx12.us.i61, align 4
%mul.us.i = mul nsw i32 %tmp58, %tmp57
- %arrayidx8.us.i.1 = getelementptr inbounds i32* %tmp56, i64 1
+ %arrayidx8.us.i.1 = getelementptr inbounds i32, i32* %tmp56, i64 1
%tmp59 = load i32* %arrayidx8.us.i.1, align 4
- %arrayidx12.us.i61.1 = getelementptr inbounds i32* %pre94, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.1 = getelementptr inbounds i32, i32* %pre94, i64 %indvars.iv42.i
%tmp60 = load i32* %arrayidx12.us.i61.1, align 4
%mul.us.i.1 = mul nsw i32 %tmp60, %tmp59
%add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i
- %arrayidx8.us.i.2 = getelementptr inbounds i32* %tmp56, i64 2
+ %arrayidx8.us.i.2 = getelementptr inbounds i32, i32* %tmp56, i64 2
%tmp61 = load i32* %arrayidx8.us.i.2, align 4
- %arrayidx12.us.i61.2 = getelementptr inbounds i32* %pre95, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.2 = getelementptr inbounds i32, i32* %pre95, i64 %indvars.iv42.i
%tmp62 = load i32* %arrayidx12.us.i61.2, align 4
%mul.us.i.2 = mul nsw i32 %tmp62, %tmp61
%add.us.i.2 = add nsw i32 %mul.us.i.2, %add.us.i.1
- %arrayidx8.us.i.3 = getelementptr inbounds i32* %tmp56, i64 3
+ %arrayidx8.us.i.3 = getelementptr inbounds i32, i32* %tmp56, i64 3
%tmp63 = load i32* %arrayidx8.us.i.3, align 4
- %arrayidx12.us.i61.3 = getelementptr inbounds i32* %pre96, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.3 = getelementptr inbounds i32, i32* %pre96, i64 %indvars.iv42.i
%tmp64 = load i32* %arrayidx12.us.i61.3, align 4
%mul.us.i.3 = mul nsw i32 %tmp64, %tmp63
%add.us.i.3 = add nsw i32 %mul.us.i.3, %add.us.i.2
- %arrayidx8.us.i.4 = getelementptr inbounds i32* %tmp56, i64 4
+ %arrayidx8.us.i.4 = getelementptr inbounds i32, i32* %tmp56, i64 4
%tmp65 = load i32* %arrayidx8.us.i.4, align 4
- %arrayidx12.us.i61.4 = getelementptr inbounds i32* %pre97, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.4 = getelementptr inbounds i32, i32* %pre97, i64 %indvars.iv42.i
%tmp66 = load i32* %arrayidx12.us.i61.4, align 4
%mul.us.i.4 = mul nsw i32 %tmp66, %tmp65
%add.us.i.4 = add nsw i32 %mul.us.i.4, %add.us.i.3
- %arrayidx8.us.i.5 = getelementptr inbounds i32* %tmp56, i64 5
+ %arrayidx8.us.i.5 = getelementptr inbounds i32, i32* %tmp56, i64 5
%tmp67 = load i32* %arrayidx8.us.i.5, align 4
- %arrayidx12.us.i61.5 = getelementptr inbounds i32* %pre98, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.5 = getelementptr inbounds i32, i32* %pre98, i64 %indvars.iv42.i
%tmp68 = load i32* %arrayidx12.us.i61.5, align 4
%mul.us.i.5 = mul nsw i32 %tmp68, %tmp67
%add.us.i.5 = add nsw i32 %mul.us.i.5, %add.us.i.4
- %arrayidx8.us.i.6 = getelementptr inbounds i32* %tmp56, i64 6
+ %arrayidx8.us.i.6 = getelementptr inbounds i32, i32* %tmp56, i64 6
%tmp69 = load i32* %arrayidx8.us.i.6, align 4
- %arrayidx12.us.i61.6 = getelementptr inbounds i32* %pre99, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.6 = getelementptr inbounds i32, i32* %pre99, i64 %indvars.iv42.i
%tmp70 = load i32* %arrayidx12.us.i61.6, align 4
%mul.us.i.6 = mul nsw i32 %tmp70, %tmp69
%add.us.i.6 = add nsw i32 %mul.us.i.6, %add.us.i.5
- %arrayidx8.us.i.7 = getelementptr inbounds i32* %tmp56, i64 7
+ %arrayidx8.us.i.7 = getelementptr inbounds i32, i32* %tmp56, i64 7
%tmp71 = load i32* %arrayidx8.us.i.7, align 4
- %arrayidx12.us.i61.7 = getelementptr inbounds i32* %pre100, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.7 = getelementptr inbounds i32, i32* %pre100, i64 %indvars.iv42.i
%tmp72 = load i32* %arrayidx12.us.i61.7, align 4
%mul.us.i.7 = mul nsw i32 %tmp72, %tmp71
%add.us.i.7 = add nsw i32 %mul.us.i.7, %add.us.i.6
- %arrayidx8.us.i.8 = getelementptr inbounds i32* %tmp56, i64 8
+ %arrayidx8.us.i.8 = getelementptr inbounds i32, i32* %tmp56, i64 8
%tmp73 = load i32* %arrayidx8.us.i.8, align 4
- %arrayidx12.us.i61.8 = getelementptr inbounds i32* %pre101, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.8 = getelementptr inbounds i32, i32* %pre101, i64 %indvars.iv42.i
%tmp74 = load i32* %arrayidx12.us.i61.8, align 4
%mul.us.i.8 = mul nsw i32 %tmp74, %tmp73
%add.us.i.8 = add nsw i32 %mul.us.i.8, %add.us.i.7
- %arrayidx8.us.i.9 = getelementptr inbounds i32* %tmp56, i64 9
+ %arrayidx8.us.i.9 = getelementptr inbounds i32, i32* %tmp56, i64 9
%tmp75 = load i32* %arrayidx8.us.i.9, align 4
- %arrayidx12.us.i61.9 = getelementptr inbounds i32* %pre102, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.9 = getelementptr inbounds i32, i32* %pre102, i64 %indvars.iv42.i
%tmp76 = load i32* %arrayidx12.us.i61.9, align 4
%mul.us.i.9 = mul nsw i32 %tmp76, %tmp75
%add.us.i.9 = add nsw i32 %mul.us.i.9, %add.us.i.8
- %arrayidx16.us.i = getelementptr inbounds i32* %tmp55, i64 %indvars.iv42.i
+ %arrayidx16.us.i = getelementptr inbounds i32, i32* %tmp55, i64 %indvars.iv42.i
store i32 %add.us.i.9, i32* %arrayidx16.us.i, align 4
%indvars.iv.next43.i = add i64 %indvars.iv42.i, 1
%lftr.wideiv = trunc i64 %indvars.iv.next43.i to i32
for.body:
%indvars.iv42.i = phi i64 [ %indvars.iv.next43.i, %for.body ], [ 0, %entry ]
%tmp57 = load i32* %tmp56, align 4
- %arrayidx12.us.i61 = getelementptr inbounds i32* %pre, i64 %indvars.iv42.i
+ %arrayidx12.us.i61 = getelementptr inbounds i32, i32* %pre, i64 %indvars.iv42.i
%tmp58 = load i32* %arrayidx12.us.i61, align 4
- %arrayidx8.us.i.1 = getelementptr inbounds i32* %tmp56, i64 1
+ %arrayidx8.us.i.1 = getelementptr inbounds i32, i32* %tmp56, i64 1
%tmp59 = load i32* %arrayidx8.us.i.1, align 4
- %arrayidx12.us.i61.1 = getelementptr inbounds i32* %pre94, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.1 = getelementptr inbounds i32, i32* %pre94, i64 %indvars.iv42.i
%tmp60 = load i32* %arrayidx12.us.i61.1, align 4
- %arrayidx8.us.i.2 = getelementptr inbounds i32* %tmp56, i64 2
+ %arrayidx8.us.i.2 = getelementptr inbounds i32, i32* %tmp56, i64 2
%tmp61 = load i32* %arrayidx8.us.i.2, align 4
- %arrayidx12.us.i61.2 = getelementptr inbounds i32* %pre95, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.2 = getelementptr inbounds i32, i32* %pre95, i64 %indvars.iv42.i
%tmp62 = load i32* %arrayidx12.us.i61.2, align 4
- %arrayidx8.us.i.3 = getelementptr inbounds i32* %tmp56, i64 3
+ %arrayidx8.us.i.3 = getelementptr inbounds i32, i32* %tmp56, i64 3
%tmp63 = load i32* %arrayidx8.us.i.3, align 4
- %arrayidx12.us.i61.3 = getelementptr inbounds i32* %pre96, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.3 = getelementptr inbounds i32, i32* %pre96, i64 %indvars.iv42.i
%tmp64 = load i32* %arrayidx12.us.i61.3, align 4
- %arrayidx8.us.i.4 = getelementptr inbounds i32* %tmp56, i64 4
+ %arrayidx8.us.i.4 = getelementptr inbounds i32, i32* %tmp56, i64 4
%tmp65 = load i32* %arrayidx8.us.i.4, align 4
- %arrayidx12.us.i61.4 = getelementptr inbounds i32* %pre97, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.4 = getelementptr inbounds i32, i32* %pre97, i64 %indvars.iv42.i
%tmp66 = load i32* %arrayidx12.us.i61.4, align 4
- %arrayidx8.us.i.5 = getelementptr inbounds i32* %tmp56, i64 5
+ %arrayidx8.us.i.5 = getelementptr inbounds i32, i32* %tmp56, i64 5
%tmp67 = load i32* %arrayidx8.us.i.5, align 4
- %arrayidx12.us.i61.5 = getelementptr inbounds i32* %pre98, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.5 = getelementptr inbounds i32, i32* %pre98, i64 %indvars.iv42.i
%tmp68 = load i32* %arrayidx12.us.i61.5, align 4
- %arrayidx8.us.i.6 = getelementptr inbounds i32* %tmp56, i64 6
+ %arrayidx8.us.i.6 = getelementptr inbounds i32, i32* %tmp56, i64 6
%tmp69 = load i32* %arrayidx8.us.i.6, align 4
- %arrayidx12.us.i61.6 = getelementptr inbounds i32* %pre99, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.6 = getelementptr inbounds i32, i32* %pre99, i64 %indvars.iv42.i
%tmp70 = load i32* %arrayidx12.us.i61.6, align 4
%mul.us.i = mul nsw i32 %tmp58, %tmp57
- %arrayidx8.us.i.7 = getelementptr inbounds i32* %tmp56, i64 7
+ %arrayidx8.us.i.7 = getelementptr inbounds i32, i32* %tmp56, i64 7
%tmp71 = load i32* %arrayidx8.us.i.7, align 4
- %arrayidx12.us.i61.7 = getelementptr inbounds i32* %pre100, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.7 = getelementptr inbounds i32, i32* %pre100, i64 %indvars.iv42.i
%tmp72 = load i32* %arrayidx12.us.i61.7, align 4
- %arrayidx8.us.i.8 = getelementptr inbounds i32* %tmp56, i64 8
+ %arrayidx8.us.i.8 = getelementptr inbounds i32, i32* %tmp56, i64 8
%tmp73 = load i32* %arrayidx8.us.i.8, align 4
- %arrayidx12.us.i61.8 = getelementptr inbounds i32* %pre101, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.8 = getelementptr inbounds i32, i32* %pre101, i64 %indvars.iv42.i
%tmp74 = load i32* %arrayidx12.us.i61.8, align 4
- %arrayidx8.us.i.9 = getelementptr inbounds i32* %tmp56, i64 9
+ %arrayidx8.us.i.9 = getelementptr inbounds i32, i32* %tmp56, i64 9
%tmp75 = load i32* %arrayidx8.us.i.9, align 4
- %arrayidx12.us.i61.9 = getelementptr inbounds i32* %pre102, i64 %indvars.iv42.i
+ %arrayidx12.us.i61.9 = getelementptr inbounds i32, i32* %pre102, i64 %indvars.iv42.i
%tmp76 = load i32* %arrayidx12.us.i61.9, align 4
%mul.us.i.1 = mul nsw i32 %tmp60, %tmp59
%add.us.i.1 = add nsw i32 %mul.us.i.1, %mul.us.i
%add.us.i.8 = add nsw i32 %mul.us.i.8, %add.us.i.7
%mul.us.i.9 = mul nsw i32 %tmp76, %tmp75
%add.us.i.9 = add nsw i32 %mul.us.i.9, %add.us.i.8
- %arrayidx16.us.i = getelementptr inbounds i32* %tmp55, i64 %indvars.iv42.i
+ %arrayidx16.us.i = getelementptr inbounds i32, i32* %tmp55, i64 %indvars.iv42.i
store i32 %add.us.i.9, i32* %arrayidx16.us.i, align 4
%indvars.iv.next43.i = add i64 %indvars.iv42.i, 1
%lftr.wideiv = trunc i64 %indvars.iv.next43.i to i32
%l7 = load i32* @c
%add.i = add i32 %l7, %l6
%idxprom.i = zext i32 %l7 to i64
- %arrayidx.i = getelementptr inbounds i32* @d, i64 %idxprom.i
+ %arrayidx.i = getelementptr inbounds i32, i32* @d, i64 %idxprom.i
%l8 = load i32* %arrayidx.i
store i32 346, i32* @c
store i32 20021, i32* @d
for.body34.i: ; preds = %for.inc39.i, %if.then24
%index.178.i = phi i64 [ %add21.i, %if.then24 ], [ %inc41.i, %for.inc39.i ]
- %arrayidx35.i = getelementptr inbounds i8* %plane, i64 %index.178.i
+ %arrayidx35.i = getelementptr inbounds i8, i8* %plane, i64 %index.178.i
%1 = load i8* %arrayidx35.i, align 1
%tobool36.i = icmp eq i8 %1, 0
br i1 %tobool36.i, label %for.inc39.i, label %return
br i1 %cond, label %loop, label %loop2
loop2: ; preds = %loop1
- %gep = getelementptr inbounds i32** %next.ptr, i32 1
+ %gep = getelementptr inbounds i32*, i32** %next.ptr, i32 1
store i32* %next.load, i32** undef
br label %loop
}
br i1 %cond, label %loop, label %loop2
loop2:
- %gep = getelementptr inbounds i32** %next.ptr, i32 1
+ %gep = getelementptr inbounds i32*, i32** %next.ptr, i32 1
store i32* %next.load, i32** undef
br label %loop
}
br i1 %cond, label %loop2a, label %loop2b
loop2b: ; preds = %loop1
- %gep = getelementptr inbounds i32** %next.ptr, i32 1
+ %gep = getelementptr inbounds i32*, i32** %next.ptr, i32 1
store i32* %next.load, i32** undef
br label %loop2a
}
br i1 %cond, label %loop2a, label %loop2b
loop2b: ; preds = %loop1
- %gep = getelementptr inbounds i32** %next.ptr, i32 1
+ %gep = getelementptr inbounds i32*, i32** %next.ptr, i32 1
store i32* %next.load, i32** undef
br label %loop2a
}
define void @wrap_mul4(double* nocapture %Out, [4 x double]* nocapture %A, [4 x double]* nocapture %B) #0 {
entry:
- %arrayidx1.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 0
+ %arrayidx1.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 0
%0 = load double* %arrayidx1.i, align 8
- %arrayidx3.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 0
+ %arrayidx3.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 0
%1 = load double* %arrayidx3.i, align 8
%mul.i = fmul double %0, %1
- %arrayidx5.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 1
+ %arrayidx5.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 1
%2 = load double* %arrayidx5.i, align 8
- %arrayidx7.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 0
+ %arrayidx7.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 0
%3 = load double* %arrayidx7.i, align 8
%mul8.i = fmul double %2, %3
%add.i = fadd double %mul.i, %mul8.i
- %arrayidx10.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 2
+ %arrayidx10.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 2
%4 = load double* %arrayidx10.i, align 8
- %arrayidx12.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 0
+ %arrayidx12.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 0
%5 = load double* %arrayidx12.i, align 8
%mul13.i = fmul double %4, %5
%add14.i = fadd double %add.i, %mul13.i
- %arrayidx16.i = getelementptr inbounds [4 x double]* %A, i64 0, i64 3
+ %arrayidx16.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 0, i64 3
%6 = load double* %arrayidx16.i, align 8
- %arrayidx18.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 0
+ %arrayidx18.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 0
%7 = load double* %arrayidx18.i, align 8
%mul19.i = fmul double %6, %7
%add20.i = fadd double %add14.i, %mul19.i
- %arrayidx25.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 1
+ %arrayidx25.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 1
%8 = load double* %arrayidx25.i, align 8
%mul26.i = fmul double %0, %8
- %arrayidx30.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 1
+ %arrayidx30.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 1
%9 = load double* %arrayidx30.i, align 8
%mul31.i = fmul double %2, %9
%add32.i = fadd double %mul26.i, %mul31.i
- %arrayidx36.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 1
+ %arrayidx36.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 1
%10 = load double* %arrayidx36.i, align 8
%mul37.i = fmul double %4, %10
%add38.i = fadd double %add32.i, %mul37.i
- %arrayidx42.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 1
+ %arrayidx42.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 1
%11 = load double* %arrayidx42.i, align 8
%mul43.i = fmul double %6, %11
%add44.i = fadd double %add38.i, %mul43.i
- %arrayidx49.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 2
+ %arrayidx49.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 2
%12 = load double* %arrayidx49.i, align 8
%mul50.i = fmul double %0, %12
- %arrayidx54.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 2
+ %arrayidx54.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 2
%13 = load double* %arrayidx54.i, align 8
%mul55.i = fmul double %2, %13
%add56.i = fadd double %mul50.i, %mul55.i
- %arrayidx60.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 2
+ %arrayidx60.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 2
%14 = load double* %arrayidx60.i, align 8
%mul61.i = fmul double %4, %14
%add62.i = fadd double %add56.i, %mul61.i
- %arrayidx66.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 2
+ %arrayidx66.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 2
%15 = load double* %arrayidx66.i, align 8
%mul67.i = fmul double %6, %15
%add68.i = fadd double %add62.i, %mul67.i
- %arrayidx73.i = getelementptr inbounds [4 x double]* %B, i64 0, i64 3
+ %arrayidx73.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 0, i64 3
%16 = load double* %arrayidx73.i, align 8
%mul74.i = fmul double %0, %16
- %arrayidx78.i = getelementptr inbounds [4 x double]* %B, i64 1, i64 3
+ %arrayidx78.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 1, i64 3
%17 = load double* %arrayidx78.i, align 8
%mul79.i = fmul double %2, %17
%add80.i = fadd double %mul74.i, %mul79.i
- %arrayidx84.i = getelementptr inbounds [4 x double]* %B, i64 2, i64 3
+ %arrayidx84.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 2, i64 3
%18 = load double* %arrayidx84.i, align 8
%mul85.i = fmul double %4, %18
%add86.i = fadd double %add80.i, %mul85.i
- %arrayidx90.i = getelementptr inbounds [4 x double]* %B, i64 3, i64 3
+ %arrayidx90.i = getelementptr inbounds [4 x double], [4 x double]* %B, i64 3, i64 3
%19 = load double* %arrayidx90.i, align 8
%mul91.i = fmul double %6, %19
%add92.i = fadd double %add86.i, %mul91.i
- %arrayidx95.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 0
+ %arrayidx95.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 0
%20 = load double* %arrayidx95.i, align 8
%mul98.i = fmul double %1, %20
- %arrayidx100.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 1
+ %arrayidx100.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 1
%21 = load double* %arrayidx100.i, align 8
%mul103.i = fmul double %3, %21
%add104.i = fadd double %mul98.i, %mul103.i
- %arrayidx106.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 2
+ %arrayidx106.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 2
%22 = load double* %arrayidx106.i, align 8
%mul109.i = fmul double %5, %22
%add110.i = fadd double %add104.i, %mul109.i
- %arrayidx112.i = getelementptr inbounds [4 x double]* %A, i64 1, i64 3
+ %arrayidx112.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 1, i64 3
%23 = load double* %arrayidx112.i, align 8
%mul115.i = fmul double %7, %23
%add116.i = fadd double %add110.i, %mul115.i
%add182.i = fadd double %add176.i, %mul181.i
%mul187.i = fmul double %19, %23
%add188.i = fadd double %add182.i, %mul187.i
- %arrayidx191.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 0
+ %arrayidx191.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 0
%24 = load double* %arrayidx191.i, align 8
%mul194.i = fmul double %1, %24
- %arrayidx196.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 1
+ %arrayidx196.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 1
%25 = load double* %arrayidx196.i, align 8
%mul199.i = fmul double %3, %25
%add200.i = fadd double %mul194.i, %mul199.i
- %arrayidx202.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 2
+ %arrayidx202.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 2
%26 = load double* %arrayidx202.i, align 8
%mul205.i = fmul double %5, %26
%add206.i = fadd double %add200.i, %mul205.i
- %arrayidx208.i = getelementptr inbounds [4 x double]* %A, i64 2, i64 3
+ %arrayidx208.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 2, i64 3
%27 = load double* %arrayidx208.i, align 8
%mul211.i = fmul double %7, %27
%add212.i = fadd double %add206.i, %mul211.i
%add278.i = fadd double %add272.i, %mul277.i
%mul283.i = fmul double %19, %27
%add284.i = fadd double %add278.i, %mul283.i
- %arrayidx287.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 0
+ %arrayidx287.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 0
%28 = load double* %arrayidx287.i, align 8
%mul290.i = fmul double %1, %28
- %arrayidx292.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 1
+ %arrayidx292.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 1
%29 = load double* %arrayidx292.i, align 8
%mul295.i = fmul double %3, %29
%add296.i = fadd double %mul290.i, %mul295.i
- %arrayidx298.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 2
+ %arrayidx298.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 2
%30 = load double* %arrayidx298.i, align 8
%mul301.i = fmul double %5, %30
%add302.i = fadd double %add296.i, %mul301.i
- %arrayidx304.i = getelementptr inbounds [4 x double]* %A, i64 3, i64 3
+ %arrayidx304.i = getelementptr inbounds [4 x double], [4 x double]* %A, i64 3, i64 3
%31 = load double* %arrayidx304.i, align 8
%mul307.i = fmul double %7, %31
%add308.i = fadd double %add302.i, %mul307.i
%mul379.i = fmul double %19, %31
%add380.i = fadd double %add374.i, %mul379.i
store double %add20.i, double* %Out, align 8
- %Res.i.sroa.1.8.idx2 = getelementptr inbounds double* %Out, i64 1
+ %Res.i.sroa.1.8.idx2 = getelementptr inbounds double, double* %Out, i64 1
store double %add44.i, double* %Res.i.sroa.1.8.idx2, align 8
- %Res.i.sroa.2.16.idx4 = getelementptr inbounds double* %Out, i64 2
+ %Res.i.sroa.2.16.idx4 = getelementptr inbounds double, double* %Out, i64 2
store double %add68.i, double* %Res.i.sroa.2.16.idx4, align 8
- %Res.i.sroa.3.24.idx6 = getelementptr inbounds double* %Out, i64 3
+ %Res.i.sroa.3.24.idx6 = getelementptr inbounds double, double* %Out, i64 3
store double %add92.i, double* %Res.i.sroa.3.24.idx6, align 8
- %Res.i.sroa.4.32.idx8 = getelementptr inbounds double* %Out, i64 4
+ %Res.i.sroa.4.32.idx8 = getelementptr inbounds double, double* %Out, i64 4
store double %add116.i, double* %Res.i.sroa.4.32.idx8, align 8
- %Res.i.sroa.5.40.idx10 = getelementptr inbounds double* %Out, i64 5
+ %Res.i.sroa.5.40.idx10 = getelementptr inbounds double, double* %Out, i64 5
store double %add140.i, double* %Res.i.sroa.5.40.idx10, align 8
- %Res.i.sroa.6.48.idx12 = getelementptr inbounds double* %Out, i64 6
+ %Res.i.sroa.6.48.idx12 = getelementptr inbounds double, double* %Out, i64 6
store double %add164.i, double* %Res.i.sroa.6.48.idx12, align 8
- %Res.i.sroa.7.56.idx14 = getelementptr inbounds double* %Out, i64 7
+ %Res.i.sroa.7.56.idx14 = getelementptr inbounds double, double* %Out, i64 7
store double %add188.i, double* %Res.i.sroa.7.56.idx14, align 8
- %Res.i.sroa.8.64.idx16 = getelementptr inbounds double* %Out, i64 8
+ %Res.i.sroa.8.64.idx16 = getelementptr inbounds double, double* %Out, i64 8
store double %add212.i, double* %Res.i.sroa.8.64.idx16, align 8
- %Res.i.sroa.9.72.idx18 = getelementptr inbounds double* %Out, i64 9
+ %Res.i.sroa.9.72.idx18 = getelementptr inbounds double, double* %Out, i64 9
store double %add236.i, double* %Res.i.sroa.9.72.idx18, align 8
- %Res.i.sroa.10.80.idx20 = getelementptr inbounds double* %Out, i64 10
+ %Res.i.sroa.10.80.idx20 = getelementptr inbounds double, double* %Out, i64 10
store double %add260.i, double* %Res.i.sroa.10.80.idx20, align 8
- %Res.i.sroa.11.88.idx22 = getelementptr inbounds double* %Out, i64 11
+ %Res.i.sroa.11.88.idx22 = getelementptr inbounds double, double* %Out, i64 11
store double %add284.i, double* %Res.i.sroa.11.88.idx22, align 8
- %Res.i.sroa.12.96.idx24 = getelementptr inbounds double* %Out, i64 12
+ %Res.i.sroa.12.96.idx24 = getelementptr inbounds double, double* %Out, i64 12
store double %add308.i, double* %Res.i.sroa.12.96.idx24, align 8
- %Res.i.sroa.13.104.idx26 = getelementptr inbounds double* %Out, i64 13
+ %Res.i.sroa.13.104.idx26 = getelementptr inbounds double, double* %Out, i64 13
store double %add332.i, double* %Res.i.sroa.13.104.idx26, align 8
- %Res.i.sroa.14.112.idx28 = getelementptr inbounds double* %Out, i64 14
+ %Res.i.sroa.14.112.idx28 = getelementptr inbounds double, double* %Out, i64 14
store double %add356.i, double* %Res.i.sroa.14.112.idx28, align 8
- %Res.i.sroa.15.120.idx30 = getelementptr inbounds double* %Out, i64 15
+ %Res.i.sroa.15.120.idx30 = getelementptr inbounds double, double* %Out, i64 15
store double %add380.i, double* %Res.i.sroa.15.120.idx30, align 8
ret void
}
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx8 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 0
+ %arrayidx8 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 0
%tmp = load i32* %arrayidx8, align 4
- %arrayidx12 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 0
+ %arrayidx12 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 0
%tmp1 = load i32* %arrayidx12, align 4
- %arrayidx8.1 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 1
+ %arrayidx8.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 1
%tmp2 = load i32* %arrayidx8.1, align 4
- %arrayidx12.1 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 0
+ %arrayidx12.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 0
%tmp3 = load i32* %arrayidx12.1, align 4
- %arrayidx8.2 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 2
+ %arrayidx8.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 2
%tmp4 = load i32* %arrayidx8.2, align 4
- %arrayidx12.2 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 0
+ %arrayidx12.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 0
%tmp5 = load i32* %arrayidx12.2, align 4
- %arrayidx8.3 = getelementptr inbounds [4 x i32]* %m1, i64 %indvars.iv, i64 3
+ %arrayidx8.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m1, i64 %indvars.iv, i64 3
%tmp6 = load i32* %arrayidx8.3, align 4
- %arrayidx12.3 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 0
+ %arrayidx12.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 0
%tmp8 = load i32* %arrayidx8, align 4
- %arrayidx12.137 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 1
+ %arrayidx12.137 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 1
%tmp9 = load i32* %arrayidx12.137, align 4
%tmp10 = load i32* %arrayidx8.1, align 4
- %arrayidx12.1.1 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 1
+ %arrayidx12.1.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 1
%tmp11 = load i32* %arrayidx12.1.1, align 4
%tmp12 = load i32* %arrayidx8.2, align 4
- %arrayidx12.2.1 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 1
+ %arrayidx12.2.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 1
%tmp13 = load i32* %arrayidx12.2.1, align 4
%tmp14 = load i32* %arrayidx8.3, align 4
- %arrayidx12.3.1 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 1
+ %arrayidx12.3.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 1
%tmp15 = load i32* %arrayidx12.3.1, align 4
%tmp16 = load i32* %arrayidx8, align 4
- %arrayidx12.239 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 2
+ %arrayidx12.239 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 2
%tmp17 = load i32* %arrayidx12.239, align 4
%tmp18 = load i32* %arrayidx8.1, align 4
- %arrayidx12.1.2 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 2
+ %arrayidx12.1.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 2
%tmp19 = load i32* %arrayidx12.1.2, align 4
%tmp20 = load i32* %arrayidx8.2, align 4
- %arrayidx12.2.2 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 2
+ %arrayidx12.2.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 2
%tmp21 = load i32* %arrayidx12.2.2, align 4
%tmp22 = load i32* %arrayidx8.3, align 4
- %arrayidx12.3.2 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 2
+ %arrayidx12.3.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 2
%tmp23 = load i32* %arrayidx12.3.2, align 4
%tmp24 = load i32* %arrayidx8, align 4
- %arrayidx12.341 = getelementptr inbounds [4 x i32]* %m2, i64 0, i64 3
+ %arrayidx12.341 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 0, i64 3
%tmp25 = load i32* %arrayidx12.341, align 4
%tmp26 = load i32* %arrayidx8.1, align 4
- %arrayidx12.1.3 = getelementptr inbounds [4 x i32]* %m2, i64 1, i64 3
+ %arrayidx12.1.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 1, i64 3
%tmp27 = load i32* %arrayidx12.1.3, align 4
%tmp28 = load i32* %arrayidx8.2, align 4
- %arrayidx12.2.3 = getelementptr inbounds [4 x i32]* %m2, i64 2, i64 3
+ %arrayidx12.2.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 2, i64 3
%tmp29 = load i32* %arrayidx12.2.3, align 4
%tmp30 = load i32* %arrayidx8.3, align 4
- %arrayidx12.3.3 = getelementptr inbounds [4 x i32]* %m2, i64 3, i64 3
+ %arrayidx12.3.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m2, i64 3, i64 3
%tmp31 = load i32* %arrayidx12.3.3, align 4
%tmp7 = load i32* %arrayidx12.3, align 4
%mul = mul nsw i32 %tmp1, %tmp
%add.1.3 = add nsw i32 %mul.1.3, %mul.342
%add.2.3 = add nsw i32 %mul.2.3, %add.1.3
%add.3.3 = add nsw i32 %mul.3.3, %add.2.3
- %arrayidx16 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 0
+ %arrayidx16 = getelementptr inbounds [4 x i32], [4 x i32]* %m3, i64 %indvars.iv, i64 0
store i32 %add.3, i32* %arrayidx16, align 4
- %arrayidx16.1 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 1
+ %arrayidx16.1 = getelementptr inbounds [4 x i32], [4 x i32]* %m3, i64 %indvars.iv, i64 1
store i32 %add.3.1, i32* %arrayidx16.1, align 4
- %arrayidx16.2 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 2
+ %arrayidx16.2 = getelementptr inbounds [4 x i32], [4 x i32]* %m3, i64 %indvars.iv, i64 2
store i32 %add.3.2, i32* %arrayidx16.2, align 4
- %arrayidx16.3 = getelementptr inbounds [4 x i32]* %m3, i64 %indvars.iv, i64 3
+ %arrayidx16.3 = getelementptr inbounds [4 x i32], [4 x i32]* %m3, i64 %indvars.iv, i64 3
store i32 %add.3.3, i32* %arrayidx16.3, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; X32: adcl
%i.037.0 = phi i32 [ 0, %entry ], [ %tmp25, %bb26 ]
%sum.035.0 = phi <1 x i64> [ zeroinitializer, %entry ], [ %tmp22, %bb26 ]
- %tmp13 = getelementptr <1 x i64>* %b, i32 %i.037.0
+ %tmp13 = getelementptr <1 x i64>, <1 x i64>* %b, i32 %i.037.0
%tmp14 = load <1 x i64>* %tmp13
- %tmp18 = getelementptr <1 x i64>* %a, i32 %i.037.0
+ %tmp18 = getelementptr <1 x i64>, <1 x i64>* %a, i32 %i.037.0
%tmp19 = load <1 x i64>* %tmp18
%tmp21 = add <1 x i64> %tmp19, %tmp14
%tmp22 = add <1 x i64> %tmp21, %sum.035.0
%0 = bitcast double* %__x.addr.i to i8*
%1 = bitcast %0* %__u.i to i8*
store double %d1, double* %__x.addr.i, align 8
- %__f.i = getelementptr inbounds %0* %__u.i, i64 0, i32 0
+ %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
store double %d1, double* %__f.i, align 8
%tmp = bitcast double %d1 to i64
; CHECK-NOT: shr
%0 = bitcast double* %__x.addr.i to i8*
%1 = bitcast %0* %__u.i to i8*
store double %add, double* %__x.addr.i, align 8
- %__f.i = getelementptr inbounds %0* %__u.i, i64 0, i32 0
+ %__f.i = getelementptr inbounds %0, %0* %__u.i, i64 0, i32 0
store double %add, double* %__f.i, align 8
%tmp = bitcast double %add to i64
; CHECK-NOT: shr
%0 = bitcast float* %__x.addr.i to i8*
%1 = bitcast %union.anon* %__u.i to i8*
store float %f1, float* %__x.addr.i, align 4
- %__f.i = getelementptr inbounds %union.anon* %__u.i, i64 0, i32 0
+ %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
store float %f1, float* %__f.i, align 4
%2 = bitcast float %f1 to i32
; CHECK-NOT: shr
%0 = bitcast float* %__x.addr.i to i8*
%1 = bitcast %union.anon* %__u.i to i8*
store float %add, float* %__x.addr.i, align 4
- %__f.i = getelementptr inbounds %union.anon* %__u.i, i64 0, i32 0
+ %__f.i = getelementptr inbounds %union.anon, %union.anon* %__u.i, i64 0, i32 0
store float %add, float* %__f.i, align 4
%2 = bitcast float %add to i32
; CHECK-NOT: shr
; CHECK-NOT: movslq
%0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
%idxprom = sext i32 %0 to i64
- %arrayidx = getelementptr inbounds i32* %indexTable, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
%1 = load i32* %arrayidx, align 4
ret i32 %1
}
%0 = bitcast <4 x float> %x to <2 x double>
%1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
%idxprom = sext i32 %1 to i64
- %arrayidx = getelementptr inbounds i32* %indexTable, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %indexTable, i64 %idxprom
%2 = load i32* %arrayidx, align 4
ret i32 %2
}
define i32 @t18() nounwind {
entry:
%foo = alloca %struct.t18_type, align 4
- %a = getelementptr inbounds %struct.t18_type* %foo, i32 0, i32 0
+ %a = getelementptr inbounds %struct.t18_type, %struct.t18_type* %foo, i32 0, i32 0
store i32 1, i32* %a, align 4
- %b = getelementptr inbounds %struct.t18_type* %foo, i32 0, i32 1
+ %b = getelementptr inbounds %struct.t18_type, %struct.t18_type* %foo, i32 0, i32 1
store i32 2, i32* %b, align 4
call void asm sideeffect inteldialect "lea ebx, foo\0A\09mov eax, [ebx].0\0A\09mov [ebx].4, ecx", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind
- %b1 = getelementptr inbounds %struct.t18_type* %foo, i32 0, i32 1
+ %b1 = getelementptr inbounds %struct.t18_type, %struct.t18_type* %foo, i32 0, i32 1
%0 = load i32* %b1, align 4
ret i32 %0
; CHECK: t18
; CHECK-NOT: mul
%carry.02 = phi i128 [ 0, %.lr.ph ], [ %10, %3 ]
%i.01 = phi i64 [ 0, %.lr.ph ], [ %11, %3 ]
- %4 = getelementptr inbounds i64* %arr, i64 %i.01
+ %4 = getelementptr inbounds i64, i64* %arr, i64 %i.01
%5 = load i64* %4, align 8
%6 = sext i64 %5 to i128
%7 = mul nsw i128 %6, %2
%coerce1 = alloca i128, align 16
%b.addr = alloca i128, align 16
%0 = bitcast i128* %coerce to %0*
- %1 = getelementptr %0* %0, i32 0, i32 0
+ %1 = getelementptr %0, %0* %0, i32 0, i32 0
store i64 %a.coerce0, i64* %1
- %2 = getelementptr %0* %0, i32 0, i32 1
+ %2 = getelementptr %0, %0* %0, i32 0, i32 1
store i64 %a.coerce1, i64* %2
%a = load i128* %coerce, align 16
store i128 %a, i128* %a.addr, align 16
%3 = bitcast i128* %coerce1 to %0*
- %4 = getelementptr %0* %3, i32 0, i32 0
+ %4 = getelementptr %0, %0* %3, i32 0, i32 0
store i64 %b.coerce0, i64* %4
- %5 = getelementptr %0* %3, i32 0, i32 1
+ %5 = getelementptr %0, %0* %3, i32 0, i32 1
store i64 %b.coerce1, i64* %5
%b = load i128* %coerce1, align 16
store i128 %b, i128* %b.addr, align 16
%14 = phi i64 [ %indvar.next53, %bb1 ], [ 0, %bb ] ; <i64> [#uses=21]
%x.0 = phi float [ %13, %bb1 ], [ %10, %bb ] ; <float> [#uses=6]
%N_addr.0 = sub i64 %N, %14 ; <i64> [#uses=4]
- %O_addr.0 = getelementptr float* %O, i64 %14 ; <float*> [#uses=4]
- %I_addr.0 = getelementptr float* %I, i64 %14 ; <float*> [#uses=3]
+ %O_addr.0 = getelementptr float, float* %O, i64 %14 ; <float*> [#uses=4]
+ %I_addr.0 = getelementptr float, float* %I, i64 %14 ; <float*> [#uses=3]
%15 = icmp slt i64 %N_addr.0, 1 ; <i1> [#uses=1]
br i1 %15, label %bb4, label %bb3
%vX1.036 = phi <4 x float> [ %32, %bb.nph43 ], [ %47, %bb5 ] ; <<4 x float>> [#uses=2]
%tmp104 = shl i64 %indvar102, 4 ; <i64> [#uses=5]
%tmp105 = add i64 %14, %tmp104 ; <i64> [#uses=2]
- %scevgep106 = getelementptr float* %I, i64 %tmp105 ; <float*> [#uses=1]
+ %scevgep106 = getelementptr float, float* %I, i64 %tmp105 ; <float*> [#uses=1]
%scevgep106107 = bitcast float* %scevgep106 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp109 = add i64 %tmp108, %tmp104 ; <i64> [#uses=2]
%tmp112 = add i64 %tmp111, %tmp104 ; <i64> [#uses=2]
- %scevgep113 = getelementptr float* %I, i64 %tmp112 ; <float*> [#uses=1]
+ %scevgep113 = getelementptr float, float* %I, i64 %tmp112 ; <float*> [#uses=1]
%scevgep113114 = bitcast float* %scevgep113 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp116 = add i64 %tmp115, %tmp104 ; <i64> [#uses=2]
- %scevgep117 = getelementptr float* %I, i64 %tmp116 ; <float*> [#uses=1]
+ %scevgep117 = getelementptr float, float* %I, i64 %tmp116 ; <float*> [#uses=1]
%scevgep117118 = bitcast float* %scevgep117 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp120 = add i64 %tmp119, %tmp104 ; <i64> [#uses=2]
- %scevgep121 = getelementptr float* %I, i64 %tmp120 ; <float*> [#uses=1]
+ %scevgep121 = getelementptr float, float* %I, i64 %tmp120 ; <float*> [#uses=1]
%scevgep121122 = bitcast float* %scevgep121 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %scevgep123 = getelementptr float* %O, i64 %tmp105 ; <float*> [#uses=1]
+ %scevgep123 = getelementptr float, float* %O, i64 %tmp105 ; <float*> [#uses=1]
%scevgep123124 = bitcast float* %scevgep123 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %scevgep126 = getelementptr float* %O, i64 %tmp112 ; <float*> [#uses=1]
+ %scevgep126 = getelementptr float, float* %O, i64 %tmp112 ; <float*> [#uses=1]
%scevgep126127 = bitcast float* %scevgep126 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %scevgep128 = getelementptr float* %O, i64 %tmp116 ; <float*> [#uses=1]
+ %scevgep128 = getelementptr float, float* %O, i64 %tmp116 ; <float*> [#uses=1]
%scevgep128129 = bitcast float* %scevgep128 to <4 x float>* ; <<4 x float>*> [#uses=1]
- %scevgep130 = getelementptr float* %O, i64 %tmp120 ; <float*> [#uses=1]
+ %scevgep130 = getelementptr float, float* %O, i64 %tmp120 ; <float*> [#uses=1]
%scevgep130131 = bitcast float* %scevgep130 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp132 = mul i64 %indvar102, -16 ; <i64> [#uses=1]
%tmp136 = add i64 %tmp135, %tmp132 ; <i64> [#uses=2]
bb6.bb7_crit_edge: ; preds = %bb5
call void asm sideeffect "# Stop.", "~{dirflag},~{fpsr},~{flags}"() nounwind
- %scevgep110 = getelementptr float* %I, i64 %tmp109 ; <float*> [#uses=1]
- %scevgep125 = getelementptr float* %O, i64 %tmp109 ; <float*> [#uses=1]
+ %scevgep110 = getelementptr float, float* %I, i64 %tmp109 ; <float*> [#uses=1]
+ %scevgep125 = getelementptr float, float* %O, i64 %tmp109 ; <float*> [#uses=1]
br label %bb7
bb7: ; preds = %bb6.bb7_crit_edge, %bb6.preheader
bb.nph: ; preds = %bb8
%I_addr.0.sum = add i64 %14, -1 ; <i64> [#uses=1]
- %49 = getelementptr inbounds float* %I, i64 %I_addr.0.sum ; <float*> [#uses=1]
+ %49 = getelementptr inbounds float, float* %I, i64 %I_addr.0.sum ; <float*> [#uses=1]
%50 = bitcast float* %49 to <4 x float>* ; <<4 x float>*> [#uses=1]
%51 = load <4 x float>* %50, align 16 ; <<4 x float>> [#uses=1]
%tmp54 = add i64 %14, 16 ; <i64> [#uses=1]
%tmp51 = shl i64 %indvar, 4 ; <i64> [#uses=9]
%tmp55 = add i64 %tmp54, %tmp51 ; <i64> [#uses=2]
%tmp57 = add i64 %tmp56, %tmp51 ; <i64> [#uses=1]
- %scevgep58 = getelementptr float* %I, i64 %tmp57 ; <float*> [#uses=1]
+ %scevgep58 = getelementptr float, float* %I, i64 %tmp57 ; <float*> [#uses=1]
%scevgep5859 = bitcast float* %scevgep58 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp61 = add i64 %tmp60, %tmp51 ; <i64> [#uses=1]
- %scevgep62 = getelementptr float* %I, i64 %tmp61 ; <float*> [#uses=1]
+ %scevgep62 = getelementptr float, float* %I, i64 %tmp61 ; <float*> [#uses=1]
%scevgep6263 = bitcast float* %scevgep62 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp65 = add i64 %tmp64, %tmp51 ; <i64> [#uses=1]
- %scevgep66 = getelementptr float* %I, i64 %tmp65 ; <float*> [#uses=1]
+ %scevgep66 = getelementptr float, float* %I, i64 %tmp65 ; <float*> [#uses=1]
%scevgep6667 = bitcast float* %scevgep66 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp69 = add i64 %tmp68, %tmp51 ; <i64> [#uses=1]
- %scevgep70 = getelementptr float* %I, i64 %tmp69 ; <float*> [#uses=1]
+ %scevgep70 = getelementptr float, float* %I, i64 %tmp69 ; <float*> [#uses=1]
%scevgep7071 = bitcast float* %scevgep70 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp72 = add i64 %14, %tmp51 ; <i64> [#uses=1]
- %scevgep73 = getelementptr float* %O, i64 %tmp72 ; <float*> [#uses=1]
+ %scevgep73 = getelementptr float, float* %O, i64 %tmp72 ; <float*> [#uses=1]
%scevgep7374 = bitcast float* %scevgep73 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp77 = add i64 %tmp76, %tmp51 ; <i64> [#uses=1]
- %scevgep78 = getelementptr float* %O, i64 %tmp77 ; <float*> [#uses=1]
+ %scevgep78 = getelementptr float, float* %O, i64 %tmp77 ; <float*> [#uses=1]
%scevgep7879 = bitcast float* %scevgep78 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp81 = add i64 %tmp80, %tmp51 ; <i64> [#uses=1]
- %scevgep82 = getelementptr float* %O, i64 %tmp81 ; <float*> [#uses=1]
+ %scevgep82 = getelementptr float, float* %O, i64 %tmp81 ; <float*> [#uses=1]
%scevgep8283 = bitcast float* %scevgep82 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp85 = add i64 %tmp84, %tmp51 ; <i64> [#uses=1]
- %scevgep86 = getelementptr float* %O, i64 %tmp85 ; <float*> [#uses=1]
+ %scevgep86 = getelementptr float, float* %O, i64 %tmp85 ; <float*> [#uses=1]
%scevgep8687 = bitcast float* %scevgep86 to <4 x float>* ; <<4 x float>*> [#uses=1]
%tmp88 = mul i64 %indvar, -16 ; <i64> [#uses=1]
%tmp92 = add i64 %tmp91, %tmp88 ; <i64> [#uses=2]
br i1 %72, label %bb9, label %bb10.bb11.loopexit_crit_edge
bb10.bb11.loopexit_crit_edge: ; preds = %bb9
- %scevgep = getelementptr float* %I, i64 %tmp55 ; <float*> [#uses=1]
- %scevgep75 = getelementptr float* %O, i64 %tmp55 ; <float*> [#uses=1]
+ %scevgep = getelementptr float, float* %I, i64 %tmp55 ; <float*> [#uses=1]
+ %scevgep75 = getelementptr float, float* %O, i64 %tmp55 ; <float*> [#uses=1]
br label %bb11
bb11: ; preds = %bb8, %bb10.bb11.loopexit_crit_edge, %bb7
bb12: ; preds = %bb11, %bb12
%indvar94 = phi i64 [ %indvar.next95, %bb12 ], [ 0, %bb11 ] ; <i64> [#uses=3]
%x.130 = phi float [ %77, %bb12 ], [ %73, %bb11 ] ; <float> [#uses=2]
- %I_addr.433 = getelementptr float* %I_addr.2, i64 %indvar94 ; <float*> [#uses=1]
- %O_addr.432 = getelementptr float* %O_addr.2, i64 %indvar94 ; <float*> [#uses=1]
+ %I_addr.433 = getelementptr float, float* %I_addr.2, i64 %indvar94 ; <float*> [#uses=1]
+ %O_addr.432 = getelementptr float, float* %O_addr.2, i64 %indvar94 ; <float*> [#uses=1]
%75 = load float* %I_addr.433, align 4 ; <float> [#uses=1]
%76 = fmul float %75, %x.130 ; <float> [#uses=1]
store float %76, float* %O_addr.432, align 4
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
%vtable = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
- %vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 1
+ %vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 1
%2 = load i32 (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
%3 = musttail call x86_thiscallcc i32 %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
ret i32 %3
entry:
%1 = bitcast %struct.B* %this to void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)***
%vtable = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*** %1
- %vfn = getelementptr inbounds void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 2
+ %vfn = getelementptr inbounds void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)*, void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vtable, i32 2
%2 = load void (%struct.B*, <{ %struct.A, i32, %struct.A }>*)** %vfn
musttail call x86_thiscallcc void %2(%struct.B* %this, <{ %struct.A, i32, %struct.A }>* inalloca %0)
ret void
entry:
%1 = bitcast %struct.B* %this to %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)***
%vtable = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*** %1
- %vfn = getelementptr inbounds %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vtable, i32 3
+ %vfn = getelementptr inbounds %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)*, %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vtable, i32 3
%2 = load %struct.A* (%struct.B*, <{ %struct.A*, %struct.A, i32, %struct.A }>*)** %vfn
%3 = musttail call x86_thiscallcc %struct.A* %2(%struct.B* %this, <{ %struct.A*, %struct.A, i32, %struct.A }>* inalloca %0)
ret %struct.A* %3
entry:
%1 = bitcast %struct.B* %this to void (%struct.A*, %struct.B*, i32)***
%vtable = load void (%struct.A*, %struct.B*, i32)*** %1
- %vfn = getelementptr inbounds void (%struct.A*, %struct.B*, i32)** %vtable, i32 4
+ %vfn = getelementptr inbounds void (%struct.A*, %struct.B*, i32)*, void (%struct.A*, %struct.B*, i32)** %vtable, i32 4
%2 = load void (%struct.A*, %struct.B*, i32)** %vfn
musttail call x86_thiscallcc void %2(%struct.A* sret %agg.result, %struct.B* %this, i32 %0)
ret void
; CHECK-NOT: ret
define x86_stdcallcc i32 @stdcall_thunk(<{ %struct.B*, %struct.A }>* inalloca) {
entry:
- %this_ptr = getelementptr inbounds <{ %struct.B*, %struct.A }>* %0, i32 0, i32 0
+ %this_ptr = getelementptr inbounds <{ %struct.B*, %struct.A }>, <{ %struct.B*, %struct.A }>* %0, i32 0, i32 0
%this = load %struct.B** %this_ptr
%1 = bitcast %struct.B* %this to i32 (<{ %struct.B*, %struct.A }>*)***
%vtable = load i32 (<{ %struct.B*, %struct.A }>*)*** %1
- %vfn = getelementptr inbounds i32 (<{ %struct.B*, %struct.A }>*)** %vtable, i32 1
+ %vfn = getelementptr inbounds i32 (<{ %struct.B*, %struct.A }>*)*, i32 (<{ %struct.B*, %struct.A }>*)** %vtable, i32 1
%2 = load i32 (<{ %struct.B*, %struct.A }>*)** %vfn
%3 = musttail call x86_stdcallcc i32 %2(<{ %struct.B*, %struct.A }>* inalloca %0)
ret i32 %3
entry:
%1 = bitcast %struct.B* %this to i32 (%struct.B*, <{ %struct.A }>*)***
%vtable = load i32 (%struct.B*, <{ %struct.A }>*)*** %1
- %vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A }>*)** %vtable, i32 1
+ %vfn = getelementptr inbounds i32 (%struct.B*, <{ %struct.A }>*)*, i32 (%struct.B*, <{ %struct.A }>*)** %vtable, i32 1
%2 = load i32 (%struct.B*, <{ %struct.A }>*)** %vfn
%3 = musttail call x86_fastcallcc i32 %2(%struct.B* inreg %this, <{ %struct.A }>* inalloca %0)
ret i32 %3
; CHECK-LABEL: t1:
; CHECK: jmp {{_?}}t1_callee
define x86_thiscallcc void @t1(i8* %this) {
- %adj = getelementptr i8* %this, i32 4
+ %adj = getelementptr i8, i8* %this, i32 4
musttail call x86_thiscallcc void @t1_callee(i8* %adj)
ret void
}
; CHECK-LABEL: t2:
; CHECK: jmp {{_?}}t2_callee
define x86_thiscallcc i32 @t2(i8* %this, i32 %a) {
- %adj = getelementptr i8* %this, i32 4
+ %adj = getelementptr i8, i8* %this, i32 4
%rv = musttail call x86_thiscallcc i32 @t2_callee(i8* %adj, i32 %a)
ret i32 %rv
}
; CHECK-LABEL: t3:
; CHECK: jmp {{_?}}t3_callee
define x86_thiscallcc i8* @t3(i8* %this, <{ i8*, i32 }>* inalloca %args) {
- %adj = getelementptr i8* %this, i32 4
- %a_ptr = getelementptr <{ i8*, i32 }>* %args, i32 0, i32 1
+ %adj = getelementptr i8, i8* %this, i32 4
+ %a_ptr = getelementptr <{ i8*, i32 }>, <{ i8*, i32 }>* %args, i32 0, i32 1
store i32 0, i32* %a_ptr
%rv = musttail call x86_thiscallcc i8* @t3_callee(i8* %adj, <{ i8*, i32 }>* inalloca %args)
ret i8* %rv
@g = external global i32
define void @h_thunk(%struct.Foo* %this, ...) {
- %cond_p = getelementptr %struct.Foo* %this, i32 0, i32 0
+ %cond_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 0
%cond = load i1* %cond_p
br i1 %cond, label %then, label %else
then:
- %a_p = getelementptr %struct.Foo* %this, i32 0, i32 1
+ %a_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 1
%a_i8 = load i8** %a_p
%a = bitcast i8* %a_i8 to void (%struct.Foo*, ...)*
musttail call void (%struct.Foo*, ...)* %a(%struct.Foo* %this, ...)
ret void
else:
- %b_p = getelementptr %struct.Foo* %this, i32 0, i32 2
+ %b_p = getelementptr %struct.Foo, %struct.Foo* %this, i32 0, i32 2
%b_i8 = load i8** %b_p
%b = bitcast i8* %b_i8 to void (%struct.Foo*, ...)*
store i32 42, i32* @g
bb: ; preds = %bb23
%tmp = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp1 = getelementptr [3 x i32]* @fnan, i32 0, i32 %tmp ; <i32*> [#uses=1]
+ %tmp1 = getelementptr [3 x i32], [3 x i32]* @fnan, i32 0, i32 %tmp ; <i32*> [#uses=1]
%tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
- %tmp3 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp3 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp34 = bitcast float* %tmp3 to i32* ; <i32*> [#uses=1]
store i32 %tmp2, i32* %tmp34, align 4
- %tmp5 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp5 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp6 = load float* %tmp5, align 4 ; <float> [#uses=1]
%tmp67 = fpext float %tmp6 to double ; <double> [#uses=1]
- %tmp8 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp8 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmp67, double* %tmp8, align 8
- %tmp9 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp9 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp910 = bitcast double* %tmp9 to i64* ; <i64*> [#uses=1]
%tmp11 = load i64* %tmp910, align 8 ; <i64> [#uses=1]
%tmp1112 = trunc i64 %tmp11 to i32 ; <i32> [#uses=1]
%tmp13 = and i32 %tmp1112, -1 ; <i32> [#uses=1]
- %tmp14 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp14 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp1415 = bitcast double* %tmp14 to i64* ; <i64*> [#uses=1]
%tmp16 = load i64* %tmp1415, align 8 ; <i64> [#uses=1]
%.cast = zext i32 32 to i64 ; <i64> [#uses=1]
%tmp17 = ashr i64 %tmp16, %.cast ; <i64> [#uses=1]
%tmp1718 = trunc i64 %tmp17 to i32 ; <i32> [#uses=1]
- %tmp19 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp19 = getelementptr [10 x i8], [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp1718, i32* @var
store volatile i32 %tmp13, i32* @var
%tmp21 = load i32* %i, align 4 ; <i32> [#uses=1]
bb28: ; preds = %bb46
%tmp29 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp30 = getelementptr [3 x i64]* @dnan, i32 0, i32 %tmp29 ; <i64*> [#uses=1]
+ %tmp30 = getelementptr [3 x i64], [3 x i64]* @dnan, i32 0, i32 %tmp29 ; <i64*> [#uses=1]
%tmp31 = load i64* %tmp30, align 8 ; <i64> [#uses=1]
- %tmp32 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp32 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp3233 = bitcast double* %tmp32 to i64* ; <i64*> [#uses=1]
store i64 %tmp31, i64* %tmp3233, align 8
- %tmp35 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp35 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp36 = load double* %tmp35, align 8 ; <double> [#uses=1]
%tmp3637 = fptrunc double %tmp36 to float ; <float> [#uses=1]
- %tmp38 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp38 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
store float %tmp3637, float* %tmp38, align 4
- %tmp39 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp39 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp3940 = bitcast float* %tmp39 to i32* ; <i32*> [#uses=1]
%tmp41 = load i32* %tmp3940, align 4 ; <i32> [#uses=1]
- %tmp42 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp42 = getelementptr [6 x i8], [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp41, i32* @var
%tmp44 = load i32* %i, align 4 ; <i32> [#uses=1]
%tmp45 = add i32 %tmp44, 1 ; <i32> [#uses=1]
bb52: ; preds = %bb78
%tmp53 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp54 = getelementptr [3 x i32]* @fsnan, i32 0, i32 %tmp53 ; <i32*> [#uses=1]
+ %tmp54 = getelementptr [3 x i32], [3 x i32]* @fsnan, i32 0, i32 %tmp53 ; <i32*> [#uses=1]
%tmp55 = load i32* %tmp54, align 4 ; <i32> [#uses=1]
- %tmp56 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp56 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp5657 = bitcast float* %tmp56 to i32* ; <i32*> [#uses=1]
store i32 %tmp55, i32* %tmp5657, align 4
- %tmp58 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp58 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp59 = load float* %tmp58, align 4 ; <float> [#uses=1]
%tmp5960 = fpext float %tmp59 to double ; <double> [#uses=1]
- %tmp61 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp61 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
store double %tmp5960, double* %tmp61, align 8
- %tmp62 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp62 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp6263 = bitcast double* %tmp62 to i64* ; <i64*> [#uses=1]
%tmp64 = load i64* %tmp6263, align 8 ; <i64> [#uses=1]
%tmp6465 = trunc i64 %tmp64 to i32 ; <i32> [#uses=1]
%tmp66 = and i32 %tmp6465, -1 ; <i32> [#uses=1]
- %tmp68 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp68 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp6869 = bitcast double* %tmp68 to i64* ; <i64*> [#uses=1]
%tmp70 = load i64* %tmp6869, align 8 ; <i64> [#uses=1]
%.cast71 = zext i32 32 to i64 ; <i64> [#uses=1]
%tmp72 = ashr i64 %tmp70, %.cast71 ; <i64> [#uses=1]
%tmp7273 = trunc i64 %tmp72 to i32 ; <i32> [#uses=1]
- %tmp74 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp74 = getelementptr [10 x i8], [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp7273, i32* @var
store volatile i32 %tmp66, i32* @var
%tmp76 = load i32* %i, align 4 ; <i32> [#uses=1]
bb84: ; preds = %bb101
%tmp85 = load i32* %i, align 4 ; <i32> [#uses=1]
- %tmp86 = getelementptr [3 x i64]* @dsnan, i32 0, i32 %tmp85 ; <i64*> [#uses=1]
+ %tmp86 = getelementptr [3 x i64], [3 x i64]* @dsnan, i32 0, i32 %tmp85 ; <i64*> [#uses=1]
%tmp87 = load i64* %tmp86, align 8 ; <i64> [#uses=1]
- %tmp88 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp88 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp8889 = bitcast double* %tmp88 to i64* ; <i64*> [#uses=1]
store i64 %tmp87, i64* %tmp8889, align 8
- %tmp90 = getelementptr %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp90 = getelementptr %struct..1anon, %struct..1anon* %ud, i32 0, i32 0 ; <double*> [#uses=1]
%tmp91 = load double* %tmp90, align 8 ; <double> [#uses=1]
%tmp9192 = fptrunc double %tmp91 to float ; <float> [#uses=1]
- %tmp93 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp93 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
store float %tmp9192, float* %tmp93, align 4
- %tmp94 = getelementptr %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp94 = getelementptr %struct..0anon, %struct..0anon* %uf, i32 0, i32 0 ; <float*> [#uses=1]
%tmp9495 = bitcast float* %tmp94 to i32* ; <i32*> [#uses=1]
%tmp96 = load i32* %tmp9495, align 4 ; <i32> [#uses=1]
- %tmp97 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp97 = getelementptr [6 x i8], [6 x i8]* @.str1, i32 0, i32 0 ; <i8*> [#uses=1]
store volatile i32 %tmp96, i32* @var
%tmp99 = load i32* %i, align 4 ; <i32> [#uses=1]
%tmp100 = add i32 %tmp99, 1 ; <i32> [#uses=1]
declare void @_ZN21HNodeTranslateRotate311toCartesianEv(%struct.HNodeTranslateRotate3*)
define linkonce void @_ZN21HNodeTranslateRotate36setVelERK9CDSVectorIdLi1EN3CDS12DefaultAllocEE(%struct.HNodeTranslateRotate3* %this, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv) {
- %1 = getelementptr double* null, i32 -1 ; <double*> [#uses=1]
+ %1 = getelementptr double, double* null, i32 -1 ; <double*> [#uses=1]
%2 = load double* %1, align 8 ; <double> [#uses=1]
%3 = load double* null, align 8 ; <double> [#uses=2]
%4 = load double* null, align 8 ; <double> [#uses=2]
%5 = load double* null, align 8 ; <double> [#uses=3]
- %6 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
- %7 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
- %8 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
- %9 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
+ %6 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
+ %7 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
+ %8 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
+ %9 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 2, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
%10 = load double* null, align 8 ; <double> [#uses=2]
%11 = fsub double -0.000000e+00, %10 ; <double> [#uses=1]
%12 = load double* null, align 8 ; <double> [#uses=2]
- %13 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=1]
+ %13 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=1]
%14 = load double* %13, align 8 ; <double> [#uses=2]
%15 = fsub double -0.000000e+00, %14 ; <double> [#uses=1]
- %16 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %16 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%17 = load double* %16, align 8 ; <double> [#uses=2]
%18 = fsub double -0.000000e+00, %17 ; <double> [#uses=1]
- %19 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
- %20 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
- %21 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 6 ; <double*> [#uses=0]
- %22 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 9 ; <double*> [#uses=0]
- %23 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
- %24 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 4 ; <double*> [#uses=0]
- %25 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 7 ; <double*> [#uses=0]
- %26 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 10 ; <double*> [#uses=0]
- %27 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
- %28 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 5 ; <double*> [#uses=0]
- %29 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 8 ; <double*> [#uses=0]
- %30 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 11 ; <double*> [#uses=0]
- %31 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
- %32 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
- %33 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %19 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
+ %20 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 3 ; <double*> [#uses=0]
+ %21 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 6 ; <double*> [#uses=0]
+ %22 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 9 ; <double*> [#uses=0]
+ %23 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=0]
+ %24 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 4 ; <double*> [#uses=0]
+ %25 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 7 ; <double*> [#uses=0]
+ %26 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 10 ; <double*> [#uses=0]
+ %27 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=0]
+ %28 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 5 ; <double*> [#uses=0]
+ %29 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 8 ; <double*> [#uses=0]
+ %30 = getelementptr %"struct.FixedMatrix<double,2,6,0,0>", %"struct.FixedMatrix<double,2,6,0,0>"* null, i32 0, i32 0, i32 0, i32 11 ; <double*> [#uses=0]
+ %31 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 0 ; <double*> [#uses=0]
+ %32 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %33 = getelementptr %"struct.FixedMatrix<double,1,3,0,0>", %"struct.FixedMatrix<double,1,3,0,0>"* null, i32 0, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
%34 = fmul double %17, %5 ; <double> [#uses=1]
%35 = fadd double 0.000000e+00, %34 ; <double> [#uses=1]
%36 = fadd double 0.000000e+00, 0.000000e+00 ; <double> [#uses=1]
%51 = fmul double %35, 2.000000e+00 ; <double> [#uses=1]
%52 = fmul double %42, 2.000000e+00 ; <double> [#uses=1]
%53 = fmul double %50, 2.000000e+00 ; <double> [#uses=1]
- %54 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
+ %54 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 0 ; <double*> [#uses=1]
store double %51, double* %54, align 8
- %55 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
+ %55 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 1 ; <double*> [#uses=1]
store double %52, double* %55, align 8
- %56 = getelementptr %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
+ %56 = getelementptr %struct.HNodeTranslateRotate3, %struct.HNodeTranslateRotate3* %this, i32 0, i32 0, i32 10, i32 0, i32 0, i32 2 ; <double*> [#uses=1]
store double %53, double* %56, align 8
- %57 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 0 ; <%"struct.CDSVector<double,0,CDS::DefaultAlloc>"**> [#uses=1]
+ %57 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >", %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 0 ; <%"struct.CDSVector<double,0,CDS::DefaultAlloc>"**> [#uses=1]
store %"struct.CDSVector<double,0,CDS::DefaultAlloc>"* %velv, %"struct.CDSVector<double,0,CDS::DefaultAlloc>"** %57, align 8
- %58 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 1 ; <i32*> [#uses=1]
+ %58 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >", %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 4, i32* %58, align 4
- %59 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 2 ; <i32*> [#uses=1]
+ %59 = getelementptr %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >", %"struct.SubVector<CDSVector<double, 1, CDS::DefaultAlloc> >"* null, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 3, i32* %59, align 8
unreachable
}
%ap = alloca [1 x %struct.__va_list_tag], align 8 ; <[1 x %struct.__va_list_tag]*> [#uses=4]
%ap12 = bitcast [1 x %struct.__va_list_tag]* %ap to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %ap12)
- %0 = getelementptr [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0 ; <i32*> [#uses=2]
+ %0 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0 ; <i32*> [#uses=2]
%1 = load i32* %0, align 8 ; <i32> [#uses=3]
%2 = icmp ult i32 %1, 48 ; <i1> [#uses=1]
br i1 %2, label %bb, label %bb3
bb: ; preds = %entry
- %3 = getelementptr [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 3 ; <i8**> [#uses=1]
+ %3 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 3 ; <i8**> [#uses=1]
%4 = load i8** %3, align 8 ; <i8*> [#uses=1]
%5 = inttoptr i32 %1 to i8* ; <i8*> [#uses=1]
%6 = ptrtoint i8* %5 to i64 ; <i64> [#uses=1]
- %ctg2 = getelementptr i8* %4, i64 %6 ; <i8*> [#uses=1]
+ %ctg2 = getelementptr i8, i8* %4, i64 %6 ; <i8*> [#uses=1]
%7 = add i32 %1, 8 ; <i32> [#uses=1]
store i32 %7, i32* %0, align 8
br label %bb4
bb3: ; preds = %entry
- %8 = getelementptr [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2 ; <i8**> [#uses=2]
+ %8 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2 ; <i8**> [#uses=2]
%9 = load i8** %8, align 8 ; <i8*> [#uses=2]
- %10 = getelementptr i8* %9, i64 8 ; <i8*> [#uses=1]
+ %10 = getelementptr i8, i8* %9, i64 8 ; <i8*> [#uses=1]
store i8* %10, i8** %8, align 8
br label %bb4
%7 = add i32 %x.06, %4
%8 = shl i32 %x.06, 1
%9 = add i32 %6, %8
- %10 = getelementptr i8* %r, i32 %9
+ %10 = getelementptr i8, i8* %r, i32 %9
%11 = load i8* %10, align 1
- %12 = getelementptr i8* %j, i32 %7
+ %12 = getelementptr i8, i8* %j, i32 %7
store i8 %11, i8* %12, align 1
br label %bb7
%x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ]
%25 = shl i32 %x.12, 2
%26 = add i32 %25, %21
- %27 = getelementptr i8* %r, i32 %26
+ %27 = getelementptr i8, i8* %r, i32 %26
%28 = load i8* %27, align 1
%.sum = add i32 %22, %x.12
- %29 = getelementptr i8* %j, i32 %.sum
+ %29 = getelementptr i8, i8* %j, i32 %.sum
store i8 %28, i8* %29, align 1
%30 = shl i32 %x.12, 2
%31 = or i32 %30, 2
%32 = add i32 %31, %21
- %33 = getelementptr i8* %r, i32 %32
+ %33 = getelementptr i8, i8* %r, i32 %32
%34 = load i8* %33, align 1
%.sum6 = add i32 %23, %x.12
- %35 = getelementptr i8* %j, i32 %.sum6
+ %35 = getelementptr i8, i8* %j, i32 %.sum6
store i8 %34, i8* %35, align 1
br label %bb15
%y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ]
%45 = mul i32 %y.21, %42
%.sum1 = add i32 %45, %43
- %46 = getelementptr i8* %r, i32 %.sum1
+ %46 = getelementptr i8, i8* %r, i32 %.sum1
%47 = mul i32 %y.21, %w
%.sum5 = add i32 %47, %.sum3
- %48 = getelementptr i8* %j, i32 %.sum5
+ %48 = getelementptr i8, i8* %j, i32 %.sum5
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i32 1, i1 false)
br label %bb24
bb26: ; preds = %bb24.bb26_crit_edge, %bb22
%49 = mul i32 %x, %w
%.sum4 = add i32 %.sum3, %49
- %50 = getelementptr i8* %j, i32 %.sum4
+ %50 = getelementptr i8, i8* %j, i32 %.sum4
%51 = mul i32 %x, %w
%52 = sdiv i32 %51, 2
tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false)
bb30: ; preds = %bb31, %bb.nph11
%y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ]
%56 = mul i32 %y.310, %54
- %57 = getelementptr i8* %r, i32 %56
+ %57 = getelementptr i8, i8* %r, i32 %56
%58 = mul i32 %y.310, %w
- %59 = getelementptr i8* %j, i32 %58
+ %59 = getelementptr i8, i8* %j, i32 %58
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i32 1, i1 false)
br label %bb31
bb33: ; preds = %bb31.bb33_crit_edge, %bb29
%60 = mul i32 %x, %w
- %61 = getelementptr i8* %j, i32 %60
+ %61 = getelementptr i8, i8* %j, i32 %60
%62 = mul i32 %x, %w
%63 = sdiv i32 %62, 2
tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false)
%7 = add i32 %x.06, %4
%8 = shl i32 %x.06, 1
%9 = add i32 %6, %8
- %10 = getelementptr i8* %r, i32 %9
+ %10 = getelementptr i8, i8* %r, i32 %9
%11 = load i8* %10, align 1
- %12 = getelementptr i8* %j, i32 %7
+ %12 = getelementptr i8, i8* %j, i32 %7
store i8 %11, i8* %12, align 1
br label %bb7
%x.12 = phi i32 [ 0, %bb.nph3 ], [ %indvar.next, %bb15 ]
%25 = shl i32 %x.12, 2
%26 = add i32 %25, %21
- %27 = getelementptr i8* %r, i32 %26
+ %27 = getelementptr i8, i8* %r, i32 %26
%28 = load i8* %27, align 1
%.sum = add i32 %22, %x.12
- %29 = getelementptr i8* %j, i32 %.sum
+ %29 = getelementptr i8, i8* %j, i32 %.sum
store i8 %28, i8* %29, align 1
%30 = shl i32 %x.12, 2
%31 = or i32 %30, 2
%32 = add i32 %31, %21
- %33 = getelementptr i8* %r, i32 %32
+ %33 = getelementptr i8, i8* %r, i32 %32
%34 = load i8* %33, align 1
%.sum6 = add i32 %23, %x.12
- %35 = getelementptr i8* %j, i32 %.sum6
+ %35 = getelementptr i8, i8* %j, i32 %.sum6
store i8 %34, i8* %35, align 1
br label %bb15
%y.21 = phi i32 [ 0, %bb.nph ], [ %indvar.next5, %bb24 ]
%45 = mul i32 %y.21, %42
%.sum1 = add i32 %45, %43
- %46 = getelementptr i8* %r, i32 %.sum1
+ %46 = getelementptr i8, i8* %r, i32 %.sum1
%47 = mul i32 %y.21, %w
%.sum5 = add i32 %47, %.sum3
- %48 = getelementptr i8* %j, i32 %.sum5
+ %48 = getelementptr i8, i8* %j, i32 %.sum5
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %48, i8* %46, i32 %w, i32 1, i1 false)
br label %bb24
bb26: ; preds = %bb24.bb26_crit_edge, %bb22
%49 = mul i32 %x, %w
%.sum4 = add i32 %.sum3, %49
- %50 = getelementptr i8* %j, i32 %.sum4
+ %50 = getelementptr i8, i8* %j, i32 %.sum4
%51 = mul i32 %x, %w
%52 = udiv i32 %51, 2
tail call void @llvm.memset.p0i8.i32(i8* %50, i8 -128, i32 %52, i32 1, i1 false)
bb30: ; preds = %bb31, %bb.nph11
%y.310 = phi i32 [ 0, %bb.nph11 ], [ %indvar.next13, %bb31 ]
%56 = mul i32 %y.310, %54
- %57 = getelementptr i8* %r, i32 %56
+ %57 = getelementptr i8, i8* %r, i32 %56
%58 = mul i32 %y.310, %w
- %59 = getelementptr i8* %j, i32 %58
+ %59 = getelementptr i8, i8* %j, i32 %58
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %59, i8* %57, i32 %w, i32 1, i1 false)
br label %bb31
bb33: ; preds = %bb31.bb33_crit_edge, %bb29
%60 = mul i32 %x, %w
- %61 = getelementptr i8* %j, i32 %60
+ %61 = getelementptr i8, i8* %j, i32 %60
%62 = mul i32 %x, %w
%63 = udiv i32 %62, 2
tail call void @llvm.memset.p0i8.i32(i8* %61, i8 -128, i32 %63, i32 1, i1 false)
bb: ; preds = %bb, %entry
%i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
+ %scevgep = getelementptr double, double* %p, i64 %i.0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %scevgep, align 8
%0 = add i64 %i.0, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %0, %smax ; <i1> [#uses=1]
bb: ; preds = %bb, %entry
%i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
+ %scevgep = getelementptr double, double* %p, i64 %i.0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %scevgep, align 8
%0 = add i64 %i.0, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %0, %smax ; <i1> [#uses=1]
bb: ; preds = %bb, %entry
%i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
+ %scevgep = getelementptr double, double* %p, i64 %i.0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %scevgep, align 8
%0 = add i64 %i.0, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %0, %umax ; <i1> [#uses=1]
bb: ; preds = %bb, %entry
%i.0 = phi i64 [ 0, %entry ], [ %0, %bb ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.0 ; <double*> [#uses=1]
+ %scevgep = getelementptr double, double* %p, i64 %i.0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %scevgep, align 8
%0 = add i64 %i.0, 1 ; <i64> [#uses=2]
%exitcond = icmp eq i64 %0, %umax ; <i1> [#uses=1]
bb4: ; preds = %bb4, %entry
%i.07 = phi i64 [ 0, %entry ], [ %2, %bb4 ] ; <i64> [#uses=2]
- %scevgep = getelementptr double* %p, i64 %i.07 ; <double*> [#uses=2]
+ %scevgep = getelementptr double, double* %p, i64 %i.07 ; <double*> [#uses=2]
%0 = load double* %scevgep, align 8 ; <double> [#uses=1]
%1 = fmul double %0, 2.000000e+00 ; <double> [#uses=1]
store double %1, double* %scevgep, align 8
for.body: ; preds = %for.body.preheader, %for.body
%i = phi i64 [ %i.next, %for.body ], [ 0, %for.body.preheader ] ; <i64> [#uses=2]
- %arrayidx = getelementptr double* %p, i64 %i ; <double*> [#uses=2]
+ %arrayidx = getelementptr double, double* %p, i64 %i ; <double*> [#uses=2]
%t4 = load double* %arrayidx ; <double> [#uses=1]
%mul = fmul double %t4, 2.200000e+00 ; <double> [#uses=1]
store double %mul, double* %arrayidx
%4 = add i8 %3, %iftmp.0.0 ; <i8> [#uses=1]
%5 = shl i8 %4, 2 ; <i8> [#uses=1]
%6 = zext i8 %5 to i64 ; <i64> [#uses=4]
- %7 = getelementptr inbounds i32* %array, i64 %6 ; <i32*> [#uses=1]
+ %7 = getelementptr inbounds i32, i32* %array, i64 %6 ; <i32*> [#uses=1]
store i32 %r0, i32* %7, align 4
%8 = or i64 %6, 2 ; <i64> [#uses=1]
- %9 = getelementptr inbounds i32* %array, i64 %8 ; <i32*> [#uses=1]
+ %9 = getelementptr inbounds i32, i32* %array, i64 %8 ; <i32*> [#uses=1]
store i32 %r0, i32* %9, align 4
%10 = or i64 %6, 1 ; <i64> [#uses=1]
- %11 = getelementptr inbounds i32* %array, i64 %10 ; <i32*> [#uses=1]
+ %11 = getelementptr inbounds i32, i32* %array, i64 %10 ; <i32*> [#uses=1]
store i32 %r0, i32* %11, align 4
%12 = or i64 %6, 3 ; <i64> [#uses=1]
- %13 = getelementptr inbounds i32* %array, i64 %12 ; <i32*> [#uses=1]
+ %13 = getelementptr inbounds i32, i32* %array, i64 %12 ; <i32*> [#uses=1]
store i32 %r0, i32* %13, align 4
%14 = add nsw i8 %j.010, 1 ; <i8> [#uses=2]
%15 = add i8 %iftmp.0.0, 1 ; <i8> [#uses=1]
%mul22 = shl i8 %inc.k.addr.1, 4 ; <i8> [#uses=1]
%add23 = add i8 %mul22, %mul ; <i8> [#uses=1]
%idxprom = zext i8 %add23 to i64 ; <i64> [#uses=4]
- %arrayidx = getelementptr inbounds i32* %array, i64 %idxprom ; <i32*> [#uses=1]
+ %arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom ; <i32*> [#uses=1]
store i32 %r0, i32* %arrayidx
%add3356 = or i64 %idxprom, 2 ; <i64> [#uses=1]
- %arrayidx36 = getelementptr inbounds i32* %array, i64 %add3356 ; <i32*> [#uses=1]
+ %arrayidx36 = getelementptr inbounds i32, i32* %array, i64 %add3356 ; <i32*> [#uses=1]
store i32 %r0, i32* %arrayidx36
%add4058 = or i64 %idxprom, 1 ; <i64> [#uses=1]
- %arrayidx43 = getelementptr inbounds i32* %array, i64 %add4058 ; <i32*> [#uses=1]
+ %arrayidx43 = getelementptr inbounds i32, i32* %array, i64 %add4058 ; <i32*> [#uses=1]
store i32 %r0, i32* %arrayidx43
%add4760 = or i64 %idxprom, 3 ; <i64> [#uses=1]
- %arrayidx50 = getelementptr inbounds i32* %array, i64 %add4760 ; <i32*> [#uses=1]
+ %arrayidx50 = getelementptr inbounds i32, i32* %array, i64 %add4760 ; <i32*> [#uses=1]
store i32 %r0, i32* %arrayidx50
%inc52 = add nsw i8 %j.065, 1 ; <i8> [#uses=2]
%add = add i8 %cond, 1 ; <i8> [#uses=1]
bb:
%indvar = phi i64 [ %n, %entry ], [ %indvar.next, %bb ]
%i.03 = add i64 %indvar, %n
- %0 = getelementptr double* %d, i64 %i.03
+ %0 = getelementptr double, double* %d, i64 %i.03
%1 = load double* %0, align 8
%2 = fmul double %1, 3.000000e+00
store double %2, double* %0, align 8
bb:
%indvar = phi i32 [ 0, %0 ], [ %indvar.next, %bb ]
%i.03 = sub i32 %n, %indvar
- %1 = getelementptr double* %p, i32 %i.03
+ %1 = getelementptr double, double* %p, i32 %i.03
%2 = load double* %1, align 4
%3 = fmul double %2, 2.930000e+00
store double %3, double* %1, align 4
%1 = alloca <2 x double>, align 16
%tmpcast = bitcast <2 x double>* %1 to %struct.S1*
call void @foo3(%struct.S1* %tmpcast) #2
- %p2 = getelementptr inbounds %struct.S1* %tmpcast, i64 0, i32 0
+ %p2 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 0
%2 = load double* %p2, align 16
- %p3 = getelementptr inbounds %struct.S1* %tmpcast, i64 0, i32 1
+ %p3 = getelementptr inbounds %struct.S1, %struct.S1* %tmpcast, i64 0, i32 1
%3 = load double* %p3, align 8
%4 = insertelement <2 x double> undef, double %2, i32 0
%5 = insertelement <2 x double> %4, double 0.000000e+00, i32 1
br i1 %cmp, label %return, label %for.body
for.body: ; preds = %for.cond
- %arrayidx = getelementptr inbounds i32* %b, i64 %conv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %conv
%tmp5 = load i32* %arrayidx, align 4
%conv6 = zext i32 %tmp5 to i64
%rem.i.i.i.i = and i64 %conv6, 63
for.cond: ; preds = %entry, %for.cond
%p.addr.0 = phi i8* [ %incdec.ptr, %for.cond ], [ %p, %entry ]
- %incdec.ptr = getelementptr inbounds i8* %p.addr.0, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %p.addr.0, i64 1
%0 = load i8* %p.addr.0, align 1
%tobool = icmp eq i8 %0, 0
br i1 %tobool, label %for.cond, label %if.end2
%indvar = phi i32 [ 0, %bb.nph ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%j.06 = sub i32 %j.03, %indvar ; <i32> [#uses=1]
%tmp11 = sub i32 %tmp10, %indvar ; <i32> [#uses=1]
- %scevgep = getelementptr i32* %ptr, i32 %tmp11 ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %ptr, i32 %tmp11 ; <i32*> [#uses=1]
%1 = load i32* %scevgep, align 4 ; <i32> [#uses=1]
%2 = ashr i32 %j.06, %shifts ; <i32> [#uses=1]
%3 = and i32 %2, 65535 ; <i32> [#uses=1]
- %4 = getelementptr inbounds i32* %quadrant, i32 %1 ; <i32*> [#uses=1]
+ %4 = getelementptr inbounds i32, i32* %quadrant, i32 %1 ; <i32*> [#uses=1]
store i32 %3, i32* %4, align 4
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, %bbSize ; <i1> [#uses=1]
unreachable
bb39.preheader: ; preds = %bb34
- %7 = getelementptr inbounds %struct.epoch_t* undef, i32 %indvar54, i32 3 ; <i32*> [#uses=1]
- %8 = getelementptr inbounds %struct.epoch_t* undef, i32 %indvar54, i32 2 ; <i32*> [#uses=0]
+ %7 = getelementptr inbounds %struct.epoch_t, %struct.epoch_t* undef, i32 %indvar54, i32 3 ; <i32*> [#uses=1]
+ %8 = getelementptr inbounds %struct.epoch_t, %struct.epoch_t* undef, i32 %indvar54, i32 2 ; <i32*> [#uses=0]
br i1 false, label %bb42, label %bb45
bb42: ; preds = %bb39.preheader
bb: ; preds = %bb, %bb.nph
%tmp9 = mul i64 undef, undef ; <i64> [#uses=2]
%tmp12 = add i64 %tmp11, %tmp9 ; <i64> [#uses=1]
- %scevgep13 = getelementptr i8* %bufp, i64 %tmp12 ; <i8*> [#uses=1]
+ %scevgep13 = getelementptr i8, i8* %bufp, i64 %tmp12 ; <i8*> [#uses=1]
%tmp15 = add i64 %tmp14, %tmp9 ; <i64> [#uses=1]
- %scevgep16 = getelementptr i8* %bufp, i64 %tmp15 ; <i8*> [#uses=1]
+ %scevgep16 = getelementptr i8, i8* %bufp, i64 %tmp15 ; <i8*> [#uses=1]
%0 = load i8* undef, align 1 ; <i8> [#uses=1]
%1 = zext i8 %0 to i32 ; <i32> [#uses=1]
- %2 = getelementptr inbounds [16 x i16]* @map_4_to_16, i64 0, i64 0 ; <i16*> [#uses=1]
+ %2 = getelementptr inbounds [16 x i16], [16 x i16]* @map_4_to_16, i64 0, i64 0 ; <i16*> [#uses=1]
%3 = load i16* %2, align 2 ; <i16> [#uses=1]
%4 = trunc i16 %3 to i8 ; <i8> [#uses=1]
store i8 %4, i8* undef, align 1
%5 = and i32 %1, 15 ; <i32> [#uses=1]
%6 = zext i32 %5 to i64 ; <i64> [#uses=1]
- %7 = getelementptr inbounds [16 x i16]* @map_4_to_16, i64 0, i64 %6 ; <i16*> [#uses=1]
+ %7 = getelementptr inbounds [16 x i16], [16 x i16]* @map_4_to_16, i64 0, i64 %6 ; <i16*> [#uses=1]
%8 = load i16* %7, align 2 ; <i16> [#uses=2]
%9 = lshr i16 %8, 8 ; <i16> [#uses=1]
%10 = trunc i16 %9 to i8 ; <i8> [#uses=1]
define <8 x float> @foo64(<8 x float>* %p) {
%1 = load <8 x float>* %p
- %idx1 = getelementptr inbounds <8 x float>* %p, i64 1
+ %idx1 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 1
%2 = load <8 x float>* %idx1
- %idx2 = getelementptr inbounds <8 x float>* %p, i64 2
+ %idx2 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 2
%3 = load <8 x float>* %idx2
- %idx3 = getelementptr inbounds <8 x float>* %p, i64 3
+ %idx3 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 3
%4 = load <8 x float>* %idx3
- %idx4 = getelementptr inbounds <8 x float>* %p, i64 4
+ %idx4 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 4
%5 = load <8 x float>* %idx4
- %idx5 = getelementptr inbounds <8 x float>* %p, i64 5
+ %idx5 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 5
%6 = load <8 x float>* %idx5
- %idx6 = getelementptr inbounds <8 x float>* %p, i64 6
+ %idx6 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 6
%7 = load <8 x float>* %idx6
- %idx7 = getelementptr inbounds <8 x float>* %p, i64 7
+ %idx7 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 7
%8 = load <8 x float>* %idx7
- %idx8 = getelementptr inbounds <8 x float>* %p, i64 8
+ %idx8 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 8
%9 = load <8 x float>* %idx8
- %idx9 = getelementptr inbounds <8 x float>* %p, i64 9
+ %idx9 = getelementptr inbounds <8 x float>, <8 x float>* %p, i64 9
%10 = load <8 x float>* %idx9
%r = tail call <8 x float> @bar64(<8 x float> %1, <8 x float> %2,
<8 x float> %3, <8 x float> %4,
; RUN: llc -mtriple=x86_64-unknown-unknown < %s
%foo = type { i64, i64 }
define void @bar(%foo* %zed) {
- %tmp = getelementptr inbounds %foo* %zed, i64 0, i32 0
+ %tmp = getelementptr inbounds %foo, %foo* %zed, i64 0, i32 0
store i64 0, i64* %tmp, align 8
- %tmp2 = getelementptr inbounds %foo* %zed, i64 0, i32 1
+ %tmp2 = getelementptr inbounds %foo, %foo* %zed, i64 0, i32 1
store i64 0, i64* %tmp2, align 8
%tmp3 = bitcast %foo* %zed to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp3, i8 0, i64 16, i32 8, i1 false)
; Regression test: loads a <2 x i64> from element offset 10 of %src,
; converts the unsigned integers to floats (uitofp), and stores the
; resulting <2 x float> at the matching offset 10 in %dest.
; The -/+ pairs below are the mechanical update of getelementptr to the
; explicit-pointee-type syntax (getelementptr T, T* %ptr, ...); the
; pointee type added on each + line matches the pointer operand's
; element type, so behavior is unchanged.
define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) noinline {
L.entry:
- %0 = getelementptr <2 x i64>* %src, i32 10
+ %0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
%1 = load <2 x i64>* %0, align 16
%2 = uitofp <2 x i64> %1 to <2 x float>
- %3 = getelementptr <2 x float>* %dest, i32 10
+ %3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
store <2 x float> %2, <2 x float>* %3, align 8
ret void
}
%retval = alloca i32
%this.addr = alloca %"Iterator"*
%this1 = load %"Iterator"** %this.addr
- %bundle_ = getelementptr inbounds %"Iterator"* %this1, i32 0, i32 0
+ %bundle_ = getelementptr inbounds %"Iterator", %"Iterator"* %this1, i32 0, i32 0
%0 = load i32** %bundle_
%1 = call { i64, <2 x float> } @Call()
%2 = call { i64, <2 x float> }* @CallPtr()
- %3 = getelementptr { i64, <2 x float> }* %2, i32 0, i32 1
+ %3 = getelementptr { i64, <2 x float> }, { i64, <2 x float> }* %2, i32 0, i32 1
%4 = extractvalue { i64, <2 x float> } %1, 1
store <2 x float> %4, <2 x float>* %3
%5 = load { i64, <2 x float> }* %2
%3 = or i64 0, 16
%add.ptr111.sum4096 = add i64 %3, 0
%4 = load <8 x float>* null, align 16, !tbaa !5
- %add.ptr162 = getelementptr inbounds [65536 x float]* null, i64 0, i64 %add.ptr111.sum4096
+ %add.ptr162 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr111.sum4096
%__v.i4158 = bitcast float* %add.ptr162 to <8 x float>*
%5 = load <8 x float>* %__v.i4158, align 16, !tbaa !5
%add.ptr158.sum40975066 = or i64 %add.ptr111.sum4096, 8
- %add.ptr183 = getelementptr inbounds [65536 x float]* null, i64 0, i64 %add.ptr158.sum40975066
+ %add.ptr183 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr158.sum40975066
%__v.i4162 = bitcast float* %add.ptr183 to <8 x float>*
%6 = load <8 x float>* %__v.i4162, align 16, !tbaa !5
%add.ptr200.sum40995067 = or i64 undef, 8
- %add.ptr225 = getelementptr inbounds [65536 x float]* null, i64 0, i64 %add.ptr200.sum40995067
+ %add.ptr225 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr200.sum40995067
%__v.i4167 = bitcast float* %add.ptr225 to <8 x float>*
%7 = load <8 x float>* %__v.i4167, align 4, !tbaa !5
%8 = load <8 x float>* undef, align 16, !tbaa !5
%add.ptr242.sum41015068 = or i64 0, 8
- %add.ptr267 = getelementptr inbounds [65536 x float]* null, i64 0, i64 %add.ptr242.sum41015068
+ %add.ptr267 = getelementptr inbounds [65536 x float], [65536 x float]* null, i64 0, i64 %add.ptr242.sum41015068
%__v.i4171 = bitcast float* %add.ptr267 to <8 x float>*
%9 = load <8 x float>* %__v.i4171, align 4, !tbaa !5
%mul.i4690 = fmul <8 x float> %7, undef
br i1 %cmp4, label %for.body.preheader, label %for.end38
for.body.preheader: ; preds = %entry
- %gep = getelementptr %struct.planet* %bodies, i64 1, i32 1
+ %gep = getelementptr %struct.planet, %struct.planet* %bodies, i64 1, i32 1
%gep13 = bitcast double* %gep to %struct.planet*
%0 = add i32 %nbodies, -1
br label %for.body
br i1 %cmp22, label %for.body3.lr.ph, label %for.inc20
for.body3.lr.ph: ; preds = %for.body
- %x = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 0
- %y = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 1
- %vx = getelementptr inbounds %struct.planet* %bodies, i64 %iv9, i32 2
+ %x = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %iv9, i32 0
+ %y = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %iv9, i32 1
+ %vx = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %iv9, i32 2
br label %for.body3
for.body3: ; preds = %for.body3, %for.body3.lr.ph
%iv15 = phi %struct.planet* [ %gep16, %for.body3 ], [ %iv, %for.body3.lr.ph ]
%iv1517 = bitcast %struct.planet* %iv15 to double*
%2 = load double* %x, align 8
- %gep18 = getelementptr double* %iv1517, i64 -1
+ %gep18 = getelementptr double, double* %iv1517, i64 -1
%3 = load double* %gep18, align 8
%sub = fsub double %2, %3
%4 = load double* %y, align 8
%add10 = fadd double %sub, %sub8
%call = tail call double @sqrt(double %sub8) #2
store double %add10, double* %vx, align 8
- %gep16 = getelementptr %struct.planet* %iv15, i64 1
+ %gep16 = getelementptr %struct.planet, %struct.planet* %iv15, i64 1
%iv.next21 = add i32 %iv20, -1
%exitcond = icmp eq i32 %iv.next21, 0
br i1 %exitcond, label %for.inc20, label %for.body3
for.inc20: ; preds = %for.body3, %for.body
%lftr.wideiv11 = trunc i64 %iv.next10 to i32
- %gep14 = getelementptr %struct.planet* %iv, i64 1
+ %gep14 = getelementptr %struct.planet, %struct.planet* %iv, i64 1
%iv.next = add i32 %iv19, -1
%exitcond12 = icmp eq i32 %lftr.wideiv11, %nbodies
br i1 %exitcond12, label %for.end38, label %for.body
bb118: ; preds = %bb5, %bb5, %bb5, %bb5
%tmp125 = load i8** null, align 8 ; <i8*> [#uses=1]
%tmp125126 = bitcast i8* %tmp125 to %struct.S2259* ; <%struct.S2259*> [#uses=1]
- %tmp128 = getelementptr %struct.S2259* %tmp125126, i32 0, i32 0 ; <<4 x i16>*> [#uses=1]
+ %tmp128 = getelementptr %struct.S2259, %struct.S2259* %tmp125126, i32 0, i32 0 ; <<4 x i16>*> [#uses=1]
%tmp129 = load <4 x i16>* %tmp128, align 8 ; <<4 x i16>> [#uses=1]
store <4 x i16> %tmp129, <4 x i16>* null, align 8
ret void
%tmp3 = bitcast <2 x i64> %and.i to <4 x i32>\r
%index.sroa.0.0.vec.extract = extractelement <4 x i32> %tmp3, i32 0\r
%idx.ext = sext i32 %index.sroa.0.0.vec.extract to i64\r
- %add.ptr = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext\r
+ %add.ptr = getelementptr inbounds i8, i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext\r
%tmp4 = bitcast i8* %add.ptr to double*\r
%index.sroa.0.4.vec.extract = extractelement <4 x i32> %tmp3, i32 1\r
%idx.ext5 = sext i32 %index.sroa.0.4.vec.extract to i64\r
- %add.ptr6 = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext5\r
+ %add.ptr6 = getelementptr inbounds i8, i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext5\r
%tmp5 = bitcast i8* %add.ptr6 to double*\r
%index.sroa.0.8.vec.extract = extractelement <4 x i32> %tmp3, i32 2\r
%idx.ext14 = sext i32 %index.sroa.0.8.vec.extract to i64\r
- %add.ptr15 = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext14\r
+ %add.ptr15 = getelementptr inbounds i8, i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext14\r
%tmp6 = bitcast i8* %add.ptr15 to double*\r
%index.sroa.0.12.vec.extract = extractelement <4 x i32> %tmp3, i32 3\r
%idx.ext19 = sext i32 %index.sroa.0.12.vec.extract to i64\r
- %add.ptr20 = getelementptr inbounds i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext19\r
+ %add.ptr20 = getelementptr inbounds i8, i8* bitcast ([256 x double]* @stuff to i8*), i64 %idx.ext19\r
%tmp7 = bitcast i8* %add.ptr20 to double*\r
- %add.ptr46 = getelementptr inbounds i8* bitcast (double* getelementptr inbounds ([256 x double]* @stuff, i64 0, i64 1) to i8*), i64 %idx.ext\r
+ %add.ptr46 = getelementptr inbounds i8, i8* bitcast (double* getelementptr inbounds ([256 x double]* @stuff, i64 0, i64 1) to i8*), i64 %idx.ext\r
%tmp16 = bitcast i8* %add.ptr46 to double*\r
- %add.ptr51 = getelementptr inbounds i8* bitcast (double* getelementptr inbounds ([256 x double]* @stuff, i64 0, i64 1) to i8*), i64 %idx.ext5\r
+ %add.ptr51 = getelementptr inbounds i8, i8* bitcast (double* getelementptr inbounds ([256 x double]* @stuff, i64 0, i64 1) to i8*), i64 %idx.ext5\r
%tmp17 = bitcast i8* %add.ptr51 to double*\r
call void @toto(double* %tmp4, double* %tmp5, double* %tmp6, double* %tmp7, double* %tmp16, double* %tmp17)\r
ret void\r
define void @foo(%struct.anon* byval %p) nounwind {
entry:
- %tmp = getelementptr %struct.anon* %p, i32 0, i32 0 ; <float*> [#uses=1]
+ %tmp = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 0 ; <float*> [#uses=1]
%tmp1 = load float* %tmp ; <float> [#uses=1]
- %tmp2 = getelementptr %struct.anon* %p, i32 0, i32 1 ; <float*> [#uses=1]
+ %tmp2 = getelementptr %struct.anon, %struct.anon* %p, i32 0, i32 1 ; <float*> [#uses=1]
%tmp3 = load float* %tmp2 ; <float> [#uses=1]
%neg = fsub float -0.000000e+00, %tmp1 ; <float> [#uses=1]
%conv = fpext float %neg to double ; <double> [#uses=1]
%tmp1 = ptrtoint %struct.NODE** %storemerge.in.i to i64
%tmp2 = lshr i64 %tmp1, 3
%tmp3 = and i64 %tmp2, 2147483647
- %tmp4 = getelementptr %struct.HashEntry* %tmp, i64 %tmp3, i32 0, i32 1
+ %tmp4 = getelementptr %struct.HashEntry, %struct.HashEntry* %tmp, i64 %tmp3, i32 0, i32 1
%tmp7 = load i8** %tmp4, align 8
- %tmp8 = getelementptr %struct.NODE* %storemerge.i, i64 0, i32 2
+ %tmp8 = getelementptr %struct.NODE, %struct.NODE* %storemerge.i, i64 0, i32 2
%tmp9 = bitcast %struct.anon* %tmp8 to %struct.NODE***
%tmp11 = load %struct.NODE*** %tmp9, align 8
%tmp12 = ptrtoint %struct.NODE** %tmp11 to i64
%tmp13 = lshr i64 %tmp12, 3
%tmp14 = and i64 %tmp13, 2147483647
- %tmp15 = getelementptr %struct.HashEntry* %tmp, i64 %tmp14, i32 0, i32 1
+ %tmp15 = getelementptr %struct.HashEntry, %struct.HashEntry* %tmp, i64 %tmp14, i32 0, i32 1
call fastcc void @xlprint(i8** %tmp4, i8* %tmp7, i8** %tmp15)
ret void
}
%0 = add i32 %len, 2 ; <i32> [#uses=1]
%1 = add i32 %0, %lag ; <i32> [#uses=1]
%2 = alloca double, i32 %1 ; <double*> [#uses=2]
- %3 = getelementptr double* %2, i32 %lag ; <double*> [#uses=2]
+ %3 = getelementptr double, double* %2, i32 %lag ; <double*> [#uses=2]
%4 = ptrtoint double* %3 to i32 ; <i32> [#uses=1]
%5 = and i32 %4, 8 ; <i32> [#uses=1]
%6 = icmp eq i32 %5, 0 ; <i1> [#uses=1]
bb: ; preds = %entry
%.sum = add i32 %lag, 1 ; <i32> [#uses=1]
- %7 = getelementptr double* %2, i32 %.sum ; <double*> [#uses=1]
+ %7 = getelementptr double, double* %2, i32 %.sum ; <double*> [#uses=1]
br label %bb19
bb19: ; preds = %bb, %entry
call void asm sideeffect "movsd $0, %xmm7 \0A\09movapd ff_pd_1, %xmm6 \0A\09movapd ff_pd_2, %xmm5 \0A\09movlhps %xmm7, %xmm7 \0A\09subpd %xmm5, %xmm7 \0A\09addsd %xmm6, %xmm7 \0A\09", "*m,~{dirflag},~{fpsr},~{flags}"(double* %c) nounwind
%15 = and i32 %len, 1 ; <i32> [#uses=1]
%toBool = icmp eq i32 %15, 0 ; <i1> [#uses=1]
- %16 = getelementptr double* %data15.0, i32 %11 ; <double*> [#uses=2]
- %17 = getelementptr i32* %data, i32 %11 ; <i32*> [#uses=2]
+ %16 = getelementptr double, double* %data15.0, i32 %11 ; <double*> [#uses=2]
+ %17 = getelementptr i32, i32* %data, i32 %11 ; <i32*> [#uses=2]
br i1 %toBool, label %bb22, label %bb20
bb20: ; preds = %bb19
bb27: ; preds = %bb27, %bb28.preheader
%j4.042 = phi i32 [ 0, %bb28.preheader ], [ %indvar.next45, %bb27 ] ; <i32> [#uses=2]
%19 = sub i32 %j4.042, %lag ; <i32> [#uses=1]
- %20 = getelementptr double* %data15.0, i32 %19 ; <double*> [#uses=1]
+ %20 = getelementptr double, double* %data15.0, i32 %19 ; <double*> [#uses=1]
store double 0.000000e+00, double* %20, align 8
%indvar.next45 = add i32 %j4.042, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next45, %lag ; <i1> [#uses=1]
br i1 %exitcond, label %bb29, label %bb27
bb29: ; preds = %bb27, %bb28.preheader
- %21 = getelementptr double* %data15.0, i32 %len ; <double*> [#uses=3]
+ %21 = getelementptr double, double* %data15.0, i32 %len ; <double*> [#uses=3]
store double 0.000000e+00, double* %21, align 8
br i1 %18, label %bb.nph, label %bb37
bb31: ; preds = %bb30
%26 = add i32 %j4.141, 2 ; <i32> [#uses=2]
%.sum38 = sub i32 %len, %j4.141 ; <i32> [#uses=1]
- %27 = getelementptr double* %data15.0, i32 %.sum38 ; <double*> [#uses=1]
- %28 = getelementptr double* %autoc, i32 %j4.141 ; <double*> [#uses=1]
- %29 = getelementptr double* %autoc, i32 %25 ; <double*> [#uses=1]
- %30 = getelementptr double* %autoc, i32 %26 ; <double*> [#uses=1]
+ %27 = getelementptr double, double* %data15.0, i32 %.sum38 ; <double*> [#uses=1]
+ %28 = getelementptr double, double* %autoc, i32 %j4.141 ; <double*> [#uses=1]
+ %29 = getelementptr double, double* %autoc, i32 %25 ; <double*> [#uses=1]
+ %30 = getelementptr double, double* %autoc, i32 %26 ; <double*> [#uses=1]
%asmtmp32 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\09movsd ff_pd_1, %xmm2 \0A\091: \0A\09movapd ($4,$0), %xmm3 \0A\09movupd -8($5,$0), %xmm4 \0A\09movapd ($5,$0), %xmm5 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd %xmm3, %xmm5 \0A\09mulpd -16($5,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm5, %xmm0 \0A\09addpd %xmm3, %xmm2 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09movhlps %xmm2, %xmm5 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09addsd %xmm5, %xmm2 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09movsd %xmm2, $3 \0A\09", "=&r,=*m,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %28, double* %29, double* %30, double* %21, double* %27, i32 %22) nounwind ; <i32> [#uses=0]
br label %bb35
bb33: ; preds = %bb30
%.sum39 = sub i32 %len, %j4.141 ; <i32> [#uses=1]
- %31 = getelementptr double* %data15.0, i32 %.sum39 ; <double*> [#uses=1]
- %32 = getelementptr double* %autoc, i32 %j4.141 ; <double*> [#uses=1]
- %33 = getelementptr double* %autoc, i32 %25 ; <double*> [#uses=1]
+ %31 = getelementptr double, double* %data15.0, i32 %.sum39 ; <double*> [#uses=1]
+ %32 = getelementptr double, double* %autoc, i32 %j4.141 ; <double*> [#uses=1]
+ %33 = getelementptr double, double* %autoc, i32 %25 ; <double*> [#uses=1]
%asmtmp34 = call i32 asm sideeffect "movsd ff_pd_1, %xmm0 \0A\09movsd ff_pd_1, %xmm1 \0A\091: \0A\09movapd ($3,$0), %xmm3 \0A\09movupd -8($4,$0), %xmm4 \0A\09mulpd %xmm3, %xmm4 \0A\09mulpd ($4,$0), %xmm3 \0A\09addpd %xmm4, %xmm1 \0A\09addpd %xmm3, %xmm0 \0A\09add $$16, $0 \0A\09jl 1b \0A\09movhlps %xmm0, %xmm3 \0A\09movhlps %xmm1, %xmm4 \0A\09addsd %xmm3, %xmm0 \0A\09addsd %xmm4, %xmm1 \0A\09movsd %xmm0, $1 \0A\09movsd %xmm1, $2 \0A\09", "=&r,=*m,=*m,r,r,0,~{dirflag},~{fpsr},~{flags}"(double* %32, double* %33, double* %21, double* %31, i32 %22) nounwind ; <i32> [#uses=0]
%.pre = add i32 %j4.141, 2 ; <i32> [#uses=1]
br label %bb35
define i32 @JnJVM_java_rmi_activation_ActivationGroupID_hashCode__(%JavaObject* nocapture) nounwind {
start:
- %1 = getelementptr %JavaObject* %0, i64 1, i32 1 ; <%JavaCommonClass**> [#uses=1]
+ %1 = getelementptr %JavaObject, %JavaObject* %0, i64 1, i32 1 ; <%JavaCommonClass**> [#uses=1]
%2 = load %JavaCommonClass** %1 ; <%JavaCommonClass*> [#uses=4]
%3 = icmp eq %JavaCommonClass* %2, null ; <i1> [#uses=1]
br i1 %3, label %verifyNullExit1, label %verifyNullCont2
verifyNullCont2: ; preds = %start
%4 = bitcast %JavaCommonClass* %2 to { %JavaObject, i16, i32, i64 }* ; <{ %JavaObject, i16, i32, i64 }*> [#uses=1]
- %5 = getelementptr { %JavaObject, i16, i32, i64 }* %4, i64 0, i32 2 ; <i32*> [#uses=1]
+ %5 = getelementptr { %JavaObject, i16, i32, i64 }, { %JavaObject, i16, i32, i64 }* %4, i64 0, i32 2 ; <i32*> [#uses=1]
%6 = load i32* %5 ; <i32> [#uses=1]
- %7 = getelementptr %JavaCommonClass* %2, i64 0, i32 4 ; <%JavaClass***> [#uses=1]
+ %7 = getelementptr %JavaCommonClass, %JavaCommonClass* %2, i64 0, i32 4 ; <%JavaClass***> [#uses=1]
%8 = bitcast %JavaClass*** %7 to i64* ; <i64*> [#uses=1]
%9 = load i64* %8 ; <i64> [#uses=1]
%10 = trunc i64 %9 to i32 ; <i32> [#uses=1]
- %11 = getelementptr %JavaCommonClass* %2, i64 0, i32 3 ; <i16*> [#uses=1]
+ %11 = getelementptr %JavaCommonClass, %JavaCommonClass* %2, i64 0, i32 3 ; <i16*> [#uses=1]
%12 = load i16* %11 ; <i16> [#uses=1]
%13 = sext i16 %12 to i32 ; <i32> [#uses=1]
%14 = xor i32 %10, %6 ; <i32> [#uses=1]
; CHECK-NOT: Repushing
; CHECK: *** Final schedule
define i32 @test(i8* %pin) #0 {
- %g0 = getelementptr inbounds i8* %pin, i64 0
+ %g0 = getelementptr inbounds i8, i8* %pin, i64 0
%l0 = load i8* %g0, align 1
- %g1a = getelementptr inbounds i8* %pin, i64 1
+ %g1a = getelementptr inbounds i8, i8* %pin, i64 1
%l1a = load i8* %g1a, align 1
%z1a = zext i8 %l1a to i32
- %g1b = getelementptr inbounds i8* %pin, i64 2
+ %g1b = getelementptr inbounds i8, i8* %pin, i64 2
%l1b = load i8* %g1b, align 1
%z1b = zext i8 %l1b to i32
%c1 = icmp ne i8 %l0, 0
%x1 = xor i32 %z1a, %z1b
%s1 = select i1 %c1, i32 %z1a, i32 %x1
- %g2a = getelementptr inbounds i8* %pin, i64 3
+ %g2a = getelementptr inbounds i8, i8* %pin, i64 3
%l2a = load i8* %g2a, align 1
%z2a = zext i8 %l2a to i32
- %g2b = getelementptr inbounds i8* %pin, i64 4
+ %g2b = getelementptr inbounds i8, i8* %pin, i64 4
%l2b = load i8* %g2b, align 1
%z2b = zext i8 %l2b to i32
%x2 = xor i32 %z2a, %z2b
%s2 = select i1 %c1, i32 %z2a, i32 %x2
- %g3a = getelementptr inbounds i8* %pin, i64 5
+ %g3a = getelementptr inbounds i8, i8* %pin, i64 5
%l3a = load i8* %g3a, align 1
%z3a = zext i8 %l3a to i32
- %g3b = getelementptr inbounds i8* %pin, i64 6
+ %g3b = getelementptr inbounds i8, i8* %pin, i64 6
%l3b = load i8* %g3b, align 1
%z3b = zext i8 %l3b to i32
%x3 = xor i32 %z3a, %z3b
define internal i32* @"\01-[Example1 whatever]"() nounwind optsize ssp {
entry:
- %0 = getelementptr %struct.A* @"_ZZ20-[Example1 whatever]E4C.91", i64 0, i32 0 ; <i32**> [#uses=1]
+ %0 = getelementptr %struct.A, %struct.A* @"_ZZ20-[Example1 whatever]E4C.91", i64 0, i32 0 ; <i32**> [#uses=1]
%1 = load i32** %0, align 8 ; <i32*> [#uses=1]
ret i32* %1
}
define void @test1(i16* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i16* %head, i64 0
+ %0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <8 x i16>*
%2 = load <8 x i16>* %1, align 2
%3 = icmp slt <8 x i16> %2, zeroinitializer
define void @test2(i16* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i16* %head, i64 0
+ %0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <8 x i16>*
%2 = load <8 x i16>* %1, align 2
%3 = icmp ugt <8 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
vector.ph:
%0 = insertelement <8 x i16> undef, i16 %w, i32 0
%broadcast15 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
- %1 = getelementptr inbounds i16* %head, i64 0
+ %1 = getelementptr inbounds i16, i16* %head, i64 0
%2 = bitcast i16* %1 to <8 x i16>*
%3 = load <8 x i16>* %2, align 2
%4 = icmp ult <8 x i16> %3, %broadcast15
define void @test4(i8* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i8* %head, i64 0
+ %0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <16 x i8>*
%2 = load <16 x i8>* %1, align 1
%3 = icmp slt <16 x i8> %2, zeroinitializer
define void @test5(i8* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i8* %head, i64 0
+ %0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <16 x i8>*
%2 = load <16 x i8>* %1, align 1
%3 = icmp ugt <16 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
vector.ph:
%0 = insertelement <16 x i8> undef, i8 %w, i32 0
%broadcast15 = shufflevector <16 x i8> %0, <16 x i8> undef, <16 x i32> zeroinitializer
- %1 = getelementptr inbounds i8* %head, i64 0
+ %1 = getelementptr inbounds i8, i8* %head, i64 0
%2 = bitcast i8* %1 to <16 x i8>*
%3 = load <16 x i8>* %2, align 1
%4 = icmp ult <16 x i8> %3, %broadcast15
define void @test7(i16* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i16* %head, i64 0
+ %0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <16 x i16>*
%2 = load <16 x i16>* %1, align 2
%3 = icmp slt <16 x i16> %2, zeroinitializer
define void @test8(i16* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i16* %head, i64 0
+ %0 = getelementptr inbounds i16, i16* %head, i64 0
%1 = bitcast i16* %0 to <16 x i16>*
%2 = load <16 x i16>* %1, align 2
%3 = icmp ugt <16 x i16> %2, <i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766, i16 32766>
vector.ph:
%0 = insertelement <16 x i16> undef, i16 %w, i32 0
%broadcast15 = shufflevector <16 x i16> %0, <16 x i16> undef, <16 x i32> zeroinitializer
- %1 = getelementptr inbounds i16* %head, i64 0
+ %1 = getelementptr inbounds i16, i16* %head, i64 0
%2 = bitcast i16* %1 to <16 x i16>*
%3 = load <16 x i16>* %2, align 2
%4 = icmp ult <16 x i16> %3, %broadcast15
define void @test10(i8* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i8* %head, i64 0
+ %0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <32 x i8>*
%2 = load <32 x i8>* %1, align 1
%3 = icmp slt <32 x i8> %2, zeroinitializer
define void @test11(i8* nocapture %head) nounwind {
vector.ph:
- %0 = getelementptr inbounds i8* %head, i64 0
+ %0 = getelementptr inbounds i8, i8* %head, i64 0
%1 = bitcast i8* %0 to <32 x i8>*
%2 = load <32 x i8>* %1, align 1
%3 = icmp ugt <32 x i8> %2, <i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126, i8 126>
vector.ph:
%0 = insertelement <32 x i8> undef, i8 %w, i32 0
%broadcast15 = shufflevector <32 x i8> %0, <32 x i8> undef, <32 x i32> zeroinitializer
- %1 = getelementptr inbounds i8* %head, i64 0
+ %1 = getelementptr inbounds i8, i8* %head, i64 0
%2 = bitcast i8* %1 to <32 x i8>*
%3 = load <32 x i8>* %2, align 1
%4 = icmp ult <32 x i8> %3, %broadcast15
%cmp = icmp eq i16 %0, %1
br i1 %cmp, label %if.end, label %return, !prof !988
if.end:
- %priority = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 2
+ %priority = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 2
%2 = load i8* %priority, align 1
- %priority5 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 2
+ %priority5 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 2
%3 = load i8* %priority5, align 1
- %string = getelementptr inbounds %struct.Connector_struct* %a, i64 0, i32 5
+ %string = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %a, i64 0, i32 5
%4 = load i8** %string, align 8
- %string7 = getelementptr inbounds %struct.Connector_struct* %b, i64 0, i32 5
+ %string7 = getelementptr inbounds %struct.Connector_struct, %struct.Connector_struct* %b, i64 0, i32 5
%5 = load i8** %string7, align 8
br label %while.cond
while.cond:
%lsr.iv27 = phi i64 [ %lsr.iv.next28, %if.end17 ], [ 0, %if.end ]
- %scevgep55 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep55 = getelementptr i8, i8* %4, i64 %lsr.iv27
%6 = load i8* %scevgep55, align 1
%idxprom.i.i = sext i8 %6 to i64
%isascii.i.i224 = icmp sgt i8 %6, -1
br i1 %isascii.i.i224, label %cond.true.i.i, label %cond.false.i.i, !prof !181
cond.true.i.i:
- %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
+ %arrayidx.i.i = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i
%7 = load i32* %arrayidx.i.i, align 4
%and.i.i = and i32 %7, 32768
br label %isupper.exit
%isascii.i.i213225 = icmp sgt i8 %9, -1
br i1 %isascii.i.i213225, label %cond.true.i.i217, label %cond.false.i.i219, !prof !181
cond.true.i.i217:
- %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
+ %arrayidx.i.i215 = getelementptr inbounds %struct._RuneLocale, %struct._RuneLocale* @_DefaultRuneLocale, i64 0, i32 5, i64 %idxprom.i.i214
%10 = load i32* %arrayidx.i.i215, align 4
%and.i.i216 = and i32 %10, 32768
br label %isupper.exit223
land.lhs.true43:
%20 = ptrtoint i8* %16 to i64
%21 = sub i64 0, %20
- %scevgep52 = getelementptr i8* %4, i64 %21
- %scevgep53 = getelementptr i8* %scevgep52, i64 %lsr.iv27
- %scevgep54 = getelementptr i8* %scevgep53, i64 -1
+ %scevgep52 = getelementptr i8, i8* %4, i64 %21
+ %scevgep53 = getelementptr i8, i8* %scevgep52, i64 %lsr.iv27
+ %scevgep54 = getelementptr i8, i8* %scevgep53, i64 -1
%cmp45 = icmp eq i8* %scevgep54, null
br i1 %cmp45, label %return, label %lor.lhs.false47, !prof !996
lor.lhs.false47:
%22 = ptrtoint i8* %16 to i64
%23 = sub i64 0, %22
- %scevgep47 = getelementptr i8* %4, i64 %23
- %scevgep48 = getelementptr i8* %scevgep47, i64 %lsr.iv27
- %scevgep49 = getelementptr i8* %scevgep48, i64 -2
+ %scevgep47 = getelementptr i8, i8* %4, i64 %23
+ %scevgep48 = getelementptr i8, i8* %scevgep47, i64 %lsr.iv27
+ %scevgep49 = getelementptr i8, i8* %scevgep48, i64 -2
%cmp50 = icmp eq i8* %scevgep49, null
br i1 %cmp50, label %land.lhs.true52, label %while.cond59.preheader, !prof !997
land.lhs.true52:
%cmp61233.old = icmp eq i8 %18, 0
br i1 %cmp61233.old, label %return, label %land.rhs.preheader, !prof !999
land.rhs.preheader:
- %scevgep33 = getelementptr i8* %5, i64 %lsr.iv27
- %scevgep43 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep33 = getelementptr i8, i8* %5, i64 %lsr.iv27
+ %scevgep43 = getelementptr i8, i8* %4, i64 %lsr.iv27
br label %land.rhs
land.rhs:
%lsr.iv = phi i64 [ 0, %land.rhs.preheader ], [ %lsr.iv.next, %if.then83 ]
%25 = phi i8 [ %27, %if.then83 ], [ %18, %land.rhs.preheader ]
- %scevgep34 = getelementptr i8* %scevgep33, i64 %lsr.iv
+ %scevgep34 = getelementptr i8, i8* %scevgep33, i64 %lsr.iv
%26 = load i8* %scevgep34, align 1
%cmp64 = icmp eq i8 %26, 0
br i1 %cmp64, label %return, label %while.body66, !prof !1000
%or.cond208 = or i1 %cmp77, %cmp81
br i1 %or.cond208, label %return, label %if.then83, !prof !1002
if.then83:
- %scevgep44 = getelementptr i8* %scevgep43, i64 %lsr.iv
- %scevgep45 = getelementptr i8* %scevgep44, i64 1
+ %scevgep44 = getelementptr i8, i8* %scevgep43, i64 %lsr.iv
+ %scevgep45 = getelementptr i8, i8* %scevgep44, i64 1
%27 = load i8* %scevgep45, align 1
%cmp61 = icmp eq i8 %27, 0
%lsr.iv.next = add i64 %lsr.iv, 1
%cmp97238 = icmp eq i8 %28, 0
br i1 %cmp97238, label %return, label %land.rhs99.preheader, !prof !1004
land.rhs99.preheader:
- %scevgep31 = getelementptr i8* %5, i64 %lsr.iv27
- %scevgep40 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep31 = getelementptr i8, i8* %5, i64 %lsr.iv27
+ %scevgep40 = getelementptr i8, i8* %4, i64 %lsr.iv27
br label %land.rhs99
land.rhs99:
%lsr.iv17 = phi i64 [ 0, %land.rhs99.preheader ], [ %lsr.iv.next18, %if.then117 ]
%29 = phi i8 [ %31, %if.then117 ], [ %28, %land.rhs99.preheader ]
- %scevgep32 = getelementptr i8* %scevgep31, i64 %lsr.iv17
+ %scevgep32 = getelementptr i8, i8* %scevgep31, i64 %lsr.iv17
%30 = load i8* %scevgep32, align 1
%cmp101 = icmp eq i8 %30, 0
br i1 %cmp101, label %return, label %while.body104, !prof !1005
%or.cond210 = or i1 %or.cond209, %cmp115
br i1 %or.cond210, label %if.then117, label %return, !prof !1006
if.then117:
- %scevgep41 = getelementptr i8* %scevgep40, i64 %lsr.iv17
- %scevgep42 = getelementptr i8* %scevgep41, i64 1
+ %scevgep41 = getelementptr i8, i8* %scevgep40, i64 %lsr.iv17
+ %scevgep42 = getelementptr i8, i8* %scevgep41, i64 1
%31 = load i8* %scevgep42, align 1
%cmp97 = icmp eq i8 %31, 0
%lsr.iv.next18 = add i64 %lsr.iv17, 1
%cmp132244 = icmp eq i8 %32, 0
br i1 %cmp132244, label %return, label %land.rhs134.preheader, !prof !1008
land.rhs134.preheader:
- %scevgep29 = getelementptr i8* %5, i64 %lsr.iv27
- %scevgep37 = getelementptr i8* %4, i64 %lsr.iv27
+ %scevgep29 = getelementptr i8, i8* %5, i64 %lsr.iv27
+ %scevgep37 = getelementptr i8, i8* %4, i64 %lsr.iv27
br label %land.rhs134
land.rhs134:
%lsr.iv22 = phi i64 [ 0, %land.rhs134.preheader ], [ %lsr.iv.next23, %if.then152 ]
%33 = phi i8 [ %35, %if.then152 ], [ %32, %land.rhs134.preheader ]
- %scevgep30 = getelementptr i8* %scevgep29, i64 %lsr.iv22
+ %scevgep30 = getelementptr i8, i8* %scevgep29, i64 %lsr.iv22
%34 = load i8* %scevgep30, align 1
%cmp136 = icmp eq i8 %34, 0
br i1 %cmp136, label %return, label %while.body139, !prof !1009
%or.cond212 = or i1 %or.cond211, %cmp150
br i1 %or.cond212, label %if.then152, label %return, !prof !1010
if.then152:
- %scevgep38 = getelementptr i8* %scevgep37, i64 %lsr.iv22
- %scevgep39 = getelementptr i8* %scevgep38, i64 1
+ %scevgep38 = getelementptr i8, i8* %scevgep37, i64 %lsr.iv22
+ %scevgep39 = getelementptr i8, i8* %scevgep38, i64 1
%35 = load i8* %scevgep39, align 1
%cmp132 = icmp eq i8 %35, 0
%lsr.iv.next23 = add i64 %lsr.iv22, 1
entry:
%sub.ptr.rhs.cast646 = ptrtoint i8* %line to i64
%old = alloca [512 x i8], align 16
- %0 = getelementptr inbounds [512 x i8]* %old, i64 0, i64 0
+ %0 = getelementptr inbounds [512 x i8], [512 x i8]* %old, i64 0, i64 0
switch i64 %fid, label %if.then [
i64 2, label %if.end
i64 0, label %if.end
unreachable
SyTime.exit2720:
- %add.ptr = getelementptr [512 x i8]* %old, i64 0, i64 512
+ %add.ptr = getelementptr [512 x i8], [512 x i8]* %old, i64 0, i64 512
%cmp293427 = icmp ult i8* %0, %add.ptr
br i1 %cmp293427, label %for.body.lr.ph, label %while.body.preheader
br label %while.body.preheader
while.body.preheader:
- %add.ptr1603 = getelementptr [512 x i8]* null, i64 0, i64 512
- %echo.i3101 = getelementptr [16 x %struct.TMP.1]* @syBuf, i64 0, i64 %fid, i32 1
+ %add.ptr1603 = getelementptr [512 x i8], [512 x i8]* null, i64 0, i64 512
+ %echo.i3101 = getelementptr [16 x %struct.TMP.1], [16 x %struct.TMP.1]* @syBuf, i64 0, i64 %fid, i32 1
%1 = xor i64 %sub.ptr.rhs.cast646, -1
br label %do.body
br i1 undef, label %do.body479.backedge, label %if.end517
do.body479.backedge:
- %incdec.ptr480 = getelementptr i8* %incdec.ptr4803316, i64 1
+ %incdec.ptr480 = getelementptr i8, i8* %incdec.ptr4803316, i64 1
%cmp483 = icmp eq i8 undef, 0
br i1 %cmp483, label %if.end517, label %do.body479.backedge.land.rhs485_crit_edge
%s.2.lcssa = phi i8* [ undef, %for.cond542.preheader ], [ %q.4, %for.body545 ]
%sub.ptr.lhs.cast553 = ptrtoint i8* %s.2.lcssa to i64
%sub.ptr.sub555 = sub i64 %sub.ptr.lhs.cast553, 0
- %arrayidx556 = getelementptr i8* null, i64 %sub.ptr.sub555
+ %arrayidx556 = getelementptr i8, i8* null, i64 %sub.ptr.sub555
store i8 0, i8* %arrayidx556, align 1, !tbaa !5
br label %while.cond197.backedge
for.body1723:
%q.303203 = phi i8* [ getelementptr inbounds ([8192 x i8]* @syHistory, i64 0, i64 8189), %if.then1477 ], [ %incdec.ptr1730, %for.body1723 ]
- %add.ptr1728 = getelementptr i8* %q.303203, i64 %idx.neg1727
+ %add.ptr1728 = getelementptr i8, i8* %q.303203, i64 %idx.neg1727
%5 = load i8* %add.ptr1728, align 1, !tbaa !5
- %incdec.ptr1730 = getelementptr i8* %q.303203, i64 -1
+ %incdec.ptr1730 = getelementptr i8, i8* %q.303203, i64 -1
br label %for.body1723
cleanup:
indirectbr i8* undef, [label %bb439, label %bb85]
bb206: ; preds = %bb
- %tmp = getelementptr [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 undef
+ %tmp = getelementptr [499 x i32], [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 undef
%tmp207 = load i32* %tmp
%tmp208 = add i32 %tmp207, 1
%tmp209 = inttoptr i32 %tmp208 to i8*
%tmp214 = load i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
%tmp215 = load i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
%tmp216 = urem i32 -717428541, %tmp214
- %tmp217 = getelementptr i8* %tmp215, i32 %tmp216
+ %tmp217 = getelementptr i8, i8* %tmp215, i32 %tmp216
%tmp218 = bitcast i8* %tmp217 to i32*
%tmp219 = load i32* %tmp218, align 4
store i32 %tmp219, i32* undef, align 4
%tmp231 = xor i32 %tmp230, 1059356227
%tmp232 = mul i32 %tmp231, 1603744721
%tmp233 = urem i32 %tmp232, 259
- %tmp234 = getelementptr [259 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 2039075) to [259 x i8]*), i32 0, i32 %tmp233
+ %tmp234 = getelementptr [259 x i8], [259 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 2039075) to [259 x i8]*), i32 0, i32 %tmp233
%tmp235 = load i8* %tmp234, align 1
%tmp236 = add i32 %tmp233, 2
- %tmp237 = getelementptr [264 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 3388166) to [264 x i8]*), i32 0, i32 %tmp236
+ %tmp237 = getelementptr [264 x i8], [264 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 3388166) to [264 x i8]*), i32 0, i32 %tmp236
%tmp238 = load i8* %tmp237, align 1
- %tmp239 = getelementptr [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 0
+ %tmp239 = getelementptr [265 x i8], [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 0
%tmp240 = load i8* %tmp239, align 1
%tmp241 = add i32 %tmp233, 6
%tmp242 = trunc i32 %tmp241 to i8
%tmp258 = load i32* @fp_dh_18716afa4a5354de0a302c8edb3b0ee1, align 4
%tmp259 = load i8** @fp_dh_20a33cdeefab8f4c8887e82766cb9dcb, align 4
%tmp260 = urem i32 -717428541, %tmp258
- %tmp261 = getelementptr i8* %tmp259, i32 %tmp260
+ %tmp261 = getelementptr i8, i8* %tmp259, i32 %tmp260
%tmp262 = bitcast i8* %tmp261 to i32*
%tmp263 = load i32* %tmp262, align 4
%tmp264 = xor i32 %tmp263, 0
%tmp270 = mul i32 %tmp269, 1603744721
%tmp271 = urem i32 %tmp270, 259
%tmp274 = add i32 %tmp271, 3
- %tmp275 = getelementptr [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 %tmp274
+ %tmp275 = getelementptr [265 x i8], [265 x i8]* bitcast (i8* getelementptr inbounds ([5419648 x i8]* @fp_dh_9d93c897906e39883c58b034c8e786b2, i32 0, i32 1325165) to [265 x i8]*), i32 0, i32 %tmp274
%tmp276 = load i8* %tmp275, align 1
%tmp277 = add i32 %tmp271, 6
%tmp278 = trunc i32 %tmp277 to i8
%tmp334 = add i32 %tmp327, -1456704142
%tmp335 = zext i1 %tmp333 to i32
%tmp336 = add i32 %tmp334, %tmp335
- %tmp337 = getelementptr [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp336
+ %tmp337 = getelementptr [499 x i32], [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp336
%tmp338 = load i32* %tmp337
%tmp339 = add i32 %tmp338, 1
%tmp340 = inttoptr i32 %tmp339 to i8*
bb432: ; preds = %bb432, %bb213
%tmp433 = phi i32 [ %tmp221, %bb213 ], [ %tmp433, %bb432 ]
%tmp434 = add i32 %tmp433, 1022523279
- %tmp435 = getelementptr [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp434
+ %tmp435 = getelementptr [499 x i32], [499 x i32]* @fp_dh_36985b17790d59a27994eaab5dcb00ee, i32 0, i32 %tmp434
%tmp436 = load i32* %tmp435
%tmp437 = add i32 %tmp436, 1
%tmp438 = inttoptr i32 %tmp437 to i8*
entry:
; CHECK: decq (%{{rdi|rcx}})
; CHECK-NEXT: je
- %refcnt = getelementptr inbounds %struct.obj* %o, i64 0, i32 0
+ %refcnt = getelementptr inbounds %struct.obj, %struct.obj* %o, i64 0, i32 0
%0 = load i64* %refcnt, align 8
%dec = add i64 %0, -1
store i64 %dec, i64* %refcnt, align 8
define void @example_dec(%struct.obj2* %o) nounwind uwtable ssp {
; 64 bit dec
entry:
- %s64 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 0
+ %s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
; CHECK-NOT: load
%0 = load i64* %s64, align 8
; CHECK: decq ({{.*}})
; 32 bit dec
if.end:
- %s32 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 1
+ %s32 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 1
; CHECK-NOT: load
%1 = load i32* %s32, align 4
; CHECK: decl {{[0-9][0-9]*}}({{.*}})
; 16 bit dec
if.end1:
- %s16 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 2
+ %s16 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 2
; CHECK-NOT: load
%2 = load i16* %s16, align 2
; CHECK: decw {{[0-9][0-9]*}}({{.*}})
; 8 bit dec
if.end2:
- %s8 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 3
+ %s8 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 3
; CHECK-NOT: load
%3 = load i8* %s8
; CHECK: decb {{[0-9][0-9]*}}({{.*}})
define void @example_inc(%struct.obj2* %o) nounwind uwtable ssp {
; 64 bit inc
entry:
- %s64 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 0
+ %s64 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 0
; CHECK-NOT: load
%0 = load i64* %s64, align 8
; CHECK: incq ({{.*}})
; 32 bit inc
if.end:
- %s32 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 1
+ %s32 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 1
; CHECK-NOT: load
%1 = load i32* %s32, align 4
; CHECK: incl {{[0-9][0-9]*}}({{.*}})
; 16 bit inc
if.end1:
- %s16 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 2
+ %s16 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 2
; CHECK-NOT: load
%2 = load i16* %s16, align 2
; CHECK: incw {{[0-9][0-9]*}}({{.*}})
; 8 bit inc
if.end2:
- %s8 = getelementptr inbounds %struct.obj2* %o, i64 0, i32 3
+ %s8 = getelementptr inbounds %struct.obj2, %struct.obj2* %o, i64 0, i32 3
; CHECK-NOT: load
%3 = load i8* %s8
; CHECK: incb {{[0-9][0-9]*}}({{.*}})
; CHECK-LABEL: test3:
; CHECK: decq 16(%rax)
%0 = load i64** @foo, align 8
- %arrayidx = getelementptr inbounds i64* %0, i64 2
+ %arrayidx = getelementptr inbounds i64, i64* %0, i64 2
%1 = load i64* %arrayidx, align 8
%dec = add i64 %1, -1
store i64 %dec, i64* %arrayidx, align 8
%p.addr.03 = phi i32* [ %incdec.ptr, %while.body ], [ %p, %entry ]
%n.addr.02 = phi i32 [ %dec, %while.body ], [ %n, %entry ]
%dec = add nsw i32 %n.addr.02, -1
- %incdec.ptr = getelementptr inbounds i32* %p.addr.03, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.03, i64 1
%rand = tail call { i32, i32 } @llvm.x86.rdrand.32() nounwind
%v1 = extractvalue { i32, i32 } %rand, 0
store i32 %v1, i32* %p.addr.03, align 4
define %struct._list* @make_list(i32* nocapture readonly %data, i32* nocapture %value, i32* nocapture %all) {
entry:
%call = tail call i8* @malloc(i64 16)
- %next = getelementptr inbounds i8* %call, i64 8
+ %next = getelementptr inbounds i8, i8* %call, i64 8
%tmp = bitcast i8* %next to %struct._list**
%tmp2 = bitcast i8* %call to %struct._list*
%.pre78 = load i32* @ncol, align 4
br i1 %tobool.i66, label %if.else, label %land.rhs.i
land.rhs.i: ; preds = %while.cond.i
- %arrayidx.i67 = getelementptr inbounds i32* %call4, i64 %indvars.iv.next.i65
+ %arrayidx.i67 = getelementptr inbounds i32, i32* %call4, i64 %indvars.iv.next.i65
%tmp11 = load i32* %arrayidx.i67, align 4
- %arrayidx2.i68 = getelementptr inbounds i32* %data, i64 %indvars.iv.next.i65
+ %arrayidx2.i68 = getelementptr inbounds i32, i32* %data, i64 %indvars.iv.next.i65
%tmp12 = load i32* %arrayidx2.i68, align 4
%cmp.i69 = icmp eq i32 %tmp11, %tmp12
br i1 %cmp.i69, label %while.cond.i, label %equal_data.exit
br i1 %cmp3.i, label %if.else, label %if.then
if.then: ; preds = %equal_data.exit
- %next7 = getelementptr inbounds %struct._list* %current.173, i64 0, i32 1
+ %next7 = getelementptr inbounds %struct._list, %struct._list* %current.173, i64 0, i32 1
%tmp14 = load %struct._list** %next7, align 8
- %next12 = getelementptr inbounds %struct._list* %tmp14, i64 0, i32 1
+ %next12 = getelementptr inbounds %struct._list, %struct._list* %tmp14, i64 0, i32 1
store %struct._list* null, %struct._list** %next12, align 8
%tmp15 = load %struct._list** %next7, align 8
%tmp16 = load i32* %value, align 4
define i32 @regpressure1(i32* %P) {
%A = load i32* %P ; <i32> [#uses=1]
- %Bp = getelementptr i32* %P, i32 1 ; <i32*> [#uses=1]
+ %Bp = getelementptr i32, i32* %P, i32 1 ; <i32*> [#uses=1]
%B = load i32* %Bp ; <i32> [#uses=1]
%s1 = mul i32 %A, %B ; <i32> [#uses=1]
- %Cp = getelementptr i32* %P, i32 2 ; <i32*> [#uses=1]
+ %Cp = getelementptr i32, i32* %P, i32 2 ; <i32*> [#uses=1]
%C = load i32* %Cp ; <i32> [#uses=1]
%s2 = mul i32 %s1, %C ; <i32> [#uses=1]
- %Dp = getelementptr i32* %P, i32 3 ; <i32*> [#uses=1]
+ %Dp = getelementptr i32, i32* %P, i32 3 ; <i32*> [#uses=1]
%D = load i32* %Dp ; <i32> [#uses=1]
%s3 = mul i32 %s2, %D ; <i32> [#uses=1]
- %Ep = getelementptr i32* %P, i32 4 ; <i32*> [#uses=1]
+ %Ep = getelementptr i32, i32* %P, i32 4 ; <i32*> [#uses=1]
%E = load i32* %Ep ; <i32> [#uses=1]
%s4 = mul i32 %s3, %E ; <i32> [#uses=1]
- %Fp = getelementptr i32* %P, i32 5 ; <i32*> [#uses=1]
+ %Fp = getelementptr i32, i32* %P, i32 5 ; <i32*> [#uses=1]
%F = load i32* %Fp ; <i32> [#uses=1]
%s5 = mul i32 %s4, %F ; <i32> [#uses=1]
- %Gp = getelementptr i32* %P, i32 6 ; <i32*> [#uses=1]
+ %Gp = getelementptr i32, i32* %P, i32 6 ; <i32*> [#uses=1]
%G = load i32* %Gp ; <i32> [#uses=1]
%s6 = mul i32 %s5, %G ; <i32> [#uses=1]
- %Hp = getelementptr i32* %P, i32 7 ; <i32*> [#uses=1]
+ %Hp = getelementptr i32, i32* %P, i32 7 ; <i32*> [#uses=1]
%H = load i32* %Hp ; <i32> [#uses=1]
%s7 = mul i32 %s6, %H ; <i32> [#uses=1]
- %Ip = getelementptr i32* %P, i32 8 ; <i32*> [#uses=1]
+ %Ip = getelementptr i32, i32* %P, i32 8 ; <i32*> [#uses=1]
%I = load i32* %Ip ; <i32> [#uses=1]
%s8 = mul i32 %s7, %I ; <i32> [#uses=1]
- %Jp = getelementptr i32* %P, i32 9 ; <i32*> [#uses=1]
+ %Jp = getelementptr i32, i32* %P, i32 9 ; <i32*> [#uses=1]
%J = load i32* %Jp ; <i32> [#uses=1]
%s9 = mul i32 %s8, %J ; <i32> [#uses=1]
ret i32 %s9
define i32 @regpressure2(i32* %P) {
%A = load i32* %P ; <i32> [#uses=1]
- %Bp = getelementptr i32* %P, i32 1 ; <i32*> [#uses=1]
+ %Bp = getelementptr i32, i32* %P, i32 1 ; <i32*> [#uses=1]
%B = load i32* %Bp ; <i32> [#uses=1]
- %Cp = getelementptr i32* %P, i32 2 ; <i32*> [#uses=1]
+ %Cp = getelementptr i32, i32* %P, i32 2 ; <i32*> [#uses=1]
%C = load i32* %Cp ; <i32> [#uses=1]
- %Dp = getelementptr i32* %P, i32 3 ; <i32*> [#uses=1]
+ %Dp = getelementptr i32, i32* %P, i32 3 ; <i32*> [#uses=1]
%D = load i32* %Dp ; <i32> [#uses=1]
- %Ep = getelementptr i32* %P, i32 4 ; <i32*> [#uses=1]
+ %Ep = getelementptr i32, i32* %P, i32 4 ; <i32*> [#uses=1]
%E = load i32* %Ep ; <i32> [#uses=1]
- %Fp = getelementptr i32* %P, i32 5 ; <i32*> [#uses=1]
+ %Fp = getelementptr i32, i32* %P, i32 5 ; <i32*> [#uses=1]
%F = load i32* %Fp ; <i32> [#uses=1]
- %Gp = getelementptr i32* %P, i32 6 ; <i32*> [#uses=1]
+ %Gp = getelementptr i32, i32* %P, i32 6 ; <i32*> [#uses=1]
%G = load i32* %Gp ; <i32> [#uses=1]
- %Hp = getelementptr i32* %P, i32 7 ; <i32*> [#uses=1]
+ %Hp = getelementptr i32, i32* %P, i32 7 ; <i32*> [#uses=1]
%H = load i32* %Hp ; <i32> [#uses=1]
- %Ip = getelementptr i32* %P, i32 8 ; <i32*> [#uses=1]
+ %Ip = getelementptr i32, i32* %P, i32 8 ; <i32*> [#uses=1]
%I = load i32* %Ip ; <i32> [#uses=1]
- %Jp = getelementptr i32* %P, i32 9 ; <i32*> [#uses=1]
+ %Jp = getelementptr i32, i32* %P, i32 9 ; <i32*> [#uses=1]
%J = load i32* %Jp ; <i32> [#uses=1]
%s1 = mul i32 %A, %B ; <i32> [#uses=1]
%s2 = mul i32 %s1, %C ; <i32> [#uses=1]
define i32 @regpressure3(i16* %P, i1 %Cond, i32* %Other) {
%A = load i16* %P ; <i16> [#uses=1]
- %Bp = getelementptr i16* %P, i32 1 ; <i16*> [#uses=1]
+ %Bp = getelementptr i16, i16* %P, i32 1 ; <i16*> [#uses=1]
%B = load i16* %Bp ; <i16> [#uses=1]
- %Cp = getelementptr i16* %P, i32 2 ; <i16*> [#uses=1]
+ %Cp = getelementptr i16, i16* %P, i32 2 ; <i16*> [#uses=1]
%C = load i16* %Cp ; <i16> [#uses=1]
- %Dp = getelementptr i16* %P, i32 3 ; <i16*> [#uses=1]
+ %Dp = getelementptr i16, i16* %P, i32 3 ; <i16*> [#uses=1]
%D = load i16* %Dp ; <i16> [#uses=1]
- %Ep = getelementptr i16* %P, i32 4 ; <i16*> [#uses=1]
+ %Ep = getelementptr i16, i16* %P, i32 4 ; <i16*> [#uses=1]
%E = load i16* %Ep ; <i16> [#uses=1]
- %Fp = getelementptr i16* %P, i32 5 ; <i16*> [#uses=1]
+ %Fp = getelementptr i16, i16* %P, i32 5 ; <i16*> [#uses=1]
%F = load i16* %Fp ; <i16> [#uses=1]
- %Gp = getelementptr i16* %P, i32 6 ; <i16*> [#uses=1]
+ %Gp = getelementptr i16, i16* %P, i32 6 ; <i16*> [#uses=1]
%G = load i16* %Gp ; <i16> [#uses=1]
- %Hp = getelementptr i16* %P, i32 7 ; <i16*> [#uses=1]
+ %Hp = getelementptr i16, i16* %P, i32 7 ; <i16*> [#uses=1]
%H = load i16* %Hp ; <i16> [#uses=1]
- %Ip = getelementptr i16* %P, i32 8 ; <i16*> [#uses=1]
+ %Ip = getelementptr i16, i16* %P, i32 8 ; <i16*> [#uses=1]
%I = load i16* %Ip ; <i16> [#uses=1]
- %Jp = getelementptr i16* %P, i32 9 ; <i16*> [#uses=1]
+ %Jp = getelementptr i16, i16* %P, i32 9 ; <i16*> [#uses=1]
%J = load i16* %Jp ; <i16> [#uses=1]
%A.upgrd.1 = sext i16 %A to i32 ; <i32> [#uses=1]
%B.upgrd.2 = sext i16 %B to i32 ; <i32> [#uses=1]
if.then.i.i.i.i71: ; preds = %while.body12
%call4.i.i.i.i68 = call noalias i8* @malloc(i32 undef) nounwind
- %tmp1 = getelementptr inbounds %type_a* %tmp, i32 0, i32 1, i32 0, i32 1
+ %tmp1 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 0, i32 1
%buf_6.i.i.i.i70 = bitcast %type_d* %tmp1 to i8**
%tmp2 = load i8** %buf_6.i.i.i.i70, align 4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %tmp2, i32 undef, i32 1, i1 false) nounwind
unreachable
if.else.i.i.i.i74: ; preds = %while.body12
- %i_.i.i.i.i72 = getelementptr inbounds %type_a* %tmp, i32 0, i32 1, i32 0, i32 1, i32 0
+ %i_.i.i.i.i72 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 0, i32 1, i32 0
%tmp3 = load i64* %i_.i.i.i.i72, align 4
%tmp4 = zext i64 %tmp3 to i128
%tmp5 = shl nuw nsw i128 %tmp4, 32
%ins148 = or i128 %tmp5, %ins151
- %second3.i.i76 = getelementptr inbounds %type_a* %tmp, i32 0, i32 1, i32 1
+ %second3.i.i76 = getelementptr inbounds %type_a, %type_a* %tmp, i32 0, i32 1, i32 1
%tmp6 = load i32* %second3.i.i76, align 4
%tmp7 = zext i32 %tmp6 to i128
%tmp8 = shl nuw i128 %tmp7, 96
%mask144 = and i128 %ins148, 79228162495817593519834398720
%tmp9 = load %type_e** undef, align 4
- %len_.i.i.i.i86 = getelementptr inbounds %type_e* %tmp9, i32 0, i32 0, i32 0
+ %len_.i.i.i.i86 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 0
%tmp10 = load i32* %len_.i.i.i.i86, align 4
%tmp11 = zext i32 %tmp10 to i128
%ins135 = or i128 %tmp11, %ins135156160
if.then.i.i.i.i92: ; preds = %if.else.i.i.i.i74
%call4.i.i.i.i89 = call noalias i8* @malloc(i32 %tmp10) nounwind
%ins126 = or i128 0, %ins135
- %tmp12 = getelementptr inbounds %type_e* %tmp9, i32 0, i32 0, i32 1
+ %tmp12 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 1
%buf_6.i.i.i.i91 = bitcast %type_d* %tmp12 to i8**
%tmp13 = load i8** %buf_6.i.i.i.i91, align 4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %call4.i.i.i.i89, i8* %tmp13, i32 %tmp10, i32 1, i1 false) nounwind
br label %A
if.else.i.i.i.i95: ; preds = %if.else.i.i.i.i74
- %i_.i.i.i.i93 = getelementptr inbounds %type_e* %tmp9, i32 0, i32 0, i32 1, i32 0
+ %i_.i.i.i.i93 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 0, i32 1, i32 0
br label %A
A: ; preds = %if.else.i.i.i.i95, %if.then.i.i.i.i92
%ins135157 = phi i128 [ %ins126, %if.then.i.i.i.i92 ], [ undef, %if.else.i.i.i.i95 ]
- %second3.i.i97 = getelementptr inbounds %type_e* %tmp9, i32 0, i32 1
+ %second3.i.i97 = getelementptr inbounds %type_e, %type_e* %tmp9, i32 0, i32 1
%tmp14 = load i64* %second3.i.i97, align 4
%tmp15 = trunc i64 %tmp14 to i32
%cmp.i99 = icmp sgt i32 %tmp6, %tmp15
%extract11.i.i.i = lshr i128 %bf.load.i.i, %const3
%extract.t12.i.i.i = trunc i128 %extract11.i.i.i to i32
%bf.cast7.i.i.i = and i32 %extract.t12.i.i.i, 3
- %arrayidx.i.i.i = getelementptr inbounds %struct.A* %ht, i32 0, i32 3, i32 %bf.cast7.i.i.i
+ %arrayidx.i.i.i = getelementptr inbounds %struct.A, %struct.A* %ht, i32 0, i32 3, i32 %bf.cast7.i.i.i
br label %cond.end12.i.i
cond.false10.i.i: ; preds = %sw.bb.i
- %arrayidx.i6.i.i = getelementptr inbounds %struct.A* %ht, i32 0, i32 3, i32 0
+ %arrayidx.i6.i.i = getelementptr inbounds %struct.A, %struct.A* %ht, i32 0, i32 3, i32 0
br label %cond.end12.i.i
cond.end12.i.i: ; preds = %cond.false10.i.i, %__XXX2.exit.i.i
br i1 undef, label %for.body.i.i, label %if.end196
for.body.i.i: ; preds = %for.body.i.i, %cond.end12.i.i
- %weak.i.i = getelementptr inbounds %union.E* %tmp, i32 undef, i32 0
+ %weak.i.i = getelementptr inbounds %union.E, %union.E* %tmp, i32 undef, i32 0
%tmp1 = load i32* %weak.i.i, align 4
%cmp36.i.i = icmp ne i32 %tmp1, %shl.i.i
%or.cond = and i1 %cmp36.i.i, false
define void @foo(double* nocapture %x, double* nocapture %y) nounwind {
entry:
%tmp1 = load double* %x ; <double> [#uses=1]
- %arrayidx4 = getelementptr inbounds double* %x, i64 1 ; <double*> [#uses=1]
+ %arrayidx4 = getelementptr inbounds double, double* %x, i64 1 ; <double*> [#uses=1]
%tmp5 = load double* %arrayidx4 ; <double> [#uses=1]
- %arrayidx8 = getelementptr inbounds double* %x, i64 2 ; <double*> [#uses=1]
+ %arrayidx8 = getelementptr inbounds double, double* %x, i64 2 ; <double*> [#uses=1]
%tmp9 = load double* %arrayidx8 ; <double> [#uses=1]
- %arrayidx12 = getelementptr inbounds double* %x, i64 3 ; <double*> [#uses=1]
+ %arrayidx12 = getelementptr inbounds double, double* %x, i64 3 ; <double*> [#uses=1]
%tmp13 = load double* %arrayidx12 ; <double> [#uses=1]
- %arrayidx16 = getelementptr inbounds double* %x, i64 4 ; <double*> [#uses=1]
+ %arrayidx16 = getelementptr inbounds double, double* %x, i64 4 ; <double*> [#uses=1]
%tmp17 = load double* %arrayidx16 ; <double> [#uses=1]
- %arrayidx20 = getelementptr inbounds double* %x, i64 5 ; <double*> [#uses=1]
+ %arrayidx20 = getelementptr inbounds double, double* %x, i64 5 ; <double*> [#uses=1]
%tmp21 = load double* %arrayidx20 ; <double> [#uses=1]
- %arrayidx24 = getelementptr inbounds double* %x, i64 6 ; <double*> [#uses=1]
+ %arrayidx24 = getelementptr inbounds double, double* %x, i64 6 ; <double*> [#uses=1]
%tmp25 = load double* %arrayidx24 ; <double> [#uses=1]
- %arrayidx28 = getelementptr inbounds double* %x, i64 7 ; <double*> [#uses=1]
+ %arrayidx28 = getelementptr inbounds double, double* %x, i64 7 ; <double*> [#uses=1]
%tmp29 = load double* %arrayidx28 ; <double> [#uses=1]
- %arrayidx32 = getelementptr inbounds double* %x, i64 8 ; <double*> [#uses=1]
+ %arrayidx32 = getelementptr inbounds double, double* %x, i64 8 ; <double*> [#uses=1]
%tmp33 = load double* %arrayidx32 ; <double> [#uses=1]
- %arrayidx36 = getelementptr inbounds double* %x, i64 9 ; <double*> [#uses=1]
+ %arrayidx36 = getelementptr inbounds double, double* %x, i64 9 ; <double*> [#uses=1]
%tmp37 = load double* %arrayidx36 ; <double> [#uses=1]
- %arrayidx40 = getelementptr inbounds double* %x, i64 10 ; <double*> [#uses=1]
+ %arrayidx40 = getelementptr inbounds double, double* %x, i64 10 ; <double*> [#uses=1]
%tmp41 = load double* %arrayidx40 ; <double> [#uses=1]
- %arrayidx44 = getelementptr inbounds double* %x, i64 11 ; <double*> [#uses=1]
+ %arrayidx44 = getelementptr inbounds double, double* %x, i64 11 ; <double*> [#uses=1]
%tmp45 = load double* %arrayidx44 ; <double> [#uses=1]
- %arrayidx48 = getelementptr inbounds double* %x, i64 12 ; <double*> [#uses=1]
+ %arrayidx48 = getelementptr inbounds double, double* %x, i64 12 ; <double*> [#uses=1]
%tmp49 = load double* %arrayidx48 ; <double> [#uses=1]
- %arrayidx52 = getelementptr inbounds double* %x, i64 13 ; <double*> [#uses=1]
+ %arrayidx52 = getelementptr inbounds double, double* %x, i64 13 ; <double*> [#uses=1]
%tmp53 = load double* %arrayidx52 ; <double> [#uses=1]
- %arrayidx56 = getelementptr inbounds double* %x, i64 14 ; <double*> [#uses=1]
+ %arrayidx56 = getelementptr inbounds double, double* %x, i64 14 ; <double*> [#uses=1]
%tmp57 = load double* %arrayidx56 ; <double> [#uses=1]
- %arrayidx60 = getelementptr inbounds double* %x, i64 15 ; <double*> [#uses=1]
+ %arrayidx60 = getelementptr inbounds double, double* %x, i64 15 ; <double*> [#uses=1]
%tmp61 = load double* %arrayidx60 ; <double> [#uses=1]
- %arrayidx64 = getelementptr inbounds double* %x, i64 16 ; <double*> [#uses=1]
+ %arrayidx64 = getelementptr inbounds double, double* %x, i64 16 ; <double*> [#uses=1]
%tmp65 = load double* %arrayidx64 ; <double> [#uses=1]
%div = fdiv double %tmp1, 0.000000e+00 ; <double> [#uses=1]
store double %div, double* %y
%div70 = fdiv double %tmp5, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx72 = getelementptr inbounds double* %y, i64 1 ; <double*> [#uses=1]
+ %arrayidx72 = getelementptr inbounds double, double* %y, i64 1 ; <double*> [#uses=1]
store double %div70, double* %arrayidx72
%div74 = fdiv double %tmp9, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx76 = getelementptr inbounds double* %y, i64 2 ; <double*> [#uses=1]
+ %arrayidx76 = getelementptr inbounds double, double* %y, i64 2 ; <double*> [#uses=1]
store double %div74, double* %arrayidx76
%div78 = fdiv double %tmp13, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx80 = getelementptr inbounds double* %y, i64 3 ; <double*> [#uses=1]
+ %arrayidx80 = getelementptr inbounds double, double* %y, i64 3 ; <double*> [#uses=1]
store double %div78, double* %arrayidx80
%div82 = fdiv double %tmp17, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx84 = getelementptr inbounds double* %y, i64 4 ; <double*> [#uses=1]
+ %arrayidx84 = getelementptr inbounds double, double* %y, i64 4 ; <double*> [#uses=1]
store double %div82, double* %arrayidx84
%div86 = fdiv double %tmp21, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx88 = getelementptr inbounds double* %y, i64 5 ; <double*> [#uses=1]
+ %arrayidx88 = getelementptr inbounds double, double* %y, i64 5 ; <double*> [#uses=1]
store double %div86, double* %arrayidx88
%div90 = fdiv double %tmp25, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx92 = getelementptr inbounds double* %y, i64 6 ; <double*> [#uses=1]
+ %arrayidx92 = getelementptr inbounds double, double* %y, i64 6 ; <double*> [#uses=1]
store double %div90, double* %arrayidx92
%div94 = fdiv double %tmp29, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx96 = getelementptr inbounds double* %y, i64 7 ; <double*> [#uses=1]
+ %arrayidx96 = getelementptr inbounds double, double* %y, i64 7 ; <double*> [#uses=1]
store double %div94, double* %arrayidx96
%div98 = fdiv double %tmp33, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx100 = getelementptr inbounds double* %y, i64 8 ; <double*> [#uses=1]
+ %arrayidx100 = getelementptr inbounds double, double* %y, i64 8 ; <double*> [#uses=1]
store double %div98, double* %arrayidx100
%div102 = fdiv double %tmp37, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx104 = getelementptr inbounds double* %y, i64 9 ; <double*> [#uses=1]
+ %arrayidx104 = getelementptr inbounds double, double* %y, i64 9 ; <double*> [#uses=1]
store double %div102, double* %arrayidx104
%div106 = fdiv double %tmp41, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx108 = getelementptr inbounds double* %y, i64 10 ; <double*> [#uses=1]
+ %arrayidx108 = getelementptr inbounds double, double* %y, i64 10 ; <double*> [#uses=1]
store double %div106, double* %arrayidx108
%div110 = fdiv double %tmp45, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx112 = getelementptr inbounds double* %y, i64 11 ; <double*> [#uses=1]
+ %arrayidx112 = getelementptr inbounds double, double* %y, i64 11 ; <double*> [#uses=1]
store double %div110, double* %arrayidx112
%div114 = fdiv double %tmp49, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx116 = getelementptr inbounds double* %y, i64 12 ; <double*> [#uses=1]
+ %arrayidx116 = getelementptr inbounds double, double* %y, i64 12 ; <double*> [#uses=1]
store double %div114, double* %arrayidx116
%div118 = fdiv double %tmp53, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx120 = getelementptr inbounds double* %y, i64 13 ; <double*> [#uses=1]
+ %arrayidx120 = getelementptr inbounds double, double* %y, i64 13 ; <double*> [#uses=1]
store double %div118, double* %arrayidx120
%div122 = fdiv double %tmp57, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx124 = getelementptr inbounds double* %y, i64 14 ; <double*> [#uses=1]
+ %arrayidx124 = getelementptr inbounds double, double* %y, i64 14 ; <double*> [#uses=1]
store double %div122, double* %arrayidx124
%div126 = fdiv double %tmp61, 2.000000e-01 ; <double> [#uses=1]
- %arrayidx128 = getelementptr inbounds double* %y, i64 15 ; <double*> [#uses=1]
+ %arrayidx128 = getelementptr inbounds double, double* %y, i64 15 ; <double*> [#uses=1]
store double %div126, double* %arrayidx128
%div130 = fdiv double %tmp65, 0.000000e+00 ; <double> [#uses=1]
- %arrayidx132 = getelementptr inbounds double* %y, i64 16 ; <double*> [#uses=1]
+ %arrayidx132 = getelementptr inbounds double, double* %y, i64 16 ; <double*> [#uses=1]
store double %div130, double* %arrayidx132
ret void
}
br i1 %cmp2, label %for.body3, label %for.inc9
for.body3:
- %arraydecay = getelementptr inbounds [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv50, i64 0
+ %arraydecay = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv50, i64 0
%call = call i8* @memchr(i8* %arraydecay, i32 120, i64 1000)
- %add.ptr = getelementptr inbounds [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv50, i64 %indvars.iv50
+ %add.ptr = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv50, i64 %indvars.iv50
%cmp7 = icmp eq i8* %call, %add.ptr
%indvars.iv.next51 = add i64 %indvars.iv50, 1
br i1 %cmp7, label %for.cond1, label %if.then
br i1 %cmp19, label %for.body20, label %for.inc38
for.body20:
- %arraydecay24 = getelementptr inbounds [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv, i64 0
+ %arraydecay24 = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv, i64 0
br label %do.body.i
do.body.i:
br i1 %cmp3.i, label %exit, label %do.cond.i
do.cond.i:
- %incdec.ptr.i = getelementptr inbounds i8* %p.0.i, i64 1
+ %incdec.ptr.i = getelementptr inbounds i8, i8* %p.0.i, i64 1
%dec.i = add i64 %n.addr.0.i, -1
%cmp5.i = icmp eq i64 %dec.i, 0
br i1 %cmp5.i, label %if.then32, label %do.body.i
exit:
- %add.ptr30 = getelementptr inbounds [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv, i64 %indvars.iv
+ %add.ptr30 = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %indvars.iv, i64 %indvars.iv
%cmp31 = icmp eq i8* %p.0.i, %add.ptr30
%indvars.iv.next = add i64 %indvars.iv, 1
br i1 %cmp31, label %for.cond18, label %if.then32
@a = internal global double 3.4
define double* @foo() nounwind {
- %a = getelementptr double* @a, i64 0
+ %a = getelementptr double, double* @a, i64 0
ret double* %a
; PIC64: leaq a(%rip)
store <2 x i32> addrspace(1)* %qdest, <2 x i32> addrspace(1)** %qdest.addr
%tmp = load <2 x i32> addrspace(1)** %qdest.addr
%tmp1 = load i32* %index
- %arrayidx = getelementptr <2 x i32> addrspace(1)* %tmp, i32 %tmp1
+ %arrayidx = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp, i32 %tmp1
%tmp2 = load <2 x i32> addrspace(1)** %nsource.addr
%tmp3 = load i32* %index
- %arrayidx4 = getelementptr <2 x i32> addrspace(1)* %tmp2, i32 %tmp3
+ %arrayidx4 = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp2, i32 %tmp3
%tmp5 = load <2 x i32> addrspace(1)* %arrayidx4
%tmp6 = load <2 x i32> addrspace(1)** %dsource.addr
%tmp7 = load i32* %index
- %arrayidx8 = getelementptr <2 x i32> addrspace(1)* %tmp6, i32 %tmp7
+ %arrayidx8 = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %tmp6, i32 %tmp7
%tmp9 = load <2 x i32> addrspace(1)* %arrayidx8
%tmp10 = sdiv <2 x i32> %tmp5, %tmp9
store <2 x i32> %tmp10, <2 x i32> addrspace(1)* %arrayidx
for.body:
%i.014 = phi i32 [ 0, %bb.nph ], [ %inc, %for.body ]
- %arrayidx11 = getelementptr <3 x i32>* %dest, i32 %i.014
+ %arrayidx11 = getelementptr <3 x i32>, <3 x i32>* %dest, i32 %i.014
%tmp4 = load <3 x i32>* %arrayidx11 ; <<3 x i32>> [#uses=1]
- %arrayidx7 = getelementptr inbounds <3 x i32>* %old, i32 %i.014
+ %arrayidx7 = getelementptr inbounds <3 x i32>, <3 x i32>* %old, i32 %i.014
%tmp8 = load <3 x i32>* %arrayidx7 ; <<3 x i32>> [#uses=1]
%div = sdiv <3 x i32> %tmp4, %tmp8
store <3 x i32> %div, <3 x i32>* %arrayidx11
entry:
%0 = fcmp olt double %F, 4.200000e+01 ; <i1> [#uses=1]
%iftmp.0.0 = select i1 %0, i32 4, i32 0 ; <i32> [#uses=1]
- %1 = getelementptr i8* %P, i32 %iftmp.0.0 ; <i8*> [#uses=1]
+ %1 = getelementptr i8, i8* %P, i32 %iftmp.0.0 ; <i8*> [#uses=1]
%2 = load i8* %1, align 1 ; <i8> [#uses=1]
ret i8 %2
; CHECK-LABEL: test4:
; CHECK-NEXT: ret
define i32 @test2({i16, [6 x i8]}* %this) {
entry:
- %b48 = getelementptr inbounds { i16, [6 x i8] }* %this, i32 0, i32 1
+ %b48 = getelementptr inbounds { i16, [6 x i8] }, { i16, [6 x i8] }* %this, i32 0, i32 1
%cast = bitcast [6 x i8]* %b48 to i48*
%bf.load = load i48* %cast, align 2
%bf.ashr = ashr i48 %bf.load, 32
entry:
%tmp2 = lshr i32 %x, 2
%tmp3 = and i32 %tmp2, 3
- %tmp4 = getelementptr [4 x i32]* @array, i32 0, i32 %tmp3
+ %tmp4 = getelementptr [4 x i32], [4 x i32]* @array, i32 0, i32 %tmp3
%tmp5 = load i32* %tmp4, align 4
ret i32 %tmp5
}
entry:
%Y = lshr i32 %X, 2
%gep.upgrd.1 = zext i32 %Y to i64
- %P2 = getelementptr i32* %P, i64 %gep.upgrd.1
+ %P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.1
ret i32* %P2
}
entry:
%Y = shl i32 %X, 2
%gep.upgrd.2 = zext i32 %Y to i64
- %P2 = getelementptr i32* %P, i64 %gep.upgrd.2
+ %P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.2
ret i32* %P2
}
entry:
%Y = ashr i32 %X, 2
- %P2 = getelementptr i32* %P, i32 %Y
+ %P2 = getelementptr i32, i32* %P, i32 %Y
ret i32* %P2
}
%i.zext = zext i16 %i to i32
%index = lshr i32 %i.zext, 11
%index.zext = zext i32 %index to i64
- %val.ptr = getelementptr inbounds i32* %arr, i64 %index.zext
+ %val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
%val = load i32* %val.ptr
%val.zext = zext i32 %val to i64
%sum = add i64 %val.zext, %index.zext
define void @test_cl(<4 x i64>* %dst, <4 x i64>* %src, i32 %idx) {
entry:
- %arrayidx = getelementptr inbounds <4 x i64> * %src, i32 %idx
+ %arrayidx = getelementptr inbounds <4 x i64>, <4 x i64> * %src, i32 %idx
%0 = load <4 x i64> * %arrayidx, align 32
- %arrayidx1 = getelementptr inbounds <4 x i64> * %dst, i32 %idx
+ %arrayidx1 = getelementptr inbounds <4 x i64>, <4 x i64> * %dst, i32 %idx
%1 = load <4 x i64> * %arrayidx1, align 32
%2 = extractelement <4 x i64> %1, i32 0
%and = and i64 %2, 63
cm1:
; CHECK-LABEL: t:
; CHECK: jmpl *%eax
- %nm3 = getelementptr i32* %Sp_Arg, i32 1
+ %nm3 = getelementptr i32, i32* %Sp_Arg, i32 1
%nm9 = load i32* %Sp_Arg
%nma = inttoptr i32 %nm9 to void (i32*, i32*, i32*, i32)*
tail call ghccc void %nma(i32* %Base_Arg, i32* %nm3, i32* %Hp_Arg, i32 %R1_Arg) nounwind
; X32ABI: movl 20(%edi), %edi
; X32ABI-NEXT: movl 12(%edi), %eax
; X32ABI-NEXT: jmpq *%rax
- %0 = getelementptr inbounds %struct.__block_literal_2* %.block_descriptor, i64 0, i32 5 ; <void ()**> [#uses=1]
+ %0 = getelementptr inbounds %struct.__block_literal_2, %struct.__block_literal_2* %.block_descriptor, i64 0, i32 5 ; <void ()**> [#uses=1]
%1 = load void ()** %0, align 8 ; <void ()*> [#uses=2]
%2 = bitcast void ()* %1 to %struct.__block_literal_1* ; <%struct.__block_literal_1*> [#uses=1]
- %3 = getelementptr inbounds %struct.__block_literal_1* %2, i64 0, i32 3 ; <i8**> [#uses=1]
+ %3 = getelementptr inbounds %struct.__block_literal_1, %struct.__block_literal_1* %2, i64 0, i32 3 ; <i8**> [#uses=1]
%4 = load i8** %3, align 8 ; <i8*> [#uses=1]
%5 = bitcast i8* %4 to void (i8*)* ; <void (i8*)*> [#uses=1]
%6 = bitcast void ()* %1 to i8* ; <i8*> [#uses=1]
bb:
%i.03 = phi i64 [ 0, %entry ], [ %3, %bb ]
- %scevgep = getelementptr double* %p, i64 %i.03
+ %scevgep = getelementptr double, double* %p, i64 %i.03
%1 = load double* %scevgep, align 8
%2 = fdiv double 3.200000e+00, %1
store double %2, double* %scevgep, align 8
%tmp58 = bitcast <4 x i32> %tmp57 to <4 x float> ; <<4 x float>> [#uses=1]
%4 = bitcast float* %y_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
store <4 x float> %tmp58, <4 x float>* %4, align 16
- %5 = getelementptr float* %x_addr.0, i64 4 ; <float*> [#uses=1]
- %6 = getelementptr float* %y_addr.0, i64 4 ; <float*> [#uses=1]
+ %5 = getelementptr float, float* %x_addr.0, i64 4 ; <float*> [#uses=1]
+ %6 = getelementptr float, float* %y_addr.0, i64 4 ; <float*> [#uses=1]
%7 = add i32 %i.0, 4 ; <i32> [#uses=1]
%8 = load i32* %n, align 4 ; <i32> [#uses=1]
%9 = icmp sgt i32 %8, %7 ; <i1> [#uses=1]
loop:
%i = phi i32 [ 0, %entry ], [ %i2, %loop ]
%j = mul i32 %i, %i
- %addr = getelementptr i32* %output, i32 %i
+ %addr = getelementptr i32, i32* %output, i32 %i
store i32 %i, i32* %addr
%i2 = add i32 %i, 1
%exit_cond = icmp sge i32 %i2, %n
for.body: ; preds = %for.body.preheader, %for.cond
%i.06 = phi i32 [ %dec, %for.cond ], [ %s, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %i.06
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.06
%0 = load i32* %arrayidx, align 4, !tbaa !1
%cmp1 = icmp eq i32 %0, 0
;
for.body: ; preds = %for.body.preheader, %for.cond
%i.06 = phi i32 [ %inc, %for.cond ], [ %s, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %a, i32 %i.06
+ %arrayidx = getelementptr inbounds i32, i32* %a, i32 %i.06
%0 = load i32* %arrayidx, align 4, !tbaa !1
%cmp1 = icmp eq i32 %0, 0
%inc = add nsw i32 %i.06, 1
%va = alloca [1 x %struct.__va_list_tag], align 8 ; <[1 x %struct.__va_list_tag]*> [#uses=2]
%va12 = bitcast [1 x %struct.__va_list_tag]* %va to i8* ; <i8*> [#uses=2]
call void @llvm.va_start(i8* %va12)
- %va3 = getelementptr [1 x %struct.__va_list_tag]* %va, i64 0, i64 0 ; <%struct.__va_list_tag*> [#uses=1]
+ %va3 = getelementptr [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0 ; <%struct.__va_list_tag*> [#uses=1]
call void @bar(%struct.__va_list_tag* %va3) nounwind
call void @llvm.va_end(i8* %va12)
ret i32 undef
%x.02 = phi <4 x i32> [ %add, %while.body ], [ zeroinitializer, %entry ]
%dec = add nsw i32 %n.addr.03, -1
%and = and <4 x i32> %x.02, <i32 127, i32 127, i32 127, i32 127>
- %incdec.ptr = getelementptr inbounds <4 x i32>* %p.addr.04, i64 1
+ %incdec.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %p.addr.04, i64 1
store <4 x i32> %and, <4 x i32>* %p.addr.04, align 16
%0 = load <4 x i32>* %incdec.ptr, align 16
%add = shl <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
; CHECK-NEXT: movapd 96(%eax), %xmm0
; CHECK-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; CHECK-NEXT: retl
- %i5 = getelementptr inbounds <4 x double>* %srcA, i32 3
+ %i5 = getelementptr inbounds <4 x double>, <4 x double>* %srcA, i32 3
%i6 = load <4 x double>* %i5, align 32
%i7 = shufflevector <4 x double> %i6, <4 x double> undef, <2 x i32> <i32 0, i32 2>
ret <2 x double> %i7
; X64-NEXT: shlq $4, %rsi
; X64-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
; X64-NEXT: retq
- %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
+ %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
%2 = load <4 x float>* %1, align 16
%3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
ret <4 x float> %3
; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; X64-NEXT: retq
- %1 = getelementptr inbounds float* %fb, i64 %index
+ %1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
; X64-NEXT: addps %xmm2, %xmm3
; X64-NEXT: addps %xmm3, %xmm0
; X64-NEXT: retq
- %1 = getelementptr inbounds float* %fb, i64 %index
+ %1 = getelementptr inbounds float, float* %fb, i64 %index
%2 = load float* %1, align 4
%3 = insertelement <4 x float> undef, float %2, i32 0
%4 = insertelement <4 x float> %3, float %2, i32 1
store i32 %call3, i32* %ptr, align 4
call void @end_addrof()
%call4 = call signext i16 @get_small_nonchar()
- %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i64 0
store i16 %call4, i16* %arrayidx, align 2
call void @end_small_nonchar()
%call5 = call i32 @get_large_nonchar()
- %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i64 0
store i32 %call5, i32* %arrayidx6, align 4
call void @end_large_nonchar()
%call7 = call signext i8 @get_small_char()
- %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ %arrayidx8 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i64 0
store i8 %call7, i8* %arrayidx8, align 1
call void @end_small_char()
%call9 = call signext i8 @get_large_char()
- %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx10 = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call9, i8* %arrayidx10, align 1
call void @end_large_char()
%call11 = call signext i8 @get_struct_large_char()
- %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
- %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8], [8 x i8]* %foo, i32 0, i64 0
store i8 %call11, i8* %arrayidx12, align 1
call void @end_struct_large_char()
%call13 = call signext i8 @get_struct_small_char()
- %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
- %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ %foo14 = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8], [2 x i8]* %foo14, i32 0, i64 0
store i8 %call13, i8* %arrayidx15, align 1
call void @end_struct_small_char()
%call16 = call i32 @get_struct_large_nonchar()
- %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
- %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar, %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32], [8 x i32]* %foo17, i32 0, i64 0
store i32 %call16, i32* %arrayidx18, align 4
call void @end_struct_large_nonchar()
%call19 = call signext i16 @get_struct_small_nonchar()
- %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
- %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16], [2 x i16]* %foo20, i32 0, i64 0
store i16 %call19, i16* %arrayidx21, align 2
call void @end_struct_small_nonchar()
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
- %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
- %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
- %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
%0 = load i32* %x, align 4
%1 = load i32* %y, align 4
%2 = load i32* %z, align 4
- %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
%4 = load i64* %3, align 1
- %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
%6 = load i16* %5, align 1
- %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
store i32 %call3, i32* %ptr, align 4
call void @end_addrof()
%call4 = call signext i16 @get_small_nonchar()
- %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i64 0
store i16 %call4, i16* %arrayidx, align 2
call void @end_small_nonchar()
%call5 = call i32 @get_large_nonchar()
- %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i64 0
store i32 %call5, i32* %arrayidx6, align 4
call void @end_large_nonchar()
%call7 = call signext i8 @get_small_char()
- %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ %arrayidx8 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i64 0
store i8 %call7, i8* %arrayidx8, align 1
call void @end_small_char()
%call9 = call signext i8 @get_large_char()
- %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx10 = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call9, i8* %arrayidx10, align 1
call void @end_large_char()
%call11 = call signext i8 @get_struct_large_char()
- %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
- %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8], [8 x i8]* %foo, i32 0, i64 0
store i8 %call11, i8* %arrayidx12, align 1
call void @end_struct_large_char()
%call13 = call signext i8 @get_struct_small_char()
- %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
- %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ %foo14 = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8], [2 x i8]* %foo14, i32 0, i64 0
store i8 %call13, i8* %arrayidx15, align 1
call void @end_struct_small_char()
%call16 = call i32 @get_struct_large_nonchar()
- %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
- %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar, %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32], [8 x i32]* %foo17, i32 0, i64 0
store i32 %call16, i32* %arrayidx18, align 4
call void @end_struct_large_nonchar()
%call19 = call signext i16 @get_struct_small_nonchar()
- %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
- %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16], [2 x i16]* %foo20, i32 0, i64 0
store i16 %call19, i16* %arrayidx21, align 2
call void @end_struct_small_nonchar()
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
- %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
- %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
- %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
%0 = load i32* %x, align 4
%1 = load i32* %y, align 4
%2 = load i32* %z, align 4
- %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
%4 = load i64* %3, align 1
- %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
%6 = load i16* %5, align 1
- %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
store i32 %call3, i32* %ptr, align 4
call void @end_addrof()
%call4 = call signext i16 @get_small_nonchar()
- %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i64 0
store i16 %call4, i16* %arrayidx, align 2
call void @end_small_nonchar()
%call5 = call i32 @get_large_nonchar()
- %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i64 0
store i32 %call5, i32* %arrayidx6, align 4
call void @end_large_nonchar()
%call7 = call signext i8 @get_small_char()
- %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
+ %arrayidx8 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i64 0
store i8 %call7, i8* %arrayidx8, align 1
call void @end_small_char()
%call9 = call signext i8 @get_large_char()
- %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx10 = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call9, i8* %arrayidx10, align 1
call void @end_large_char()
%call11 = call signext i8 @get_struct_large_char()
- %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
- %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
+ %foo = getelementptr inbounds %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
+ %arrayidx12 = getelementptr inbounds [8 x i8], [8 x i8]* %foo, i32 0, i64 0
store i8 %call11, i8* %arrayidx12, align 1
call void @end_struct_large_char()
%call13 = call signext i8 @get_struct_small_char()
- %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
- %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
+ %foo14 = getelementptr inbounds %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
+ %arrayidx15 = getelementptr inbounds [2 x i8], [2 x i8]* %foo14, i32 0, i64 0
store i8 %call13, i8* %arrayidx15, align 1
call void @end_struct_small_char()
%call16 = call i32 @get_struct_large_nonchar()
- %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
- %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
+ %foo17 = getelementptr inbounds %struct.struct_large_nonchar, %struct.struct_large_nonchar* %c, i32 0, i32 0
+ %arrayidx18 = getelementptr inbounds [8 x i32], [8 x i32]* %foo17, i32 0, i64 0
store i32 %call16, i32* %arrayidx18, align 4
call void @end_struct_large_nonchar()
%call19 = call signext i16 @get_struct_small_nonchar()
- %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
- %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
+ %foo20 = getelementptr inbounds %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %arrayidx21 = getelementptr inbounds [2 x i16], [2 x i16]* %foo20, i32 0, i64 0
store i16 %call19, i16* %arrayidx21, align 2
call void @end_struct_small_nonchar()
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
- %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
- %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
- %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
+ %arraydecay22 = getelementptr inbounds [2 x i8], [2 x i8]* %small, i32 0, i32 0
+ %arraydecay23 = getelementptr inbounds [8 x i32], [8 x i32]* %large2, i32 0, i32 0
+ %arraydecay24 = getelementptr inbounds [2 x i16], [2 x i16]* %small2, i32 0, i32 0
%0 = load i32* %x, align 4
%1 = load i32* %y, align 4
%2 = load i32* %z, align 4
- %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.struct_large_char, %struct.struct_large_char* %a, i32 0, i32 0
%3 = bitcast [8 x i8]* %coerce.dive to i64*
%4 = load i64* %3, align 1
- %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
+ %coerce.dive25 = getelementptr %struct.struct_small_char, %struct.struct_small_char* %b, i32 0, i32 0
%5 = bitcast [2 x i8]* %coerce.dive25 to i16*
%6 = load i16* %5, align 1
- %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
+ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0
%7 = bitcast [2 x i16]* %coerce.dive26 to i32*
%8 = load i32* %7, align 1
call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
store i32 %call, i32* %x, align 4
call void @end_scalar1()
%call1 = call signext i8 @get_large_char()
- %arrayidx = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i64 0
store i8 %call1, i8* %arrayidx, align 1
call void @end_large_char()
%0 = load i32* %x, align 4
- %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [8 x i8], [8 x i8]* %large, i32 0, i32 0
call void @takes_two(i32 %0, i8* %arraydecay)
ret void
}
%tmp3 = load double* @G, align 16 ; <double> [#uses=1]
%tmp4 = tail call double @fabs( double %tmp3 ) readnone ; <double> [#uses=1]
store volatile double %tmp4, double* %P
- %tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp = getelementptr { double, double }, { double, double }* %z, i32 0, i32 0 ; <double*> [#uses=1]
%tmp1 = load volatile double* %tmp, align 8 ; <double> [#uses=1]
%tmp2 = tail call double @fabs( double %tmp1 ) readnone ; <double> [#uses=1]
%tmp6 = fadd double %tmp4, %tmp2 ; <double> [#uses=1]
if.else: ; preds = %entry
tail call fastcc void @send_int(i32 %i)
- %arrayidx = getelementptr inbounds [8 x i8]* %data, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [8 x i8], [8 x i8]* %data, i32 0, i32 0
call void @zero_char(i8* %arrayidx)
br label %if.end
%a = alloca [128 x i32], align 16
%0 = bitcast [128 x i32]* %a to i8*
call void @llvm.lifetime.start(i64 512, i8* %0)
- %arraydecay = getelementptr inbounds [128 x i32]* %a, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [128 x i32], [128 x i32]* %a, i64 0, i64 0
call void @foo2(i32* %arraydecay)
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds [128 x i32]* %a, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %a, i64 0, i64 %idxprom
%1 = load i32* %arrayidx, align 4
call void @llvm.lifetime.end(i64 512, i8* %0)
ret i32 %1
%a.addr = alloca i8*, align 8
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [16 x i8], align 16
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [16 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [16 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo, %struct.foo* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [16 x i8], [16 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%buf = alloca [4 x i8], align 1
store i8* %a, i8** %a.addr, align 8
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %arraydecay1 = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %arraydecay1 = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo.0, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo.0, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo.0, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
%a.addr = alloca i8*, align 8
%b = alloca %struct.foo.0, align 1
store i8* %a, i8** %a.addr, align 8
- %buf = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay = getelementptr inbounds [4 x i8]* %buf, i32 0, i32 0
+ %buf = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
%0 = load i8** %a.addr, align 8
%call = call i8* @strcpy(i8* %arraydecay, i8* %0)
- %buf1 = getelementptr inbounds %struct.foo.0* %b, i32 0, i32 0
- %arraydecay2 = getelementptr inbounds [4 x i8]* %buf1, i32 0, i32 0
+ %buf1 = getelementptr inbounds %struct.foo.0, %struct.foo.0* %b, i32 0, i32 0
+ %arraydecay2 = getelementptr inbounds [4 x i8], [4 x i8]* %buf1, i32 0, i32 0
%call3 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay2)
ret void
}
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
%0 = load i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
%0 = load i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
%0 = load i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
store i32* %y, i32** %b, align 8
%0 = load i32** %b, align 8
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32* %0)
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
%0 = ptrtoint i32* %y to i64
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %0)
ret void
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
%0 = ptrtoint i32* %y to i64
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %0)
ret void
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
%0 = ptrtoint i32* %y to i64
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %0)
ret void
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
%b = alloca i32*, align 8
- %y = getelementptr inbounds %struct.pair* %c, i32 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
%0 = ptrtoint i32* %y to i64
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %0)
ret void
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
- %y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i64 0, i32 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.pair, align 4
- %y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i64 0, i32 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
- %y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i64 0, i32 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.pair, align 4
- %y = getelementptr inbounds %struct.pair* %c, i64 0, i32 1
+ %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i64 0, i32 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %y)
ret void
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%a = alloca i32, align 4
- %add.ptr5 = getelementptr inbounds i32* %a, i64 -12
+ %add.ptr5 = getelementptr inbounds i32, i32* %a, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%a = alloca i32, align 4
- %add.ptr5 = getelementptr inbounds i32* %a, i64 -12
+ %add.ptr5 = getelementptr inbounds i32, i32* %a, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca i32, align 4
- %add.ptr5 = getelementptr inbounds i32* %a, i64 -12
+ %add.ptr5 = getelementptr inbounds i32, i32* %a, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca i32, align 4
- %add.ptr5 = getelementptr inbounds i32* %a, i64 -12
+ %add.ptr5 = getelementptr inbounds i32, i32* %a, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5)
ret void
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.vec, align 16
- %y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
- %add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
+ %y = getelementptr inbounds %struct.vec, %struct.vec* %c, i64 0, i32 0
+ %add.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %y, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.vec, align 16
- %y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
- %add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
+ %y = getelementptr inbounds %struct.vec, %struct.vec* %c, i64 0, i32 0
+ %add.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %y, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.vec, align 16
- %y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
- %add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
+ %y = getelementptr inbounds %struct.vec, %struct.vec* %c, i64 0, i32 0
+ %add.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %y, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%c = alloca %struct.vec, align 16
- %y = getelementptr inbounds %struct.vec* %c, i64 0, i32 0
- %add.ptr = getelementptr inbounds <4 x i32>* %y, i64 -12
+ %y = getelementptr inbounds %struct.vec, %struct.vec* %c, i64 0, i32 0
+ %add.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %y, i64 -12
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr)
ret void
}
%c = alloca %struct.pair, align 4
%exn.slot = alloca i8*
%ehselector.slot = alloca i32
- %a = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
store i32 0, i32* %a, align 4
- %a1 = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
invoke void @_Z3exceptPi(i32* %a1)
to label %invoke.cont unwind label %lpad
%c = alloca %struct.pair, align 4
%exn.slot = alloca i8*
%ehselector.slot = alloca i32
- %a = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
store i32 0, i32* %a, align 4
- %a1 = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
invoke void @_Z3exceptPi(i32* %a1)
to label %invoke.cont unwind label %lpad
%c = alloca %struct.pair, align 4
%exn.slot = alloca i8*
%ehselector.slot = alloca i32
- %a = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
store i32 0, i32* %a, align 4
- %a1 = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
invoke void @_Z3exceptPi(i32* %a1)
to label %invoke.cont unwind label %lpad
%c = alloca %struct.pair, align 4
%exn.slot = alloca i8*
%ehselector.slot = alloca i32
- %a = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
store i32 0, i32* %a, align 4
- %a1 = getelementptr inbounds %struct.pair* %c, i32 0, i32 0
+ %a1 = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
invoke void @_Z3exceptPi(i32* %a1)
to label %invoke.cont unwind label %lpad
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%a = alloca %class.A, align 1
- %array = getelementptr inbounds %class.A* %a, i32 0, i32 0
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%a = alloca %class.A, align 1
- %array = getelementptr inbounds %class.A* %a, i32 0, i32 0
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca %class.A, align 1
- %array = getelementptr inbounds %class.A* %a, i32 0, i32 0
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca %class.A, align 1
- %array = getelementptr inbounds %class.A* %a, i32 0, i32 0
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%x = alloca %struct.deep, align 1
- %b = getelementptr inbounds %struct.deep* %x, i32 0, i32 0
+ %b = getelementptr inbounds %struct.deep, %struct.deep* %x, i32 0, i32 0
%c = bitcast %union.anon* %b to %struct.anon*
- %d = getelementptr inbounds %struct.anon* %c, i32 0, i32 0
- %e = getelementptr inbounds %struct.anon.0* %d, i32 0, i32 0
+ %d = getelementptr inbounds %struct.anon, %struct.anon* %c, i32 0, i32 0
+ %e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%x = alloca %struct.deep, align 1
- %b = getelementptr inbounds %struct.deep* %x, i32 0, i32 0
+ %b = getelementptr inbounds %struct.deep, %struct.deep* %x, i32 0, i32 0
%c = bitcast %union.anon* %b to %struct.anon*
- %d = getelementptr inbounds %struct.anon* %c, i32 0, i32 0
- %e = getelementptr inbounds %struct.anon.0* %d, i32 0, i32 0
+ %d = getelementptr inbounds %struct.anon, %struct.anon* %c, i32 0, i32 0
+ %e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%x = alloca %struct.deep, align 1
- %b = getelementptr inbounds %struct.deep* %x, i32 0, i32 0
+ %b = getelementptr inbounds %struct.deep, %struct.deep* %x, i32 0, i32 0
%c = bitcast %union.anon* %b to %struct.anon*
- %d = getelementptr inbounds %struct.anon* %c, i32 0, i32 0
- %e = getelementptr inbounds %struct.anon.0* %d, i32 0, i32 0
+ %d = getelementptr inbounds %struct.anon, %struct.anon* %c, i32 0, i32 0
+ %e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%x = alloca %struct.deep, align 1
- %b = getelementptr inbounds %struct.deep* %x, i32 0, i32 0
+ %b = getelementptr inbounds %struct.deep, %struct.deep* %x, i32 0, i32 0
%c = bitcast %union.anon* %b to %struct.anon*
- %d = getelementptr inbounds %struct.anon* %c, i32 0, i32 0
- %e = getelementptr inbounds %struct.anon.0* %d, i32 0, i32 0
+ %d = getelementptr inbounds %struct.anon, %struct.anon* %c, i32 0, i32 0
+ %e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
%array = bitcast %union.anon.1* %e to [2 x i8]*
- %arrayidx = getelementptr inbounds [2 x i8]* %array, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
%0 = load i8* %arrayidx, align 1
ret i8 %0
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%a = alloca [4 x i32], align 16
- %arrayidx = getelementptr inbounds [4 x i32]* %a, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
%0 = load i32* %arrayidx, align 4
ret i32 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca [4 x i32], align 16
- %arrayidx = getelementptr inbounds [4 x i32]* %a, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
%0 = load i32* %arrayidx, align 4
ret i32 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca [4 x i32], align 16
- %arrayidx = getelementptr inbounds [4 x i32]* %a, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
%0 = load i32* %arrayidx, align 4
ret i32 %0
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%a = alloca [4 x i32], align 16
- %arrayidx = getelementptr inbounds [4 x i32]* %a, i32 0, i64 0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %a, i32 0, i64 0
%0 = load i32* %arrayidx, align 4
ret i32 %0
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%c = alloca %struct.nest, align 4
- %b = getelementptr inbounds %struct.nest* %c, i32 0, i32 1
- %_a = getelementptr inbounds %struct.pair* %b, i32 0, i32 0
+ %b = getelementptr inbounds %struct.nest, %struct.nest* %c, i32 0, i32 1
+ %_a = getelementptr inbounds %struct.pair, %struct.pair* %b, i32 0, i32 0
%0 = load i32* %_a, align 4
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %0)
ret void
%tmp7 = phi %struct.small* [ %tmp19, %bb17 ], [ %tmp2, %bb ]
%tmp8 = phi i64 [ %tmp20, %bb17 ], [ 1, %bb ]
%tmp9 = phi i32 [ %tmp14, %bb17 ], [ %tmp1, %bb ]
- %tmp10 = getelementptr inbounds %struct.small* %tmp7, i64 0, i32 0
+ %tmp10 = getelementptr inbounds %struct.small, %struct.small* %tmp7, i64 0, i32 0
%tmp11 = load i8* %tmp10, align 1
%tmp12 = icmp eq i8 %tmp11, 1
%tmp13 = add nsw i32 %tmp9, 8
br i1 %tmp16, label %bb21, label %bb17
bb17: ; preds = %bb6
- %tmp18 = getelementptr inbounds %struct.small** %tmp, i64 %tmp8
+ %tmp18 = getelementptr inbounds %struct.small*, %struct.small** %tmp, i64 %tmp8
%tmp19 = load %struct.small** %tmp18, align 8
%tmp20 = add i64 %tmp8, 1
br label %bb6
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%test = alloca [32 x i8], align 16
- %arraydecay = getelementptr inbounds [32 x i8]* %test, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %test, i32 0, i32 0
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
ret i32 %call
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%test = alloca [33 x i8], align 16
- %arraydecay = getelementptr inbounds [33 x i8]* %test, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [33 x i8], [33 x i8]* %test, i32 0, i32 0
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
ret i32 %call
}
; DARWIN-X64-NOT: callq ___stack_chk_fail
; DARWIN-X64: .cfi_endproc
%test = alloca [4 x i8], align 1
- %arraydecay = getelementptr inbounds [4 x i8]* %test, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [4 x i8], [4 x i8]* %test, i32 0, i32 0
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
ret i32 %call
}
; DARWIN-X64: mov{{l|q}} ___stack_chk_guard
; DARWIN-X64: callq ___stack_chk_fail
%test = alloca [5 x i8], align 1
- %arraydecay = getelementptr inbounds [5 x i8]* %test, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [5 x i8], [5 x i8]* %test, i32 0, i32 0
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay)
ret i32 %call
}
%0 = bitcast { i64, i8 }* %test.coerce to i8*
%1 = bitcast %struct.small_char* %test to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
- %2 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 0
+ %2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
%3 = load i64* %2, align 1
- %4 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 1
+ %4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
%5 = load i8* %4, align 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %3, i8 %5)
ret i32 %call
%0 = bitcast { i64, i8 }* %test.coerce to i8*
%1 = bitcast %struct.small_char* %test to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 12, i32 0, i1 false)
- %2 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 0
+ %2 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 0
%3 = load i64* %2, align 1
- %4 = getelementptr { i64, i8 }* %test.coerce, i32 0, i32 1
+ %4 = getelementptr { i64, i8 }, { i64, i8 }* %test.coerce, i32 0, i32 1
%5 = load i8* %4, align 1
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i64 %3, i8 %5)
ret i32 %call
; ATOM_LP64: leaq -1608
; ATOM_ILP32: leal -1608
- %arraydecay = getelementptr inbounds [400 x i32]* %arr, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [400 x i32], [400 x i32]* %arr, i64 0, i64 0
%call = call i32 @foo(i32 %a, i32* %arraydecay) nounwind
ret i32 %call
%a1 = alloca [256 x i32], align 16
%0 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %0)
- %arraydecay = getelementptr inbounds [256 x i32]* %a1, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
call void asm sideeffect "foo2", "~{r12},~{r13},~{r14},~{r15},~{ebx},~{esi},~{edi},~{dirflag},~{fpsr},~{flags}"()
call void @llvm.lifetime.end(i64 1024, i8* %0)
%ap = alloca [1 x %struct.__va_list_tag], align 8; <[1 x %struct.__va_list_tag]*> [#uses=2]
%ap12 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*; <i8*> [#uses=2]
call void @llvm.va_start(i8* %ap12)
- %ap3 = getelementptr inbounds [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0; <%struct.__va_list_tag*> [#uses=1]
+ %ap3 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0; <%struct.__va_list_tag*> [#uses=1]
call void @bar(%struct.__va_list_tag* %ap3) nounwind
call void @llvm.va_end(i8* %ap12)
ret void
define internal fastcc i32 @dct_chroma(i32 %uv, i32 %cr_cbp) nounwind {
cond_true2732.preheader: ; preds = %entry
- %tmp2666 = getelementptr %struct.Macroblock* null, i32 0, i32 13 ; <i64*> [#uses=2]
+ %tmp2666 = getelementptr %struct.Macroblock, %struct.Macroblock* null, i32 0, i32 13 ; <i64*> [#uses=2]
%tmp2674 = trunc i32 0 to i8 ; <i8> [#uses=1]
%tmp2667.us.us = load i64* %tmp2666 ; <i64> [#uses=1]
%tmp2670.us.us = load i64* null ; <i64> [#uses=1]
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp2 = getelementptr [1000 x i8]* @B, i32 0, i32 %i.019.0
+ %tmp2 = getelementptr [1000 x i8], [1000 x i8]* @B, i32 0, i32 %i.019.0
%tmp3 = load i8* %tmp2, align 4
%tmp4 = mul i8 %tmp3, 2
- %tmp5 = getelementptr [1000 x i8]* @A, i32 0, i32 %i.019.0
+ %tmp5 = getelementptr [1000 x i8], [1000 x i8]* @A, i32 0, i32 %i.019.0
store i8 %tmp4, i8* %tmp5, align 4
%tmp8 = mul i32 %i.019.0, 9
%tmp0 = add i32 %tmp8, %p
- %tmp10 = getelementptr [1000 x i8]* @P, i32 0, i32 %tmp0
+ %tmp10 = getelementptr [1000 x i8], [1000 x i8]* @P, i32 0, i32 %tmp0
store i8 17, i8* %tmp10, align 4
- %tmp11 = getelementptr [1000 x i8]* @Q, i32 0, i32 %tmp0
+ %tmp11 = getelementptr [1000 x i8], [1000 x i8]* @Q, i32 0, i32 %tmp0
store i8 19, i8* %tmp11, align 4
%indvar.next = add i32 %i.019.0, 1
%exitcond = icmp eq i32 %indvar.next, %m
bb:
%i.019.0 = phi i32 [ %indvar.next, %bb ], [ 0, %entry ]
- %tmp2 = getelementptr [1000 x float]* @B, i32 0, i32 %i.019.0
+ %tmp2 = getelementptr [1000 x float], [1000 x float]* @B, i32 0, i32 %i.019.0
%tmp3 = load float* %tmp2, align 4
%tmp4 = fmul float %tmp3, 2.000000e+00
- %tmp5 = getelementptr [1000 x float]* @A, i32 0, i32 %i.019.0
+ %tmp5 = getelementptr [1000 x float], [1000 x float]* @A, i32 0, i32 %i.019.0
store float %tmp4, float* %tmp5, align 4
%tmp8 = shl i32 %i.019.0, 1
%tmp9 = add i32 %tmp8, 64
- %tmp10 = getelementptr [1000 x i32]* @P, i32 0, i32 %i.019.0
+ %tmp10 = getelementptr [1000 x i32], [1000 x i32]* @P, i32 0, i32 %i.019.0
store i32 %tmp9, i32* %tmp10, align 4
%indvar.next = add i32 %i.019.0, 1
%exitcond = icmp eq i32 %indvar.next, %m
bb5: ; preds = %prologue
%tmp10 = zext i32 %tmp6 to i64 ; <i64> [#uses=1]
- %tmp11 = getelementptr %"struct.XXC::ArrayStorage"* %tmp9, i64 0, i32 5, i64 %tmp10 ; <%XXValue**> [#uses=1]
+ %tmp11 = getelementptr %"struct.XXC::ArrayStorage", %"struct.XXC::ArrayStorage"* %tmp9, i64 0, i32 5, i64 %tmp10 ; <%XXValue**> [#uses=1]
%tmp12 = load %XXValue** %tmp11, align 8 ; <%XXValue*> [#uses=1]
ret %XXValue* %tmp12
}
define void @test_sink(i8* %arg1, i32 %arg2, i8 %arg3) #0 {
%tmp1 = add i32 -2147483648, %arg2
%tmp2 = add i32 -2147483648, %tmp1
- %tmp3 = getelementptr i8* %arg1, i32 %arg2
+ %tmp3 = getelementptr i8, i8* %arg1, i32 %arg2
br label %bb1
bb1:
- %tmp4 = getelementptr i8* %arg1, i32 %tmp2
+ %tmp4 = getelementptr i8, i8* %arg1, i32 %tmp2
store i8 %arg3, i8* %tmp4
ret void;
}
define i1 @dont_merge_oddly(float* %result) nounwind {
entry:
- %tmp4 = getelementptr float* %result, i32 2
+ %tmp4 = getelementptr float, float* %result, i32 2
%tmp5 = load float* %tmp4, align 4
- %tmp7 = getelementptr float* %result, i32 4
+ %tmp7 = getelementptr float, float* %result, i32 4
%tmp8 = load float* %tmp7, align 4
- %tmp10 = getelementptr float* %result, i32 6
+ %tmp10 = getelementptr float, float* %result, i32 6
%tmp11 = load float* %tmp10, align 4
%tmp12 = fcmp olt float %tmp8, %tmp11
br i1 %tmp12, label %bb, label %bb21
lvalue_p.exit: ; preds = %bb.i
%tmp21 = load %union.tree_node** null, align 8 ; <%union.tree_node*> [#uses=3]
- %tmp22 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp22 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 0 ; <i8*> [#uses=1]
%tmp23 = load i8* %tmp22, align 8 ; <i8> [#uses=1]
%tmp24 = zext i8 %tmp23 to i32 ; <i32> [#uses=1]
switch i32 %tmp24, label %lvalue_p.exit4 [
]
bb.i1: ; preds = %lvalue_p.exit
- %tmp25 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp25 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 2 ; <i32*> [#uses=1]
%tmp26 = bitcast i32* %tmp25 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
%tmp27 = load %union.tree_node** %tmp26, align 8 ; <%union.tree_node*> [#uses=2]
- %tmp28 = getelementptr inbounds %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
+ %tmp28 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
%tmp29 = load i8* %tmp28, align 8 ; <i8> [#uses=1]
%tmp30 = zext i8 %tmp29 to i32 ; <i32> [#uses=1]
switch i32 %tmp30, label %lvalue_p.exit4 [
br label %lvalue_p.exit4
bb2.i.i2: ; preds = %bb.i1
- %tmp35 = getelementptr inbounds %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
+ %tmp35 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
%tmp36 = bitcast i8* %tmp35 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
%tmp37 = load %union.tree_node** %tmp36, align 8 ; <%union.tree_node*> [#uses=1]
- %tmp38 = getelementptr inbounds %union.tree_node* %tmp37, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
+ %tmp38 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp37, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
%tmp39 = load i8* %tmp38, align 8 ; <i8> [#uses=1]
switch i8 %tmp39, label %bb2 [
i8 16, label %lvalue_p.exit4
]
bb2.i3: ; preds = %lvalue_p.exit
- %tmp40 = getelementptr inbounds %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
+ %tmp40 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
%tmp41 = bitcast i8* %tmp40 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
%tmp42 = load %union.tree_node** %tmp41, align 8 ; <%union.tree_node*> [#uses=1]
- %tmp43 = getelementptr inbounds %union.tree_node* %tmp42, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
+ %tmp43 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp42, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
%tmp44 = load i8* %tmp43, align 8 ; <i8> [#uses=1]
switch i8 %tmp44, label %bb2 [
i8 16, label %lvalue_p.exit4
define %struct.A* @test_upcast() {
entry:
%A = tail call %struct.B* @testu()
- %x = getelementptr inbounds %struct.B* %A, i32 0, i32 0
+ %x = getelementptr inbounds %struct.B, %struct.B* %A, i32 0, i32 0
ret %struct.A* %x
}
@func_table = external global [0 x %struct.funcs]
define void @fold_indexed_load(i8* %mbstr, i64 %idxprom) nounwind uwtable ssp {
entry:
- %dsplen = getelementptr inbounds [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
+ %dsplen = getelementptr inbounds [0 x %struct.funcs], [0 x %struct.funcs]* @func_table, i64 0, i64 %idxprom, i32 2
%x1 = load i32 (i8*)** %dsplen, align 8
%call = tail call i32 %x1(i8* %mbstr) nounwind
ret void
define i32 @rdar12282281(i32 %n) nounwind uwtable ssp {
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [0 x i32 (i8*, ...)*], [0 x i32 (i8*, ...)*]* @funcs, i64 0, i64 %idxprom
%0 = load i32 (i8*, ...)** %arrayidx, align 8
%call = tail call i32 (i8*, ...)* %0(i8* null, i32 0, i32 0, i32 0, i32 0, i32 0) nounwind
ret i32 %call
define ghccc void @rBM_info(i64* noalias nocapture %Base_Arg, i64* noalias nocapture %Sp_Arg, i64* noalias nocapture %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind align 8 {
c263:
- %ln265 = getelementptr inbounds i64* %Sp_Arg, i64 -2
+ %ln265 = getelementptr inbounds i64, i64* %Sp_Arg, i64 -2
%ln266 = ptrtoint i64* %ln265 to i64
%ln268 = icmp ult i64 %ln266, %R3_Arg
br i1 %ln268, label %c26a, label %n26p
ret void
c26a: ; preds = %c263
- %ln27h = getelementptr inbounds i64* %Base_Arg, i64 -2
+ %ln27h = getelementptr inbounds i64, i64* %Base_Arg, i64 -2
%ln27j = load i64* %ln27h, align 8
%ln27k = inttoptr i64 %ln27j to void (i64*, i64*, i64*, i64, i64, i64)*
tail call ghccc void %ln27k(i64* %Base_Arg, i64* %Sp_Arg, i64* %Hp_Arg, i64 %R1_Arg, i64 %R2_Arg, i64 %R3_Arg) nounwind
entry:
%0 = bitcast %vt* %Ty to %vt* (%vt*, %class*)***
%vtable = load %vt* (%vt*, %class*)*** %0, align 8
- %vfn = getelementptr inbounds %vt* (%vt*, %class*)** %vtable, i64 4
+ %vfn = getelementptr inbounds %vt* (%vt*, %class*)*, %vt* (%vt*, %class*)** %vtable, i64 4
%1 = load %vt* (%vt*, %class*)** %vfn, align 8
%call = tail call %vt* %1(%vt* %Ty, %class* %this)
ret %vt* %call
define fastcc i32 @tailcallee(%struct.s* byval %a) nounwind {
entry:
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 0
+ %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0
%tmp3 = load i32* %tmp2
ret i32 %tmp3
; CHECK: tailcallee
define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) {
entry:
- %tmp2 = getelementptr %struct.s* %a, i32 0, i32 1
+ %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1
%tmp3 = load i64* %tmp2, align 8
%tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
ret i64 %tmp4
entry:
; CHECK-LABEL: C_ctor:
; CHECK: jmp B_ctor # TAILCALL
- %0 = getelementptr inbounds %struct.C* %this, i64 0, i32 0
+ %0 = getelementptr inbounds %struct.C, %struct.C* %this, i64 0, i32 0
%call = tail call %struct.B* @B_ctor(%struct.B* %0, i32 %y)
ret %struct.C* %this
}
entry:
; CHECK-LABEL: C_ctor_nothisret:
; CHECK-NOT: jmp B_ctor_nothisret
- %0 = getelementptr inbounds %struct.C* %this, i64 0, i32 0
+ %0 = getelementptr inbounds %struct.C, %struct.C* %this, i64 0, i32 0
%call = tail call %struct.B* @B_ctor_nothisret(%struct.B* %0, i32 %y)
ret %struct.C* %this
}
; CHECK: jmp B_ctor # TAILCALL
%0 = bitcast %struct.D* %this to %struct.A*
%call = tail call %struct.A* @A_ctor(%struct.A* %0)
- %1 = getelementptr inbounds %struct.D* %this, i64 0, i32 0
+ %1 = getelementptr inbounds %struct.D, %struct.D* %this, i64 0, i32 0
%call2 = tail call %struct.B* @B_ctor(%struct.B* %1, i32 %y)
; (this next line would never be generated by Clang, actually)
%2 = bitcast %struct.A* %call to %struct.D*
; CHECK-NOT: jmp B_ctor_nothisret
%0 = bitcast %struct.D* %this to %struct.A*
%call = tail call %struct.A* @A_ctor_nothisret(%struct.A* %0)
- %1 = getelementptr inbounds %struct.D* %this, i64 0, i32 0
+ %1 = getelementptr inbounds %struct.D, %struct.D* %this, i64 0, i32 0
%call2 = tail call %struct.B* @B_ctor_nothisret(%struct.B* %1, i32 %y)
; (this next line would never be generated by Clang, actually)
%2 = bitcast %struct.A* %call to %struct.D*
; CHECK: callq B_ctor
; CHECK: movq [[SAVETHIS]], %rcx
; CHECK: jmp B_ctor # TAILCALL
- %b = getelementptr inbounds %struct.E* %this, i64 0, i32 0
+ %b = getelementptr inbounds %struct.E, %struct.E* %this, i64 0, i32 0
%call = tail call %struct.B* @B_ctor(%struct.B* %b, i32 %x)
%call4 = tail call %struct.B* @B_ctor(%struct.B* %b, i32 %x)
ret %struct.E* %this
; CHECK: callq B_ctor_nothisret
; CHECK: movq [[SAVETHIS]], %rcx
; CHECK-NOT: jmp B_ctor_nothisret
- %b = getelementptr inbounds %struct.E* %this, i64 0, i32 0
+ %b = getelementptr inbounds %struct.E, %struct.E* %this, i64 0, i32 0
%call = tail call %struct.B* @B_ctor_nothisret(%struct.B* %b, i32 %x)
%call4 = tail call %struct.B* @B_ctor_nothisret(%struct.B* %b, i32 %x)
ret %struct.E* %this
bb: ; preds = %bb, %entry
%skiplist_addr.0.rec = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; <i32> [#uses=3]
%vYp_addr.0.rec = shl i32 %skiplist_addr.0.rec, 3 ; <i32> [#uses=3]
- %vDct_addr.0 = getelementptr <2 x i64>* %vDct, i32 %vYp_addr.0.rec ; <<2 x i64>*> [#uses=1]
- %vYp_addr.0 = getelementptr <2 x i64>* %vYp, i32 %vYp_addr.0.rec ; <<2 x i64>*> [#uses=1]
- %skiplist_addr.0 = getelementptr i8* %skiplist, i32 %skiplist_addr.0.rec ; <i8*> [#uses=1]
+ %vDct_addr.0 = getelementptr <2 x i64>, <2 x i64>* %vDct, i32 %vYp_addr.0.rec ; <<2 x i64>*> [#uses=1]
+ %vYp_addr.0 = getelementptr <2 x i64>, <2 x i64>* %vYp, i32 %vYp_addr.0.rec ; <<2 x i64>*> [#uses=1]
+ %skiplist_addr.0 = getelementptr i8, i8* %skiplist, i32 %skiplist_addr.0.rec ; <i8*> [#uses=1]
%vDct_addr.0.sum43 = or i32 %vYp_addr.0.rec, 1 ; <i32> [#uses=1]
- %tmp7 = getelementptr <2 x i64>* %vDct, i32 %vDct_addr.0.sum43 ; <<2 x i64>*> [#uses=1]
+ %tmp7 = getelementptr <2 x i64>, <2 x i64>* %vDct, i32 %vDct_addr.0.sum43 ; <<2 x i64>*> [#uses=1]
%tmp8 = load <2 x i64>* %tmp7, align 16 ; <<2 x i64>> [#uses=1]
%tmp11 = load <2 x i64>* %vDct_addr.0, align 16 ; <<2 x i64>> [#uses=1]
%tmp13 = bitcast <2 x i64> %tmp8 to <8 x i16> ; <<8 x i16>> [#uses=1]
; HASWELL: vmovups
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 1
- %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 2
+ %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 1
+ %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
%v1 = load <4 x float>* %ptr1, align 1
%v2 = load <4 x float>* %ptr2, align 1
%shuffle = shufflevector <4 x float> %v1, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
; HASWELL: vmovups
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 2
- %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 3
+ %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
+ %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
%v1 = load <4 x float>* %ptr1, align 1
%v2 = load <4 x float>* %ptr2, align 1
%shuffle = shufflevector <4 x float> %v2, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 1, i32 2, i32 3>
; HASWELL: vmovups
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 3
- %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 4
+ %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 3
+ %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
%v1 = load <4 x float>* %ptr1, align 1
%v2 = load <4 x float>* %ptr2, align 1
%v3 = shufflevector <4 x float> %v1, <4 x float> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; HASWELL: vmovups
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <4 x float>* %ptr, i64 4
- %ptr2 = getelementptr inbounds <4 x float>* %ptr, i64 5
+ %ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 4
+ %ptr2 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 5
%v1 = load <4 x float>* %ptr1, align 1
%v2 = load <4 x float>* %ptr2, align 1
%v3 = shufflevector <4 x float> %v2, <4 x float> %v1, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
; HASWELL: vpaddq
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <2 x i64>* %ptr, i64 5
- %ptr2 = getelementptr inbounds <2 x i64>* %ptr, i64 6
+ %ptr1 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 5
+ %ptr2 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 6
%v1 = load <2 x i64>* %ptr1, align 1
%v2 = load <2 x i64>* %ptr2, align 1
%v3 = shufflevector <2 x i64> %v1, <2 x i64> %v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; HASWELL: vpaddd
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <4 x i32>* %ptr, i64 6
- %ptr2 = getelementptr inbounds <4 x i32>* %ptr, i64 7
+ %ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 6
+ %ptr2 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 7
%v1 = load <4 x i32>* %ptr1, align 1
%v2 = load <4 x i32>* %ptr2, align 1
%v3 = shufflevector <4 x i32> %v1, <4 x i32> %v2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; HASWELL: vpaddw
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <8 x i16>* %ptr, i64 7
- %ptr2 = getelementptr inbounds <8 x i16>* %ptr, i64 8
+ %ptr1 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 7
+ %ptr2 = getelementptr inbounds <8 x i16>, <8 x i16>* %ptr, i64 8
%v1 = load <8 x i16>* %ptr1, align 1
%v2 = load <8 x i16>* %ptr2, align 1
%v3 = shufflevector <8 x i16> %v1, <8 x i16> %v2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; HASWELL: vpaddb
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <16 x i8>* %ptr, i64 8
- %ptr2 = getelementptr inbounds <16 x i8>* %ptr, i64 9
+ %ptr1 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 8
+ %ptr2 = getelementptr inbounds <16 x i8>, <16 x i8>* %ptr, i64 9
%v1 = load <16 x i8>* %ptr1, align 1
%v2 = load <16 x i8>* %ptr2, align 1
%v3 = shufflevector <16 x i8> %v1, <16 x i8> %v2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
; HASWELL: vaddpd
; HASWELL-NEXT: retq
- %ptr1 = getelementptr inbounds <2 x double>* %ptr, i64 9
- %ptr2 = getelementptr inbounds <2 x double>* %ptr, i64 10
+ %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 9
+ %ptr2 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 10
%v1 = load <2 x double>* %ptr1, align 1
%v2 = load <2 x double>* %ptr2, align 1
%v3 = shufflevector <2 x double> %v1, <2 x double> %v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
br label %bb
bb: ; preds = %bb, %entry
- %String2Loc9 = getelementptr inbounds [31 x i8]* %String2Loc, i64 0, i64 0
+ %String2Loc9 = getelementptr inbounds [31 x i8], [31 x i8]* %String2Loc, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %String2Loc9, i8* getelementptr inbounds ([31 x i8]* @.str3, i64 0, i64 0), i64 31, i32 1, i1 false)
br label %bb
vector.body:
%index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds [32 x i32]* @arr, i32 0, i32 %index
+ %0 = getelementptr inbounds [32 x i32], [32 x i32]* @arr, i32 0, i32 %index
%1 = bitcast i32* %0 to <4 x i32>*
%wide.load = load <4 x i32>* %1, align 16
%2 = add nsw <4 x i32> %wide.load, <i32 10, i32 10, i32 10, i32 10>
%2 = bitcast %struct._Unwind_Context* %cur_context to i8*
%3 = bitcast %struct._Unwind_Context* %this_context to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
- %personality = getelementptr inbounds %struct._Unwind_FrameState* %fs, i64 0, i32 6
- %retaddr_column.i = getelementptr inbounds %struct._Unwind_FrameState* %fs, i64 0, i32 9
- %flags.i.i.i.i = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 5
- %ra.i = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 2
- %exception_class = getelementptr inbounds %struct._Unwind_Exception* %exc, i64 0, i32 0
+ %personality = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs, i64 0, i32 6
+ %retaddr_column.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs, i64 0, i32 9
+ %flags.i.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 5
+ %ra.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 2
+ %exception_class = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 0
br label %while.body
while.body: ; preds = %uw_update_context.exit, %entry
cond.end.i.i.i: ; preds = %if.end13
%sext.i = shl i64 %6, 32
%idxprom.i.i.i = ashr exact i64 %sext.i, 32
- %arrayidx.i.i.i = getelementptr inbounds [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i
+ %arrayidx.i.i.i = getelementptr inbounds [18 x i8], [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i
%7 = load i8* %arrayidx.i.i.i, align 1
- %arrayidx2.i.i.i = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i
+ %arrayidx2.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i
%8 = load i8** %arrayidx2.i.i.i, align 8
%9 = load i64* %flags.i.i.i.i, align 8
%and.i.i.i.i = and i64 %9, 4611686018427387904
br i1 %tobool.i.i.i, label %if.end.i.i.i, label %land.lhs.true.i.i.i
land.lhs.true.i.i.i: ; preds = %cond.end.i.i.i
- %arrayidx4.i.i.i = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i
+ %arrayidx4.i.i.i = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i
%10 = load i8* %arrayidx4.i.i.i, align 1
%tobool6.i.i.i = icmp eq i8 %10, 0
br i1 %tobool6.i.i.i, label %if.end.i.i.i, label %if.then.i.i.i
br label %while.body
while.end: ; preds = %if.then4
- %private_1 = getelementptr inbounds %struct._Unwind_Exception* %exc, i64 0, i32 2
+ %private_1 = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 2
store i64 0, i64* %private_1, align 8
%15 = load i8** %ra.i, align 8
%16 = ptrtoint i8* %15 to i64
- %private_2 = getelementptr inbounds %struct._Unwind_Exception* %exc, i64 0, i32 3
+ %private_2 = getelementptr inbounds %struct._Unwind_Exception, %struct._Unwind_Exception* %exc, i64 0, i32 3
store i64 %16, i64* %private_2, align 8
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 240, i32 8, i1 false)
%17 = bitcast %struct._Unwind_FrameState* %fs.i to i8*
call void @llvm.lifetime.start(i64 -1, i8* %17)
- %personality.i = getelementptr inbounds %struct._Unwind_FrameState* %fs.i, i64 0, i32 6
- %retaddr_column.i22 = getelementptr inbounds %struct._Unwind_FrameState* %fs.i, i64 0, i32 9
+ %personality.i = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs.i, i64 0, i32 6
+ %retaddr_column.i22 = getelementptr inbounds %struct._Unwind_FrameState, %struct._Unwind_FrameState* %fs.i, i64 0, i32 9
br label %while.body.i
while.body.i: ; preds = %uw_update_context.exit44, %while.end
cond.end.i.i.i33: ; preds = %cond.end.i
%sext.i26 = shl i64 %23, 32
%idxprom.i.i.i27 = ashr exact i64 %sext.i26, 32
- %arrayidx.i.i.i28 = getelementptr inbounds [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i27
+ %arrayidx.i.i.i28 = getelementptr inbounds [18 x i8], [18 x i8]* @dwarf_reg_size_table, i64 0, i64 %idxprom.i.i.i27
%24 = load i8* %arrayidx.i.i.i28, align 1
- %arrayidx2.i.i.i29 = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i27
+ %arrayidx2.i.i.i29 = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 0, i64 %idxprom.i.i.i27
%25 = load i8** %arrayidx2.i.i.i29, align 8
%26 = load i64* %flags.i.i.i.i, align 8
%and.i.i.i.i31 = and i64 %26, 4611686018427387904
br i1 %tobool.i.i.i32, label %if.end.i.i.i39, label %land.lhs.true.i.i.i36
land.lhs.true.i.i.i36: ; preds = %cond.end.i.i.i33
- %arrayidx4.i.i.i34 = getelementptr inbounds %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i27
+ %arrayidx4.i.i.i34 = getelementptr inbounds %struct._Unwind_Context, %struct._Unwind_Context* %cur_context, i64 0, i32 8, i64 %idxprom.i.i.i27
%27 = load i8* %arrayidx4.i.i.i34, align 1
%tobool6.i.i.i35 = icmp eq i8 %27, 0
br i1 %tobool6.i.i.i35, label %if.end.i.i.i39, label %if.then.i.i.i37
br i1 %2, label %.lr.ph, label %._crit_edge
.lr.ph: ; preds = %0
- %3 = getelementptr inbounds [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0
- %4 = getelementptr inbounds [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2
+ %3 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 0
+ %4 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i64 0, i64 0, i32 2
%.pre = load i32* %3, align 16
br label %5
; <label>:10 ; preds = %5
%11 = load i8** %4, align 8
- %12 = getelementptr i8* %11, i64 8
+ %12 = getelementptr i8, i8* %11, i64 8
store i8* %12, i8** %4, align 8
br label %13
}
define %f4 @test2() nounwind {
- %Wp = getelementptr { float,float,float,float}* @G, i32 0, i32 0
- %Xp = getelementptr { float,float,float,float}* @G, i32 0, i32 1
- %Yp = getelementptr { float,float,float,float}* @G, i32 0, i32 2
- %Zp = getelementptr { float,float,float,float}* @G, i32 0, i32 3
+ %Wp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 0
+ %Xp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 1
+ %Yp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 2
+ %Zp = getelementptr { float,float,float,float}, { float,float,float,float}* @G, i32 0, i32 3
%W = load float* %Wp
%X = load float* %Xp
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
- %tmp.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
store float %f, float* %tmp.upgrd.1
%tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
- %tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
+ %tmp.upgrd.2 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
%tmp.upgrd.3 = load float* %tmp.upgrd.2 ; <float> [#uses=1]
store float %tmp.upgrd.3, float* %f
ret void
define <4 x float> @merge_2_floats(float* nocapture %p) nounwind readonly {
%tmp1 = load float* %p
%vecins = insertelement <4 x float> undef, float %tmp1, i32 0
- %add.ptr = getelementptr float* %p, i32 1
+ %add.ptr = getelementptr float, float* %p, i32 1
%tmp5 = load float* %add.ptr
%vecins7 = insertelement <4 x float> %vecins, float %tmp5, i32 1
ret <4 x float> %vecins7
; two i64s of a <4 x i64> as a load of two i32s.
define <4 x i64> @merge_2_floats_into_4() {
%1 = load i64** undef, align 8
- %2 = getelementptr inbounds i64* %1, i64 0
+ %2 = getelementptr inbounds i64, i64* %1, i64 0
%3 = load i64* %2
%4 = insertelement <4 x i64> undef, i64 %3, i32 0
%5 = load i64** undef, align 8
- %6 = getelementptr inbounds i64* %5, i64 1
+ %6 = getelementptr inbounds i64, i64* %5, i64 1
%7 = load i64* %6
%8 = insertelement <4 x i64> %4, i64 %7, i32 1
%9 = shufflevector <4 x i64> %8, <4 x i64> undef, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
define <4 x float> @merge_4_floats(float* %ptr) {
%a = load float* %ptr, align 8
%vec = insertelement <4 x float> undef, float %a, i32 0
- %idx1 = getelementptr inbounds float* %ptr, i64 1
+ %idx1 = getelementptr inbounds float, float* %ptr, i64 1
%b = load float* %idx1, align 8
%vec2 = insertelement <4 x float> %vec, float %b, i32 1
- %idx3 = getelementptr inbounds float* %ptr, i64 2
+ %idx3 = getelementptr inbounds float, float* %ptr, i64 2
%c = load float* %idx3, align 8
%vec4 = insertelement <4 x float> %vec2, float %c, i32 2
- %idx5 = getelementptr inbounds float* %ptr, i64 3
+ %idx5 = getelementptr inbounds float, float* %ptr, i64 3
%d = load float* %idx5, align 8
%vec6 = insertelement <4 x float> %vec4, float %d, i32 3
ret <4 x float> %vec6
define <8 x float> @merge_8_floats(float* %ptr) {
%a = load float* %ptr, align 4
%vec = insertelement <8 x float> undef, float %a, i32 0
- %idx1 = getelementptr inbounds float* %ptr, i64 1
+ %idx1 = getelementptr inbounds float, float* %ptr, i64 1
%b = load float* %idx1, align 4
%vec2 = insertelement <8 x float> %vec, float %b, i32 1
- %idx3 = getelementptr inbounds float* %ptr, i64 2
+ %idx3 = getelementptr inbounds float, float* %ptr, i64 2
%c = load float* %idx3, align 4
%vec4 = insertelement <8 x float> %vec2, float %c, i32 2
- %idx5 = getelementptr inbounds float* %ptr, i64 3
+ %idx5 = getelementptr inbounds float, float* %ptr, i64 3
%d = load float* %idx5, align 4
%vec6 = insertelement <8 x float> %vec4, float %d, i32 3
- %idx7 = getelementptr inbounds float* %ptr, i64 4
+ %idx7 = getelementptr inbounds float, float* %ptr, i64 4
%e = load float* %idx7, align 4
%vec8 = insertelement <8 x float> %vec6, float %e, i32 4
- %idx9 = getelementptr inbounds float* %ptr, i64 5
+ %idx9 = getelementptr inbounds float, float* %ptr, i64 5
%f = load float* %idx9, align 4
%vec10 = insertelement <8 x float> %vec8, float %f, i32 5
- %idx11 = getelementptr inbounds float* %ptr, i64 6
+ %idx11 = getelementptr inbounds float, float* %ptr, i64 6
%g = load float* %idx11, align 4
%vec12 = insertelement <8 x float> %vec10, float %g, i32 6
- %idx13 = getelementptr inbounds float* %ptr, i64 7
+ %idx13 = getelementptr inbounds float, float* %ptr, i64 7
%h = load float* %idx13, align 4
%vec14 = insertelement <8 x float> %vec12, float %h, i32 7
ret <8 x float> %vec14
define <4 x double> @merge_4_doubles(double* %ptr) {
%a = load double* %ptr, align 8
%vec = insertelement <4 x double> undef, double %a, i32 0
- %idx1 = getelementptr inbounds double* %ptr, i64 1
+ %idx1 = getelementptr inbounds double, double* %ptr, i64 1
%b = load double* %idx1, align 8
%vec2 = insertelement <4 x double> %vec, double %b, i32 1
- %idx3 = getelementptr inbounds double* %ptr, i64 2
+ %idx3 = getelementptr inbounds double, double* %ptr, i64 2
%c = load double* %idx3, align 8
%vec4 = insertelement <4 x double> %vec2, double %c, i32 2
- %idx5 = getelementptr inbounds double* %ptr, i64 3
+ %idx5 = getelementptr inbounds double, double* %ptr, i64 3
%d = load double* %idx5, align 8
%vec6 = insertelement <4 x double> %vec4, double %d, i32 3
ret <4 x double> %vec6
; Recognize and combine consecutive loads even when the
; first of the combined loads is offset from the base address.
define <4 x double> @merge_4_doubles_offset(double* %ptr) {
- %arrayidx4 = getelementptr inbounds double* %ptr, i64 4
- %arrayidx5 = getelementptr inbounds double* %ptr, i64 5
- %arrayidx6 = getelementptr inbounds double* %ptr, i64 6
- %arrayidx7 = getelementptr inbounds double* %ptr, i64 7
+ %arrayidx4 = getelementptr inbounds double, double* %ptr, i64 4
+ %arrayidx5 = getelementptr inbounds double, double* %ptr, i64 5
+ %arrayidx6 = getelementptr inbounds double, double* %ptr, i64 6
+ %arrayidx7 = getelementptr inbounds double, double* %ptr, i64 7
%e = load double* %arrayidx4, align 8
%f = load double* %arrayidx5, align 8
%g = load double* %arrayidx6, align 8
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx1 = getelementptr inbounds <2 x i64>* %in, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %in, i64 %indvars.iv
%arrayidx1.val = load <2 x i64>* %arrayidx1, align 16
%0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
%cmp.i.i = icmp ult <8 x i16> %0, <i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
%sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
%1 = bitcast <8 x i16> %sext.i.i to <2 x i64>
- %arrayidx5 = getelementptr inbounds <2 x i64>* %out, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds <2 x i64>, <2 x i64>* %out, i64 %indvars.iv
store <2 x i64> %1, <2 x i64>* %arrayidx5, align 16
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx1 = getelementptr inbounds <2 x i64>* %in, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds <2 x i64>, <2 x i64>* %in, i64 %indvars.iv
%arrayidx1.val = load <2 x i64>* %arrayidx1, align 16
%0 = bitcast <2 x i64> %arrayidx1.val to <8 x i16>
%cmp.i.i = icmp ult <8 x i16> %0, <i16 0, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26, i16 26>
%sext.i.i = sext <8 x i1> %cmp.i.i to <8 x i16>
%1 = bitcast <8 x i16> %sext.i.i to <2 x i64>
- %arrayidx5 = getelementptr inbounds <2 x i64>* %out, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds <2 x i64>, <2 x i64>* %out, i64 %indvars.iv
store <2 x i64> %1, <2 x i64>* %arrayidx5, align 16
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%vecinit2.i = insertelement <4 x i32*> %vecinit.i, i32* %ptr, i32 1
%vecinit4.i = insertelement <4 x i32*> %vecinit2.i, i32* %ptr, i32 2
%vecinit6.i = insertelement <4 x i32*> %vecinit4.i, i32* %ptr, i32 3
- %A2 = getelementptr <4 x i32*> %vecinit6.i, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
- %A3 = getelementptr <4 x i32*> %A2, <4 x i32> <i32 10, i32 14, i32 19, i32 233>
+ %A2 = getelementptr i32, <4 x i32*> %vecinit6.i, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ %A3 = getelementptr i32, <4 x i32*> %A2, <4 x i32> <i32 10, i32 14, i32 19, i32 233>
ret <4 x i32*> %A3
}
;CHECK: vpaddd
;CHECK-NEXT: vpextrd
;CHECK-NEXT: movl
- %A2 = getelementptr <4 x i32*> %param, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
%k = extractelement <4 x i32*> %A2, i32 3
%v = load i32* %k
ret i32 %v
;CHECK-LABEL: AGEP2
;CHECK: vpslld $2
;CHECK-NEXT: vpadd
- %A2 = getelementptr <4 x i32*> %param, <4 x i32> %off
+ %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> %off
%k = extractelement <4 x i32*> %A2, i32 3
%v = load i32* %k
ret i32 %v
;CHECK-LABEL: AGEP3
;CHECK: vpslld $2
;CHECK-NEXT: vpadd
- %A2 = getelementptr <4 x i32*> %param, <4 x i32> %off
+ %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> %off
%v = alloca i32
%k = insertelement <4 x i32*> %A2, i32* %v, i32 3
ret <4 x i32*> %k
;CHECK: vpadd
; add the base to the offset
;CHECK-NEXT: vpadd
- %A = getelementptr <4 x i16*> %param, <4 x i32> %off
+ %A = getelementptr i16, <4 x i16*> %param, <4 x i32> %off
ret <4 x i16*> %A
;CHECK: ret
}
entry:
;CHECK-LABEL: AGEP5
;CHECK: vpaddd
- %A = getelementptr <4 x i8*> %param, <4 x i8> %off
+ %A = getelementptr i8, <4 x i8*> %param, <4 x i8> %off
ret <4 x i8*> %A
;CHECK: ret
}
entry:
;CHECK-LABEL: AGEP6
;CHECK-NOT: pslld
- %A = getelementptr <4 x i8*> %param, <4 x i32> %off
+ %A = getelementptr i8, <4 x i8*> %param, <4 x i32> %off
ret <4 x i8*> %A
;CHECK: ret
}
br i1 false, label %bb137.i, label %bb149.i.loopexit
bb149.i.loopexit: ; preds = %bb137.i
- %tmp139.i = getelementptr i8* %FieldName, i64 %tmp139.rec.i ; <i8*> [#uses=0]
+ %tmp139.i = getelementptr i8, i8* %FieldName, i64 %tmp139.rec.i ; <i8*> [#uses=0]
unreachable
}
; CHECK: retq
define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
bb:
- %arrayidx1928 = getelementptr inbounds double** %call1559, i64 %indvars.iv4198
+ %arrayidx1928 = getelementptr inbounds double*, double** %call1559, i64 %indvars.iv4198
%tmp1888 = load double** %arrayidx1928, align 8
%predphi.v.v = select <4 x i1> %tmp1895, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>
%tmp1900 = bitcast double* %tmp1888 to <4 x double>*
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <16 x i8>*
%ptr.b = bitcast i8* %gep.b to <16 x i8>*
%load.a = load <16 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <8 x i16>*
%ptr.b = bitcast i16* %gep.b to <8 x i16>*
%load.a = load <8 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i32>*
%ptr.b = bitcast i32* %gep.b to <4 x i32>*
%load.a = load <4 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <32 x i8>*
%ptr.b = bitcast i8* %gep.b to <32 x i8>*
%load.a = load <32 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <16 x i16>*
%ptr.b = bitcast i16* %gep.b to <16 x i16>*
%load.a = load <16 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i32>*
%ptr.b = bitcast i32* %gep.b to <8 x i32>*
%load.a = load <8 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i8* %a, i64 %index
- %gep.b = getelementptr inbounds i8* %b, i64 %index
+ %gep.a = getelementptr inbounds i8, i8* %a, i64 %index
+ %gep.b = getelementptr inbounds i8, i8* %b, i64 %index
%ptr.a = bitcast i8* %gep.a to <64 x i8>*
%ptr.b = bitcast i8* %gep.b to <64 x i8>*
%load.a = load <64 x i8>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i16* %a, i64 %index
- %gep.b = getelementptr inbounds i16* %b, i64 %index
+ %gep.a = getelementptr inbounds i16, i16* %a, i64 %index
+ %gep.b = getelementptr inbounds i16, i16* %b, i64 %index
%ptr.a = bitcast i16* %gep.a to <32 x i16>*
%ptr.b = bitcast i16* %gep.b to <32 x i16>*
%load.a = load <32 x i16>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <16 x i32>*
%ptr.b = bitcast i32* %gep.b to <16 x i32>*
%load.a = load <16 x i32>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <8 x i64>*
%ptr.b = bitcast i32* %gep.b to <8 x i64>*
%load.a = load <8 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <4 x i64>*
%ptr.b = bitcast i32* %gep.b to <4 x i64>*
%load.a = load <4 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
vector.body: ; preds = %vector.body, %vector.ph
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
- %gep.a = getelementptr inbounds i32* %a, i64 %index
- %gep.b = getelementptr inbounds i32* %b, i64 %index
+ %gep.a = getelementptr inbounds i32, i32* %a, i64 %index
+ %gep.b = getelementptr inbounds i32, i32* %b, i64 %index
%ptr.a = bitcast i32* %gep.a to <2 x i64>*
%ptr.b = bitcast i32* %gep.b to <2 x i64>*
%load.a = load <2 x i64>* %ptr.a, align 2
define void @nowarn() nounwind ssp {
entry:
%buffer = alloca [12 x i8], align 1
- %arraydecay = getelementptr inbounds [12 x i8]* %buffer, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [12 x i8], [12 x i8]* %buffer, i64 0, i64 0
call void @doit(i8* %arraydecay) nounwind
ret void
}
define void @warn() nounwind ssp {
entry:
%buffer = alloca [80 x i8], align 1
- %arraydecay = getelementptr inbounds [80 x i8]* %buffer, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [80 x i8], [80 x i8]* %buffer, i64 0, i64 0
call void @doit(i8* %arraydecay) nounwind
ret void
}
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load <3 x i8>** %dst.addr ; <<3 x i8>*> [#uses=1]
- %arrayidx = getelementptr <3 x i8>* %tmp3, i32 %tmp2 ; <<3 x i8>*> [#uses=1]
+ %arrayidx = getelementptr <3 x i8>, <3 x i8>* %tmp3, i32 %tmp2 ; <<3 x i8>*> [#uses=1]
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load <3 x i8>** %src.addr ; <<3 x i8>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x i8>* %tmp5, i32 %tmp4 ; <<3 x i8>*> [#uses=1]
+ %arrayidx6 = getelementptr <3 x i8>, <3 x i8>* %tmp5, i32 %tmp4 ; <<3 x i8>*> [#uses=1]
%tmp7 = load <3 x i8>* %arrayidx6 ; <<3 x i8>> [#uses=1]
%add = add <3 x i8> %tmp7, < i8 1, i8 1, i8 1 > ; <<3 x i8>> [#uses=1]
store <3 x i8> %add, <3 x i8>* %arrayidx
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load i64** %dst_i.addr ; <i64*> [#uses=1]
- %arrayidx = getelementptr i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
+ %arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
%conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv, <8 x i8>** %dst
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load i64** %src_i.addr ; <i64*> [#uses=1]
- %arrayidx6 = getelementptr i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
+ %arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
%conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv7, <8 x i8>** %src
%tmp8 = load i32* %i ; <i32> [#uses=1]
%tmp9 = load <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
- %arrayidx10 = getelementptr <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
+ %arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
%tmp11 = load i32* %i ; <i32> [#uses=1]
%tmp12 = load <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
- %arrayidx13 = getelementptr <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
+ %arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
%tmp14 = load <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1]
%and = and <8 x i8> %add, < i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4 > ; <<8 x i8>> [#uses=1]
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load <3 x i16>** %dst.addr ; <<3 x i16>*> [#uses=1]
- %arrayidx = getelementptr <3 x i16>* %tmp3, i32 %tmp2 ; <<3 x i16>*> [#uses=1]
+ %arrayidx = getelementptr <3 x i16>, <3 x i16>* %tmp3, i32 %tmp2 ; <<3 x i16>*> [#uses=1]
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load <3 x i16>** %src.addr ; <<3 x i16>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x i16>* %tmp5, i32 %tmp4 ; <<3 x i16>*> [#uses=1]
+ %arrayidx6 = getelementptr <3 x i16>, <3 x i16>* %tmp5, i32 %tmp4 ; <<3 x i16>*> [#uses=1]
%tmp7 = load <3 x i16>* %arrayidx6 ; <<3 x i16>> [#uses=1]
%add = add <3 x i16> %tmp7, < i16 1, i16 1, i16 1 > ; <<3 x i16>> [#uses=1]
store <3 x i16> %add, <3 x i16>* %arrayidx
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load <5 x i16>** %dst.addr ; <<5 x i16>*> [#uses=1]
- %arrayidx = getelementptr <5 x i16>* %tmp3, i32 %tmp2 ; <<5 x i16>*> [#uses=1]
+ %arrayidx = getelementptr <5 x i16>, <5 x i16>* %tmp3, i32 %tmp2 ; <<5 x i16>*> [#uses=1]
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load <5 x i16>** %src.addr ; <<5 x i16>*> [#uses=1]
- %arrayidx6 = getelementptr <5 x i16>* %tmp5, i32 %tmp4 ; <<5 x i16>*> [#uses=1]
+ %arrayidx6 = getelementptr <5 x i16>, <5 x i16>* %tmp5, i32 %tmp4 ; <<5 x i16>*> [#uses=1]
%tmp7 = load <5 x i16>* %arrayidx6 ; <<5 x i16>> [#uses=1]
%sub = sub <5 x i16> %tmp7, < i16 271, i16 271, i16 271, i16 271, i16 271 > ; <<5 x i16>> [#uses=1]
%mul = mul <5 x i16> %sub, < i16 2, i16 4, i16 2, i16 2, i16 2 > ; <<5 x i16>> [#uses=1]
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load <3 x i32>** %dst.addr ; <<3 x i32>*> [#uses=1]
- %arrayidx = getelementptr <3 x i32>* %tmp3, i32 %tmp2 ; <<3 x i32>*> [#uses=1]
+ %arrayidx = getelementptr <3 x i32>, <3 x i32>* %tmp3, i32 %tmp2 ; <<3 x i32>*> [#uses=1]
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load <3 x i32>** %src.addr ; <<3 x i32>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x i32>* %tmp5, i32 %tmp4 ; <<3 x i32>*> [#uses=1]
+ %arrayidx6 = getelementptr <3 x i32>, <3 x i32>* %tmp5, i32 %tmp4 ; <<3 x i32>*> [#uses=1]
%tmp7 = load <3 x i32>* %arrayidx6 ; <<3 x i32>> [#uses=1]
%mul = mul <3 x i32> %tmp7, < i32 4, i32 4, i32 4 > ; <<3 x i32>> [#uses=1]
%sub = sub <3 x i32> %mul, < i32 3, i32 3, i32 3 > ; <<3 x i32>> [#uses=1]
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load <3 x float>** %dst.addr ; <<3 x float>*> [#uses=1]
- %arrayidx = getelementptr <3 x float>* %tmp3, i32 %tmp2 ; <<3 x float>*> [#uses=1]
+ %arrayidx = getelementptr <3 x float>, <3 x float>* %tmp3, i32 %tmp2 ; <<3 x float>*> [#uses=1]
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load <3 x float>** %src.addr ; <<3 x float>*> [#uses=1]
- %arrayidx6 = getelementptr <3 x float>* %tmp5, i32 %tmp4 ; <<3 x float>*> [#uses=1]
+ %arrayidx6 = getelementptr <3 x float>, <3 x float>* %tmp5, i32 %tmp4 ; <<3 x float>*> [#uses=1]
%tmp7 = load <3 x float>* %arrayidx6 ; <<3 x float>> [#uses=1]
%tmp8 = load <3 x float>* %v ; <<3 x float>> [#uses=1]
%mul = fmul <3 x float> %tmp7, %tmp8 ; <<3 x float>> [#uses=1]
forbody: ; preds = %forcond
%tmp1 = load i32* %i ; <i32> [#uses=1]
%tmp2 = load <2 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
- %arrayidx = getelementptr <2 x i32>* %tmp2, i32 %tmp1 ; <<2 x i32>*> [#uses=1]
+ %arrayidx = getelementptr <2 x i32>, <2 x i32>* %tmp2, i32 %tmp1 ; <<2 x i32>*> [#uses=1]
%tmp3 = load i32* %i ; <i32> [#uses=1]
%tmp4 = load <4 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
- %arrayidx5 = getelementptr <4 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
+ %arrayidx5 = getelementptr <4 x i16>, <4 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
%tmp6 = load <4 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
%add = add <4 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1 > ; <<4 x i16>> [#uses=1]
%conv = bitcast <4 x i16> %add to <2 x i32> ; <<2 x i32>> [#uses=1]
forbody: ; preds = %forcond
%tmp1 = load i32* %i ; <i32> [#uses=1]
%tmp2 = load <7 x i32>** %dst.addr ; <<2 x i32>*> [#uses=1]
- %arrayidx = getelementptr <7 x i32>* %tmp2, i32 %tmp1 ; <<7 x i32>*> [#uses=1]
+ %arrayidx = getelementptr <7 x i32>, <7 x i32>* %tmp2, i32 %tmp1 ; <<7 x i32>*> [#uses=1]
%tmp3 = load i32* %i ; <i32> [#uses=1]
%tmp4 = load <14 x i16>** %src.addr ; <<4 x i16>*> [#uses=1]
- %arrayidx5 = getelementptr <14 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
+ %arrayidx5 = getelementptr <14 x i16>, <14 x i16>* %tmp4, i32 %tmp3 ; <<4 x i16>*> [#uses=1]
%tmp6 = load <14 x i16>* %arrayidx5 ; <<4 x i16>> [#uses=1]
%add = add <14 x i16> %tmp6, < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1 > ; <<4 x i16>> [#uses=1]
%conv = bitcast <14 x i16> %add to <7 x i32> ; <<7 x i32>> [#uses=1]
forbody: ; preds = %forcond
%tmp2 = load i32* %i ; <i32> [#uses=1]
%tmp3 = load i64** %dst_i.addr ; <i64*> [#uses=1]
- %arrayidx = getelementptr i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
+ %arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2 ; <i64*> [#uses=1]
%conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv, <8 x i8>** %dst
%tmp4 = load i32* %i ; <i32> [#uses=1]
%tmp5 = load i64** %src_i.addr ; <i64*> [#uses=1]
- %arrayidx6 = getelementptr i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
+ %arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4 ; <i64*> [#uses=1]
%conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1]
store <8 x i8>* %conv7, <8 x i8>** %src
%tmp8 = load i32* %i ; <i32> [#uses=1]
%tmp9 = load <8 x i8>** %dst ; <<8 x i8>*> [#uses=1]
- %arrayidx10 = getelementptr <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
+ %arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1]
%tmp11 = load i32* %i ; <i32> [#uses=1]
%tmp12 = load <8 x i8>** %src ; <<8 x i8>*> [#uses=1]
- %arrayidx13 = getelementptr <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
+ %arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1]
%tmp14 = load <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1]
%add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1]
%shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1]
"file complex.c, line 27, bb13": ; preds = %"file complex.c, line 27, bb1"
store i32 0, i32* %changed, align 4
- %r2 = getelementptr float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
+ %r2 = getelementptr float, float* bitcast ([20 x i64]* @compl to float*), i64 32 ; <float*> [#uses=1]
%r3 = bitcast float* %r2 to <2 x float>* ; <<2 x float>*> [#uses=1]
%r4 = load <2 x float>* %r3, align 4 ; <<2 x float>> [#uses=1]
call void @killcommon(i32* %changed)
; LINUX-LABEL: sret4:
; LINUX: retl $4
- %x = getelementptr inbounds %struct.S4* %agg.result, i32 0, i32 0
+ %x = getelementptr inbounds %struct.S4, %struct.S4* %agg.result, i32 0, i32 0
store i32 42, i32* %x, align 4
ret void
}
%this.addr = alloca %class.C5*, align 4
store %class.C5* %this, %class.C5** %this.addr, align 4
%this1 = load %class.C5** %this.addr
- %x = getelementptr inbounds %struct.S5* %agg.result, i32 0, i32 0
+ %x = getelementptr inbounds %struct.S5, %struct.S5* %agg.result, i32 0, i32 0
store i32 42, i32* %x, align 4
ret void
; WIN32-LABEL: {{^}}"?foo@C5@@QAE?AUS5@@XZ":
}
define x86_thiscallcc void @test7_g(%struct.test7* %in, %struct.test7* sret %out) {
- %s = getelementptr %struct.test7* %in, i32 0, i32 0
- %d = getelementptr %struct.test7* %out, i32 0, i32 0
+ %s = getelementptr %struct.test7, %struct.test7* %in, i32 0, i32 0
+ %d = getelementptr %struct.test7, %struct.test7* %out, i32 0, i32 0
%v = load i32* %s
store i32 %v, i32* %d
call void @clobber_eax()
; CHECK: leaq -92(%rbp), %rcx
; CHECK: callq external
%a = alloca [300 x i8]
- %gep = getelementptr [300 x i8]* %a, i32 0, i32 0
+ %gep = getelementptr [300 x i8], [300 x i8]* %a, i32 0, i32 0
call void @external(i8* %gep)
ret void
}
; CHECK: leaq -92(%rbp), %rcx
; CHECK: callq external
%a = alloca [300 x i8]
- %gep = getelementptr [300 x i8]* %a, i32 0, i32 0
+ %gep = getelementptr [300 x i8], [300 x i8]* %a, i32 0, i32 0
call void @external(i8* %gep)
ret void
}
; CHECK: callq __chkstk
; CHECK: subq %rax, %rsp
- %gep = getelementptr [300 x i8]* %alloca, i32 0, i32 0
+ %gep = getelementptr [300 x i8], [300 x i8]* %alloca, i32 0, i32 0
call void @external(i8* %gep)
; CHECK: subq $32, %rsp
; CHECK: leaq (%rbx), %rcx
define void @foo(i32** %p) {
%a = alloca i32, i32 10
- %addr = getelementptr i32* %a, i32 4
+ %addr = getelementptr i32, i32* %a, i32 4
store i32* %addr, i32** %p
ret void
}
@call_used_regs = external global [53 x i8], align 32
define fastcc void @foo() nounwind {
- %t = getelementptr [53 x i8]* @call_used_regs, i64 0, i64 4294967295
+ %t = getelementptr [53 x i8], [53 x i8]* @call_used_regs, i64 0, i64 4294967295
store i8 1, i8* %t, align 1
ret void
}
store i32 %i, i32* %i.addr
%tmp = load i32* %i.addr ; <i32> [#uses=1]
%idxprom = sext i32 %tmp to i64 ; <i64> [#uses=1]
- %arrayidx = getelementptr inbounds i32* getelementptr inbounds ([3 x i32]* @test.array, i32 0, i32 0), i64 %idxprom ; <i32*> [#uses=1]
+ %arrayidx = getelementptr inbounds i32, i32* getelementptr inbounds ([3 x i32]* @test.array, i32 0, i32 0), i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
%idx.ext = sext i32 %tmp1 to i64 ; <i64> [#uses=1]
- %add.ptr = getelementptr i8* blockaddress(@test2, %foo), i64 %idx.ext ; <i8*> [#uses=1]
+ %add.ptr = getelementptr i8, i8* blockaddress(@test2, %foo), i64 %idx.ext ; <i8*> [#uses=1]
br label %indirectgoto
foo: ; preds = %indirectgoto, %indirectgoto, %indirectgoto, %indirectgoto, %indirectgoto
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %struct.foo* %d, %struct.foo** %d_addr
%tmp = load %struct.foo** %d_addr, align 8 ; <%struct.foo*> [#uses=1]
- %tmp1 = getelementptr %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp2 = getelementptr %struct.foo* %tmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp3 = getelementptr [4 x i64]* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp4 = getelementptr [4 x i64]* %tmp2, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp1 = getelementptr %struct.foo, %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
+ %tmp2 = getelementptr %struct.foo, %struct.foo* %tmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
+ %tmp3 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp5 = load i64* %tmp4, align 8 ; <i64> [#uses=1]
store i64 %tmp5, i64* %tmp3, align 8
- %tmp6 = getelementptr [4 x i64]* %tmp1, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp7 = getelementptr [4 x i64]* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp6 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp7 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 1 ; <i64*> [#uses=1]
%tmp8 = load i64* %tmp7, align 8 ; <i64> [#uses=1]
store i64 %tmp8, i64* %tmp6, align 8
- %tmp9 = getelementptr [4 x i64]* %tmp1, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp10 = getelementptr [4 x i64]* %tmp2, i32 0, i32 2 ; <i64*> [#uses=1]
+ %tmp9 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 2 ; <i64*> [#uses=1]
+ %tmp10 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 2 ; <i64*> [#uses=1]
%tmp11 = load i64* %tmp10, align 8 ; <i64> [#uses=1]
store i64 %tmp11, i64* %tmp9, align 8
- %tmp12 = getelementptr [4 x i64]* %tmp1, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp13 = getelementptr [4 x i64]* %tmp2, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp12 = getelementptr [4 x i64], [4 x i64]* %tmp1, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp13 = getelementptr [4 x i64], [4 x i64]* %tmp2, i32 0, i32 3 ; <i64*> [#uses=1]
%tmp14 = load i64* %tmp13, align 8 ; <i64> [#uses=1]
store i64 %tmp14, i64* %tmp12, align 8
- %tmp15 = getelementptr %struct.foo* %memtmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp16 = getelementptr %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
- %tmp17 = getelementptr [4 x i64]* %tmp15, i32 0, i32 0 ; <i64*> [#uses=1]
- %tmp18 = getelementptr [4 x i64]* %tmp16, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp15 = getelementptr %struct.foo, %struct.foo* %memtmp, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
+ %tmp16 = getelementptr %struct.foo, %struct.foo* %agg.result, i32 0, i32 0 ; <[4 x i64]*> [#uses=4]
+ %tmp17 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp18 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp19 = load i64* %tmp18, align 8 ; <i64> [#uses=1]
store i64 %tmp19, i64* %tmp17, align 8
- %tmp20 = getelementptr [4 x i64]* %tmp15, i32 0, i32 1 ; <i64*> [#uses=1]
- %tmp21 = getelementptr [4 x i64]* %tmp16, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp20 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp21 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 1 ; <i64*> [#uses=1]
%tmp22 = load i64* %tmp21, align 8 ; <i64> [#uses=1]
store i64 %tmp22, i64* %tmp20, align 8
- %tmp23 = getelementptr [4 x i64]* %tmp15, i32 0, i32 2 ; <i64*> [#uses=1]
- %tmp24 = getelementptr [4 x i64]* %tmp16, i32 0, i32 2 ; <i64*> [#uses=1]
+ %tmp23 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 2 ; <i64*> [#uses=1]
+ %tmp24 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 2 ; <i64*> [#uses=1]
%tmp25 = load i64* %tmp24, align 8 ; <i64> [#uses=1]
store i64 %tmp25, i64* %tmp23, align 8
- %tmp26 = getelementptr [4 x i64]* %tmp15, i32 0, i32 3 ; <i64*> [#uses=1]
- %tmp27 = getelementptr [4 x i64]* %tmp16, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp26 = getelementptr [4 x i64], [4 x i64]* %tmp15, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp27 = getelementptr [4 x i64], [4 x i64]* %tmp16, i32 0, i32 3 ; <i64*> [#uses=1]
%tmp28 = load i64* %tmp27, align 8 ; <i64> [#uses=1]
store i64 %tmp28, i64* %tmp26, align 8
br label %return
%pending = alloca %struct.MatchInfo, align 8
%t = bitcast %struct.MatchInfo* %pending to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %t, i8* bitcast (%struct.MatchInfo* @NO_MATCH to i8*), i64 512, i32 8, i1 false)
- %u = getelementptr inbounds %struct.MatchInfo* %pending, i32 0, i32 2
+ %u = getelementptr inbounds %struct.MatchInfo, %struct.MatchInfo* %pending, i32 0, i32 2
%v = load i64* %u, align 8
br label %done
done:
define void @func([40 x i16]* %a, i32* %b, i16** %c, i64* %d) nounwind {
entry:
- %tmp103 = getelementptr inbounds [40 x i16]* %a, i64 0, i64 4
+ %tmp103 = getelementptr inbounds [40 x i16], [40 x i16]* %a, i64 0, i64 4
%tmp104 = load i16* %tmp103, align 2
%tmp105 = sext i16 %tmp104 to i32
%tmp106 = load i32* %b, align 4
%tmp107 = sub nsw i32 4, %tmp106
%tmp108 = load i16** %c, align 8
%tmp109 = sext i32 %tmp107 to i64
- %tmp110 = getelementptr inbounds i16* %tmp108, i64 %tmp109
+ %tmp110 = getelementptr inbounds i16, i16* %tmp108, i64 %tmp109
%tmp111 = load i16* %tmp110, align 1
%tmp112 = sext i16 %tmp111 to i32
%tmp = mul i32 355244649, %tmp112
; CHECK: jmp
define i32 @longest_match(%struct.internal_state* nocapture %s, i32 %cur_match) nounwind {
entry:
- %max_chain_length = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 31
+ %max_chain_length = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 31
%0 = load i32* %max_chain_length, align 4
- %window = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 14
+ %window = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 14
%1 = load i8** %window, align 8
- %strstart = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 27
+ %strstart = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 27
%2 = load i32* %strstart, align 4
%idx.ext = zext i32 %2 to i64
- %add.ptr = getelementptr inbounds i8* %1, i64 %idx.ext
- %prev_length = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 30
+ %add.ptr = getelementptr inbounds i8, i8* %1, i64 %idx.ext
+ %prev_length = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 30
%3 = load i32* %prev_length, align 4
- %nice_match1 = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 36
+ %nice_match1 = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 36
%4 = load i32* %nice_match1, align 4
- %w_size = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 11
+ %w_size = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 11
%5 = load i32* %w_size, align 4
%sub = add i32 %5, -262
%cmp = icmp ugt i32 %2, %sub
%sub6 = sub i32 %2, %sub
%sub6. = select i1 %cmp, i32 %sub6, i32 0
- %prev7 = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 16
+ %prev7 = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 16
%6 = load i16** %prev7, align 8
- %w_mask = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 13
+ %w_mask = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 13
%7 = load i32* %w_mask, align 4
%add.ptr11.sum = add i64 %idx.ext, 258
- %add.ptr12 = getelementptr inbounds i8* %1, i64 %add.ptr11.sum
+ %add.ptr12 = getelementptr inbounds i8, i8* %1, i64 %add.ptr11.sum
%sub13 = add nsw i32 %3, -1
%idxprom = sext i32 %sub13 to i64
%add.ptr.sum = add i64 %idxprom, %idx.ext
- %arrayidx = getelementptr inbounds i8* %1, i64 %add.ptr.sum
+ %arrayidx = getelementptr inbounds i8, i8* %1, i64 %add.ptr.sum
%8 = load i8* %arrayidx, align 1
%idxprom14 = sext i32 %3 to i64
%add.ptr.sum213 = add i64 %idxprom14, %idx.ext
- %arrayidx15 = getelementptr inbounds i8* %1, i64 %add.ptr.sum213
+ %arrayidx15 = getelementptr inbounds i8, i8* %1, i64 %add.ptr.sum213
%9 = load i8* %arrayidx15, align 1
- %good_match = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 35
+ %good_match = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 35
%10 = load i32* %good_match, align 4
%cmp17 = icmp ult i32 %3, %10
%shr = lshr i32 %0, 2
%chain_length.0 = select i1 %cmp17, i32 %0, i32 %shr
- %lookahead = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 29
+ %lookahead = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 29
%11 = load i32* %lookahead, align 4
%cmp18 = icmp ugt i32 %4, %11
%. = select i1 %cmp18, i32 %11, i32 %4
- %match_start = getelementptr inbounds %struct.internal_state* %s, i64 0, i32 28
+ %match_start = getelementptr inbounds %struct.internal_state, %struct.internal_state* %s, i64 0, i32 28
%add.ptr.sum217 = add i64 %idx.ext, 1
- %arrayidx44 = getelementptr inbounds i8* %1, i64 %add.ptr.sum217
+ %arrayidx44 = getelementptr inbounds i8, i8* %1, i64 %add.ptr.sum217
%add.ptr.sum218 = add i64 %idx.ext, 2
- %add.ptr50 = getelementptr inbounds i8* %1, i64 %add.ptr.sum218
+ %add.ptr50 = getelementptr inbounds i8, i8* %1, i64 %add.ptr.sum218
%sub.ptr.lhs.cast = ptrtoint i8* %add.ptr12 to i64
br label %do.body
%scan_end1.0 = phi i8 [ %scan_end1.1, %land.rhs131 ], [ %8, %entry ]
%scan_end.0 = phi i8 [ %scan_end.1, %land.rhs131 ], [ %9, %entry ]
%idx.ext23 = zext i32 %cur_match.addr.0 to i64
- %add.ptr24 = getelementptr inbounds i8* %1, i64 %idx.ext23
+ %add.ptr24 = getelementptr inbounds i8, i8* %1, i64 %idx.ext23
%idxprom25 = sext i32 %best_len.0 to i64
%add.ptr24.sum = add i64 %idx.ext23, %idxprom25
- %arrayidx26 = getelementptr inbounds i8* %1, i64 %add.ptr24.sum
+ %arrayidx26 = getelementptr inbounds i8, i8* %1, i64 %add.ptr24.sum
%12 = load i8* %arrayidx26, align 1
%cmp28 = icmp eq i8 %12, %scan_end.0
br i1 %cmp28, label %lor.lhs.false, label %do.cond125
%sub30 = add nsw i32 %best_len.0, -1
%idxprom31 = sext i32 %sub30 to i64
%add.ptr24.sum214 = add i64 %idx.ext23, %idxprom31
- %arrayidx32 = getelementptr inbounds i8* %1, i64 %add.ptr24.sum214
+ %arrayidx32 = getelementptr inbounds i8, i8* %1, i64 %add.ptr24.sum214
%13 = load i8* %arrayidx32, align 1
%cmp35 = icmp eq i8 %13, %scan_end1.0
br i1 %cmp35, label %lor.lhs.false37, label %do.cond125
lor.lhs.false42: ; preds = %lor.lhs.false37
%add.ptr24.sum215 = add i64 %idx.ext23, 1
- %incdec.ptr = getelementptr inbounds i8* %1, i64 %add.ptr24.sum215
+ %incdec.ptr = getelementptr inbounds i8, i8* %1, i64 %add.ptr24.sum215
%16 = load i8* %incdec.ptr, align 1
%17 = load i8* %arrayidx44, align 1
%cmp46 = icmp eq i8 %16, %17
if.end49: ; preds = %lor.lhs.false42
%incdec.ptr.sum = add i64 %idx.ext23, 2
- %incdec.ptr51 = getelementptr inbounds i8* %1, i64 %incdec.ptr.sum
+ %incdec.ptr51 = getelementptr inbounds i8, i8* %1, i64 %incdec.ptr.sum
br label %do.cond
do.cond: ; preds = %land.lhs.true100, %if.end49
%match.0 = phi i8* [ %incdec.ptr51, %if.end49 ], [ %incdec.ptr103, %land.lhs.true100 ]
%scan.1 = phi i8* [ %add.ptr50, %if.end49 ], [ %incdec.ptr101, %land.lhs.true100 ]
- %incdec.ptr53 = getelementptr inbounds i8* %scan.1, i64 1
+ %incdec.ptr53 = getelementptr inbounds i8, i8* %scan.1, i64 1
%18 = load i8* %incdec.ptr53, align 1
- %incdec.ptr55 = getelementptr inbounds i8* %match.0, i64 1
+ %incdec.ptr55 = getelementptr inbounds i8, i8* %match.0, i64 1
%19 = load i8* %incdec.ptr55, align 1
%cmp57 = icmp eq i8 %18, %19
br i1 %cmp57, label %land.lhs.true, label %do.end
land.lhs.true: ; preds = %do.cond
- %incdec.ptr59 = getelementptr inbounds i8* %scan.1, i64 2
+ %incdec.ptr59 = getelementptr inbounds i8, i8* %scan.1, i64 2
%20 = load i8* %incdec.ptr59, align 1
- %incdec.ptr61 = getelementptr inbounds i8* %match.0, i64 2
+ %incdec.ptr61 = getelementptr inbounds i8, i8* %match.0, i64 2
%21 = load i8* %incdec.ptr61, align 1
%cmp63 = icmp eq i8 %20, %21
br i1 %cmp63, label %land.lhs.true65, label %do.end
land.lhs.true65: ; preds = %land.lhs.true
- %incdec.ptr66 = getelementptr inbounds i8* %scan.1, i64 3
+ %incdec.ptr66 = getelementptr inbounds i8, i8* %scan.1, i64 3
%22 = load i8* %incdec.ptr66, align 1
- %incdec.ptr68 = getelementptr inbounds i8* %match.0, i64 3
+ %incdec.ptr68 = getelementptr inbounds i8, i8* %match.0, i64 3
%23 = load i8* %incdec.ptr68, align 1
%cmp70 = icmp eq i8 %22, %23
br i1 %cmp70, label %land.lhs.true72, label %do.end
land.lhs.true72: ; preds = %land.lhs.true65
- %incdec.ptr73 = getelementptr inbounds i8* %scan.1, i64 4
+ %incdec.ptr73 = getelementptr inbounds i8, i8* %scan.1, i64 4
%24 = load i8* %incdec.ptr73, align 1
- %incdec.ptr75 = getelementptr inbounds i8* %match.0, i64 4
+ %incdec.ptr75 = getelementptr inbounds i8, i8* %match.0, i64 4
%25 = load i8* %incdec.ptr75, align 1
%cmp77 = icmp eq i8 %24, %25
br i1 %cmp77, label %land.lhs.true79, label %do.end
land.lhs.true79: ; preds = %land.lhs.true72
- %incdec.ptr80 = getelementptr inbounds i8* %scan.1, i64 5
+ %incdec.ptr80 = getelementptr inbounds i8, i8* %scan.1, i64 5
%26 = load i8* %incdec.ptr80, align 1
- %incdec.ptr82 = getelementptr inbounds i8* %match.0, i64 5
+ %incdec.ptr82 = getelementptr inbounds i8, i8* %match.0, i64 5
%27 = load i8* %incdec.ptr82, align 1
%cmp84 = icmp eq i8 %26, %27
br i1 %cmp84, label %land.lhs.true86, label %do.end
land.lhs.true86: ; preds = %land.lhs.true79
- %incdec.ptr87 = getelementptr inbounds i8* %scan.1, i64 6
+ %incdec.ptr87 = getelementptr inbounds i8, i8* %scan.1, i64 6
%28 = load i8* %incdec.ptr87, align 1
- %incdec.ptr89 = getelementptr inbounds i8* %match.0, i64 6
+ %incdec.ptr89 = getelementptr inbounds i8, i8* %match.0, i64 6
%29 = load i8* %incdec.ptr89, align 1
%cmp91 = icmp eq i8 %28, %29
br i1 %cmp91, label %land.lhs.true93, label %do.end
land.lhs.true93: ; preds = %land.lhs.true86
- %incdec.ptr94 = getelementptr inbounds i8* %scan.1, i64 7
+ %incdec.ptr94 = getelementptr inbounds i8, i8* %scan.1, i64 7
%30 = load i8* %incdec.ptr94, align 1
- %incdec.ptr96 = getelementptr inbounds i8* %match.0, i64 7
+ %incdec.ptr96 = getelementptr inbounds i8, i8* %match.0, i64 7
%31 = load i8* %incdec.ptr96, align 1
%cmp98 = icmp eq i8 %30, %31
br i1 %cmp98, label %land.lhs.true100, label %do.end
land.lhs.true100: ; preds = %land.lhs.true93
- %incdec.ptr101 = getelementptr inbounds i8* %scan.1, i64 8
+ %incdec.ptr101 = getelementptr inbounds i8, i8* %scan.1, i64 8
%32 = load i8* %incdec.ptr101, align 1
- %incdec.ptr103 = getelementptr inbounds i8* %match.0, i64 8
+ %incdec.ptr103 = getelementptr inbounds i8, i8* %match.0, i64 8
%33 = load i8* %incdec.ptr103, align 1
%cmp105 = icmp eq i8 %32, %33
%cmp107 = icmp ult i8* %incdec.ptr101, %add.ptr12
%sub119 = add nsw i32 %sub110, -1
%idxprom120 = sext i32 %sub119 to i64
%add.ptr111.sum = add i64 %idxprom120, %idx.ext
- %arrayidx121 = getelementptr inbounds i8* %1, i64 %add.ptr111.sum
+ %arrayidx121 = getelementptr inbounds i8, i8* %1, i64 %add.ptr111.sum
%34 = load i8* %arrayidx121, align 1
%idxprom122 = sext i32 %sub110 to i64
%add.ptr111.sum216 = add i64 %idxprom122, %idx.ext
- %arrayidx123 = getelementptr inbounds i8* %1, i64 %add.ptr111.sum216
+ %arrayidx123 = getelementptr inbounds i8, i8* %1, i64 %add.ptr111.sum216
%35 = load i8* %arrayidx123, align 1
br label %do.cond125
%scan_end.1 = phi i8 [ %scan_end.0, %do.body ], [ %scan_end.0, %lor.lhs.false ], [ %scan_end.0, %lor.lhs.false37 ], [ %scan_end.0, %lor.lhs.false42 ], [ %35, %if.end118 ], [ %scan_end.0, %do.end ]
%and = and i32 %cur_match.addr.0, %7
%idxprom126 = zext i32 %and to i64
- %arrayidx127 = getelementptr inbounds i16* %6, i64 %idxprom126
+ %arrayidx127 = getelementptr inbounds i16, i16* %6, i64 %idxprom126
%36 = load i16* %arrayidx127, align 2
%conv128 = zext i16 %36 to i32
%cmp129 = icmp ugt i32 %conv128, %sub6.
define i32 @test(i32 %bar) nounwind readnone {
entry:
%bar_addr = alloca i32
- %0 = getelementptr i32* %bar_addr, i32 -1
+ %0 = getelementptr i32, i32* %bar_addr, i32 -1
%1 = load i32* %0, align 4
ret i32 %1
}
bb3.i15.i.i: ; preds = %bb3.i15.i.i, %entry
%indvar.i.i.i = phi i32 [ %indvar.next.i.i.i, %bb3.i15.i.i ], [ 0, %entry ] ; <i32> [#uses=2]
%tmp137 = sub i32 0, %indvar.i.i.i ; <i32> [#uses=1]
- %scevgep13.i.i.i = getelementptr i32* undef, i32 %tmp137 ; <i32*> [#uses=2]
+ %scevgep13.i.i.i = getelementptr i32, i32* undef, i32 %tmp137 ; <i32*> [#uses=2]
%scevgep1314.i.i.i = bitcast i32* %scevgep13.i.i.i to %struct.dwarf_fde** ; <%struct.dwarf_fde**> [#uses=1]
%0 = load %struct.dwarf_fde** %scevgep1314.i.i.i, align 4 ; <%struct.dwarf_fde*> [#uses=0]
store i32 undef, i32* %scevgep13.i.i.i
; LARGE: retsp 0
define i32 @f(i32* %i) {
entry:
- %0 = getelementptr inbounds i32* %i, i32 16383
+ %0 = getelementptr inbounds i32, i32* %i, i32 16383
%1 = load i32* %0
- %2 = getelementptr inbounds i32* %i, i32 16384
+ %2 = getelementptr inbounds i32, i32* %i, i32 16384
%3 = load i32* %2
%4 = add nsw i32 %1, %3
%5 = load i32* getelementptr inbounds ([100 x i32]* @l, i32 0, i32 0)
define i32 @f6(i32 %i) {
entry:
%0 = alloca [200000 x i32]
- %1 = getelementptr inbounds [200000 x i32]* %0, i32 0, i32 0
+ %1 = getelementptr inbounds [200000 x i32], [200000 x i32]* %0, i32 0, i32 0
call void @f5(i32* %1)
- %2 = getelementptr inbounds [200000 x i32]* %0, i32 0, i32 199999
+ %2 = getelementptr inbounds [200000 x i32], [200000 x i32]* %0, i32 0, i32 199999
call void @f5(i32* %2)
ret i32 %i
}
define void @f8() nounwind {
entry:
%0 = alloca [256 x i32]
- %1 = getelementptr inbounds [256 x i32]* %0, i32 0, i32 253
+ %1 = getelementptr inbounds [256 x i32], [256 x i32]* %0, i32 0, i32 253
call void @f5(i32* %1)
ret void
}
define void @f9() nounwind {
entry:
%0 = alloca [32768 x i32]
- %1 = getelementptr inbounds [32768 x i32]* %0, i32 0, i32 32765
+ %1 = getelementptr inbounds [32768 x i32], [32768 x i32]* %0, i32 0, i32 32765
call void @f5(i32* %1)
ret void
}
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
- %2 = getelementptr inbounds [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
+ %2 = getelementptr inbounds [5 x i8*], [5 x i8*]* @C.0.2070, i32 0, i32 %i ; <i8**> [#uses=1]
%gotovar.4.0.pre = load i8** %2, align 4 ; <i8*> [#uses=1]
br label %bb2
entry:
; CHECK-LABEL: load32:
; CHECK: ldw r0, r0[r1]
- %0 = getelementptr i32* %p, i32 %offset
+ %0 = getelementptr i32, i32* %p, i32 %offset
%1 = load i32* %0, align 4
ret i32 %1
}
entry:
; CHECK-LABEL: load32_imm:
; CHECK: ldw r0, r0[11]
- %0 = getelementptr i32* %p, i32 11
+ %0 = getelementptr i32, i32* %p, i32 11
%1 = load i32* %0, align 4
ret i32 %1
}
; CHECK-LABEL: load16:
; CHECK: ld16s r0, r0[r1]
; CHECK-NOT: sext
- %0 = getelementptr i16* %p, i32 %offset
+ %0 = getelementptr i16, i16* %p, i32 %offset
%1 = load i16* %0, align 2
%2 = sext i16 %1 to i32
ret i32 %2
; CHECK-LABEL: load8:
; CHECK: ld8u r0, r0[r1]
; CHECK-NOT: zext
- %0 = getelementptr i8* %p, i32 %offset
+ %0 = getelementptr i8, i8* %p, i32 %offset
%1 = load i8* %0, align 1
%2 = zext i8 %1 to i32
ret i32 %2
; CHECK-LABEL: f1:
; CHECK: ldaw r11, cp[a+4]
; CHECK: mov r0, r11
- %0 = getelementptr [0 x i32]* @a, i32 0, i32 1
+ %0 = getelementptr [0 x i32], [0 x i32]* @a, i32 0, i32 1
ret i32* %0
}
entry:
; CHECK-LABEL: f2:
; CHECK: ldaw r0, dp[b+4]
- %0 = getelementptr [0 x i32]* @b, i32 0, i32 1
+ %0 = getelementptr [0 x i32], [0 x i32]* @b, i32 0, i32 1
ret i32* %0
}
; CHECK-LABEL: f3:
; CHECK: ldaw r11, cp[a]
; CHECK: sub r0, r11, 4
- %0 = getelementptr [0 x i32]* @a, i32 0, i32 -1
+ %0 = getelementptr [0 x i32], [0 x i32]* @a, i32 0, i32 -1
ret i32* %0
}
; CHECK-LABEL: f4:
; CHECK: ldaw [[REG:r[0-9]+]], dp[b]
; CHECK: sub r0, [[REG]], 4
- %0 = getelementptr [0 x i32]* @b, i32 0, i32 -1
+ %0 = getelementptr [0 x i32], [0 x i32]* @b, i32 0, i32 -1
ret i32* %0
}
%11 = load volatile i32* @g9, align 4 ; <i32> [#uses=1]
%12 = load volatile i32* @g10, align 4 ; <i32> [#uses=1]
%13 = load volatile i32* @g11, align 4 ; <i32> [#uses=2]
- %14 = getelementptr [100 x i32]* %x, i32 0, i32 50 ; <i32*> [#uses=1]
+ %14 = getelementptr [100 x i32], [100 x i32]* %x, i32 0, i32 50 ; <i32*> [#uses=1]
store i32 %13, i32* %14, align 4
store volatile i32 %13, i32* @g11, align 4
store volatile i32 %12, i32* @g10, align 4
store volatile i32 %4, i32* @g2, align 4
store volatile i32 %3, i32* @g1, align 4
store volatile i32 %2, i32* @g0, align 4
- %x1 = getelementptr [100 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
+ %x1 = getelementptr [100 x i32], [100 x i32]* %x, i32 0, i32 0 ; <i32*> [#uses=1]
call void @g(i32* %x1, i32* %1) nounwind
ret void
}
define void @ScavengeSlots(i32 %r0, i32 %r1, i32 %r2, i32 %r3, i32 %r4) nounwind {
entry:
%Data = alloca [100000 x i32]
- %i0 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 80000
+ %i0 = getelementptr inbounds [100000 x i32], [100000 x i32]* %Data, i32 0, i32 80000
store volatile i32 %r0, i32* %i0
- %i1 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 81000
+ %i1 = getelementptr inbounds [100000 x i32], [100000 x i32]* %Data, i32 0, i32 81000
store volatile i32 %r1, i32* %i1
- %i2 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 82000
+ %i2 = getelementptr inbounds [100000 x i32], [100000 x i32]* %Data, i32 0, i32 82000
store volatile i32 %r2, i32* %i2
- %i3 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 83000
+ %i3 = getelementptr inbounds [100000 x i32], [100000 x i32]* %Data, i32 0, i32 83000
store volatile i32 %r3, i32* %i3
- %i4 = getelementptr inbounds [100000 x i32]* %Data, i32 0, i32 84000
+ %i4 = getelementptr inbounds [100000 x i32], [100000 x i32]* %Data, i32 0, i32 84000
store volatile i32 %r4, i32* %i4
ret void
}
entry:
; CHECK-LABEL: store32:
; CHECK: stw r2, r0[r1]
- %0 = getelementptr i32* %p, i32 %offset
+ %0 = getelementptr i32, i32* %p, i32 %offset
store i32 %val, i32* %0, align 4
ret void
}
entry:
; CHECK-LABEL: store32_imm:
; CHECK: stw r1, r0[11]
- %0 = getelementptr i32* %p, i32 11
+ %0 = getelementptr i32, i32* %p, i32 11
store i32 %val, i32* %0, align 4
ret void
}
entry:
; CHECK-LABEL: store16:
; CHECK: st16 r2, r0[r1]
- %0 = getelementptr i16* %p, i32 %offset
+ %0 = getelementptr i16, i16* %p, i32 %offset
store i16 %val, i16* %0, align 2
ret void
}
entry:
; CHECK-LABEL: store8:
; CHECK: st8 r2, r0[r1]
- %0 = getelementptr i8* %p, i32 %offset
+ %0 = getelementptr i8, i8* %p, i32 %offset
store i8 %val, i8* %0, align 1
ret void
}
; CHECK: stw r11, sp[7]
%TRAMP.23 = alloca [20 x i8], align 2
%FRAME.0 = alloca %struct.FRAME.f, align 4
- %TRAMP.23.sub = getelementptr inbounds [20 x i8]* %TRAMP.23, i32 0, i32 0
+ %TRAMP.23.sub = getelementptr inbounds [20 x i8], [20 x i8]* %TRAMP.23, i32 0, i32 0
%FRAME.02 = bitcast %struct.FRAME.f* %FRAME.0 to i8*
call void @llvm.init.trampoline(i8* %TRAMP.23.sub, i8* bitcast (i32 (%struct.FRAME.f*)* @g.1101 to i8*), i8* %FRAME.02)
%tramp = call i8* @llvm.adjust.trampoline(i8* %TRAMP.23.sub)
- %0 = getelementptr inbounds %struct.FRAME.f* %FRAME.0, i32 0, i32 1
+ %0 = getelementptr inbounds %struct.FRAME.f, %struct.FRAME.f* %FRAME.0, i32 0, i32 1
%1 = bitcast i8* %tramp to i32 ()*
store i32 ()* %1, i32 ()** %0, align 4
- %2 = getelementptr inbounds %struct.FRAME.f* %FRAME.0, i32 0, i32 0
+ %2 = getelementptr inbounds %struct.FRAME.f, %struct.FRAME.f* %FRAME.0, i32 0, i32 0
store i32 1, i32* %2, align 4
call void @h(i32 ()* %1) nounwind
ret void
; CHECK: ldw r11, sp[0]
; CHECK-NEXT: ldw r0, r11[0]
; CHECK-NEXT: retsp 0
- %0 = getelementptr inbounds %struct.FRAME.f* %CHAIN.1, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.FRAME.f, %struct.FRAME.f* %CHAIN.1, i32 0, i32 0
%1 = load i32* %0, align 4
ret i32 %1
}
%data_addr.i16 = alloca i64, align 8 ; <i64*> [#uses=0]
%data_addr.i15 = alloca i32, align 4 ; <i32*> [#uses=0]
%data_addr.i = alloca i64, align 8 ; <i64*> [#uses=0]
- %0 = getelementptr inbounds %struct.gpm_t* %gpm, i32 0, i32 2, i32 0 ; <i8*> [#uses=1]
- %1 = getelementptr inbounds %struct.gpt_t* %gpt, i32 0, i32 9, i32 0 ; <i8*> [#uses=1]
+ %0 = getelementptr inbounds %struct.gpm_t, %struct.gpm_t* %gpm, i32 0, i32 2, i32 0 ; <i8*> [#uses=1]
+ %1 = getelementptr inbounds %struct.gpt_t, %struct.gpt_t* %gpt, i32 0, i32 9, i32 0 ; <i8*> [#uses=1]
call void @uuid_LtoB(i8* %0, i8* %1) nounwind, !dbg !0
%a9 = load volatile i64* %data_addr.i18, align 8 ; <i64> [#uses=1]
%a10 = call i64 @llvm.bswap.i64(i64 %a9) nounwind ; <i64> [#uses=1]
- %a11 = getelementptr inbounds %struct.gpt_t* %gpt, i32 0, i32 8, !dbg !7 ; <i64*> [#uses=1]
+ %a11 = getelementptr inbounds %struct.gpt_t, %struct.gpt_t* %gpt, i32 0, i32 8, !dbg !7 ; <i64*> [#uses=1]
%a12 = load i64* %a11, align 4, !dbg !7 ; <i64> [#uses=1]
call void @llvm.dbg.declare(metadata i64* %data_addr.i17, metadata !8, metadata !{!"0x102"}) nounwind, !dbg !14
store i64 %a12, i64* %data_addr.i17, align 8
%a14 = call i64 @llvm.bswap.i64(i64 %a13) nounwind ; <i64> [#uses=2]
%a15 = add i64 %a10, %a14, !dbg !7 ; <i64> [#uses=1]
%a16 = sub i64 %a15, %a14 ; <i64> [#uses=1]
- %a17 = getelementptr inbounds %struct.gpm_t* %gpm, i32 0, i32 5, !dbg !7 ; <i64*> [#uses=1]
+ %a17 = getelementptr inbounds %struct.gpm_t, %struct.gpm_t* %gpm, i32 0, i32 5, !dbg !7 ; <i64*> [#uses=1]
store i64 %a16, i64* %a17, align 4, !dbg !7
ret void, !dbg !7
}
define %struct.B* @_ZN1BC2Ev(%struct.B* %this) unnamed_addr #0 align 2 {
entry:
tail call void @llvm.dbg.value(metadata %struct.B* %this, i64 0, metadata !30, metadata !38), !dbg !39
- %0 = getelementptr inbounds %struct.B* %this, i64 0, i32 0, !dbg !40
+ %0 = getelementptr inbounds %struct.B, %struct.B* %this, i64 0, i32 0, !dbg !40
%call = tail call %struct.A* @_ZN1AC2Ev(%struct.A* %0) #3, !dbg !40
- %1 = getelementptr inbounds %struct.B* %this, i64 0, i32 0, i32 0, !dbg !40
+ %1 = getelementptr inbounds %struct.B, %struct.B* %this, i64 0, i32 0, i32 0, !dbg !40
store i32 (...)** bitcast (i8** getelementptr inbounds ([4 x i8*]* @_ZTV1B, i64 0, i64 2) to i32 (...)**), i32 (...)*** %1, align 8, !dbg !40, !tbaa !41
ret %struct.B* %this, !dbg !40
}
entry:
tail call void @llvm.dbg.value(metadata %struct.B* %this, i64 0, metadata !34, metadata !38), !dbg !44
tail call void @llvm.dbg.value(metadata %struct.B* %this, i64 0, metadata !45, metadata !38) #3, !dbg !47
- %0 = getelementptr inbounds %struct.B* %this, i64 0, i32 0, !dbg !48
+ %0 = getelementptr inbounds %struct.B, %struct.B* %this, i64 0, i32 0, !dbg !48
%call.i = tail call %struct.A* @_ZN1AC2Ev(%struct.A* %0) #3, !dbg !48
- %1 = getelementptr inbounds %struct.B* %this, i64 0, i32 0, i32 0, !dbg !48
+ %1 = getelementptr inbounds %struct.B, %struct.B* %this, i64 0, i32 0, i32 0, !dbg !48
store i32 (...)** bitcast (i8** getelementptr inbounds ([4 x i8*]* @_ZTV1B, i64 0, i64 2) to i32 (...)**), i32 (...)*** %1, align 8, !dbg !48, !tbaa !41
ret %struct.B* %this, !dbg !46
}
%0 = load i64* @a, align 8, !dbg !48, !tbaa !49
%call = tail call noalias i8* @_Znwm(i64 %0) #5, !dbg !53
store i8* %call, i8** bitcast (i32** @b to i8**), align 8, !dbg !54, !tbaa !55
- %1 = getelementptr inbounds %struct.A* %agg.tmp, i64 0, i32 0, !dbg !57
- %2 = getelementptr inbounds %struct.A* %p1, i64 0, i32 0, !dbg !57
+ %1 = getelementptr inbounds %struct.A, %struct.A* %agg.tmp, i64 0, i32 0, !dbg !57
+ %2 = getelementptr inbounds %struct.A, %struct.A* %p1, i64 0, i32 0, !dbg !57
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 24, i32 8, i1 false), !dbg !57, !tbaa.struct !58
call void @_Z2f91A(%struct.A* %agg.tmp), !dbg !61
ret void, !dbg !62
entry:
%agg.tmp.i = alloca %struct.A, align 8
tail call void @llvm.dbg.declare(metadata %struct.A* %p1, metadata !33, metadata !46), !dbg !63
- %0 = getelementptr inbounds %struct.A* %p1, i64 0, i32 0, !dbg !64
- %1 = getelementptr inbounds %struct.A* %agg.tmp.i, i64 0, i32 0, !dbg !65
+ %0 = getelementptr inbounds %struct.A, %struct.A* %p1, i64 0, i32 0, !dbg !64
+ %1 = getelementptr inbounds %struct.A, %struct.A* %agg.tmp.i, i64 0, i32 0, !dbg !65
call void @llvm.lifetime.start(i64 24, i8* %1), !dbg !65
%2 = load i64* @a, align 8, !dbg !67, !tbaa !49
%call.i = tail call noalias i8* @_Znwm(i64 %2) #5, !dbg !68
call void @llvm.dbg.value(metadata i8 1, i64 0, metadata !74, metadata !83), !dbg !77
call void @llvm.dbg.value(metadata i8 1, i64 0, metadata !74, metadata !84), !dbg !77
call void @llvm.dbg.declare(metadata %struct.A* undef, metadata !74, metadata !46), !dbg !77
- %1 = getelementptr inbounds %struct.A* %agg.tmp.i.i, i64 0, i32 0, !dbg !85
+ %1 = getelementptr inbounds %struct.A, %struct.A* %agg.tmp.i.i, i64 0, i32 0, !dbg !85
call void @llvm.lifetime.start(i64 24, i8* %1), !dbg !85
%2 = load i64* @a, align 8, !dbg !87, !tbaa !49
%call.i.i5 = invoke noalias i8* @_Znwm(i64 %2) #5
to label %call.i.i.noexc unwind label %lpad, !dbg !88
call.i.i.noexc: ; preds = %entry
- %agg.tmp.sroa.4.17..sroa_idx = getelementptr inbounds [7 x i8]* %agg.tmp.sroa.4, i64 0, i64 0, !dbg !89
- %agg.tmp.sroa.2.1..sroa_idx = getelementptr inbounds [15 x i8]* %agg.tmp.sroa.2, i64 0, i64 0, !dbg !89
+ %agg.tmp.sroa.4.17..sroa_idx = getelementptr inbounds [7 x i8], [7 x i8]* %agg.tmp.sroa.4, i64 0, i64 0, !dbg !89
+ %agg.tmp.sroa.2.1..sroa_idx = getelementptr inbounds [15 x i8], [15 x i8]* %agg.tmp.sroa.2, i64 0, i64 0, !dbg !89
store i8* %call.i.i5, i8** bitcast (i32** @b to i8**), align 8, !dbg !90, !tbaa !55
store i8 1, i8* %1, align 8, !dbg !91
- %agg.tmp.sroa.2.0..sroa_raw_idx = getelementptr inbounds i8* %1, i64 1, !dbg !91
+ %agg.tmp.sroa.2.0..sroa_raw_idx = getelementptr inbounds i8, i8* %1, i64 1, !dbg !91
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.tmp.sroa.2.0..sroa_raw_idx, i8* %agg.tmp.sroa.2.1..sroa_idx, i64 15, i32 1, i1 false), !dbg !91
- %agg.tmp.sroa.3.0..sroa_idx = getelementptr inbounds %struct.A* %agg.tmp.i.i, i64 0, i32 2, !dbg !91
+ %agg.tmp.sroa.3.0..sroa_idx = getelementptr inbounds %struct.A, %struct.A* %agg.tmp.i.i, i64 0, i32 2, !dbg !91
store i8 1, i8* %agg.tmp.sroa.3.0..sroa_idx, align 8, !dbg !91
- %agg.tmp.sroa.4.0..sroa_raw_idx = getelementptr inbounds i8* %1, i64 17, !dbg !91
+ %agg.tmp.sroa.4.0..sroa_raw_idx = getelementptr inbounds i8, i8* %1, i64 17, !dbg !91
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.tmp.sroa.4.0..sroa_raw_idx, i8* %agg.tmp.sroa.4.17..sroa_idx, i64 7, i32 1, i1 false), !dbg !91
invoke void @_Z2f91A(%struct.A* %agg.tmp.i.i)
to label %invoke.cont unwind label %lpad, !dbg !92
define i32 @return_five_int(%struct.five* %f) #0 {
entry:
call void @llvm.dbg.declare(metadata %struct.five* %f, metadata !17, metadata !{!"0x102\006"}), !dbg !18
- %a = getelementptr inbounds %struct.five* %f, i32 0, i32 0, !dbg !19
+ %a = getelementptr inbounds %struct.five, %struct.five* %f, i32 0, i32 0, !dbg !19
%0 = load i32* %a, align 4, !dbg !19
ret i32 %0, !dbg !19
}
define %struct.B* @_ZN1BC2Ev(%struct.B* %this) unnamed_addr #0 align 2 {
entry:
tail call void @llvm.dbg.value(metadata %struct.B* %this, i64 0, metadata !30, metadata !40), !dbg !41
- %0 = getelementptr inbounds %struct.B* %this, i32 0, i32 0, !dbg !42
+ %0 = getelementptr inbounds %struct.B, %struct.B* %this, i32 0, i32 0, !dbg !42
%call = tail call %struct.A* @_ZN1AC2Ev(%struct.A* %0) #3, !dbg !42
- %1 = getelementptr inbounds %struct.B* %this, i32 0, i32 0, i32 0, !dbg !42
+ %1 = getelementptr inbounds %struct.B, %struct.B* %this, i32 0, i32 0, i32 0, !dbg !42
store i32 (...)** bitcast (i8** getelementptr inbounds ([4 x i8*]* @_ZTV1B, i32 0, i32 2) to i32 (...)**), i32 (...)*** %1, align 4, !dbg !42, !tbaa !43
ret %struct.B* %this, !dbg !42
}
entry:
tail call void @llvm.dbg.value(metadata %struct.B* %this, i64 0, metadata !34, metadata !40), !dbg !46
tail call void @llvm.dbg.value(metadata %struct.B* %this, i64 0, metadata !47, metadata !40) #3, !dbg !49
- %0 = getelementptr inbounds %struct.B* %this, i32 0, i32 0, !dbg !50
+ %0 = getelementptr inbounds %struct.B, %struct.B* %this, i32 0, i32 0, !dbg !50
%call.i = tail call %struct.A* @_ZN1AC2Ev(%struct.A* %0) #3, !dbg !50
- %1 = getelementptr inbounds %struct.B* %this, i32 0, i32 0, i32 0, !dbg !50
+ %1 = getelementptr inbounds %struct.B, %struct.B* %this, i32 0, i32 0, i32 0, !dbg !50
store i32 (...)** bitcast (i8** getelementptr inbounds ([4 x i8*]* @_ZTV1B, i32 0, i32 2) to i32 (...)**), i32 (...)*** %1, align 4, !dbg !50, !tbaa !43
ret %struct.B* %this, !dbg !48
}
for.body: ; preds = %entry, %for.body.for.body_crit_edge
%0 = phi float [ %.pre, %for.body.for.body_crit_edge ], [ %r, %entry ]
%i.09 = phi i32 [ %inc, %for.body.for.body_crit_edge ], [ 0, %entry ]
- %arrayidx2 = getelementptr inbounds float* %vla, i32 %i.09, !dbg !31
+ %arrayidx2 = getelementptr inbounds float, float* %vla, i32 %i.09, !dbg !31
%div = fdiv float %0, %r, !dbg !31
store float %div, float* %arrayidx2, align 4, !dbg !31, !tbaa !26
%inc = add nsw i32 %i.09, 1, !dbg !30
br i1 %exitcond, label %for.end, label %for.body.for.body_crit_edge, !dbg !30
for.body.for.body_crit_edge: ; preds = %for.body
- %arrayidx2.phi.trans.insert = getelementptr inbounds float* %vla, i32 %inc
+ %arrayidx2.phi.trans.insert = getelementptr inbounds float, float* %vla, i32 %inc
%.pre = load float* %arrayidx2.phi.trans.insert, align 4, !dbg !31, !tbaa !26
br label %for.body, !dbg !30
; crash. Drop the dbg_value instead.
; CHECK-NOT: "matrix"
tail call void @llvm.dbg.declare(metadata %class.Matrix3.0.6.10* %agg.result, metadata !45, metadata !{!"0x102\006"})
- %2 = getelementptr inbounds %class.Matrix3.0.6.10* %agg.result, i32 0, i32 0, i32 8
+ %2 = getelementptr inbounds %class.Matrix3.0.6.10, %class.Matrix3.0.6.10* %agg.result, i32 0, i32 0, i32 8
ret void
}
declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
store volatile i32 0, i32* %retval
call void @llvm.dbg.declare(metadata [100 x i32]* %main_arr, metadata !17, metadata !{!"0x102"}), !dbg !22
call void @llvm.dbg.declare(metadata i32* %val, metadata !23, metadata !{!"0x102"}), !dbg !24
- %arraydecay = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !25
+ %arraydecay = getelementptr inbounds [100 x i32], [100 x i32]* %main_arr, i32 0, i32 0, !dbg !25
call void @populate_array(i32* %arraydecay, i32 100), !dbg !25
- %arraydecay1 = getelementptr inbounds [100 x i32]* %main_arr, i32 0, i32 0, !dbg !26
+ %arraydecay1 = getelementptr inbounds [100 x i32], [100 x i32]* %main_arr, i32 0, i32 0, !dbg !26
%call = call i32 @sum_array(i32* %arraydecay1, i32 100), !dbg !26
store i32 %call, i32* %val, align 4, !dbg !26
%0 = load i32* %val, align 4, !dbg !27
store i32 %x, i32* %x.addr, align 4
call void @llvm.dbg.declare(metadata i32* %x.addr, metadata !65, metadata !{!"0x102"}), !dbg !66
%this1 = load %struct.bar** %this.addr
- %b = getelementptr inbounds %struct.bar* %this1, i32 0, i32 0, !dbg !67
+ %b = getelementptr inbounds %struct.bar, %struct.bar* %this1, i32 0, i32 0, !dbg !67
%0 = load i32* %x.addr, align 4, !dbg !67
call void @_ZN3bazC1Ei(%struct.baz* %b, i32 %0), !dbg !67
- %1 = getelementptr inbounds %struct.bar* %this1, i32 0, i32 1, !dbg !67
- %b2 = getelementptr inbounds %struct.bar* %this1, i32 0, i32 0, !dbg !67
+ %1 = getelementptr inbounds %struct.bar, %struct.bar* %this1, i32 0, i32 1, !dbg !67
+ %b2 = getelementptr inbounds %struct.bar, %struct.bar* %this1, i32 0, i32 0, !dbg !67
store %struct.baz* %b2, %struct.baz** %1, align 8, !dbg !67
ret void, !dbg !68
}
store i32 %a, i32* %a.addr, align 4
call void @llvm.dbg.declare(metadata i32* %a.addr, metadata !77, metadata !{!"0x102"}), !dbg !78
%this1 = load %struct.baz** %this.addr
- %h = getelementptr inbounds %struct.baz* %this1, i32 0, i32 0, !dbg !79
+ %h = getelementptr inbounds %struct.baz, %struct.baz* %this1, i32 0, i32 0, !dbg !79
%0 = load i32* %a.addr, align 4, !dbg !79
store i32 %0, i32* %h, align 4, !dbg !79
ret void, !dbg !80
store %struct.A* %a, %struct.A** %a.addr, align 8
call void @llvm.dbg.declare(metadata %struct.A** %a.addr, metadata !16, metadata !{!"0x102"}), !dbg !17
%0 = load %struct.A** %a.addr, align 8, !dbg !18
- %b = getelementptr inbounds %struct.A* %0, i32 0, i32 0, !dbg !18
+ %b = getelementptr inbounds %struct.A, %struct.A* %0, i32 0, i32 0, !dbg !18
%1 = load i32* %b, align 4, !dbg !18
ret i32 %1, !dbg !18
}
call void @llvm.dbg.declare(metadata i32* %.addr, metadata !36, metadata !{!"0x102"}), !dbg !35
call void @llvm.dbg.declare(metadata %class.A* %a, metadata !21, metadata !{!"0x102"}), !dbg !23
call void @_ZN1AC1Ev(%class.A* %a), !dbg !24
- %m_a = getelementptr inbounds %class.A* %a, i32 0, i32 0, !dbg !25
+ %m_a = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0, !dbg !25
%1 = load i32* %m_a, align 4, !dbg !25
ret i32 %1, !dbg !25
}
store %class.A* %this, %class.A** %this.addr, align 8
call void @llvm.dbg.declare(metadata %class.A** %this.addr, metadata !30, metadata !{!"0x102"}), !dbg !31
%this1 = load %class.A** %this.addr
- %m_a = getelementptr inbounds %class.A* %this1, i32 0, i32 0, !dbg !32
+ %m_a = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 0, !dbg !32
store i32 0, i32* %m_a, align 4, !dbg !32
ret void, !dbg !34
}
entry:
call void @llvm.dbg.declare(metadata %struct.foo* %f, metadata !19, metadata !{!"0x102"}), !dbg !20
call void @llvm.dbg.declare(metadata %struct.foo* %g, metadata !21, metadata !{!"0x102"}), !dbg !20
- %i = getelementptr inbounds %struct.foo* %f, i32 0, i32 0, !dbg !22
+ %i = getelementptr inbounds %struct.foo, %struct.foo* %f, i32 0, i32 0, !dbg !22
%0 = load i32* %i, align 4, !dbg !22
%inc = add nsw i32 %0, 1, !dbg !22
store i32 %inc, i32* %i, align 4, !dbg !22
%1 = bitcast [4 x i32]* %array to i8*, !dbg !36
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* bitcast ([4 x i32]* @main.array to i8*), i64 16, i32 16, i1 false), !dbg !36
tail call void @llvm.dbg.value(metadata [4 x i32]* %array, i64 0, metadata !21, metadata !{!"0x102"}), !dbg !36
- %2 = getelementptr inbounds [4 x i32]* %array, i64 0, i64 0, !dbg !37
+ %2 = getelementptr inbounds [4 x i32], [4 x i32]* %array, i64 0, i64 0, !dbg !37
call void @f(i32* %2), !dbg !37
tail call void @llvm.dbg.value(metadata [4 x i32]* %array, i64 0, metadata !21, metadata !{!"0x102"}), !dbg !36
%3 = load i32* %2, align 16, !dbg !38, !tbaa !30
store i32* %p, i32** %p.addr, align 8
call void @llvm.dbg.declare(metadata i32** %p.addr, metadata !19, metadata !{!"0x102"}), !dbg !20
%0 = load i32** %p.addr, align 8, !dbg !21
- %arrayidx = getelementptr inbounds i32* %0, i64 0, !dbg !21
+ %arrayidx = getelementptr inbounds i32, i32* %0, i64 0, !dbg !21
store i32 42, i32* %arrayidx, align 4, !dbg !21
ret void, !dbg !22
}
call void @llvm.dbg.declare(metadata [4 x i32]* %array, metadata !26, metadata !{!"0x102"}), !dbg !30
%0 = bitcast [4 x i32]* %array to i8*, !dbg !30
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast ([4 x i32]* @main.array to i8*), i64 16, i32 16, i1 false), !dbg !30
- %arraydecay = getelementptr inbounds [4 x i32]* %array, i32 0, i32 0, !dbg !31
+ %arraydecay = getelementptr inbounds [4 x i32], [4 x i32]* %array, i32 0, i32 0, !dbg !31
call void @f(i32* %arraydecay), !dbg !31
- %arrayidx = getelementptr inbounds [4 x i32]* %array, i32 0, i64 0, !dbg !32
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %array, i32 0, i64 0, !dbg !32
%1 = load i32* %arrayidx, align 4, !dbg !32
ret i32 %1, !dbg !32
}
%block = bitcast i8* %.block_descriptor to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>*, !dbg !67
store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>* %block, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>** %block.addr, align 8
call void @llvm.dbg.declare(metadata <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>** %block.addr, metadata !68, metadata !69), !dbg !70
- %block.capture.addr = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>* %block, i32 0, i32 5, !dbg !71
+ %block.capture.addr = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, void (...)* }>* %block, i32 0, i32 5, !dbg !71
%1 = load void (...)** %block.capture.addr, align 8, !dbg !71
%block.literal = bitcast void (...)* %1 to %struct.__block_literal_generic*, !dbg !71
- %2 = getelementptr inbounds %struct.__block_literal_generic* %block.literal, i32 0, i32 3, !dbg !71
+ %2 = getelementptr inbounds %struct.__block_literal_generic, %struct.__block_literal_generic* %block.literal, i32 0, i32 3, !dbg !71
%3 = bitcast %struct.__block_literal_generic* %block.literal to i8*, !dbg !71
%4 = load i8** %2, !dbg !71
%5 = bitcast i8* %4 to void (i8*, ...)*, !dbg !71
store i32 %i, i32* %i.addr, align 4
call void @llvm.dbg.declare(metadata i32* %i.addr, metadata !30, metadata !{!"0x102"}), !dbg !31
%this1 = load %class.A** %this.addr
- %a = getelementptr inbounds %class.A* %this1, i32 0, i32 0, !dbg !31
+ %a = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 0, !dbg !31
%0 = load i32* %i.addr, align 4, !dbg !31
store i32 %0, i32* %a, align 4, !dbg !31
ret void, !dbg !31
%0 = alloca double ; <double*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
call void @llvm.dbg.declare(metadata %struct.Rect* %my_r0, metadata !0, metadata !{!"0x102"}), !dbg !15
- %1 = getelementptr inbounds %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
- %2 = getelementptr inbounds %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
+ %1 = getelementptr inbounds %struct.Rect, %struct.Rect* %my_r0, i32 0, i32 0, !dbg !16 ; <%struct.Pt*> [#uses=1]
+ %2 = getelementptr inbounds %struct.Pt, %struct.Pt* %1, i32 0, i32 0, !dbg !16 ; <double*> [#uses=1]
%3 = load double* %2, align 8, !dbg !16 ; <double> [#uses=1]
store double %3, double* %0, align 8, !dbg !16
%4 = load double* %0, align 8, !dbg !16 ; <double> [#uses=1]
store i1 false, i1* %nrvo, !dbg !36
call void @llvm.dbg.declare(metadata %class.A* %agg.result, metadata !37, metadata !{!"0x102"}), !dbg !39
%tmp2 = load i32* %j, align 4, !dbg !40
- %x = getelementptr inbounds %class.A* %agg.result, i32 0, i32 0, !dbg !40
+ %x = getelementptr inbounds %class.A, %class.A* %agg.result, i32 0, i32 0, !dbg !40
store i32 %tmp2, i32* %x, align 4, !dbg !40
store i1 true, i1* %nrvo, !dbg !41
store i32 1, i32* %cleanup.dest.slot
store %class.A* %this, %class.A** %this.addr, align 8
call void @llvm.dbg.declare(metadata %class.A** %this.addr, metadata !46, metadata !{!"0x102"}), !dbg !47
%this1 = load %class.A** %this.addr
- %x = getelementptr inbounds %class.A* %this1, i32 0, i32 0, !dbg !48
+ %x = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 0, !dbg !48
store i32 1, i32* %x, align 4, !dbg !48
ret void, !dbg !48
}
%tmp3 = add i32 0, %tmp2, !dbg !15
; CHECK: ##DEBUG_VALUE: idx <- E{{..$}}
call void @llvm.dbg.value(metadata i32 %tmp3, i64 0, metadata !13, metadata !{!"0x102"}), !dbg !15
- %arrayidx = getelementptr i32 addrspace(1)* %ip, i32 %1, !dbg !16
+ %arrayidx = getelementptr i32, i32 addrspace(1)* %ip, i32 %1, !dbg !16
store i32 %tmp3, i32 addrspace(1)* %arrayidx, align 4, !dbg !16
ret void, !dbg !17
}
entry:
tail call void @llvm.dbg.value(metadata %struct.S1* %sp, i64 0, metadata !9, metadata !{!"0x102"}), !dbg !20
tail call void @llvm.dbg.value(metadata i32 %nums, i64 0, metadata !18, metadata !{!"0x102"}), !dbg !21
- %tmp2 = getelementptr inbounds %struct.S1* %sp, i64 0, i32 1, !dbg !22
+ %tmp2 = getelementptr inbounds %struct.S1, %struct.S1* %sp, i64 0, i32 1, !dbg !22
store i32 %nums, i32* %tmp2, align 4, !dbg !22
%call = tail call float* @bar(i32 %nums) nounwind optsize, !dbg !27
- %tmp5 = getelementptr inbounds %struct.S1* %sp, i64 0, i32 0, !dbg !27
+ %tmp5 = getelementptr inbounds %struct.S1, %struct.S1* %sp, i64 0, i32 0, !dbg !27
store float* %call, float** %tmp5, align 8, !dbg !27
%cmp = icmp ne float* %call, null, !dbg !29
%cond = zext i1 %cmp to i32, !dbg !29
define i32 @bar(%struct.a* nocapture %b) nounwind ssp {
entry:
tail call void @llvm.dbg.value(metadata %struct.a* %b, i64 0, metadata !6, metadata !{!"0x102"}), !dbg !13
- %tmp1 = getelementptr inbounds %struct.a* %b, i64 0, i32 0, !dbg !14
+ %tmp1 = getelementptr inbounds %struct.a, %struct.a* %b, i64 0, i32 0, !dbg !14
%tmp2 = load i32* %tmp1, align 4, !dbg !14
tail call void @llvm.dbg.value(metadata i32 %tmp2, i64 0, metadata !11, metadata !{!"0x102"}), !dbg !14
%call = tail call i32 (...)* @foo(i32 %tmp2) nounwind , !dbg !18
declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
define internal void @"__24-[Main initWithContext:]_block_invoke"(i8* %.block_descriptor, i8* %obj) #0 {
%block = bitcast i8* %.block_descriptor to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>*, !dbg !84
- %block.captured-self = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %block, i32 0, i32 5, !dbg !84
+ %block.captured-self = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %block, i32 0, i32 5, !dbg !84
call void @llvm.dbg.declare(metadata <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %block, metadata !86, metadata !110), !dbg !87
ret void, !dbg !87
}
define internal void @"__24-[Main initWithContext:]_block_invoke_2"(i8* %.block_descriptor, i8* %object) #0 {
%block = bitcast i8* %.block_descriptor to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>*, !dbg !103
- %block.captured-self = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %block, i32 0, i32 5, !dbg !103
+ %block.captured-self = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %block, i32 0, i32 5, !dbg !103
call void @llvm.dbg.declare(metadata <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %block, metadata !105, metadata !109), !dbg !106
ret void, !dbg !106
}
call void @llvm.dbg.declare(metadata i8** %2, metadata !63, metadata !{!"0x102"}), !dbg !62
%5 = load %0** %1, !dbg !65
%6 = bitcast %0* %5 to i8*, !dbg !65
- %7 = getelementptr inbounds %struct._objc_super* %3, i32 0, i32 0, !dbg !65
+ %7 = getelementptr inbounds %struct._objc_super, %struct._objc_super* %3, i32 0, i32 0, !dbg !65
store i8* %6, i8** %7, !dbg !65
%8 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_SUP_REFS_$_", !dbg !65
%9 = bitcast %struct._class_t* %8 to i8*, !dbg !65
- %10 = getelementptr inbounds %struct._objc_super* %3, i32 0, i32 1, !dbg !65
+ %10 = getelementptr inbounds %struct._objc_super, %struct._objc_super* %3, i32 0, i32 1, !dbg !65
store i8* %9, i8** %10, !dbg !65
%11 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_", !dbg !65, !invariant.load !67
%12 = call i8* bitcast (i8* (%struct._objc_super*, i8*, ...)* @objc_msgSendSuper2 to i8* (%struct._objc_super*, i8*)*)(%struct._objc_super* %3, i8* %11), !dbg !65
br i1 %14, label %15, label %24, !dbg !65
; <label>:15 ; preds = %0
- %16 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 0, !dbg !68
+ %16 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 0, !dbg !68
store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %16, !dbg !68
- %17 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 1, !dbg !68
+ %17 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 1, !dbg !68
store i32 -1040187392, i32* %17, !dbg !68
- %18 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 2, !dbg !68
+ %18 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 2, !dbg !68
store i32 0, i32* %18, !dbg !68
- %19 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 3, !dbg !68
+ %19 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 3, !dbg !68
store i8* bitcast (void (i8*)* @"__9-[A init]_block_invoke" to i8*), i8** %19, !dbg !68
- %20 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 4, !dbg !68
+ %20 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 4, !dbg !68
store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8*, i8*, i64 }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %20, !dbg !68
- %21 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 5, !dbg !68
+ %21 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 5, !dbg !68
%22 = load %0** %1, align 8, !dbg !68
store %0* %22, %0** %21, align 8, !dbg !68
%23 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4 to void ()*, !dbg !68
call void @llvm.dbg.declare(metadata void ()** %1, metadata !72, metadata !{!"0x102"}), !dbg !73
%2 = load void ()** %1, align 8, !dbg !74
%3 = bitcast void ()* %2 to %struct.__block_literal_generic*, !dbg !74
- %4 = getelementptr inbounds %struct.__block_literal_generic* %3, i32 0, i32 3, !dbg !74
+ %4 = getelementptr inbounds %struct.__block_literal_generic, %struct.__block_literal_generic* %3, i32 0, i32 3, !dbg !74
%5 = bitcast %struct.__block_literal_generic* %3 to i8*, !dbg !74
%6 = load i8** %4, !dbg !74
%7 = bitcast i8* %6 to void (i8*)*, !dbg !74
call void @llvm.dbg.declare(metadata i8* %.block_descriptor, metadata !76, metadata !{!"0x102"}), !dbg !88
%4 = bitcast i8* %.block_descriptor to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>*, !dbg !88
store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>** %2, align 8, !dbg !88
- %5 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 5, !dbg !88
+ %5 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 5, !dbg !88
call void @llvm.dbg.declare(metadata <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>** %2, metadata !89, metadata !111), !dbg !90
call void @llvm.dbg.declare(metadata %1** %d, metadata !91, metadata !{!"0x102"}), !dbg !100
%6 = load %struct._class_t** @"\01L_OBJC_CLASSLIST_REFERENCES_$_", !dbg !100
%22 = load %0** %5, align 8, !dbg !101
%23 = load i64* @"OBJC_IVAR_$_A.ivar", !dbg !101, !invariant.load !67
%24 = bitcast %0* %22 to i8*, !dbg !101
- %25 = getelementptr inbounds i8* %24, i64 %23, !dbg !101
+ %25 = getelementptr inbounds i8, i8* %24, i64 %23, !dbg !101
%26 = bitcast i8* %25 to i32*, !dbg !101
store i32 %21, i32* %26, align 4, !dbg !101
ret void, !dbg !90
%6 = bitcast i8* %5 to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>*, !dbg !103
%7 = load i8** %3, !dbg !103
%8 = bitcast i8* %7 to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>*, !dbg !103
- %9 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %6, i32 0, i32 5, !dbg !103
- %10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %8, i32 0, i32 5, !dbg !103
+ %9 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %6, i32 0, i32 5, !dbg !103
+ %10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %8, i32 0, i32 5, !dbg !103
%11 = load %0** %9, !dbg !103
%12 = bitcast %0* %11 to i8*, !dbg !103
%13 = bitcast %0** %10 to i8*, !dbg !103
call void @llvm.dbg.declare(metadata i8** %2, metadata !105, metadata !{!"0x102"}), !dbg !106
%3 = load i8** %2, !dbg !106
%4 = bitcast i8* %3 to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>*, !dbg !106
- %5 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 5, !dbg !106
+ %5 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0* }>* %4, i32 0, i32 5, !dbg !106
%6 = load %0** %5, !dbg !106
%7 = bitcast %0* %6 to i8*, !dbg !106
call void @_Block_object_dispose(i8* %7, i32 3) #3, !dbg !106
%instance_C = alloca %class.C, align 4
store i32 0, i32* %retval
call void @llvm.dbg.declare(metadata %class.C* %instance_C, metadata !29, metadata !{!"0x102"}), !dbg !30
- %d = getelementptr inbounds %class.C* %instance_C, i32 0, i32 0, !dbg !31
+ %d = getelementptr inbounds %class.C, %class.C* %instance_C, i32 0, i32 0, !dbg !31
store i32 8, i32* %d, align 4, !dbg !31
%0 = load i32* @_ZN1C1cE, align 4, !dbg !32
ret i32 %0, !dbg !32
call void @llvm.dbg.declare(metadata %struct.A* %a, metadata !24, metadata !{!"0x102\006"}), !dbg !25
call void @llvm.dbg.declare(metadata i32* %z, metadata !26, metadata !{!"0x102"}), !dbg !27
store i32 2, i32* %z, align 4, !dbg !27
- %var = getelementptr inbounds %struct.A* %a, i32 0, i32 1, !dbg !28
+ %var = getelementptr inbounds %struct.A, %struct.A* %a, i32 0, i32 1, !dbg !28
%0 = load i32* %var, align 4, !dbg !28
%cmp = icmp sgt i32 %0, 2, !dbg !28
br i1 %cmp, label %if.then, label %if.end, !dbg !28
store %struct.foo* %this, %struct.foo** %this.addr, align 8
call void @llvm.dbg.declare(metadata %struct.foo** %this.addr, metadata !34, metadata !36), !dbg !37
%this1 = load %struct.foo** %this.addr
- %b = getelementptr inbounds %struct.foo* %this1, i32 0, i32 0, !dbg !38
+ %b = getelementptr inbounds %struct.foo, %struct.foo* %this1, i32 0, i32 0, !dbg !38
call void @_ZN4baseC2Ev(%struct.base* %b) #2, !dbg !38
ret void, !dbg !38
}
store %struct.foo* %this, %struct.foo** %this.addr, align 8
call void @llvm.dbg.declare(metadata %struct.foo** %this.addr, metadata !39, metadata !36), !dbg !40
%this1 = load %struct.foo** %this.addr
- %b = getelementptr inbounds %struct.foo* %this1, i32 0, i32 0, !dbg !41
+ %b = getelementptr inbounds %struct.foo, %struct.foo* %this1, i32 0, i32 0, !dbg !41
call void @_ZN4baseD1Ev(%struct.base* %b), !dbg !41
ret void, !dbg !43
}
br i1 undef, label %bb18, label %bb31.preheader
bb31.preheader: ; preds = %bb19, %bb
- %tmp2 = getelementptr inbounds i8* %fname, i32 0
+ %tmp2 = getelementptr inbounds i8, i8* %fname, i32 0
br label %bb31
bb18: ; preds = %bb
define void @_ZN1DC2Ev(%class.D* nocapture %this) unnamed_addr nounwind uwtable align 2 {
entry:
tail call void @llvm.dbg.value(metadata %class.D* %this, i64 0, metadata !29, metadata !{!"0x102"}), !dbg !36
- %c1 = getelementptr inbounds %class.D* %this, i64 0, i32 0, !dbg !37
+ %c1 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 0, !dbg !37
store i32 1, i32* %c1, align 4, !dbg !37
- %c2 = getelementptr inbounds %class.D* %this, i64 0, i32 1, !dbg !42
+ %c2 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 1, !dbg !42
store i32 2, i32* %c2, align 4, !dbg !42
- %c3 = getelementptr inbounds %class.D* %this, i64 0, i32 2, !dbg !43
+ %c3 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 2, !dbg !43
store i32 3, i32* %c3, align 4, !dbg !43
- %c4 = getelementptr inbounds %class.D* %this, i64 0, i32 3, !dbg !44
+ %c4 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 3, !dbg !44
store i32 4, i32* %c4, align 4, !dbg !44
ret void, !dbg !45
}
entry:
tail call void @llvm.dbg.value(metadata %class.D* %this, i64 0, metadata !34, metadata !{!"0x102"}), !dbg !46
tail call void @llvm.dbg.value(metadata %class.D* %d, i64 0, metadata !35, metadata !{!"0x102"}), !dbg !46
- %c1 = getelementptr inbounds %class.D* %d, i64 0, i32 0, !dbg !47
+ %c1 = getelementptr inbounds %class.D, %class.D* %d, i64 0, i32 0, !dbg !47
%0 = load i32* %c1, align 4, !dbg !47
- %c12 = getelementptr inbounds %class.D* %this, i64 0, i32 0, !dbg !47
+ %c12 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 0, !dbg !47
store i32 %0, i32* %c12, align 4, !dbg !47
- %c2 = getelementptr inbounds %class.D* %d, i64 0, i32 1, !dbg !49
+ %c2 = getelementptr inbounds %class.D, %class.D* %d, i64 0, i32 1, !dbg !49
%1 = load i32* %c2, align 4, !dbg !49
- %c23 = getelementptr inbounds %class.D* %this, i64 0, i32 1, !dbg !49
+ %c23 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 1, !dbg !49
store i32 %1, i32* %c23, align 4, !dbg !49
- %c3 = getelementptr inbounds %class.D* %d, i64 0, i32 2, !dbg !50
+ %c3 = getelementptr inbounds %class.D, %class.D* %d, i64 0, i32 2, !dbg !50
%2 = load i32* %c3, align 4, !dbg !50
- %c34 = getelementptr inbounds %class.D* %this, i64 0, i32 2, !dbg !50
+ %c34 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 2, !dbg !50
store i32 %2, i32* %c34, align 4, !dbg !50
- %c4 = getelementptr inbounds %class.D* %d, i64 0, i32 3, !dbg !51
+ %c4 = getelementptr inbounds %class.D, %class.D* %d, i64 0, i32 3, !dbg !51
%3 = load i32* %c4, align 4, !dbg !51
- %c45 = getelementptr inbounds %class.D* %this, i64 0, i32 3, !dbg !51
+ %c45 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 3, !dbg !51
store i32 %3, i32* %c45, align 4, !dbg !51
ret void, !dbg !52
}
%my_bar = alloca %struct.bar, align 4
call void @llvm.dbg.declare(metadata %struct.foo* %my_foo, metadata !10, metadata !{!"0x102"}), !dbg !19
call void @llvm.dbg.declare(metadata %struct.bar* %my_bar, metadata !20, metadata !{!"0x102"}), !dbg !28
- %a = getelementptr inbounds %struct.foo* %my_foo, i32 0, i32 0, !dbg !29
+ %a = getelementptr inbounds %struct.foo, %struct.foo* %my_foo, i32 0, i32 0, !dbg !29
store i32 3, i32* %a, align 4, !dbg !29
- %a1 = getelementptr inbounds %struct.bar* %my_bar, i32 0, i32 0, !dbg !30
+ %a1 = getelementptr inbounds %struct.bar, %struct.bar* %my_bar, i32 0, i32 0, !dbg !30
store i32 5, i32* %a1, align 4, !dbg !30
- %a2 = getelementptr inbounds %struct.foo* %my_foo, i32 0, i32 0, !dbg !31
+ %a2 = getelementptr inbounds %struct.foo, %struct.foo* %my_foo, i32 0, i32 0, !dbg !31
%0 = load i32* %a2, align 4, !dbg !31
- %a3 = getelementptr inbounds %struct.bar* %my_bar, i32 0, i32 0, !dbg !31
+ %a3 = getelementptr inbounds %struct.bar, %struct.bar* %my_bar, i32 0, i32 0, !dbg !31
%1 = load i32* %a3, align 4, !dbg !31
%add = add nsw i32 %0, %1, !dbg !31
ret i32 %add, !dbg !31
store %struct.i14* null, %struct.i14** %p, align 8, !dbg !18
%1 = call i32 @foo(%struct.i14** %p), !dbg !19
%2 = load %struct.i14** %p, align 8, !dbg !20
- %3 = getelementptr inbounds %struct.i14* %2, i32 0, i32 0, !dbg !20
+ %3 = getelementptr inbounds %struct.i14, %struct.i14* %2, i32 0, i32 0, !dbg !20
%4 = load i64* %3, align 8, !dbg !20
%5 = or i64 %4, 4, !dbg !20
store i64 %5, i64* %3, align 8, !dbg !20
%add = add i32 %IntParI1, 5, !dbg !68
tail call void @llvm.dbg.value(metadata i32 %add, i64 0, metadata !27, metadata !{!"0x102"}), !dbg !68
%idxprom = sext i32 %add to i64, !dbg !69
- %arrayidx = getelementptr inbounds i32* %Array1Par, i64 %idxprom, !dbg !69
+ %arrayidx = getelementptr inbounds i32, i32* %Array1Par, i64 %idxprom, !dbg !69
store i32 %IntParI2, i32* %arrayidx, align 4, !dbg !69
%add3 = add nsw i32 %IntParI1, 6, !dbg !73
%idxprom4 = sext i32 %add3 to i64, !dbg !73
- %arrayidx5 = getelementptr inbounds i32* %Array1Par, i64 %idxprom4, !dbg !73
+ %arrayidx5 = getelementptr inbounds i32, i32* %Array1Par, i64 %idxprom4, !dbg !73
store i32 %IntParI2, i32* %arrayidx5, align 4, !dbg !73
%add6 = add nsw i32 %IntParI1, 35, !dbg !74
%idxprom7 = sext i32 %add6 to i64, !dbg !74
- %arrayidx8 = getelementptr inbounds i32* %Array1Par, i64 %idxprom7, !dbg !74
+ %arrayidx8 = getelementptr inbounds i32, i32* %Array1Par, i64 %idxprom7, !dbg !74
store i32 %add, i32* %arrayidx8, align 4, !dbg !74
tail call void @llvm.dbg.value(metadata i32 %add, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !75
br label %for.body, !dbg !75
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %idxprom, %entry ], [ %indvars.iv.next, %for.body ]
%IntIndex.046 = phi i32 [ %add, %entry ], [ %inc, %for.body ]
- %arrayidx13 = getelementptr inbounds [51 x i32]* %Array2Par, i64 %idxprom, i64 %indvars.iv, !dbg !77
+ %arrayidx13 = getelementptr inbounds [51 x i32], [51 x i32]* %Array2Par, i64 %idxprom, i64 %indvars.iv, !dbg !77
store i32 %add, i32* %arrayidx13, align 4, !dbg !77
%inc = add nsw i32 %IntIndex.046, 1, !dbg !75
tail call void @llvm.dbg.value(metadata i32 %inc, i64 0, metadata !28, metadata !{!"0x102"}), !dbg !75
for.end: ; preds = %for.body
%sub = add nsw i32 %IntParI1, 4, !dbg !78
%idxprom14 = sext i32 %sub to i64, !dbg !78
- %arrayidx17 = getelementptr inbounds [51 x i32]* %Array2Par, i64 %idxprom, i64 %idxprom14, !dbg !78
+ %arrayidx17 = getelementptr inbounds [51 x i32], [51 x i32]* %Array2Par, i64 %idxprom, i64 %idxprom14, !dbg !78
%0 = load i32* %arrayidx17, align 4, !dbg !78
%inc18 = add nsw i32 %0, 1, !dbg !78
store i32 %inc18, i32* %arrayidx17, align 4, !dbg !78
%1 = load i32* %arrayidx, align 4, !dbg !79
%add22 = add nsw i32 %IntParI1, 25, !dbg !79
%idxprom23 = sext i32 %add22 to i64, !dbg !79
- %arrayidx25 = getelementptr inbounds [51 x i32]* %Array2Par, i64 %idxprom23, i64 %idxprom, !dbg !79
+ %arrayidx25 = getelementptr inbounds [51 x i32], [51 x i32]* %Array2Par, i64 %idxprom23, i64 %idxprom, !dbg !79
store i32 %1, i32* %arrayidx25, align 4, !dbg !79
store i32 5, i32* @IntGlob, align 4, !dbg !80
ret void, !dbg !81
%str2.i = alloca %struct.string, align 4
%0 = bitcast %struct.string* %str2.i to i8*, !dbg !26
%1 = load %struct.string** @str, align 4
- %mem = getelementptr inbounds %struct.string* %1, i32 0, i32 0
+ %mem = getelementptr inbounds %struct.string, %struct.string* %1, i32 0, i32 0
br label %for.body
for.body: ; preds = %for.body, %entry
%mul = mul nsw i32 %5, %6, !dbg !23
%7 = load i32* %i, align 4, !dbg !23
%idxprom = sext i32 %7 to i64, !dbg !23
- %arrayidx = getelementptr inbounds i32* %vla, i64 %idxprom, !dbg !23
+ %arrayidx = getelementptr inbounds i32, i32* %vla, i64 %idxprom, !dbg !23
store i32 %mul, i32* %arrayidx, align 4, !dbg !23
br label %for.inc, !dbg !25
define i32 @foo(%struct.Outer* byval align 8 %outer) #0 {
entry:
call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !25, metadata !{!"0x102"}), !dbg !26
- %i1.sroa.0.0..sroa_idx = getelementptr inbounds %struct.Outer* %outer, i64 0, i32 0, i64 1, i32 0, !dbg !27
+ %i1.sroa.0.0..sroa_idx = getelementptr inbounds %struct.Outer, %struct.Outer* %outer, i64 0, i32 0, i64 1, i32 0, !dbg !27
%i1.sroa.0.0.copyload = load i32* %i1.sroa.0.0..sroa_idx, align 8, !dbg !27
call void @llvm.dbg.value(metadata i32 %i1.sroa.0.0.copyload, i64 0, metadata !28, metadata !29), !dbg !27
%i1.sroa.2.0..sroa_raw_cast = bitcast %struct.Outer* %outer to i8*, !dbg !27
- %i1.sroa.2.0..sroa_raw_idx = getelementptr inbounds i8* %i1.sroa.2.0..sroa_raw_cast, i64 20, !dbg !27
+ %i1.sroa.2.0..sroa_raw_idx = getelementptr inbounds i8, i8* %i1.sroa.2.0..sroa_raw_cast, i64 20, !dbg !27
ret i32 %i1.sroa.0.0.copyload, !dbg !32
}
store %class.BPLFunctionWriter* %this, %class.BPLFunctionWriter** %this.addr, align 8
call void @llvm.dbg.declare(metadata %class.BPLFunctionWriter** %this.addr, metadata !133, metadata !{!"0x102"}), !dbg !135
%this1 = load %class.BPLFunctionWriter** %this.addr
- %MW = getelementptr inbounds %class.BPLFunctionWriter* %this1, i32 0, i32 0, !dbg !136
+ %MW = getelementptr inbounds %class.BPLFunctionWriter, %class.BPLFunctionWriter* %this1, i32 0, i32 0, !dbg !136
%0 = load %struct.BPLModuleWriter** %MW, align 8, !dbg !136
call void @"_ZN8functionIFvvEEC1IZN17BPLFunctionWriter9writeExprEvE3$_0EET_"(%class.function* %agg.tmp), !dbg !136
call void @_ZN15BPLModuleWriter14writeIntrinsicE8functionIFvvEE(%struct.BPLModuleWriter* %0), !dbg !136
- %MW3 = getelementptr inbounds %class.BPLFunctionWriter* %this1, i32 0, i32 0, !dbg !138
+ %MW3 = getelementptr inbounds %class.BPLFunctionWriter, %class.BPLFunctionWriter* %this1, i32 0, i32 0, !dbg !138
%1 = load %struct.BPLModuleWriter** %MW3, align 8, !dbg !138
call void @"_ZN8functionIFvvEEC1IZN17BPLFunctionWriter9writeExprEvE3$_1_0EET_"(%class.function* %agg.tmp4), !dbg !138
call void @_ZN15BPLModuleWriter14writeIntrinsicE8functionIFvvEE(%struct.BPLModuleWriter* %1), !dbg !138
%0 = load %struct.C** @x, align 8, !dbg !32, !tbaa !33
tail call void @llvm.dbg.value(metadata %struct.C* %0, i64 0, metadata !37, metadata !{!"0x102"}) #3, !dbg !38
tail call void @_Z3fn8v() #3, !dbg !39
- %b.i = getelementptr inbounds %struct.C* %0, i64 0, i32 0, !dbg !40
+ %b.i = getelementptr inbounds %struct.C, %struct.C* %0, i64 0, i32 0, !dbg !40
%1 = load i32* %b.i, align 4, !dbg !40, !tbaa !42
%tobool.i = icmp eq i32 %1, 0, !dbg !40
br i1 %tobool.i, label %_ZN1C5m_fn2Ev.exit, label %if.then.i, !dbg !40
entry:
tail call void @llvm.dbg.value(metadata %struct.C* %this, i64 0, metadata !24, metadata !{!"0x102"}), !dbg !49
tail call void @_Z3fn8v() #3, !dbg !50
- %b = getelementptr inbounds %struct.C* %this, i64 0, i32 0, !dbg !51
+ %b = getelementptr inbounds %struct.C, %struct.C* %this, i64 0, i32 0, !dbg !51
%0 = load i32* %b, align 4, !dbg !51, !tbaa !42
%tobool = icmp eq i32 %0, 0, !dbg !51
br i1 %tobool, label %if.end, label %if.then, !dbg !51
%1 = load %struct.C** @x, align 8, !dbg !56, !tbaa !33
tail call void @llvm.dbg.value(metadata %struct.C* %1, i64 0, metadata !57, metadata !{!"0x102"}) #3, !dbg !58
tail call void @_Z3fn8v() #3, !dbg !59
- %b.i.i = getelementptr inbounds %struct.C* %1, i64 0, i32 0, !dbg !60
+ %b.i.i = getelementptr inbounds %struct.C, %struct.C* %1, i64 0, i32 0, !dbg !60
%2 = load i32* %b.i.i, align 4, !dbg !60, !tbaa !42
%tobool.i.i = icmp eq i32 %2, 0, !dbg !60
br i1 %tobool.i.i, label %_Z3fn6v.exit, label %if.then.i.i, !dbg !60
%0 = load %struct.C** @x, align 8, !dbg !66, !tbaa !33
tail call void @llvm.dbg.value(metadata %struct.C* %0, i64 0, metadata !67, metadata !{!"0x102"}) #3, !dbg !68
tail call void @_Z3fn8v() #3, !dbg !69
- %b.i.i = getelementptr inbounds %struct.C* %0, i64 0, i32 0, !dbg !70
+ %b.i.i = getelementptr inbounds %struct.C, %struct.C* %0, i64 0, i32 0, !dbg !70
%1 = load i32* %b.i.i, align 4, !dbg !70, !tbaa !42
%tobool.i.i = icmp eq i32 %1, 0, !dbg !70
br i1 %tobool.i.i, label %tailrecurse.backedge, label %if.then.i.i, !dbg !70
%this1 = load %class.A** %this.addr
%0 = bitcast %class.A* %this1 to i8***, !dbg !72
store i8** getelementptr inbounds ([4 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %0, !dbg !72
- %m_int = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !72
+ %m_int = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 1, !dbg !72
%1 = load i32* %i.addr, align 4, !dbg !72
store i32 %1, i32* %m_int, align 4, !dbg !72
ret void, !dbg !73
%this1 = load %class.A** %this.addr
%0 = bitcast %class.A* %this1 to i8***, !dbg !78
store i8** getelementptr inbounds ([4 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %0, !dbg !78
- %m_int = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !78
+ %m_int = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 1, !dbg !78
%1 = load %class.A** %rhs.addr, align 8, !dbg !78
- %m_int2 = getelementptr inbounds %class.A* %1, i32 0, i32 1, !dbg !78
+ %m_int2 = getelementptr inbounds %class.A, %class.A* %1, i32 0, i32 1, !dbg !78
%2 = load i32* %m_int2, align 4, !dbg !78
store i32 %2, i32* %m_int, align 4, !dbg !78
ret void, !dbg !79
call void @llvm.dbg.declare(metadata %class.A** %rhs.addr, metadata !82, metadata !{!"0x102"}), !dbg !83
%this1 = load %class.A** %this.addr
%0 = load %class.A** %rhs.addr, align 8, !dbg !84
- %m_int = getelementptr inbounds %class.A* %0, i32 0, i32 1, !dbg !84
+ %m_int = getelementptr inbounds %class.A, %class.A* %0, i32 0, i32 1, !dbg !84
%1 = load i32* %m_int, align 4, !dbg !84
- %m_int2 = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !84
+ %m_int2 = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 1, !dbg !84
store i32 %1, i32* %m_int2, align 4, !dbg !84
ret %class.A* %this1, !dbg !85
}
store %class.A* %this, %class.A** %this.addr, align 8
call void @llvm.dbg.declare(metadata %class.A** %this.addr, metadata !86, metadata !{!"0x102"}), !dbg !87
%this1 = load %class.A** %this.addr
- %m_int = getelementptr inbounds %class.A* %this1, i32 0, i32 1, !dbg !88
+ %m_int = getelementptr inbounds %class.A, %class.A* %this1, i32 0, i32 1, !dbg !88
%0 = load i32* %m_int, align 4, !dbg !88
ret i32 %0, !dbg !88
}
%i1 = alloca %struct.Inner, align 8
call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !25, metadata !2), !dbg !26
call void @llvm.dbg.declare(metadata %struct.Inner* %i1, metadata !27, metadata !2), !dbg !28
- %inner = getelementptr inbounds %struct.Outer* %outer, i32 0, i32 0, !dbg !28
- %arrayidx = getelementptr inbounds [2 x %struct.Inner]* %inner, i32 0, i64 1, !dbg !28
+ %inner = getelementptr inbounds %struct.Outer, %struct.Outer* %outer, i32 0, i32 0, !dbg !28
+ %arrayidx = getelementptr inbounds [2 x %struct.Inner], [2 x %struct.Inner]* %inner, i32 0, i64 1, !dbg !28
%0 = bitcast %struct.Inner* %i1 to i8*, !dbg !28
%1 = bitcast %struct.Inner* %arrayidx to i8*, !dbg !28
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 16, i32 8, i1 false), !dbg !28
- %a = getelementptr inbounds %struct.Inner* %i1, i32 0, i32 0, !dbg !29
+ %a = getelementptr inbounds %struct.Inner, %struct.Inner* %i1, i32 0, i32 0, !dbg !29
%2 = load i32* %a, align 4, !dbg !29
ret i32 %2, !dbg !29
}
%outer = alloca %struct.Outer, align 8
%i1 = alloca %struct.Inner, align 4
%1 = bitcast %struct.Outer* %outer to { i64, i64 }*
- %2 = getelementptr { i64, i64 }* %1, i32 0, i32 0
+ %2 = getelementptr { i64, i64 }, { i64, i64 }* %1, i32 0, i32 0
store i64 %outer.coerce0, i64* %2
- %3 = getelementptr { i64, i64 }* %1, i32 0, i32 1
+ %3 = getelementptr { i64, i64 }, { i64, i64 }* %1, i32 0, i32 1
store i64 %outer.coerce1, i64* %3
call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !24, metadata !2), !dbg !25
call void @llvm.dbg.declare(metadata %struct.Inner* %i1, metadata !26, metadata !2), !dbg !27
- %4 = getelementptr inbounds %struct.Outer* %outer, i32 0, i32 0, !dbg !27
- %5 = getelementptr inbounds [2 x %struct.Inner]* %4, i32 0, i64 1, !dbg !27
+ %4 = getelementptr inbounds %struct.Outer, %struct.Outer* %outer, i32 0, i32 0, !dbg !27
+ %5 = getelementptr inbounds [2 x %struct.Inner], [2 x %struct.Inner]* %4, i32 0, i64 1, !dbg !27
%6 = bitcast %struct.Inner* %i1 to i8*, !dbg !27
%7 = bitcast %struct.Inner* %5 to i8*, !dbg !27
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* %7, i64 8, i32 4, i1 false), !dbg !27
- %8 = getelementptr inbounds %struct.Inner* %i1, i32 0, i32 0, !dbg !28
+ %8 = getelementptr inbounds %struct.Inner, %struct.Inner* %i1, i32 0, i32 0, !dbg !28
%9 = load i32* %8, align 4, !dbg !28
ret i32 %9, !dbg !28
}
define float @foo(float %s.coerce) #0 {
entry:
%s = alloca %struct.S, align 4
- %coerce.dive = getelementptr %struct.S* %s, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0
store float %s.coerce, float* %coerce.dive, align 1
call void @llvm.dbg.declare(metadata %struct.S* %s, metadata !16, metadata !17), !dbg !18
- %f = getelementptr inbounds %struct.S* %s, i32 0, i32 0, !dbg !19
+ %f = getelementptr inbounds %struct.S, %struct.S* %s, i32 0, i32 0, !dbg !19
%0 = load float* %f, align 4, !dbg !19
ret float %0, !dbg !19
}
if.end: ; preds = %entry
call void @llvm.dbg.declare(metadata %struct.p* %y, metadata !28, metadata !29), !dbg !30
- %s = getelementptr inbounds %struct.p* %y, i32 0, i32 0, !dbg !30
+ %s = getelementptr inbounds %struct.p, %struct.p* %y, i32 0, i32 0, !dbg !30
%0 = load i64* @t, align 8, !dbg !30
store i64 %0, i64* %s, align 8, !dbg !30
- %t = getelementptr inbounds %struct.p* %y, i32 0, i32 1, !dbg !30
+ %t = getelementptr inbounds %struct.p, %struct.p* %y, i32 0, i32 1, !dbg !30
%1 = load i64* @t, align 8, !dbg !30
store i64 %1, i64* %t, align 8, !dbg !30
call void @llvm.dbg.declare(metadata %struct.r* %r, metadata !31, metadata !29), !dbg !32
- %i = getelementptr inbounds %struct.r* %r, i32 0, i32 0, !dbg !32
+ %i = getelementptr inbounds %struct.r, %struct.r* %r, i32 0, i32 0, !dbg !32
store i32 0, i32* %i, align 4, !dbg !32
- %x = getelementptr inbounds %struct.r* %r, i32 0, i32 1, !dbg !32
- %s1 = getelementptr inbounds %struct.p* %x, i32 0, i32 0, !dbg !32
+ %x = getelementptr inbounds %struct.r, %struct.r* %r, i32 0, i32 1, !dbg !32
+ %s1 = getelementptr inbounds %struct.p, %struct.p* %x, i32 0, i32 0, !dbg !32
store i64 0, i64* %s1, align 8, !dbg !32
- %t2 = getelementptr inbounds %struct.p* %x, i32 0, i32 1, !dbg !32
+ %t2 = getelementptr inbounds %struct.p, %struct.p* %x, i32 0, i32 1, !dbg !32
store i64 0, i64* %t2, align 8, !dbg !32
- %y3 = getelementptr inbounds %struct.r* %r, i32 0, i32 2, !dbg !32
+ %y3 = getelementptr inbounds %struct.r, %struct.r* %r, i32 0, i32 2, !dbg !32
%2 = bitcast %struct.p* %y3 to i8*, !dbg !32
%3 = bitcast %struct.p* %y to i8*, !dbg !32
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 16, i32 8, i1 false), !dbg !32
define void @doSomething(%struct.bar* nocapture readonly %b) #0 {
entry:
tail call void @llvm.dbg.value(metadata %struct.bar* %b, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !25
- %a1 = getelementptr inbounds %struct.bar* %b, i64 0, i32 0, !dbg !26
+ %a1 = getelementptr inbounds %struct.bar, %struct.bar* %b, i64 0, i32 0, !dbg !26
%0 = load i32* %a1, align 4, !dbg !26, !tbaa !27
tail call void @llvm.dbg.value(metadata i32 %0, i64 0, metadata !16, metadata !{!"0x102"}), !dbg !26
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %0) #4, !dbg !32
store i8* %2, i8** %saved_stack, !dbg !17
%vla = alloca i32, i64 %1, align 16, !dbg !17
call void @llvm.dbg.declare(metadata i32* %vla, metadata !18, metadata !{!"0x102\006"}), !dbg !17
- %arrayidx = getelementptr inbounds i32* %vla, i64 0, !dbg !22
+ %arrayidx = getelementptr inbounds i32, i32* %vla, i64 0, !dbg !22
store i32 42, i32* %arrayidx, align 4, !dbg !22
%3 = load i32* %n.addr, align 4, !dbg !23
%sub = sub nsw i32 %3, 1, !dbg !23
%idxprom = sext i32 %sub to i64, !dbg !23
- %arrayidx1 = getelementptr inbounds i32* %vla, i64 %idxprom, !dbg !23
+ %arrayidx1 = getelementptr inbounds i32, i32* %vla, i64 %idxprom, !dbg !23
%4 = load i32* %arrayidx1, align 4, !dbg !23
store i32 1, i32* %cleanup.dest.slot
%5 = load i8** %saved_stack, !dbg !24
entry:
%x = alloca %struct.__block_byref_x, align 8
call void @llvm.dbg.declare(metadata %struct.__block_byref_x* %x, metadata !12, metadata !22), !dbg !23
- %byref.isa = getelementptr inbounds %struct.__block_byref_x* %x, i32 0, i32 0, !dbg !24
+ %byref.isa = getelementptr inbounds %struct.__block_byref_x, %struct.__block_byref_x* %x, i32 0, i32 0, !dbg !24
store i8* null, i8** %byref.isa, !dbg !24
- %byref.forwarding = getelementptr inbounds %struct.__block_byref_x* %x, i32 0, i32 1, !dbg !24
+ %byref.forwarding = getelementptr inbounds %struct.__block_byref_x, %struct.__block_byref_x* %x, i32 0, i32 1, !dbg !24
store %struct.__block_byref_x* %x, %struct.__block_byref_x** %byref.forwarding, !dbg !24
- %byref.flags = getelementptr inbounds %struct.__block_byref_x* %x, i32 0, i32 2, !dbg !24
+ %byref.flags = getelementptr inbounds %struct.__block_byref_x, %struct.__block_byref_x* %x, i32 0, i32 2, !dbg !24
store i32 0, i32* %byref.flags, !dbg !24
- %byref.size = getelementptr inbounds %struct.__block_byref_x* %x, i32 0, i32 3, !dbg !24
+ %byref.size = getelementptr inbounds %struct.__block_byref_x, %struct.__block_byref_x* %x, i32 0, i32 3, !dbg !24
store i32 32, i32* %byref.size, !dbg !24
- %forwarding = getelementptr inbounds %struct.__block_byref_x* %x, i32 0, i32 1, !dbg !25
+ %forwarding = getelementptr inbounds %struct.__block_byref_x, %struct.__block_byref_x* %x, i32 0, i32 1, !dbg !25
%0 = load %struct.__block_byref_x** %forwarding, !dbg !25
- %x1 = getelementptr inbounds %struct.__block_byref_x* %0, i32 0, i32 4, !dbg !25
+ %x1 = getelementptr inbounds %struct.__block_byref_x, %struct.__block_byref_x* %0, i32 0, i32 4, !dbg !25
%1 = load i32* %x1, align 4, !dbg !25
call void @bar(i32 %1), !dbg !25
%2 = bitcast %struct.__block_byref_x* %x to i8*, !dbg !26
%arr = alloca [10 x i32], align 16
%sum = alloca i32, align 4
call void @llvm.dbg.declare(metadata [10 x i32]* %arr, metadata !14), !dbg !18
- %arrayidx = getelementptr inbounds [10 x i32]* %arr, i32 0, i64 0, !dbg !19
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %arr, i32 0, i64 0, !dbg !19
store i32 5, i32* %arrayidx, align 4, !dbg !19
call void @llvm.dbg.declare(metadata i32* %sum, metadata !20), !dbg !21
store i32 4, i32* %sum, align 4, !dbg !21
%16 = add i64 %15, 0, !dbg !39
%17 = inttoptr i64 %16 to i64*, !dbg !39
store i64 -868083117767659023, i64* %17, !dbg !39
- %i.i = getelementptr inbounds %struct.C* %8, i64 0, i32 1, i32 0, !dbg !39
+ %i.i = getelementptr inbounds %struct.C, %struct.C* %8, i64 0, i32 1, i32 0, !dbg !39
%18 = ptrtoint i32* %i.i to i64, !dbg !39
%19 = lshr i64 %18, 3, !dbg !39
%20 = add i64 %19, 2147450880, !dbg !39
store i64 -868083113472691727, i64* %17, !dbg !48
tail call void @llvm.dbg.value(metadata %struct.C* %this, i64 0, metadata !30, metadata !{!"0x102"}), !dbg !48
%call = call i32 @_ZN1A5m_fn1Ev(%struct.A* %8), !dbg !49
- %i.i = getelementptr inbounds %struct.C* %this, i64 0, i32 1, i32 0, !dbg !50
+ %i.i = getelementptr inbounds %struct.C, %struct.C* %this, i64 0, i32 1, i32 0, !dbg !50
%18 = ptrtoint i32* %i.i to i64, !dbg !50
%19 = lshr i64 %18, 3, !dbg !50
%20 = add i64 %19, 2147450880, !dbg !50
call void @llvm.dbg.declare(metadata %struct.test1** %this_addr, metadata !24, metadata !{!"0x102"}), !dbg !28
store %struct.test1* %this, %struct.test1** %this_addr
%0 = load %struct.test1** %this_addr, align 8, !dbg !28 ; <%struct.test1*> [#uses=1]
- %1 = getelementptr inbounds %struct.test1* %0, i32 0, i32 0, !dbg !28 ; <i32 (...)***> [#uses=1]
+ %1 = getelementptr inbounds %struct.test1, %struct.test1* %0, i32 0, i32 0, !dbg !28 ; <i32 (...)***> [#uses=1]
store i32 (...)** getelementptr inbounds ([4 x i32 (...)*]* @_ZTV5test1, i64 0, i64 2), i32 (...)*** %1, align 8, !dbg !28
br label %return, !dbg !28
call void @llvm.dbg.declare(metadata %struct.test1** %this_addr, metadata !32, metadata !{!"0x102"}), !dbg !34
store %struct.test1* %this, %struct.test1** %this_addr
%0 = load %struct.test1** %this_addr, align 8, !dbg !35 ; <%struct.test1*> [#uses=1]
- %1 = getelementptr inbounds %struct.test1* %0, i32 0, i32 0, !dbg !35 ; <i32 (...)***> [#uses=1]
+ %1 = getelementptr inbounds %struct.test1, %struct.test1* %0, i32 0, i32 0, !dbg !35 ; <i32 (...)***> [#uses=1]
store i32 (...)** getelementptr inbounds ([4 x i32 (...)*]* @_ZTV5test1, i64 0, i64 2), i32 (...)*** %1, align 8, !dbg !35
br label %bb, !dbg !37
call void @llvm.dbg.declare(metadata %struct.test1** %this_addr, metadata !38, metadata !{!"0x102"}), !dbg !40
store %struct.test1* %this, %struct.test1** %this_addr
%0 = load %struct.test1** %this_addr, align 8, !dbg !41 ; <%struct.test1*> [#uses=1]
- %1 = getelementptr inbounds %struct.test1* %0, i32 0, i32 0, !dbg !41 ; <i32 (...)***> [#uses=1]
+ %1 = getelementptr inbounds %struct.test1, %struct.test1* %0, i32 0, i32 0, !dbg !41 ; <i32 (...)***> [#uses=1]
store i32 (...)** getelementptr inbounds ([4 x i32 (...)*]* @_ZTV5test1, i64 0, i64 2), i32 (...)*** %1, align 8, !dbg !41
br label %bb, !dbg !43
%cann-indvar-idxcast = sext i32 %cann-indvar to i64 ; <i64> [#uses=1]
%CT = bitcast i8** %local to i8*** ; <i8***> [#uses=1]
%reg115 = load i8*** %CT ; <i8**> [#uses=1]
- %cast235 = getelementptr i8** %reg115, i64 %cann-indvar-idxcast ; <i8**> [#uses=1]
+ %cast235 = getelementptr i8*, i8** %reg115, i64 %cann-indvar-idxcast ; <i8**> [#uses=1]
%reg117 = load i8** %cast235 ; <i8*> [#uses=1]
%reg236 = call i32 @puts( i8* %reg117 ) ; <i32> [#uses=0]
%cond239 = icmp slt i32 %add1-indvar, %argc ; <i1> [#uses=1]
declare i32 @puts(i8*)
define i32 @main(i32 %argc.1, i8** %argv.1) {
- %tmp.5 = getelementptr i8** %argv.1, i64 1 ; <i8**> [#uses=1]
+ %tmp.5 = getelementptr i8*, i8** %argv.1, i64 1 ; <i8**> [#uses=1]
%tmp.6 = load i8** %tmp.5 ; <i8*> [#uses=1]
%tmp.0 = call i32 @puts( i8* %tmp.6 ) ; <i32> [#uses=0]
ret i32 0
br label %cond_next
cond_next: ; preds = %cond_false, %cond_true
- %tmp5 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp5 = getelementptr [10 x i8], [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp6 = load i32* %iftmp.0, align 4 ; <i32> [#uses=1]
%tmp7 = call i32 (i8*, ...)* @printf( i8* noalias %tmp5, i32 %tmp6 ) nounwind ; <i32> [#uses=0]
br label %return
define i32 @main()
{
%res = call i32 @test(double 3.14)
- %ptr = getelementptr [4 x i8]* @format, i32 0, i32 0
+ %ptr = getelementptr [4 x i8], [4 x i8]* @format, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %ptr, i32 %res)
ret i32 0
}
%0 = load i32* @zero_int, align 4
%add = add nsw i32 %0, 5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom
store i32 40, i32* %arrayidx, align 4
%1 = load double* @zero_double, align 8
%cmp = fcmp olt double %1, 1.100000e+00
%2 = load i32* @zero_int, align 4
%add1 = add nsw i32 %2, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
store i32 70, i32* %arrayidx3, align 4
br label %if.end
%4 = load i32* %i, align 4
%sub = sub nsw i32 %4, 1
%idxprom5 = sext i32 %sub to i64
- %arrayidx6 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
%5 = load i32* %arrayidx6, align 4
%6 = load i32* %i, align 4
%idxprom7 = sext i32 %6 to i64
- %arrayidx8 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
%7 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %5, %7
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
store i32 %add9, i32* %arrayidx11, align 4
br label %for.inc
%0 = load i32* @zero_int, align 4
%add = add nsw i32 %0, 5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom
store i32 40, i32* %arrayidx, align 4
%1 = load double* @zero_double, align 8
%cmp = fcmp olt double %1, 1.000000e+00
%2 = load i32* @zero_int, align 4
%add1 = add nsw i32 %2, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
store i32 70, i32* %arrayidx3, align 4
br label %if.end
%4 = load i32* %i, align 4
%sub = sub nsw i32 %4, 1
%idxprom5 = sext i32 %sub to i64
- %arrayidx6 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
%5 = load i32* %arrayidx6, align 4
%6 = load i32* %i, align 4
%idxprom7 = sext i32 %6 to i64
- %arrayidx8 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
%7 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %5, %7
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
store i32 %add9, i32* %arrayidx11, align 4
br label %for.inc
%0 = load i32* @zero_int, align 4
%add = add nsw i32 %0, 5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom
store i32 40, i32* %arrayidx, align 4
%1 = load double* @zero_double, align 8
%cmp = fcmp olt double %1, 1.000000e+00
%2 = load i32* @zero_int, align 4
%add1 = add nsw i32 %2, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
store i32 70, i32* %arrayidx3, align 4
br label %if.end
%4 = load i32* %i, align 4
%sub = sub nsw i32 %4, 1
%idxprom5 = sext i32 %sub to i64
- %arrayidx6 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
%5 = load i32* %arrayidx6, align 4
%6 = load i32* %i, align 4
%idxprom7 = sext i32 %6 to i64
- %arrayidx8 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
%7 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %5, %7
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
store i32 %add9, i32* %arrayidx11, align 4
br label %for.inc
%cann-indvar-idxcast = sext i32 %cann-indvar to i64 ; <i64> [#uses=1]
%CT = bitcast i8** %local to i8*** ; <i8***> [#uses=1]
%reg115 = load i8*** %CT ; <i8**> [#uses=1]
- %cast235 = getelementptr i8** %reg115, i64 %cann-indvar-idxcast ; <i8**> [#uses=1]
+ %cast235 = getelementptr i8*, i8** %reg115, i64 %cann-indvar-idxcast ; <i8**> [#uses=1]
%reg117 = load i8** %cast235 ; <i8*> [#uses=1]
%reg236 = call i32 @puts( i8* %reg117 ) ; <i32> [#uses=0]
%cond239 = icmp slt i32 %add1-indvar, %argc ; <i1> [#uses=1]
declare i32 @puts(i8*)
define i32 @main(i32 %argc.1, i8** %argv.1) {
- %tmp.5 = getelementptr i8** %argv.1, i64 1 ; <i8**> [#uses=1]
+ %tmp.5 = getelementptr i8*, i8** %argv.1, i64 1 ; <i8**> [#uses=1]
%tmp.6 = load i8** %tmp.5 ; <i8*> [#uses=1]
%tmp.0 = call i32 @puts( i8* %tmp.6 ) ; <i32> [#uses=0]
ret i32 0
br label %cond_next
cond_next: ; preds = %cond_false, %cond_true
- %tmp5 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp5 = getelementptr [10 x i8], [10 x i8]* @.str, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp6 = load i32* %iftmp.0, align 4 ; <i32> [#uses=1]
%tmp7 = call i32 (i8*, ...)* @printf( i8* noalias %tmp5, i32 %tmp6 ) nounwind ; <i32> [#uses=0]
br label %return
define i32 @main()
{
%res = call i32 @test(double 3.14)
- %ptr = getelementptr [4 x i8]* @format, i32 0, i32 0
+ %ptr = getelementptr [4 x i8], [4 x i8]* @format, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %ptr, i32 %res)
ret i32 0
}
%0 = load i32* @zero_int, align 4
%add = add nsw i32 %0, 5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom
store i32 40, i32* %arrayidx, align 4
%1 = load double* @zero_double, align 8
%cmp = fcmp olt double %1, 1.100000e+00
%2 = load i32* @zero_int, align 4
%add1 = add nsw i32 %2, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
store i32 70, i32* %arrayidx3, align 4
br label %if.end
%4 = load i32* %i, align 4
%sub = sub nsw i32 %4, 1
%idxprom5 = sext i32 %sub to i64
- %arrayidx6 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
%5 = load i32* %arrayidx6, align 4
%6 = load i32* %i, align 4
%idxprom7 = sext i32 %6 to i64
- %arrayidx8 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
%7 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %5, %7
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
store i32 %add9, i32* %arrayidx11, align 4
br label %for.inc
%0 = load i32* @zero_int, align 4
%add = add nsw i32 %0, 5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom
store i32 40, i32* %arrayidx, align 4
%1 = load double* @zero_double, align 8
%cmp = fcmp olt double %1, 1.000000e+00
%2 = load i32* @zero_int, align 4
%add1 = add nsw i32 %2, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
store i32 70, i32* %arrayidx3, align 4
br label %if.end
%4 = load i32* %i, align 4
%sub = sub nsw i32 %4, 1
%idxprom5 = sext i32 %sub to i64
- %arrayidx6 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
%5 = load i32* %arrayidx6, align 4
%6 = load i32* %i, align 4
%idxprom7 = sext i32 %6 to i64
- %arrayidx8 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
%7 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %5, %7
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
store i32 %add9, i32* %arrayidx11, align 4
br label %for.inc
%0 = load i32* @zero_int, align 4
%add = add nsw i32 %0, 5
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom
store i32 40, i32* %arrayidx, align 4
%1 = load double* @zero_double, align 8
%cmp = fcmp olt double %1, 1.000000e+00
%2 = load i32* @zero_int, align 4
%add1 = add nsw i32 %2, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom2
store i32 70, i32* %arrayidx3, align 4
br label %if.end
%4 = load i32* %i, align 4
%sub = sub nsw i32 %4, 1
%idxprom5 = sext i32 %sub to i64
- %arrayidx6 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom5
%5 = load i32* %arrayidx6, align 4
%6 = load i32* %i, align 4
%idxprom7 = sext i32 %6 to i64
- %arrayidx8 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom7
%7 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %5, %7
%8 = load i32* %i, align 4
%idxprom10 = sext i32 %8 to i64
- %arrayidx11 = getelementptr inbounds [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds [10 x i32], [10 x i32]* @zero_arr, i32 0, i64 %idxprom10
store i32 %add9, i32* %arrayidx11, align 4
br label %for.inc
define i32 @main() {
%fma = tail call double @llvm.fma.f64(double 3.0, double 3.0, double 3.0) nounwind readnone
- %ptr1 = getelementptr [4 x i8]* @msg_double, i32 0, i32 0
+ %ptr1 = getelementptr [4 x i8], [4 x i8]* @msg_double, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %ptr1, double %fma)
ret i32 0
; if TRUE print message
br i1 %res_i, label %Print_int, label %Double
Print_int:
- %ptr0 = getelementptr [17 x i8]* @msg_int, i32 0, i32 0
+ %ptr0 = getelementptr [17 x i8], [17 x i8]* @msg_int, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %ptr0)
br label %Double
Double:
br i1 %res_double, label %Print_double, label %Float
Print_double:
- %ptr1 = getelementptr [20 x i8]* @msg_double, i32 0, i32 0
+ %ptr1 = getelementptr [20 x i8], [20 x i8]* @msg_double, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %ptr1)
br label %Float
Float:
br i1 %res_float, label %Print_float, label %Exit
Print_float:
- %ptr2 = getelementptr [19 x i8]* @msg_float, i32 0, i32 0
+ %ptr2 = getelementptr [19 x i8], [19 x i8]* @msg_float, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %ptr2)
br label %Exit
Exit:
define i32 @foo(i32 %blah) {
store i32 5, i32* @MyVar
- %idx = getelementptr { i32*, i32 }* @MyIntList, i64 0, i32 1 ; <i32*> [#uses=1]
+ %idx = getelementptr { i32*, i32 }, { i32*, i32 }* @MyIntList, i64 0, i32 1 ; <i32*> [#uses=1]
store i32 12, i32* %idx
ret i32 %blah
}
;; preceded by an alloca, but shown for completeness).
call void @llvm.lifetime.start(i64 8, i8* %y)
- %0 = getelementptr %"struct.std::pair<int,int>"* %x, i32 0, i32 0
+ %0 = getelementptr %"struct.std::pair<int,int>", %"struct.std::pair<int,int>"* %x, i32 0, i32 0
store i32 4, i32* %0, align 8
- %1 = getelementptr %"struct.std::pair<int,int>"* %x, i32 0, i32 1
+ %1 = getelementptr %"struct.std::pair<int,int>", %"struct.std::pair<int,int>"* %x, i32 0, i32 1
store i32 5, i32* %1, align 4
;; Constructor has finished here.
ret %list* null
bb4: ; preds = %bb2
- %idx = getelementptr %list* %reg115, i64 0, i32 1 ; <i32*> [#uses=1]
+ %idx = getelementptr %list, %list* %reg115, i64 0, i32 1 ; <i32*> [#uses=1]
%reg111 = load i32* %idx ; <i32> [#uses=1]
%cond1013 = icmp ne i32 %reg111, %Data ; <i1> [#uses=1]
br i1 %cond1013, label %bb6, label %bb5
ret %list* %reg115
bb6: ; preds = %bb4
- %idx2 = getelementptr %list* %reg115, i64 0, i32 0 ; <%list**> [#uses=1]
+ %idx2 = getelementptr %list, %list* %reg115, i64 0, i32 0 ; <%list**> [#uses=1]
%reg116 = load %list** %idx2 ; <%list*> [#uses=1]
br label %bb2
}
store i32 3, i32* %ptr
%val = load i32* %ptr ; <i32> [#uses=0]
%sptr = alloca %struct ; <%struct*> [#uses=2]
- %nsptr = getelementptr %struct* %sptr, i64 0, i32 1 ; <%inners*> [#uses=1]
- %ubsptr = getelementptr %inners* %nsptr, i64 0, i32 1 ; <{ i8 }*> [#uses=1]
- %idx = getelementptr { i8 }* %ubsptr, i64 0, i32 0 ; <i8*> [#uses=1]
+ %nsptr = getelementptr %struct, %struct* %sptr, i64 0, i32 1 ; <%inners*> [#uses=1]
+ %ubsptr = getelementptr %inners, %inners* %nsptr, i64 0, i32 1 ; <{ i8 }*> [#uses=1]
+ %idx = getelementptr { i8 }, { i8 }* %ubsptr, i64 0, i32 0 ; <i8*> [#uses=1]
store i8 4, i8* %idx
- %fptr = getelementptr %struct* %sptr, i64 0, i32 1, i32 0 ; <float*> [#uses=1]
+ %fptr = getelementptr %struct, %struct* %sptr, i64 0, i32 1, i32 0 ; <float*> [#uses=1]
store float 4.000000e+00, float* %fptr
ret i32 3
}
}
define i8* @otherfunc(i32, double) {
- %somestr = getelementptr [11 x i8]* @somestr, i64 0, i64 0 ; <i8*> [#uses=1]
+ %somestr = getelementptr [11 x i8], [11 x i8]* @somestr, i64 0, i64 0 ; <i8*> [#uses=1]
ret i8* %somestr
}
}
define i32 @lc() {
- %g = getelementptr %0* @c, i32 0, i32 0 ; <i32*> [#uses=1]
+ %g = getelementptr %0, %0* @c, i32 0, i32 0 ; <i32*> [#uses=1]
%u = load i32* %g ; <i32> [#uses=1]
- %h = getelementptr %0* @c, i32 0, i32 1 ; <i32*> [#uses=1]
+ %h = getelementptr %0, %0* @c, i32 0, i32 1 ; <i32*> [#uses=1]
%v = load i32* %h ; <i32> [#uses=1]
%r = add i32 %u, %v
ret i32 %r
define void @ff_mlp_init_x86(%struct.DSPContext* nocapture %c, %struct.AVCodecContext* nocapture %avctx) nounwind sanitize_address {
entry:
- %mlp_filter_channel = getelementptr inbounds %struct.DSPContext* %c, i32 0, i32 131
+ %mlp_filter_channel = getelementptr inbounds %struct.DSPContext, %struct.DSPContext* %c, i32 0, i32 131
store void (i32*, i32*, i32, i32, i32, i32, i32, i32*)* @mlp_filter_channel_x86, void (i32*, i32*, i32, i32, i32, i32, i32, i32*)** %mlp_filter_channel, align 4, !tbaa !0
ret void
}
%iirjump = alloca i8*, align 4
store i32 %filter_shift, i32* %filter_shift.addr, align 4, !tbaa !3
store i32 %mask, i32* %mask.addr, align 4, !tbaa !3
- %arrayidx = getelementptr inbounds [9 x i8*]* @firtable, i32 0, i32 %firorder
+ %arrayidx = getelementptr inbounds [9 x i8*], [9 x i8*]* @firtable, i32 0, i32 %firorder
%0 = load i8** %arrayidx, align 4, !tbaa !0
store i8* %0, i8** %firjump, align 4, !tbaa !0
- %arrayidx1 = getelementptr inbounds [5 x i8*]* @iirtable, i32 0, i32 %iirorder
+ %arrayidx1 = getelementptr inbounds [5 x i8*], [5 x i8*]* @iirtable, i32 0, i32 %iirorder
%1 = load i8** %arrayidx1, align 4, !tbaa !0
store i8* %1, i8** %iirjump, align 4, !tbaa !0
%sub = sub nsw i32 0, %blocksize
; CHECK-UAR: ret void
%x = alloca [20 x i8], align 16
- %arraydecay = getelementptr inbounds [20 x i8]* %x, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [20 x i8], [20 x i8]* %x, i64 0, i64 0
call void @Foo(i8* %arraydecay)
ret void
}
%10 = xor i64 %9, %8, !nosanitize !0
%11 = mul i64 %10, -7070675565921424023, !nosanitize !0
%12 = and i64 %11, 127, !nosanitize !0
- %13 = getelementptr inbounds [128 x i64]* @__ubsan_vptr_type_cache, i64 0, i64 %12, !nosanitize !0
+ %13 = getelementptr inbounds [128 x i64], [128 x i64]* @__ubsan_vptr_type_cache, i64 0, i64 %12, !nosanitize !0
; CHECK-NOT: __asan_report_load8
%14 = load i64* %13, align 8, !nosanitize !0
%15 = icmp eq i64 %14, %11, !nosanitize !0
while.body:
%c.addr.02 = phi i8* [ %incdec.ptr, %while.body ], [ %c, %entry ]
- %incdec.ptr = getelementptr inbounds i8* %c.addr.02, i64 -1
+ %incdec.ptr = getelementptr inbounds i8, i8* %c.addr.02, i64 -1
store i8 100, i8* %c.addr.02, align 1
%1 = load i8* %incdec.ptr, align 1
%tobool = icmp eq i8 %1, 0
; CHECK: f2
define void @f2() {
while.body.i.preheader:
- %addr = getelementptr inbounds [10 x i8]* @global, i64 0, i64 9
+ %addr = getelementptr inbounds [10 x i8], [10 x i8]* @global, i64 0, i64 9
br label %while.body.i
while.body.i:
; CHECK-NEXT: phi
; CHECK-NOT: phi
%c.addr.02.i = phi i8* [ %incdec.ptr.i, %while.body.i ], [ %addr, %while.body.i.preheader ]
- %incdec.ptr.i = getelementptr inbounds i8* %c.addr.02.i, i64 -1
+ %incdec.ptr.i = getelementptr inbounds i8, i8* %c.addr.02.i, i64 -1
; CHECK: sub i64 10, %0
; CHECK-NEXT: icmp ult i64 10, %0
; CHECK-NEXT: icmp ult i64 {{.*}}, 1
while.body:
%c.addr.02 = phi i8 addrspace(1)* [ %incdec.ptr, %while.body ], [ %c, %entry ]
- %incdec.ptr = getelementptr inbounds i8 addrspace(1)* %c.addr.02, i64 -1
+ %incdec.ptr = getelementptr inbounds i8, i8 addrspace(1)* %c.addr.02, i64 -1
store i8 100, i8 addrspace(1)* %c.addr.02, align 1
%1 = load i8 addrspace(1)* %incdec.ptr, align 1
%tobool = icmp eq i8 %1, 0
define void @f2_as1() {
; CHECK: @f2_as1
while.body.i.preheader:
- %addr = getelementptr inbounds [10 x i8] addrspace(1)* @global_as1, i16 0, i16 9
+ %addr = getelementptr inbounds [10 x i8], [10 x i8] addrspace(1)* @global_as1, i16 0, i16 9
br label %while.body.i
while.body.i:
; CHECK-NEXT: phi
; CHECK-NOT: phi
%c.addr.02.i = phi i8 addrspace(1)* [ %incdec.ptr.i, %while.body.i ], [ %addr, %while.body.i.preheader ]
- %incdec.ptr.i = getelementptr inbounds i8 addrspace(1)* %c.addr.02.i, i16 -1
+ %incdec.ptr.i = getelementptr inbounds i8, i8 addrspace(1)* %c.addr.02.i, i16 -1
; CHECK: sub i16 10, %0
; CHECK-NEXT: icmp ult i16 10, %0
; CHECK-NEXT: icmp ult i16 {{.*}}, 1
define i16 @f() {
entry:
%packed1 = alloca %struct.s2_packed, align 8
- %gep = getelementptr inbounds %struct.s2_packed* %packed1, i32 0, i32 4
+ %gep = getelementptr inbounds %struct.s2_packed, %struct.s2_packed* %packed1, i32 0, i32 4
%ptr = bitcast i16* %gep to i32*
%val = load i32* %ptr, align 4
%valt = trunc i32 %val to i16
define i16 @f2() {
entry:
%packed1 = alloca %struct.s2_packed, align 8
- %gep = getelementptr inbounds %struct.s2_packed* %packed1, i32 0, i32 4
+ %gep = getelementptr inbounds %struct.s2_packed, %struct.s2_packed* %packed1, i32 0, i32 4
%ptr = bitcast i16* %gep to i48*
%val = load i48* %ptr, align 4
%valt = trunc i48 %val to i16
define void @f1() nounwind {
%1 = tail call i8* @malloc(i64 32)
%2 = bitcast i8* %1 to i32*
- %idx = getelementptr inbounds i32* %2, i64 2
+ %idx = getelementptr inbounds i32, i32* %2, i64 2
; CHECK-NOT: trap
store i32 3, i32* %idx, align 4
ret void
define void @f2() nounwind {
%1 = tail call i8* @malloc(i64 32)
%2 = bitcast i8* %1 to i32*
- %idx = getelementptr inbounds i32* %2, i64 8
+ %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: trap
store i32 3, i32* %idx, align 4
ret void
define void @f3(i64 %x) nounwind {
%1 = tail call i8* @calloc(i64 4, i64 %x)
%2 = bitcast i8* %1 to i32*
- %idx = getelementptr inbounds i32* %2, i64 8
+ %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: mul i64 4, %
; CHECK: sub i64 {{.*}}, 32
; CHECK-NEXT: icmp ult i64 {{.*}}, 32
define void @f4(i64 %x) nounwind {
%1 = tail call i8* @realloc(i8* null, i64 %x) nounwind
%2 = bitcast i8* %1 to i32*
- %idx = getelementptr inbounds i32* %2, i64 8
+ %idx = getelementptr inbounds i32, i32* %2, i64 8
; CHECK: trap
%3 = load i32* %idx, align 4
ret void
; CHECK: @f5
define void @f5(i64 %x) nounwind {
- %idx = getelementptr inbounds [8 x i8]* @.str, i64 0, i64 %x
+ %idx = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i64 0, i64 %x
; CHECK: trap
%1 = load i8* %idx, align 4
ret void
define void @f5_as1(i64 %x) nounwind {
; CHECK: @f5_as1
- %idx = getelementptr inbounds [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
+ %idx = getelementptr inbounds [8 x i8], [8 x i8] addrspace(1)* @.str_as1, i64 0, i64 %x
; CHECK: sub i16
; CHECK icmp ult i16
; CHECK: trap
; CHECK: @f11
define void @f11(i128* byval %x) nounwind {
%1 = bitcast i128* %x to i8*
- %2 = getelementptr inbounds i8* %1, i64 16
+ %2 = getelementptr inbounds i8, i8* %1, i64 16
; CHECK: br label
%3 = load i8* %2, align 4
ret void
; CHECK: @f11_as1
define void @f11_as1(i128 addrspace(1)* byval %x) nounwind {
%1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)*
- %2 = getelementptr inbounds i8 addrspace(1)* %1, i16 16
+ %2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16
; CHECK: br label
%3 = load i8 addrspace(1)* %2, align 4
ret void
%1 = tail call i8* @calloc(i64 1, i64 %x)
; CHECK: mul i64 %y, 8
%2 = bitcast i8* %1 to i64*
- %3 = getelementptr inbounds i64* %2, i64 %y
+ %3 = getelementptr inbounds i64, i64* %2, i64 %y
%4 = load i64* %3, align 8
ret i64 %4
}
dead:
; Self-refential GEPs can occur in dead code.
- %incdec.ptr = getelementptr inbounds i32* %incdec.ptr, i64 1
- ; CHECK: %incdec.ptr = getelementptr inbounds i32* %incdec.ptr
+ %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr, i64 1
+ ; CHECK: %incdec.ptr = getelementptr inbounds i32, i32* %incdec.ptr
%l = load i32* %incdec.ptr
br label %alive
; CHECK: call void @__dfsw_customcb({{.*}} @"dfst0$customcb", i8* bitcast ({{.*}} @"dfs$cb" to i8*), i16 0)
call void @customcb(i32 (i32)* @cb)
- ; CHECK: %[[LABELVA1_0:.*]] = getelementptr inbounds [2 x i16]* %[[LABELVA1]], i32 0, i32 0
+ ; CHECK: %[[LABELVA1_0:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA1]], i32 0, i32 0
; CHECK: store i16 0, i16* %[[LABELVA1_0]]
- ; CHECK: %[[LABELVA1_1:.*]] = getelementptr inbounds [2 x i16]* %[[LABELVA1]], i32 0, i32 1
+ ; CHECK: %[[LABELVA1_1:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA1]], i32 0, i32 1
; CHECK: store i16 %{{.*}}, i16* %[[LABELVA1_1]]
- ; CHECK: %[[LABELVA1_0A:.*]] = getelementptr inbounds [2 x i16]* %[[LABELVA1]], i32 0, i32 0
+ ; CHECK: %[[LABELVA1_0A:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA1]], i32 0, i32 0
; CHECK: call void (i32, i16, i16*, ...)* @__dfsw_custom3(i32 1, i16 0, i16* %[[LABELVA1_0A]], i32 2, i32 %{{.*}})
call void (i32, ...)* @custom3(i32 1, i32 2, i32 %x)
- ; CHECK: %[[LABELVA2_0:.*]] = getelementptr inbounds [2 x i16]* %[[LABELVA2]], i32 0, i32 0
- ; CHECK: %[[LABELVA2_0A:.*]] = getelementptr inbounds [2 x i16]* %[[LABELVA2]], i32 0, i32 0
+ ; CHECK: %[[LABELVA2_0:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA2]], i32 0, i32 0
+ ; CHECK: %[[LABELVA2_0A:.*]] = getelementptr inbounds [2 x i16], [2 x i16]* %[[LABELVA2]], i32 0, i32 0
; CHECK: call i32 (i32, i16, i16*, i16*, ...)* @__dfsw_custom4(i32 1, i16 0, i16* %[[LABELVA2_0A]], i16* %[[LABELRETURN]], i32 2, i32 3)
call i32 (i32, ...)* @custom4(i32 1, i32 2, i32 3)
; NO_COMBINE_PTR_LABEL: and i64
; NO_COMBINE_PTR_LABEL: mul i64
; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: load i16*
; NO_COMBINE_PTR_LABEL: load i16*
; NO_COMBINE_PTR_LABEL: icmp ne i16
; COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
; COMBINE_PTR_LABEL: ret i64
; COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union_load
- ; COMBINE_PTR_LABEL: getelementptr i64* {{.*}} i64
+ ; COMBINE_PTR_LABEL: getelementptr i64, i64* {{.*}} i64
; COMBINE_PTR_LABEL: load i64*
; COMBINE_PTR_LABEL: icmp eq i64
; NO_COMBINE_PTR_LABEL: store i16 {{.*}} @__dfsan_retval_tls
; NO_COMBINE_PTR_LABEL: ret i64
; NO_COMBINE_PTR_LABEL: call {{.*}} @__dfsan_union_load
- ; NO_COMBINE_PTR_LABEL: getelementptr i64* {{.*}} i64
+ ; NO_COMBINE_PTR_LABEL: getelementptr i64, i64* {{.*}} i64
; NO_COMBINE_PTR_LABEL: load i64*
; NO_COMBINE_PTR_LABEL: icmp eq i64
; NO_COMBINE_PTR_LABEL: and i64
; NO_COMBINE_PTR_LABEL: mul i64
; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
; NO_COMBINE_PTR_LABEL: store i8
; COMBINE_PTR_LABEL: and i64
; COMBINE_PTR_LABEL: mul i64
; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
; COMBINE_PTR_LABEL: store i8
; NO_COMBINE_PTR_LABEL: and i64
; NO_COMBINE_PTR_LABEL: mul i64
; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
; NO_COMBINE_PTR_LABEL: store i16
; COMBINE_PTR_LABEL: and i64
; COMBINE_PTR_LABEL: mul i64
; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
; COMBINE_PTR_LABEL: store i16
; NO_COMBINE_PTR_LABEL: and i64
; NO_COMBINE_PTR_LABEL: mul i64
; NO_COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
- ; NO_COMBINE_PTR_LABEL: getelementptr i16*
+ ; NO_COMBINE_PTR_LABEL: getelementptr i16, i16*
; NO_COMBINE_PTR_LABEL: store i16
; NO_COMBINE_PTR_LABEL: store i32
; COMBINE_PTR_LABEL: and i64
; COMBINE_PTR_LABEL: mul i64
; COMBINE_PTR_LABEL: inttoptr i64 {{.*}} i16*
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
- ; COMBINE_PTR_LABEL: getelementptr i16*
+ ; COMBINE_PTR_LABEL: getelementptr i16, i16*
; COMBINE_PTR_LABEL: store i16
; COMBINE_PTR_LABEL: store i32
%x.addr = alloca i32, align 4
%va = alloca [1 x %struct.__va_list_tag], align 16
store i32 %x, i32* %x.addr, align 4
- %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
%arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
call void @llvm.va_start(i8* %arraydecay1)
ret void
%0 = bitcast %struct.StructByVal* %s to i8*
%agg.tmp.sroa.0.0..sroa_cast = bitcast %struct.StructByVal* %s to i64*
%agg.tmp.sroa.0.0.copyload = load i64* %agg.tmp.sroa.0.0..sroa_cast, align 4
- %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal* %s, i64 0, i32 2
+ %agg.tmp.sroa.2.0..sroa_idx = getelementptr inbounds %struct.StructByVal, %struct.StructByVal* %s, i64 0, i32 2
%agg.tmp.sroa.2.0..sroa_cast = bitcast i32* %agg.tmp.sroa.2.0..sroa_idx to i64*
%agg.tmp.sroa.2.0.copyload = load i64* %agg.tmp.sroa.2.0..sroa_cast, align 4
%1 = bitcast %struct.StructByVal* %agg.tmp2 to i8*
; CHECK-LABEL: define void @Store8_align4(
; CHECK: store i64 {{.*}}, align 4
; CHECK: store i32 {{.*}}, align 4
-; CHECK: getelementptr i32* {{.*}}, i32 1
+; CHECK: getelementptr i32, i32* {{.*}}, i32 1
; CHECK: store i32 {{.*}}, align 4
; CHECK: store i64 {{.*}}, align 4
; CHECK: ret void
; CHECK-LABEL: define void @StoreAgg(
; CHECK: store { i32, i32, i32 } {{.*}}, align 4
; CHECK: store i32 {{.*}}, align 4
-; CHECK: getelementptr i32* {{.*}}, i32 1
+; CHECK: getelementptr i32, i32* {{.*}}, i32 1
; CHECK: store i32 {{.*}}, align 4
-; CHECK: getelementptr i32* {{.*}}, i32 2
+; CHECK: getelementptr i32, i32* {{.*}}, i32 2
; CHECK: store i32 {{.*}}, align 4
; CHECK: store %struct.S {{.*}}, align 4
; CHECK: ret void
; CHECK-LABEL: define void @StoreAgg8(
; CHECK: store { i32, i32, i32 } {{.*}}, align 8
; CHECK: store i64 {{.*}}, align 8
-; CHECK: getelementptr i32* {{.*}}, i32 2
+; CHECK: getelementptr i32, i32* {{.*}}, i32 2
; CHECK: store i32 {{.*}}, align 8
; CHECK: store %struct.S {{.*}}, align 8
; CHECK: ret void
; CHECK-LABEL: define void @StoreAgg24(
; CHECK: store { i64, i64, i64 } {{.*}}, align 8
; CHECK: store i64 {{.*}}, align 8
-; CHECK: getelementptr i64* {{.*}}, i32 1
+; CHECK: getelementptr i64, i64* {{.*}}, i32 1
; CHECK: store i64 {{.*}}, align 8
-; CHECK: getelementptr i64* {{.*}}, i32 2
+; CHECK: getelementptr i64, i64* {{.*}}, i32 2
; CHECK: store i64 {{.*}}, align 8
; CHECK: store %struct.Q {{.*}}, align 8
; CHECK: ret void
define i32 @_ZN1A1fEv(%struct.A* nocapture readonly %this) #0 align 2 {
entry:
tail call void @llvm.dbg.value(metadata %struct.A* %this, i64 0, metadata !15, metadata !{!"0x102"}), !dbg !20
- %x = getelementptr inbounds %struct.A* %this, i64 0, i32 0, !dbg !21
+ %x = getelementptr inbounds %struct.A, %struct.A* %this, i64 0, i32 0, !dbg !21
%0 = load i32* %x, align 4, !dbg !21
ret i32 %0, !dbg !21
}
define i32 @read_from_const_global_array(i32 %idx) nounwind uwtable sanitize_thread readnone {
entry:
%idxprom = sext i32 %idx to i64
- %arrayidx = getelementptr inbounds [10 x i32]* @const_global_array, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @const_global_array, i64 0, i64 %idxprom
%0 = load i32* %arrayidx, align 4
ret i32 %0
}
define void @multiply(i32 %index, i32 %X, i32 %Y) {
%Z = mul i32 %X, %Y
- %P = getelementptr [20 x i17]* @ARRAY, i32 0, i32 %index
+ %P = getelementptr [20 x i17], [20 x i17]* @ARRAY, i32 0, i32 %index
%Result = trunc i32 %Z to i17
store i17 %Result, i17* %P
ret void
define i32 @main(i32 %argc, i8** %argv) {
%i = bitcast i32 0 to i32
call void @multiply(i32 %i, i32 -1, i32 255)
- %P = getelementptr [20 x i17]* @ARRAY, i32 0, i32 0
+ %P = getelementptr [20 x i17], [20 x i17]* @ARRAY, i32 0, i32 0
%X = load i17* %P
%result = sext i17 %X to i32
- %fmt = getelementptr [4 x i8]* @FORMAT, i32 0, i32 0
+ %fmt = getelementptr [4 x i8], [4 x i8]* @FORMAT, i32 0, i32 0
call i32 (i8*,...)* @printf(i8* %fmt, i32 %result)
ret i32 0
}
define i32 @uses_printf(i32 %i) {
entry:
- %s = getelementptr [13 x i8]* @str, i64 0, i64 0
+ %s = getelementptr [13 x i8], [13 x i8]* @str, i64 0, i64 0
call i32 (i8*, ...)* @printf(i8* %s)
ret i32 0
}
}
define i32 @uses_printf(i32 %i) {
entry:
- %s = getelementptr [13 x i8]* @str, i64 0, i64 0
+ %s = getelementptr [13 x i8], [13 x i8]* @str, i64 0, i64 0
call i32 (i8*, ...)* @printf(i8* %s)
ret i32 0
}
define internal void @f1(%struct1* %tty) {
loopentry.preheader:
- %tmp.2.i.i = getelementptr %struct1* %tty, i64 0, i32 1 ; <void (%struct2*)**> [#uses=1]
+ %tmp.2.i.i = getelementptr %struct1, %struct1* %tty, i64 0, i32 1 ; <void (%struct2*)**> [#uses=1]
%tmp.3.i.i = load volatile void (%struct2*)** %tmp.2.i.i ; <void (%struct2*)*> [#uses=0]
ret void
}
@Y = global i32* getelementptr ([2 x i32]* @X, i64 0, i64 0) ; <i32**> [#uses=0]
define void @foo(i64 %V) {
- %Y = getelementptr [2 x i32]* @X, i64 0, i64 %V ; <i32*> [#uses=0]
+ %Y = getelementptr [2 x i32], [2 x i32]* @X, i64 0, i64 %V ; <i32*> [#uses=0]
ret void
}
%2 = load i32* %i, align 4, !dbg !21
%idxprom = sext i32 %2 to i64, !dbg !21
%3 = load i8*** %argv.addr, align 8, !dbg !21
- %arrayidx = getelementptr inbounds i8** %3, i64 %idxprom, !dbg !21
+ %arrayidx = getelementptr inbounds i8*, i8** %3, i64 %idxprom, !dbg !21
%4 = load i8** %arrayidx, align 8, !dbg !21
%call = call i32 @puts(i8* %4), !dbg !21
br label %for.inc, !dbg !23
define i32 @bug_a(%bug_type* %fp) nounwind uwtable {
entry:
- %d_stream = getelementptr inbounds %bug_type* %fp, i64 0, i32 0
+ %d_stream = getelementptr inbounds %bug_type, %bug_type* %fp, i64 0, i32 0
ret i32 0
}
@g3 = external global %B
define void @f1() {
- getelementptr %A* null, i32 0
+ getelementptr %A, %A* null, i32 0
ret void
}
define i32 @foo(i32 %blah) {
store i32 %blah, i32* @MyVar
- %idx = getelementptr %intlist* @MyIntList, i64 0, i32 1
+ %idx = getelementptr %intlist, %intlist* @MyIntList, i64 0, i32 1
store i32 12, i32* %idx
%ack = load i32* @0
%fzo = add i32 %ack, %blah
; CHECK-DAG: @g2 = external global %A
; CHECK-DAG: @g3 = external global %B.1
-; CHECK-DAG: getelementptr %A* null, i32 0
+; CHECK-DAG: getelementptr %A, %A* null, i32 0
%A = type opaque
%B = type { %C, %C, %B* }
define void @f(%"SmallVImpl<const PI*>"* %this) {
entry:
- %x = getelementptr inbounds %"SmallVImpl<const PI*>"* %this, i64 0, i32 1
+ %x = getelementptr inbounds %"SmallVImpl<const PI*>", %"SmallVImpl<const PI*>"* %this, i64 0, i32 1
ret void
}
define void @main() {
%v1 = load i32* @MyVar
call void @print(i32 %v1)
- %idx = getelementptr %intlist* @MyIntList, i64 0, i32 1
+ %idx = getelementptr %intlist, %intlist* @MyIntList, i64 0, i32 1
%v2 = load i32* %idx
call void @print(i32 %v2)
%1 = call i32 @foo(i32 5)
; CHECK-NEXT: %A = type { i8 }
; CHECK: @g1 = external global %C.0
-; CHECK: getelementptr %C.0* null, i64 0, i32 0, i32 0
+; CHECK: getelementptr %C.0, %C.0* null, i64 0, i32 0, i32 0
%A = type { i8 }
%B = type { %A }
%C = type { %B }
%C.0 = type { %B }
define void @f1() {
- getelementptr %C* null, i64 0, i32 0, i32 0
+ getelementptr %C, %C* null, i64 0, i32 0, i32 0
ret void
}
@g1 = external global %C.0
%sa = alloca %struct.SA, align 4
%a.addr = alloca %class.A*, align 8
%agg.tmp = alloca %struct.SA, align 4
- %coerce.dive = getelementptr %struct.SA* %sa, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.SA, %struct.SA* %sa, i32 0, i32 0
store i32 %sa.coerce, i32* %coerce.dive
store %class.A* %a, %class.A** %a.addr, align 8
call void @llvm.dbg.declare(metadata %class.A** %a.addr, metadata !24, metadata !{!"0x102"}), !dbg !25
%1 = bitcast %struct.SA* %agg.tmp to i8*, !dbg !28
%2 = bitcast %struct.SA* %sa to i8*, !dbg !28
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 4, i32 4, i1 false), !dbg !28
- %coerce.dive1 = getelementptr %struct.SA* %agg.tmp, i32 0, i32 0, !dbg !28
+ %coerce.dive1 = getelementptr %struct.SA, %struct.SA* %agg.tmp, i32 0, i32 0, !dbg !28
%3 = load i32* %coerce.dive1, !dbg !28
call void @_ZN1A5testAE2SA(%class.A* %0, i32 %3), !dbg !28
ret void, !dbg !29
entry:
%a = alloca %struct.SA, align 4
%this.addr = alloca %class.A*, align 8
- %coerce.dive = getelementptr %struct.SA* %a, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.SA, %struct.SA* %a, i32 0, i32 0
store i32 %a.coerce, i32* %coerce.dive
store %class.A* %this, %class.A** %this.addr, align 8
call void @llvm.dbg.declare(metadata %class.A** %this.addr, metadata !30, metadata !{!"0x102"}), !dbg !31
%sa = alloca %struct.SA, align 4
%b.addr = alloca %class.B*, align 8
%agg.tmp = alloca %struct.SA, align 4
- %coerce.dive = getelementptr %struct.SA* %sa, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.SA, %struct.SA* %sa, i32 0, i32 0
store i32 %sa.coerce, i32* %coerce.dive
store %class.B* %b, %class.B** %b.addr, align 8
call void @llvm.dbg.declare(metadata %class.B** %b.addr, metadata !24, metadata !{!"0x102"}), !dbg !25
%1 = bitcast %struct.SA* %agg.tmp to i8*, !dbg !28
%2 = bitcast %struct.SA* %sa to i8*, !dbg !28
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 4, i32 4, i1 false), !dbg !28
- %coerce.dive1 = getelementptr %struct.SA* %agg.tmp, i32 0, i32 0, !dbg !28
+ %coerce.dive1 = getelementptr %struct.SA, %struct.SA* %agg.tmp, i32 0, i32 0, !dbg !28
%3 = load i32* %coerce.dive1, !dbg !28
call void @_ZN1B5testBE2SA(%class.B* %0, i32 %3), !dbg !28
ret void, !dbg !29
entry:
%sa = alloca %struct.SA, align 4
%this.addr = alloca %class.B*, align 8
- %coerce.dive = getelementptr %struct.SA* %sa, i32 0, i32 0
+ %coerce.dive = getelementptr %struct.SA, %struct.SA* %sa, i32 0, i32 0
store i32 %sa.coerce, i32* %coerce.dive
store %class.B* %this, %class.B** %this.addr, align 8
call void @llvm.dbg.declare(metadata %class.B** %this.addr, metadata !30, metadata !{!"0x102"}), !dbg !31
define i32 @main() nounwind {
entry:
%0 = load i32* @startval, align 4
- %1 = getelementptr inbounds [10 x i32 (...)*]* @vtable, i32 0, i32 %0
+ %1 = getelementptr inbounds [10 x i32 (...)*], [10 x i32 (...)*]* @vtable, i32 0, i32 %0
%2 = load i32 (...)** %1, align 4
%3 = tail call i32 (...)* %2() nounwind
tail call void @exit(i32 %3) noreturn nounwind
define i1 @q() nounwind {
entry:
- %char_p = getelementptr %packed* null, i32 0, i32 1 ; <i8*> [#uses=1]
- %char_u = getelementptr %unpacked* null, i32 0, i32 1 ; <i8*> [#uses=1]
+ %char_p = getelementptr %packed, %packed* null, i32 0, i32 1 ; <i8*> [#uses=1]
+ %char_u = getelementptr %unpacked, %unpacked* null, i32 0, i32 1 ; <i8*> [#uses=1]
%res = icmp eq i8* %char_p, %char_u ; <i1> [#uses=1]
ret i1 %res
}
define i8* @different_addrspace() nounwind noinline {
; OPT: different_addrspace
- %p = getelementptr inbounds i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*),
+ %p = getelementptr inbounds i8, i8* addrspacecast ([4 x i8] addrspace(12)* @p12 to i8*),
i32 2
ret i8* %p
; OPT: ret i8* getelementptr (i8* addrspacecast (i8 addrspace(12)* getelementptr inbounds ([4 x i8] addrspace(12)* @p12, i32 0, i32 0) to i8*), i32 2)
define i8* @same_addrspace() nounwind noinline {
; OPT: same_addrspace
- %p = getelementptr inbounds i8* bitcast ([4 x i8] * @p0 to i8*), i32 2
+ %p = getelementptr inbounds i8, i8* bitcast ([4 x i8] * @p0 to i8*), i32 2
ret i8* %p
; OPT: ret i8* getelementptr inbounds ([4 x i8]* @p0, i32 0, i32 2)
}
; CHECK: Memory reference address is misaligned
store i8 0, i8* %buf, align 2
; CHECK: Memory reference address is misaligned
- %gep = getelementptr {i8, i8}* %buf2, i32 0, i32 1
+ %gep = getelementptr {i8, i8}, {i8, i8}* %buf2, i32 0, i32 1
store i8 0, i8* %gep, align 2
; CHECK: Division by zero
%sd = sdiv i32 2, 0
%wider = bitcast i8* %buf to i16*
store i16 0, i16* %wider
; CHECK: Undefined behavior: Buffer overflow
- %inner = getelementptr {i8, i8}* %buf2, i32 0, i32 1
+ %inner = getelementptr {i8, i8}, {i8, i8}* %buf2, i32 0, i32 1
%wider2 = bitcast i8* %inner to i16*
store i16 0, i16* %wider2
; CHECK: Undefined behavior: Buffer overflow
- %before = getelementptr i8* %buf, i32 -1
+ %before = getelementptr i8, i8* %buf, i32 -1
%wider3 = bitcast i8* %before to i16*
store i16 0, i16* %wider3
; CHECK: Unusual: Returning alloca value
define i8* @return_local(i32 %n, i32 %m) {
%t = alloca i8, i32 %n
- %s = getelementptr i8* %t, i32 %m
+ %s = getelementptr i8, i8* %t, i32 %m
ret i8* %s
}
bb2: ; preds = %bb2, %bb1
%reg109 = phi %node_t* [ %reg110, %bb2 ], [ %reg107, %bb1 ] ; <%node_t*> [#uses=1]
- %reg212 = getelementptr %node_t* %reg109, i64 0, i32 1 ; <%node_t**> [#uses=1]
+ %reg212 = getelementptr %node_t, %node_t* %reg109, i64 0, i32 1 ; <%node_t**> [#uses=1]
%reg110 = load %node_t** %reg212 ; <%node_t*> [#uses=2]
%cond213 = icmp ne %node_t* %reg110, null ; <i1> [#uses=1]
br i1 %cond213, label %bb2, label %bb3
%reg114-idxcast = trunc i64 %reg114 to i32 ; <i32> [#uses=1]
%reg114-idxcast-offset = add i32 %reg114-idxcast, 1073741823 ; <i32> [#uses=1]
%reg114-idxcast-offset.upgrd.1 = zext i32 %reg114-idxcast-offset to i64 ; <i64> [#uses=1]
- %reg124 = getelementptr i32* %set, i64 %reg114-idxcast-offset.upgrd.1 ; <i32*> [#uses=1]
+ %reg124 = getelementptr i32, i32* %set, i64 %reg114-idxcast-offset.upgrd.1 ; <i32*> [#uses=1]
%reg125 = load i32* %reg124 ; <i32> [#uses=1]
%cond232 = icmp ne i32 %reg125, 0 ; <i1> [#uses=1]
br i1 %cond232, label %bb3, label %bb2
%reg130-idxcast = bitcast i32 %reg130 to i32 ; <i32> [#uses=1]
%reg130-idxcast-offset = add i32 %reg130-idxcast, 1073741823 ; <i32> [#uses=1]
%reg130-idxcast-offset.upgrd.2 = zext i32 %reg130-idxcast-offset to i64 ; <i64> [#uses=1]
- %reg118 = getelementptr i32* %set, i64 %reg130-idxcast-offset.upgrd.2 ; <i32*> [#uses=1]
+ %reg118 = getelementptr i32, i32* %set, i64 %reg130-idxcast-offset.upgrd.2 ; <i32*> [#uses=1]
%reg119 = load i32* %reg118 ; <i32> [#uses=1]
%cond233 = icmp eq i32 %reg119, 0 ; <i1> [#uses=1]
br i1 %cond233, label %bb2, label %bb3
br i1 false, label %no_exit.2, label %loopexit.2
no_exit.2: ; preds = %shortcirc_next.4
- %tmp.897 = getelementptr i32* %SubArrays.10, i64 0 ; <i32*> [#uses=1]
+ %tmp.897 = getelementptr i32, i32* %SubArrays.10, i64 0 ; <i32*> [#uses=1]
%tmp.899 = load i32* %tmp.897 ; <i32> [#uses=1]
store i32 %tmp.899, i32* null
ret i32 0
br i1 %tmp.41, label %no_exit.preheader, label %return
no_exit.preheader: ; preds = %entry
- %tmp.11 = getelementptr i32* %data.1, i64 1 ; <i32*> [#uses=1]
+ %tmp.11 = getelementptr i32, i32* %data.1, i64 1 ; <i32*> [#uses=1]
%tmp.22-idxcast = sext i32 %idx.1 to i64 ; <i64> [#uses=1]
- %tmp.28 = getelementptr i32* %data.1, i64 %tmp.22-idxcast ; <i32*> [#uses=1]
+ %tmp.28 = getelementptr i32, i32* %data.1, i64 %tmp.22-idxcast ; <i32*> [#uses=1]
br label %no_exit
no_exit: ; preds = %endif, %no_exit.preheader
br i1 %cond266, label %bb3, label %bb2
bb2: ; preds = %0
- %cast273 = getelementptr [17 x i8]* @.LC12, i64 0, i64 0 ; <i8*> [#uses=0]
+ %cast273 = getelementptr [17 x i8], [17 x i8]* @.LC12, i64 0, i64 0 ; <i8*> [#uses=0]
br label %bb3
bb3: ; preds = %bb2, %0
br i1 %cond267, label %bb5, label %bb4
bb4: ; preds = %bb3
- %reg111 = getelementptr [20 x %FILE]* @__iob, i64 0, i64 1, i32 3 ; <i8*> [#uses=1]
- %cast274 = getelementptr [34 x i8]* @.LC9, i64 0, i64 0 ; <i8*> [#uses=0]
+ %reg111 = getelementptr [20 x %FILE], [20 x %FILE]* @__iob, i64 0, i64 1, i32 3 ; <i8*> [#uses=1]
+ %cast274 = getelementptr [34 x i8], [34 x i8]* @.LC9, i64 0, i64 0 ; <i8*> [#uses=0]
%cast282 = bitcast i8* %reg111 to %FILE* ; <%FILE*> [#uses=0]
call void @exit( i32 1 )
br label %UnifiedExitNode
bb5: ; preds = %bb3
%reg107-idxcast1 = sext i32 %fd to i64 ; <i64> [#uses=2]
%reg107-idxcast2 = sext i32 %fd to i64 ; <i64> [#uses=1]
- %reg1311 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast2 ; <%spec_fd_t*> [#uses=1]
- %idx1 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
+ %reg1311 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast2 ; <%spec_fd_t*> [#uses=1]
+ %idx1 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
%reg1321 = load i32* %idx1 ; <i32> [#uses=3]
- %idx2 = getelementptr %spec_fd_t* %reg1311, i64 0, i32 1 ; <i32*> [#uses=1]
+ %idx2 = getelementptr %spec_fd_t, %spec_fd_t* %reg1311, i64 0, i32 1 ; <i32*> [#uses=1]
%reg1331 = load i32* %idx2 ; <i32> [#uses=1]
%cond270 = icmp slt i32 %reg1321, %reg1331 ; <i1> [#uses=1]
br i1 %cond270, label %bb9, label %bb6
br i1 %cond271, label %bb8, label %bb7
bb7: ; preds = %bb6
- %cast277 = getelementptr [4 x i8]* @.LC10, i64 0, i64 0 ; <i8*> [#uses=0]
+ %cast277 = getelementptr [4 x i8], [4 x i8]* @.LC10, i64 0, i64 0 ; <i8*> [#uses=0]
br label %bb8
bb8: ; preds = %bb7, %bb6
bb9: ; preds = %bb5
%reg107-idxcast3 = sext i32 %fd to i64 ; <i64> [#uses=1]
- %idx3 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast3, i32 3 ; <i8**> [#uses=1]
+ %idx3 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast3, i32 3 ; <i8**> [#uses=1]
%reg1601 = load i8** %idx3 ; <i8*> [#uses=1]
%reg132-idxcast1 = sext i32 %reg1321 to i64 ; <i64> [#uses=1]
- %idx4 = getelementptr i8* %reg1601, i64 %reg132-idxcast1 ; <i8*> [#uses=1]
+ %idx4 = getelementptr i8, i8* %reg1601, i64 %reg132-idxcast1 ; <i8*> [#uses=1]
%reg1621 = load i8* %idx4 ; <i8> [#uses=2]
%cast108 = zext i8 %reg1621 to i64 ; <i64> [#uses=0]
%reg157 = add i32 %reg1321, 1 ; <i32> [#uses=1]
- %idx5 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
+ %idx5 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
store i32 %reg157, i32* %idx5
%reg163 = load i32* @dbglvl ; <i32> [#uses=1]
%cond272 = icmp sle i32 %reg163, 4 ; <i1> [#uses=1]
br i1 %cond272, label %bb11, label %bb10
bb10: ; preds = %bb9
- %cast279 = getelementptr [4 x i8]* @.LC11, i64 0, i64 0 ; <i8*> [#uses=0]
+ %cast279 = getelementptr [4 x i8], [4 x i8]* @.LC11, i64 0, i64 0 ; <i8*> [#uses=0]
br label %bb11
bb11: ; preds = %bb10, %bb9
br i1 %cond266, label %bb3, label %bb2
bb2: ; preds = %0
- %cast273 = getelementptr [17 x i8]* @.LC12, i64 0, i64 0 ; <i8*> [#uses=0]
+ %cast273 = getelementptr [17 x i8], [17 x i8]* @.LC12, i64 0, i64 0 ; <i8*> [#uses=0]
br label %bb3
bb3: ; preds = %bb2, %0
br label %bb5
bb4: ; No predecessors!
- %reg111 = getelementptr [20 x %FILE]* @__iob, i64 0, i64 1, i32 3 ; <i8*> [#uses=1]
- %cast274 = getelementptr [34 x i8]* @.LC9, i64 0, i64 0 ; <i8*> [#uses=0]
+ %reg111 = getelementptr [20 x %FILE], [20 x %FILE]* @__iob, i64 0, i64 1, i32 3 ; <i8*> [#uses=1]
+ %cast274 = getelementptr [34 x i8], [34 x i8]* @.LC9, i64 0, i64 0 ; <i8*> [#uses=0]
%cast282 = bitcast i8* %reg111 to %FILE* ; <%FILE*> [#uses=0]
call void @exit( i32 1 )
br label %UnifiedExitNode
bb5: ; preds = %bb3
%reg107-idxcast1 = sext i32 %fd to i64 ; <i64> [#uses=2]
%reg107-idxcast2 = sext i32 %fd to i64 ; <i64> [#uses=1]
- %reg1311 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast2 ; <%spec_fd_t*> [#uses=1]
- %idx1 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
+ %reg1311 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast2 ; <%spec_fd_t*> [#uses=1]
+ %idx1 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
%reg1321 = load i32* %idx1 ; <i32> [#uses=3]
- %idx2 = getelementptr %spec_fd_t* %reg1311, i64 0, i32 1 ; <i32*> [#uses=1]
+ %idx2 = getelementptr %spec_fd_t, %spec_fd_t* %reg1311, i64 0, i32 1 ; <i32*> [#uses=1]
%reg1331 = load i32* %idx2 ; <i32> [#uses=1]
%cond270 = icmp slt i32 %reg1321, %reg1331 ; <i1> [#uses=1]
br i1 %cond270, label %bb9, label %bb6
br i1 %cond271, label %bb8, label %bb7
bb7: ; preds = %bb6
- %cast277 = getelementptr [4 x i8]* @.LC10, i64 0, i64 0 ; <i8*> [#uses=0]
+ %cast277 = getelementptr [4 x i8], [4 x i8]* @.LC10, i64 0, i64 0 ; <i8*> [#uses=0]
br label %bb8
bb8: ; preds = %bb7, %bb6
bb9: ; preds = %bb5
%reg107-idxcast3 = sext i32 %fd to i64 ; <i64> [#uses=1]
- %idx3 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast3, i32 3 ; <i8**> [#uses=1]
+ %idx3 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast3, i32 3 ; <i8**> [#uses=1]
%reg1601 = load i8** %idx3 ; <i8*> [#uses=1]
%reg132-idxcast1 = sext i32 %reg1321 to i64 ; <i64> [#uses=1]
- %idx4 = getelementptr i8* %reg1601, i64 %reg132-idxcast1 ; <i8*> [#uses=1]
+ %idx4 = getelementptr i8, i8* %reg1601, i64 %reg132-idxcast1 ; <i8*> [#uses=1]
%reg1621 = load i8* %idx4 ; <i8> [#uses=2]
%cast108 = zext i8 %reg1621 to i64 ; <i64> [#uses=0]
%reg157 = add i32 %reg1321, 1 ; <i32> [#uses=1]
- %idx5 = getelementptr [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
+ %idx5 = getelementptr [3 x %spec_fd_t], [3 x %spec_fd_t]* @spec_fd, i64 0, i64 %reg107-idxcast1, i32 2 ; <i32*> [#uses=1]
store i32 %reg157, i32* %idx5
%reg163 = load i32* @dbglvl ; <i32> [#uses=1]
%cond272 = icmp sle i32 %reg163, 4 ; <i1> [#uses=1]
br i1 %cond272, label %bb11, label %bb10
bb10: ; preds = %bb9
- %cast279 = getelementptr [4 x i8]* @.LC11, i64 0, i64 0 ; <i8*> [#uses=0]
+ %cast279 = getelementptr [4 x i8], [4 x i8]* @.LC11, i64 0, i64 0 ; <i8*> [#uses=0]
br label %bb11
bb11: ; preds = %bb10, %bb9
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds i32* %a, i64 2
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 2
%0 = load i32* %arrayidx, align 4
ret i32 %0
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds i32* %a, i64 -1
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
%0 = load i32* %arrayidx, align 4
ret i32 %0
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ -4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds i32* %a, i64 2
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 2
%0 = load i32* %arrayidx, align 4
ret i32 %0
%maskedptr = and i64 %offsetptr, 31
%maskcond = icmp eq i64 %maskedptr, 0
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds i32* %a, i64 -1
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 -1
%0 = load i32* %arrayidx, align 4
ret i32 %0
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 8
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ -4, %entry ], [ %indvars.iv.next, %for.body ]
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.06
%indvars.iv.next = add i64 %indvars.iv, 4
%0 = tail call i32 @llvm.bswap.i32(i32 %ret.0.copyload.i.i189) #0
%conv131 = zext i32 %0 to i64
- %add.ptr132 = getelementptr inbounds i8* undef, i64 %conv131
+ %add.ptr132 = getelementptr inbounds i8, i8* undef, i64 %conv131
%1 = bitcast i8* %add.ptr132 to %type1*
br i1 undef, label %if.end150, label %if.end.i173
br i1 undef, label %for.body137.if.end146_crit_edge, label %if.then140
for.body137.if.end146_crit_edge: ; preds = %for.body137
- %incdec.ptr = getelementptr inbounds %type1* %ShndxTable.0309, i64 1
+ %incdec.ptr = getelementptr inbounds %type1, %type1* %ShndxTable.0309, i64 1
br i1 undef, label %cond.false.i70, label %cond.end.i
if.then140: ; preds = %for.body137
ret i32 %A.0
F:
; Load the element at offset two from %A. This should not be promoted!
- %A.2 = getelementptr i32* %A, i32 2
+ %A.2 = getelementptr i32, i32* %A, i32 2
%R = load i32* %A.2
ret i32 %R
}
i32 25 } ; <%QuadTy*> [#uses=1]
define internal i32 @test(%QuadTy* %P) {
- %A = getelementptr %QuadTy* %P, i64 0, i32 3 ; <i32*> [#uses=1]
- %B = getelementptr %QuadTy* %P, i64 0, i32 2 ; <i32*> [#uses=1]
+ %A = getelementptr %QuadTy, %QuadTy* %P, i64 0, i32 3 ; <i32*> [#uses=1]
+ %B = getelementptr %QuadTy, %QuadTy* %P, i64 0, i32 2 ; <i32*> [#uses=1]
%a = load i32* %A ; <i32> [#uses=1]
%b = load i32* %B ; <i32> [#uses=1]
%V = add i32 %a, %b ; <i32> [#uses=1]
define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind {
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0
+ %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
%tmp1 = load i32* %tmp, align 4
%tmp2 = add i32 %tmp1, 1
store i32 %tmp2, i32* %tmp, align 4
define i32 @test(i32* %X) {
entry:
%S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 2, i64* %tmp4, align 4
call void @f( %struct.ss* byval %S, i32* byval %X, i32 zeroext 0)
ret i32 0
define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind {
; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1, i32* byval %X)
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0
+ %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0
%tmp1 = load i32* %tmp, align 4
%tmp2 = add i32 %tmp1, 1
store i32 %tmp2, i32* %tmp, align 4
; CHECK-LABEL: define i32 @test
entry:
%S = alloca %struct.ss
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0
+ %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0
store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1
+ %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i64 2, i64* %tmp4, align 4
call void @f( %struct.ss* byval %S, i32* byval %X)
; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}}, i32* byval %{{.*}})
define internal void @f(%struct.ss* byval %b) nounwind {
; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1)
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
store i32 %tmp2, i32* %tmp, align 4
; CHECK-LABEL: define i32 @main
entry:
%S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 2, i64* %tmp4, align 4
call void @f( %struct.ss* byval %S ) nounwind
; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}})
define internal i32 @"clay_assign(Chain, Chain)"(%0* %c, %0* %d) {
init:
- %0 = getelementptr %0* %d, i32 0, i32 0
+ %0 = getelementptr %0, %0* %d, i32 0, i32 0
%1 = load %0** %0
- %2 = getelementptr %0* %c, i32 0, i32 0
+ %2 = getelementptr %0, %0* %c, i32 0, i32 0
%3 = load %0** %2
%4 = call i32 @"clay_assign(Chain, Chain)"(%0* %3, %0* %1)
ret i32 0
define internal i8 @UseLongDoubleUnsafely(%union.u* byval align 16 %arg) {
entry:
%bitcast = bitcast %union.u* %arg to %struct.s*
- %gep = getelementptr inbounds %struct.s* %bitcast, i64 0, i32 2
+ %gep = getelementptr inbounds %struct.s, %struct.s* %bitcast, i64 0, i32 2
%result = load i8* %gep
ret i8 %result
}
; CHECK: internal x86_fp80 @UseLongDoubleSafely(x86_fp80 {{%.*}}) {
define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 %arg) {
- %gep = getelementptr inbounds %union.u* %arg, i64 0, i32 0
+ %gep = getelementptr inbounds %union.u, %union.u* %arg, i64 0, i32 0
%fp80 = load x86_fp80* %gep
ret x86_fp80 %fp80
}
%phi = phi %struct.Foo* [ null, %entry ], [ %gep, %loop ]
%0 = phi %struct.Foo* [ %a, %entry ], [ %0, %loop ]
store %struct.Foo* %phi, %struct.Foo** %a_ptr
- %gep = getelementptr %struct.Foo* %a, i64 0
+ %gep = getelementptr %struct.Foo, %struct.Foo* %a, i64 0
br label %loop
}
; Argpromote + scalarrepl should change this to passing the two integers by value.
define internal i32 @f(%struct.ss* inalloca %s) {
entry:
- %f0 = getelementptr %struct.ss* %s, i32 0, i32 0
- %f1 = getelementptr %struct.ss* %s, i32 0, i32 1
+ %f0 = getelementptr %struct.ss, %struct.ss* %s, i32 0, i32 0
+ %f1 = getelementptr %struct.ss, %struct.ss* %s, i32 0, i32 1
%a = load i32* %f0, align 4
%b = load i32* %f1, align 4
%r = add i32 %a, %b
define i32 @main() {
entry:
%S = alloca inalloca %struct.ss
- %f0 = getelementptr %struct.ss* %S, i32 0, i32 0
- %f1 = getelementptr %struct.ss* %S, i32 0, i32 1
+ %f0 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0
+ %f1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1
store i32 1, i32* %f0, align 4
store i32 2, i32* %f1, align 4
%r = call i32 @f(%struct.ss* inalloca %S)
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double* %in1, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double* %in2, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
%1 = load double* %arrayidx2, align 8
%mul = fmul double %0, %0
%mul3 = fmul double %0, %1
%add10 = fadd double %add9, %0
%mul11 = fmul double %mul8, %add10
%add12 = fadd double %add7, %mul11
- %arrayidx14 = getelementptr inbounds double* %out, i64 %indvars.iv
+ %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
store double %add12, double* %arrayidx14, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%12 = fsub double undef, %7
%13 = fmul double %3, %12
%14 = fmul double %3, undef
- %15 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 0
+ %15 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 0
store double %13, double* %15, align 8
- %16 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 1
+ %16 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 0, i32 1
%17 = fmul double undef, %8
%18 = fmul double %17, undef
%19 = fmul double undef, %18
%20 = fadd double undef, undef
%21 = fmul double %3, %19
%22 = fsub double -0.000000e+00, %21
- %23 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 0
+ %23 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 0
store double %22, double* %23, align 8
- %24 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 1
+ %24 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 1, i32 1
%25 = fmul double undef, 0x3FE42F601A8C6794
%26 = fmul double undef, 2.000000e+00
%27 = fsub double %26, %0
%28 = fmul double %6, undef
%29 = fsub double undef, %28
- %30 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 0
+ %30 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 0
store double undef, double* %30, align 8
- %31 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 1
+ %31 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 2, i32 1
%32 = fmul double undef, %17
%33 = fmul double undef, %17
%34 = fmul double undef, %32
%35 = fmul double undef, %33
%36 = fsub double undef, %35
%37 = fmul double %3, %34
- %38 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 0
+ %38 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 0
store double %37, double* %38, align 8
- %39 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 1
+ %39 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 3, i32 1
%40 = fmul double undef, %8
%41 = fmul double undef, %40
%42 = fmul double undef, %41
%43 = fsub double undef, %42
%44 = fmul double %3, %43
- %45 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 0
+ %45 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 0
store double %13, double* %45, align 8
- %46 = getelementptr inbounds [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 1
+ %46 = getelementptr inbounds [5 x { double, double }], [5 x { double, double }]* %c2ten, i64 0, i64 4, i32 1
%47 = fsub double -0.000000e+00, %14
store double %47, double* %16, align 8
store double undef, double* %24, align 8
br i1 undef, label %return, label %if.end10
if.end10: ; preds = %entry
- %incdec.ptr = getelementptr inbounds i8* %call, i64 undef
+ %incdec.ptr = getelementptr inbounds i8, i8* %call, i64 undef
%call17 = call i32 @ptou() nounwind
- %incdec.ptr26.1 = getelementptr inbounds i8* %incdec.ptr, i64 -2
+ %incdec.ptr26.1 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -2
store i8 undef, i8* %incdec.ptr26.1, align 1
%div27.1 = udiv i32 %call17, 100
%rem.2 = urem i32 %div27.1, 10
%add2230.2 = or i32 %rem.2, 48
%conv25.2 = trunc i32 %add2230.2 to i8
- %incdec.ptr26.2 = getelementptr inbounds i8* %incdec.ptr, i64 -3
+ %incdec.ptr26.2 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -3
store i8 %conv25.2, i8* %incdec.ptr26.2, align 1
- %incdec.ptr26.3 = getelementptr inbounds i8* %incdec.ptr, i64 -4
+ %incdec.ptr26.3 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -4
store i8 undef, i8* %incdec.ptr26.3, align 1
%div27.3 = udiv i32 %call17, 10000
%rem.4 = urem i32 %div27.3, 10
%add2230.4 = or i32 %rem.4, 48
%conv25.4 = trunc i32 %add2230.4 to i8
- %incdec.ptr26.4 = getelementptr inbounds i8* %incdec.ptr, i64 -5
+ %incdec.ptr26.4 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -5
store i8 %conv25.4, i8* %incdec.ptr26.4, align 1
%div27.4 = udiv i32 %call17, 100000
%rem.5 = urem i32 %div27.4, 10
%add2230.5 = or i32 %rem.5, 48
%conv25.5 = trunc i32 %add2230.5 to i8
- %incdec.ptr26.5 = getelementptr inbounds i8* %incdec.ptr, i64 -6
+ %incdec.ptr26.5 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -6
store i8 %conv25.5, i8* %incdec.ptr26.5, align 1
- %incdec.ptr26.6 = getelementptr inbounds i8* %incdec.ptr, i64 -7
+ %incdec.ptr26.6 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -7
store i8 0, i8* %incdec.ptr26.6, align 1
- %incdec.ptr26.7 = getelementptr inbounds i8* %incdec.ptr, i64 -8
+ %incdec.ptr26.7 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -8
store i8 undef, i8* %incdec.ptr26.7, align 1
%div27.7 = udiv i32 %call17, 100000000
%rem.8 = urem i32 %div27.7, 10
%add2230.8 = or i32 %rem.8, 48
%conv25.8 = trunc i32 %add2230.8 to i8
- %incdec.ptr26.8 = getelementptr inbounds i8* %incdec.ptr, i64 -9
+ %incdec.ptr26.8 = getelementptr inbounds i8, i8* %incdec.ptr, i64 -9
store i8 %conv25.8, i8* %incdec.ptr26.8, align 1
unreachable
define void @gsm_encode(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352* %s, i16* %source, i8* %c) nounwind uwtable {
entry:
%xmc = alloca [52 x i16], align 16
- %arraydecay5 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 0
+ %arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
call void @Gsm_Coder(%struct.gsm_state.2.8.14.15.16.17.19.22.23.25.26.28.29.31.32.33.35.36.37.38.40.41.42.44.45.47.48.50.52.53.54.56.57.58.59.60.61.62.63.66.73.83.84.89.90.91.92.93.94.95.96.99.100.101.102.103.104.106.107.114.116.121.122.129.130.135.136.137.138.139.140.141.142.143.144.147.148.149.158.159.160.161.164.165.166.167.168.169.172.179.181.182.183.188.195.200.201.202.203.204.205.208.209.210.212.213.214.215.222.223.225.226.230.231.232.233.234.235.236.237.238.239.240.241.242.243.244.352* %s, i16* %source, i16* undef, i16* null, i16* undef, i16* undef, i16* undef, i16* %arraydecay5) nounwind
- %incdec.ptr136 = getelementptr inbounds i8* %c, i64 10
- %incdec.ptr157 = getelementptr inbounds i8* %c, i64 11
+ %incdec.ptr136 = getelementptr inbounds i8, i8* %c, i64 10
+ %incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
store i8 0, i8* %incdec.ptr136, align 1
- %arrayidx162 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 11
+ %arrayidx162 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 11
%0 = load i16* %arrayidx162, align 2
%conv1631 = trunc i16 %0 to i8
%and164 = shl i8 %conv1631, 3
%shl165 = and i8 %and164, 56
- %incdec.ptr172 = getelementptr inbounds i8* %c, i64 12
+ %incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
store i8 %shl165, i8* %incdec.ptr157, align 1
%1 = load i16* inttoptr (i64 2 to i16*), align 2
%conv1742 = trunc i16 %1 to i8
%and175 = shl i8 %conv1742, 1
- %incdec.ptr183 = getelementptr inbounds i8* %c, i64 13
+ %incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
store i8 %and175, i8* %incdec.ptr172, align 1
- %incdec.ptr199 = getelementptr inbounds i8* %c, i64 14
+ %incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
store i8 0, i8* %incdec.ptr183, align 1
- %arrayidx214 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 15
- %incdec.ptr220 = getelementptr inbounds i8* %c, i64 15
+ %arrayidx214 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 15
+ %incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
store i8 0, i8* %incdec.ptr199, align 1
%2 = load i16* %arrayidx214, align 2
%conv2223 = trunc i16 %2 to i8
%and223 = shl i8 %conv2223, 6
- %incdec.ptr235 = getelementptr inbounds i8* %c, i64 16
+ %incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
store i8 %and223, i8* %incdec.ptr220, align 1
- %arrayidx240 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 19
+ %arrayidx240 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 19
%3 = load i16* %arrayidx240, align 2
%conv2414 = trunc i16 %3 to i8
%and242 = shl i8 %conv2414, 2
%shl243 = and i8 %and242, 28
- %incdec.ptr251 = getelementptr inbounds i8* %c, i64 17
+ %incdec.ptr251 = getelementptr inbounds i8, i8* %c, i64 17
store i8 %shl243, i8* %incdec.ptr235, align 1
- %incdec.ptr272 = getelementptr inbounds i8* %c, i64 18
+ %incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
store i8 0, i8* %incdec.ptr251, align 1
- %arrayidx282 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 25
+ %arrayidx282 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 25
%4 = load i16* %arrayidx282, align 2
%conv2835 = trunc i16 %4 to i8
%and284 = and i8 %conv2835, 7
- %incdec.ptr287 = getelementptr inbounds i8* %c, i64 19
+ %incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
store i8 %and284, i8* %incdec.ptr272, align 1
- %incdec.ptr298 = getelementptr inbounds i8* %c, i64 20
+ %incdec.ptr298 = getelementptr inbounds i8, i8* %c, i64 20
store i8 0, i8* %incdec.ptr287, align 1
- %incdec.ptr314 = getelementptr inbounds i8* %c, i64 21
+ %incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
store i8 0, i8* %incdec.ptr298, align 1
- %arrayidx319 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 26
+ %arrayidx319 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 26
%5 = load i16* %arrayidx319, align 4
%conv3206 = trunc i16 %5 to i8
%and321 = shl i8 %conv3206, 4
%shl322 = and i8 %and321, 112
- %incdec.ptr335 = getelementptr inbounds i8* %c, i64 22
+ %incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
store i8 %shl322, i8* %incdec.ptr314, align 1
- %arrayidx340 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 29
+ %arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
%6 = load i16* %arrayidx340, align 2
%conv3417 = trunc i16 %6 to i8
%and342 = shl i8 %conv3417, 3
%shl343 = and i8 %and342, 56
- %incdec.ptr350 = getelementptr inbounds i8* %c, i64 23
+ %incdec.ptr350 = getelementptr inbounds i8, i8* %c, i64 23
store i8 %shl343, i8* %incdec.ptr335, align 1
- %incdec.ptr366 = getelementptr inbounds i8* %c, i64 24
+ %incdec.ptr366 = getelementptr inbounds i8, i8* %c, i64 24
store i8 0, i8* %incdec.ptr350, align 1
- %arrayidx381 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 36
- %incdec.ptr387 = getelementptr inbounds i8* %c, i64 25
+ %arrayidx381 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 36
+ %incdec.ptr387 = getelementptr inbounds i8, i8* %c, i64 25
store i8 0, i8* %incdec.ptr366, align 1
%7 = load i16* %arrayidx381, align 8
%conv3898 = trunc i16 %7 to i8
define void @gsm_encode(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i8* %c) nounwind uwtable {
entry:
%LARc28 = alloca [2 x i64], align 16
- %LARc28.sub = getelementptr inbounds [2 x i64]* %LARc28, i64 0, i64 0
+ %LARc28.sub = getelementptr inbounds [2 x i64], [2 x i64]* %LARc28, i64 0, i64 0
%tmpcast = bitcast [2 x i64]* %LARc28 to [8 x i16]*
%Nc = alloca [4 x i16], align 2
%Mc = alloca [4 x i16], align 2
%bc = alloca [4 x i16], align 2
%xmc = alloca [52 x i16], align 16
%arraydecay = bitcast [2 x i64]* %LARc28 to i16*
- %arraydecay1 = getelementptr inbounds [4 x i16]* %Nc, i64 0, i64 0
- %arraydecay2 = getelementptr inbounds [4 x i16]* %bc, i64 0, i64 0
- %arraydecay3 = getelementptr inbounds [4 x i16]* %Mc, i64 0, i64 0
- %arraydecay5 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 0
+ %arraydecay1 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 0
+ %arraydecay2 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 0
+ %arraydecay3 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 0
+ %arraydecay5 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 0
call void @Gsm_Coder(%struct.gsm_state.2.8.39.44.45.55.56.57.58.59.62.63.64.65.74.75.76.77.80.87.92.93.94.95.96.97.110.111.112.113.114.128.130.135.136.137.138.139.140.141.142.143.144.145.148.149.150.151.152.169.170.177.178.179.184.185.186.187.188.201.208.209.219.220.221.223.224.225.230.231.232.233.235.236.237.238.245.246.248.249.272.274.279.280.281.282.283.286.293.298.299.314.315.316.317.318.319.320.321.322.323.324.325.326.327.328.329.330.331.332.333.334.335.336.337.338.339.340.341.342.343.344.345.346.347.348.349.350.351.352.353.565* %s, i16* %source, i16* %arraydecay, i16* %arraydecay1, i16* %arraydecay2, i16* %arraydecay3, i16* undef, i16* %arraydecay5) nounwind
%0 = load i64* %LARc28.sub, align 16
%1 = trunc i64 %0 to i32
%and = and i32 %conv1, 15
%or = or i32 %and, 208
%conv6 = trunc i32 %or to i8
- %incdec.ptr = getelementptr inbounds i8* %c, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %c, i64 1
store i8 %conv6, i8* %c, align 1
%conv84 = trunc i64 %0 to i8
%and9 = shl i8 %conv84, 6
- %incdec.ptr15 = getelementptr inbounds i8* %c, i64 2
+ %incdec.ptr15 = getelementptr inbounds i8, i8* %c, i64 2
store i8 %and9, i8* %incdec.ptr, align 1
%2 = lshr i64 %0, 50
%shr226.tr = trunc i64 %2 to i8
%conv25 = and i8 %shr226.tr, 7
- %incdec.ptr26 = getelementptr inbounds i8* %c, i64 3
+ %incdec.ptr26 = getelementptr inbounds i8, i8* %c, i64 3
store i8 %conv25, i8* %incdec.ptr15, align 1
- %incdec.ptr42 = getelementptr inbounds i8* %c, i64 4
+ %incdec.ptr42 = getelementptr inbounds i8, i8* %c, i64 4
store i8 0, i8* %incdec.ptr26, align 1
- %arrayidx52 = getelementptr inbounds [8 x i16]* %tmpcast, i64 0, i64 7
+ %arrayidx52 = getelementptr inbounds [8 x i16], [8 x i16]* %tmpcast, i64 0, i64 7
%3 = load i16* %arrayidx52, align 2
%conv537 = trunc i16 %3 to i8
%and54 = and i8 %conv537, 7
- %incdec.ptr57 = getelementptr inbounds i8* %c, i64 5
+ %incdec.ptr57 = getelementptr inbounds i8, i8* %c, i64 5
store i8 %and54, i8* %incdec.ptr42, align 1
- %incdec.ptr68 = getelementptr inbounds i8* %c, i64 6
+ %incdec.ptr68 = getelementptr inbounds i8, i8* %c, i64 6
store i8 0, i8* %incdec.ptr57, align 1
%4 = load i16* %arraydecay3, align 2
%conv748 = trunc i16 %4 to i8
%and75 = shl i8 %conv748, 5
%shl76 = and i8 %and75, 96
- %incdec.ptr84 = getelementptr inbounds i8* %c, i64 7
+ %incdec.ptr84 = getelementptr inbounds i8, i8* %c, i64 7
store i8 %shl76, i8* %incdec.ptr68, align 1
- %arrayidx94 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 1
+ %arrayidx94 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 1
%5 = load i16* %arrayidx94, align 2
%conv959 = trunc i16 %5 to i8
%and96 = shl i8 %conv959, 1
%shl97 = and i8 %and96, 14
%or103 = or i8 %shl97, 1
- %incdec.ptr105 = getelementptr inbounds i8* %c, i64 8
+ %incdec.ptr105 = getelementptr inbounds i8, i8* %c, i64 8
store i8 %or103, i8* %incdec.ptr84, align 1
- %arrayidx115 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 4
+ %arrayidx115 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 4
%6 = bitcast i16* %arrayidx115 to i32*
%7 = load i32* %6, align 8
%conv11610 = trunc i32 %7 to i8
%and117 = and i8 %conv11610, 7
- %incdec.ptr120 = getelementptr inbounds i8* %c, i64 9
+ %incdec.ptr120 = getelementptr inbounds i8, i8* %c, i64 9
store i8 %and117, i8* %incdec.ptr105, align 1
%8 = lshr i32 %7, 16
%and12330 = shl nuw nsw i32 %8, 5
%and123 = trunc i32 %and12330 to i8
- %incdec.ptr136 = getelementptr inbounds i8* %c, i64 10
+ %incdec.ptr136 = getelementptr inbounds i8, i8* %c, i64 10
store i8 %and123, i8* %incdec.ptr120, align 1
- %incdec.ptr157 = getelementptr inbounds i8* %c, i64 11
+ %incdec.ptr157 = getelementptr inbounds i8, i8* %c, i64 11
store i8 0, i8* %incdec.ptr136, align 1
- %incdec.ptr172 = getelementptr inbounds i8* %c, i64 12
+ %incdec.ptr172 = getelementptr inbounds i8, i8* %c, i64 12
store i8 0, i8* %incdec.ptr157, align 1
- %arrayidx173 = getelementptr inbounds [4 x i16]* %Nc, i64 0, i64 1
+ %arrayidx173 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 1
%9 = load i16* %arrayidx173, align 2
%conv17412 = zext i16 %9 to i32
%and175 = shl nuw nsw i32 %conv17412, 1
- %arrayidx177 = getelementptr inbounds [4 x i16]* %bc, i64 0, i64 1
+ %arrayidx177 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 1
%10 = load i16* %arrayidx177, align 2
%conv17826 = zext i16 %10 to i32
%shr17913 = lshr i32 %conv17826, 1
%and180 = and i32 %shr17913, 1
%or181 = or i32 %and175, %and180
%conv182 = trunc i32 %or181 to i8
- %incdec.ptr183 = getelementptr inbounds i8* %c, i64 13
+ %incdec.ptr183 = getelementptr inbounds i8, i8* %c, i64 13
store i8 %conv182, i8* %incdec.ptr172, align 1
- %arrayidx188 = getelementptr inbounds [4 x i16]* %Mc, i64 0, i64 1
+ %arrayidx188 = getelementptr inbounds [4 x i16], [4 x i16]* %Mc, i64 0, i64 1
%11 = load i16* %arrayidx188, align 2
%conv18914 = trunc i16 %11 to i8
%and190 = shl i8 %conv18914, 5
%shl191 = and i8 %and190, 96
- %incdec.ptr199 = getelementptr inbounds i8* %c, i64 14
+ %incdec.ptr199 = getelementptr inbounds i8, i8* %c, i64 14
store i8 %shl191, i8* %incdec.ptr183, align 1
- %arrayidx209 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 14
+ %arrayidx209 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 14
%12 = load i16* %arrayidx209, align 4
%conv21015 = trunc i16 %12 to i8
%and211 = shl i8 %conv21015, 1
%shl212 = and i8 %and211, 14
%or218 = or i8 %shl212, 1
- %incdec.ptr220 = getelementptr inbounds i8* %c, i64 15
+ %incdec.ptr220 = getelementptr inbounds i8, i8* %c, i64 15
store i8 %or218, i8* %incdec.ptr199, align 1
- %arrayidx225 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 16
+ %arrayidx225 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 16
%13 = bitcast i16* %arrayidx225 to i64*
%14 = load i64* %13, align 16
%conv22616 = trunc i64 %14 to i8
%and227 = shl i8 %conv22616, 3
%shl228 = and i8 %and227, 56
- %incdec.ptr235 = getelementptr inbounds i8* %c, i64 16
+ %incdec.ptr235 = getelementptr inbounds i8, i8* %c, i64 16
store i8 %shl228, i8* %incdec.ptr220, align 1
%15 = lshr i64 %14, 32
%and23832 = shl nuw nsw i64 %15, 5
%and238 = trunc i64 %and23832 to i8
- %incdec.ptr251 = getelementptr inbounds i8* %c, i64 17
+ %incdec.ptr251 = getelementptr inbounds i8, i8* %c, i64 17
store i8 %and238, i8* %incdec.ptr235, align 1
- %arrayidx266 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 23
- %incdec.ptr272 = getelementptr inbounds i8* %c, i64 18
+ %arrayidx266 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 23
+ %incdec.ptr272 = getelementptr inbounds i8, i8* %c, i64 18
store i8 0, i8* %incdec.ptr251, align 1
%16 = load i16* %arrayidx266, align 2
%conv27418 = trunc i16 %16 to i8
%and275 = shl i8 %conv27418, 6
- %incdec.ptr287 = getelementptr inbounds i8* %c, i64 19
+ %incdec.ptr287 = getelementptr inbounds i8, i8* %c, i64 19
store i8 %and275, i8* %incdec.ptr272, align 1
- %arrayidx288 = getelementptr inbounds [4 x i16]* %Nc, i64 0, i64 2
+ %arrayidx288 = getelementptr inbounds [4 x i16], [4 x i16]* %Nc, i64 0, i64 2
%17 = load i16* %arrayidx288, align 2
%conv28919 = zext i16 %17 to i32
%and290 = shl nuw nsw i32 %conv28919, 1
- %arrayidx292 = getelementptr inbounds [4 x i16]* %bc, i64 0, i64 2
+ %arrayidx292 = getelementptr inbounds [4 x i16], [4 x i16]* %bc, i64 0, i64 2
%18 = load i16* %arrayidx292, align 2
%conv29327 = zext i16 %18 to i32
%shr29420 = lshr i32 %conv29327, 1
%and295 = and i32 %shr29420, 1
%or296 = or i32 %and290, %and295
%conv297 = trunc i32 %or296 to i8
- %incdec.ptr298 = getelementptr inbounds i8* %c, i64 20
+ %incdec.ptr298 = getelementptr inbounds i8, i8* %c, i64 20
store i8 %conv297, i8* %incdec.ptr287, align 1
%conv30021 = trunc i16 %18 to i8
%and301 = shl i8 %conv30021, 7
- %incdec.ptr314 = getelementptr inbounds i8* %c, i64 21
+ %incdec.ptr314 = getelementptr inbounds i8, i8* %c, i64 21
store i8 %and301, i8* %incdec.ptr298, align 1
- %incdec.ptr335 = getelementptr inbounds i8* %c, i64 22
+ %incdec.ptr335 = getelementptr inbounds i8, i8* %c, i64 22
store i8 0, i8* %incdec.ptr314, align 1
- %arrayidx340 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 29
+ %arrayidx340 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 29
%19 = load i16* %arrayidx340, align 2
%conv34122 = trunc i16 %19 to i8
%and342 = shl i8 %conv34122, 3
%shl343 = and i8 %and342, 56
- %incdec.ptr350 = getelementptr inbounds i8* %c, i64 23
+ %incdec.ptr350 = getelementptr inbounds i8, i8* %c, i64 23
store i8 %shl343, i8* %incdec.ptr335, align 1
- %arrayidx355 = getelementptr inbounds [52 x i16]* %xmc, i64 0, i64 32
+ %arrayidx355 = getelementptr inbounds [52 x i16], [52 x i16]* %xmc, i64 0, i64 32
%20 = bitcast i16* %arrayidx355 to i32*
%21 = load i32* %20, align 16
%conv35623 = shl i32 %21, 2
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
; CHECK-LABEL: @test1(
; CHECK: ret
arrayctor.cont: ; preds = %newFuncRoot
- %ref.tmp.sroa.0.0.idx = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
+ %ref.tmp.sroa.0.0.idx = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
store double 1.000000e+01, double* %ref.tmp.sroa.0.0.idx, align 16
- %ref.tmp.sroa.2.0.idx1 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
+ %ref.tmp.sroa.2.0.idx1 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
store double 2.000000e+01, double* %ref.tmp.sroa.2.0.idx1, align 8
- %ref.tmp.sroa.3.0.idx2 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
+ %ref.tmp.sroa.3.0.idx2 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
store double 3.000000e+01, double* %ref.tmp.sroa.3.0.idx2, align 16
- %ref.tmp.sroa.4.0.idx3 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
+ %ref.tmp.sroa.4.0.idx3 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
store double 4.000000e+01, double* %ref.tmp.sroa.4.0.idx3, align 8
- %ref.tmp.sroa.5.0.idx4 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
+ %ref.tmp.sroa.5.0.idx4 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
store double 5.000000e+01, double* %ref.tmp.sroa.5.0.idx4, align 16
- %ref.tmp.sroa.6.0.idx5 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
+ %ref.tmp.sroa.6.0.idx5 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
store double 6.000000e+01, double* %ref.tmp.sroa.6.0.idx5, align 8
- %ref.tmp.sroa.7.0.idx6 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
+ %ref.tmp.sroa.7.0.idx6 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
store double 7.000000e+01, double* %ref.tmp.sroa.7.0.idx6, align 16
- %ref.tmp.sroa.8.0.idx7 = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
+ %ref.tmp.sroa.8.0.idx7 = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
store double 8.000000e+01, double* %ref.tmp.sroa.8.0.idx7, align 8
- %add.ptr = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1
+ %add.ptr = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1
%v0 = bitcast %class.QBezier.15* %agg.tmp.i to i8*
call void @llvm.lifetime.start(i64 64, i8* %v0)
%v1 = bitcast %class.QBezier.15* %agg.tmp55.i to i8*
%v3 = bitcast [10 x %class.QBezier.15]* %beziers to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v0, i8* %v3, i64 64, i32 8, i1 false)
call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp.i)
- %x2.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
+ %x2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2
%v4 = load double* %x2.i, align 16
- %x3.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
+ %x3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4
%v5 = load double* %x3.i, align 16
%add.i = fadd double %v4, %v5
%mul.i = fmul double 5.000000e-01, %add.i
- %x1.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
+ %x1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 0
%v6 = load double* %x1.i, align 16
%add3.i = fadd double %v4, %v6
%mul4.i = fmul double 5.000000e-01, %add3.i
- %x25.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 2
+ %x25.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 2
store double %mul4.i, double* %x25.i, align 16
%v7 = load double* %x3.i, align 16
- %x4.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
+ %x4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 6
%v8 = load double* %x4.i, align 16
%add7.i = fadd double %v7, %v8
%mul8.i = fmul double 5.000000e-01, %add7.i
store double %mul8.i, double* %x3.i, align 16
%v9 = load double* %x1.i, align 16
- %x111.i = getelementptr inbounds %class.QBezier.15* %add.ptr, i64 0, i32 0
+ %x111.i = getelementptr inbounds %class.QBezier.15, %class.QBezier.15* %add.ptr, i64 0, i32 0
store double %v9, double* %x111.i, align 16
%v10 = load double* %x25.i, align 16
%add15.i = fadd double %mul.i, %v10
%mul16.i = fmul double 5.000000e-01, %add15.i
- %x317.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 4
+ %x317.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 4
store double %mul16.i, double* %x317.i, align 16
%v11 = load double* %x3.i, align 16
%add19.i = fadd double %mul.i, %v11
%add24.i = fadd double %v12, %mul20.i
%mul25.i = fmul double 5.000000e-01, %add24.i
store double %mul25.i, double* %x1.i, align 16
- %x427.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 6
+ %x427.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 6
store double %mul25.i, double* %x427.i, align 16
- %y2.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
+ %y2.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 3
%v13 = load double* %y2.i, align 8
- %y3.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
+ %y3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 5
%v14 = load double* %y3.i, align 8
%add28.i = fadd double %v13, %v14
%div.i = fmul double 5.000000e-01, %add28.i
- %y1.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
+ %y1.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 1
%v15 = load double* %y1.i, align 8
%add30.i = fadd double %v13, %v15
%mul31.i = fmul double 5.000000e-01, %add30.i
- %y232.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 3
+ %y232.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 3
store double %mul31.i, double* %y232.i, align 8
%v16 = load double* %y3.i, align 8
- %y4.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
+ %y4.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 7
%v17 = load double* %y4.i, align 8
%add34.i = fadd double %v16, %v17
%mul35.i = fmul double 5.000000e-01, %add34.i
store double %mul35.i, double* %y3.i, align 8
%v18 = load double* %y1.i, align 8
- %y138.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 1
+ %y138.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 1
store double %v18, double* %y138.i, align 8
%v19 = load double* %y232.i, align 8
%add42.i = fadd double %div.i, %v19
%mul43.i = fmul double 5.000000e-01, %add42.i
- %y344.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 5
+ %y344.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 5
store double %mul43.i, double* %y344.i, align 8
%v20 = load double* %y3.i, align 8
%add46.i = fadd double %div.i, %v20
%add51.i = fadd double %v21, %mul47.i
%mul52.i = fmul double 5.000000e-01, %add51.i
store double %mul52.i, double* %y1.i, align 8
- %y454.i = getelementptr inbounds [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 7
+ %y454.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 1, i32 7
store double %mul52.i, double* %y454.i, align 8
%v22 = bitcast %class.QBezier.15* %add.ptr to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v1, i8* %v22, i64 64, i32 8, i1 false)
ret i1 false
"<bb 34>": ; preds = %newFuncRoot
- %tmp128 = getelementptr inbounds %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp129 = getelementptr inbounds %struct.__st_parameter_common* %tmp128, i32 0, i32 2
+ %tmp128 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
+ %tmp129 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp128, i32 0, i32 2
store i8* getelementptr inbounds ([11 x i8]* @.cst4, i64 0, i64 0), i8** %tmp129, align 8
- %tmp130 = getelementptr inbounds %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp131 = getelementptr inbounds %struct.__st_parameter_common* %tmp130, i32 0, i32 3
+ %tmp130 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
+ %tmp131 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp130, i32 0, i32 3
store i32 31495, i32* %tmp131, align 4
- %tmp132 = getelementptr inbounds %struct.__st_parameter_dt* %memtmp3, i32 0, i32 5
+ %tmp132 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 5
store i8* getelementptr inbounds ([214 x i8]* @.cst823, i64 0, i64 0), i8** %tmp132, align 8
- %tmp133 = getelementptr inbounds %struct.__st_parameter_dt* %memtmp3, i32 0, i32 6
+ %tmp133 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 6
store i32 214, i32* %tmp133, align 4
- %tmp134 = getelementptr inbounds %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp135 = getelementptr inbounds %struct.__st_parameter_common* %tmp134, i32 0, i32 0
+ %tmp134 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
+ %tmp135 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp134, i32 0, i32 0
store i32 4096, i32* %tmp135, align 4
%iounit.8748_288 = load i32* @__main1_MOD_iounit, align 4
- %tmp136 = getelementptr inbounds %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
- %tmp137 = getelementptr inbounds %struct.__st_parameter_common* %tmp136, i32 0, i32 1
+ %tmp136 = getelementptr inbounds %struct.__st_parameter_dt, %struct.__st_parameter_dt* %memtmp3, i32 0, i32 0
+ %tmp137 = getelementptr inbounds %struct.__st_parameter_common, %struct.__st_parameter_common* %tmp136, i32 0, i32 1
store i32 %iounit.8748_288, i32* %tmp137, align 4
call void @_gfortran_st_write(%struct.__st_parameter_dt* %memtmp3) nounwind
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* @j.4580, i32 4) nounwind
%D.75818_308 = add nsw i64 %D.75816_306, %D.75817_307
%tmp138 = bitcast i8* %D.75807_289 to [0 x float]*
%tmp139 = bitcast [0 x float]* %tmp138 to float*
- %D.75819_309 = getelementptr inbounds float* %tmp139, i64 %D.75818_308
+ %D.75819_309 = getelementptr inbounds float, float* %tmp139, i64 %D.75818_308
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* %D.75819_309, i32 4) nounwind
; CHECK: @_gfortran_transfer_real_write
%D.75820_310 = load i8** getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
%D.75831_329 = add nsw i64 %D.75829_327, %D.75830_328
%tmp140 = bitcast i8* %D.75820_310 to [0 x [1 x i8]]*
%tmp141 = bitcast [0 x [1 x i8]]* %tmp140 to [1 x i8]*
- %D.75832_330 = getelementptr inbounds [1 x i8]* %tmp141, i64 %D.75831_329
+ %D.75832_330 = getelementptr inbounds [1 x i8], [1 x i8]* %tmp141, i64 %D.75831_329
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [1 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [1 x i8]* %D.75832_330, i32 1) nounwind
; CHECK: @_gfortran_transfer_character_write
%D.75833_331 = load i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
%D.75844_350 = add nsw i64 %D.75842_348, %D.75843_349
%tmp142 = bitcast i8* %D.75833_331 to [0 x i32]*
%tmp143 = bitcast [0 x i32]* %tmp142 to i32*
- %D.75845_351 = getelementptr inbounds i32* %tmp143, i64 %D.75844_350
+ %D.75845_351 = getelementptr inbounds i32, i32* %tmp143, i64 %D.75844_350
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* %D.75845_351, i32 4) nounwind
; CHECK: @_gfortran_transfer_integer_write
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* @xr1.4592, i32 4) nounwind
%D.75848_371 = add nsw i64 %D.75847_369, %D.75817_370
%tmp144 = bitcast i8* %D.75807_352 to [0 x float]*
%tmp145 = bitcast [0 x float]* %tmp144 to float*
- %D.75849_372 = getelementptr inbounds float* %tmp145, i64 %D.75848_371
+ %D.75849_372 = getelementptr inbounds float, float* %tmp145, i64 %D.75848_371
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* %D.75849_372, i32 4) nounwind
; CHECK: @_gfortran_transfer_real_write
%D.75820_373 = load i8** getelementptr inbounds (%struct.array4_unknown* @__main1_MOD_mclmsg, i64 0, i32 0), align 8
%D.75852_392 = add nsw i64 %D.75851_390, %D.75830_391
%tmp146 = bitcast i8* %D.75820_373 to [0 x [1 x i8]]*
%tmp147 = bitcast [0 x [1 x i8]]* %tmp146 to [1 x i8]*
- %D.75853_393 = getelementptr inbounds [1 x i8]* %tmp147, i64 %D.75852_392
+ %D.75853_393 = getelementptr inbounds [1 x i8], [1 x i8]* %tmp147, i64 %D.75852_392
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_character_write to void (%struct.__st_parameter_dt*, [1 x i8]*, i32)*)(%struct.__st_parameter_dt* %memtmp3, [1 x i8]* %D.75853_393, i32 1) nounwind
; CHECK: @_gfortran_transfer_character_write
%D.75833_394 = load i8** getelementptr inbounds (%"struct.array4_integer(kind=4).73"* @__main1_MOD_mxdate, i64 0, i32 0), align 8
%D.75856_413 = add nsw i64 %D.75855_411, %D.75843_412
%tmp148 = bitcast i8* %D.75833_394 to [0 x i32]*
%tmp149 = bitcast [0 x i32]* %tmp148 to i32*
- %D.75857_414 = getelementptr inbounds i32* %tmp149, i64 %D.75856_413
+ %D.75857_414 = getelementptr inbounds i32, i32* %tmp149, i64 %D.75856_413
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_integer_write to void (%struct.__st_parameter_dt*, i32*, i32)*)(%struct.__st_parameter_dt* %memtmp3, i32* %D.75857_414, i32 4) nounwind
; CHECK: @_gfortran_transfer_integer_write
call void bitcast (void (%struct.__st_parameter_dt*, i8*, i32)* @_gfortran_transfer_real_write to void (%struct.__st_parameter_dt*, float*, i32)*)(%struct.__st_parameter_dt* %memtmp3, float* @xr2.4593, i32 4) nounwind
%mul = fmul double %i0, %i1
%i2 = load double* %c, align 8
%add = fadd double %mul, %i2
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
- %arrayidx6 = getelementptr inbounds double* %c, i64 1
+ %arrayidx6 = getelementptr inbounds double, double* %c, i64 1
%i5 = load double* %arrayidx6, align 8
%add7 = fadd double %mul5, %i5
%mul9 = fmul double %add, %i1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double* %in1, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double* %in2, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
%1 = load double* %arrayidx2, align 8
%mul = fmul double %0, %0
%mul3 = fmul double %0, %1
%add10 = fadd double %add9, %0
%mul11 = fmul double %mul8, %add10
%add12 = fadd double %add7, %mul11
- %arrayidx14 = getelementptr inbounds double* %out, i64 %indvars.iv
+ %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
store double %add12, double* %arrayidx14, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 10
br i1 %exitcond, label %for.end, label %for.body
; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
-; CHECK: %arrayidx = getelementptr inbounds double* %in1, i64 %indvars.iv
+; CHECK: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
; CHECK: %0 = load double* %arrayidx, align 8
-; CHECK: %arrayidx2 = getelementptr inbounds double* %in2, i64 %indvars.iv
+; CHECK: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
; CHECK: %1 = load double* %arrayidx2, align 8
; CHECK: %mul = fmul double %0, %0
; CHECK: %mul3 = fmul double %0, %1
; CHECK: %mul6.v.r2 = extractelement <2 x double> %mul6, i32 1
; CHECK: %add7 = fadd double %add, %mul6.v.r1
; CHECK: %add12 = fadd double %add7, %mul6.v.r2
-; CHECK: %arrayidx14 = getelementptr inbounds double* %out, i64 %indvars.iv
+; CHECK: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
; CHECK: store double %add12, double* %arrayidx14, align 8
; CHECK: %indvars.iv.next = add i64 %indvars.iv, 1
; CHECK: %lftr.wideiv = trunc i64 %indvars.iv.next to i32
; CHECK: %exitcond = icmp eq i32 %lftr.wideiv, 10
; CHECK: br i1 %exitcond, label %for.end, label %for.body
; CHECK-UNRL: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next.1, %for.body ]
-; CHECK-UNRL: %arrayidx = getelementptr inbounds double* %in1, i64 %indvars.iv
+; CHECK-UNRL: %arrayidx = getelementptr inbounds double, double* %in1, i64 %indvars.iv
; CHECK-UNRL: %0 = bitcast double* %arrayidx to <2 x double>*
-; CHECK-UNRL: %arrayidx2 = getelementptr inbounds double* %in2, i64 %indvars.iv
+; CHECK-UNRL: %arrayidx2 = getelementptr inbounds double, double* %in2, i64 %indvars.iv
; CHECK-UNRL: %1 = bitcast double* %arrayidx2 to <2 x double>*
-; CHECK-UNRL: %arrayidx14 = getelementptr inbounds double* %out, i64 %indvars.iv
+; CHECK-UNRL: %arrayidx14 = getelementptr inbounds double, double* %out, i64 %indvars.iv
; CHECK-UNRL: %2 = load <2 x double>* %0, align 8
; CHECK-UNRL: %3 = load <2 x double>* %1, align 8
; CHECK-UNRL: %mul = fmul <2 x double> %2, %2
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1, !fpmath !2
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4, !fpmath !3
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
; CHECK-LABEL: @test1(
%i0 = load i64* %a, align 8, !range !0
%i1 = load i64* %b, align 8
%mul = mul i64 %i0, %i1
- %arrayidx3 = getelementptr inbounds i64* %a, i64 1
+ %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
%i3 = load i64* %arrayidx3, align 8, !range !1
- %arrayidx4 = getelementptr inbounds i64* %b, i64 1
+ %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
%i4 = load i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
store i64 %mul, i64* %c, align 8
- %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+ %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
store i64 %mul5, i64* %arrayidx5, align 8
ret void
; CHECK-LABEL: @test2(
define i64 @test2(i64 %a) nounwind uwtable readonly {
entry:
%a1 = inttoptr i64 %a to i64*
- %a2 = getelementptr i64* %a1, i64 1
- %a3 = getelementptr i64* %a1, i64 2
+ %a2 = getelementptr i64, i64* %a1, i64 1
+ %a3 = getelementptr i64, i64* %a1, i64 2
%v2 = load i64* %a2, align 8
%v3 = load i64* %a3, align 8
%v2a = add i64 %v2, 5
%r = add i64 %v2, %v3
ret i64 %r
; CHECK-LABEL: @test2(
-; CHECK-NOT: getelementptr <2 x i64*>
+; CHECK-NOT: getelementptr i64, <2 x i64*>
}
%i0 = load i64* %a, align 8
%i1 = load i64* %b, align 8
%mul = mul i64 %i0, %i1
- %arrayidx3 = getelementptr inbounds i64* %a, i64 1
+ %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
%i3 = load i64* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds i64* %b, i64 1
+ %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
%i4 = load i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
%ptr = inttoptr i64 %mul to double*
%ptr5 = inttoptr i64 %mul5 to double*
- %aptr = getelementptr inbounds double* %ptr, i64 2
- %aptr5 = getelementptr inbounds double* %ptr5, i64 3
+ %aptr = getelementptr inbounds double, double* %ptr, i64 2
+ %aptr5 = getelementptr inbounds double, double* %ptr5, i64 3
%av = load double* %aptr, align 16
%av5 = load double* %aptr5, align 16
%r = fmul double %av, %av5
store i64 %mul, i64* %c, align 8
- %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+ %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
store i64 %mul5, i64* %arrayidx5, align 8
ret double %r
; CHECK-LABEL: @test1(
; CHECK: %i1 = load <2 x i64>* %i1.v.i0, align 8
; CHECK: %mul = mul <2 x i64> %i0, %i1
; CHECK: %ptr = inttoptr <2 x i64> %mul to <2 x double*>
-; CHECK: %aptr = getelementptr inbounds <2 x double*> %ptr, <2 x i64> <i64 2, i64 3>
+; CHECK: %aptr = getelementptr inbounds double, <2 x double*> %ptr, <2 x i64> <i64 2, i64 3>
; CHECK: %aptr.v.r1 = extractelement <2 x double*> %aptr, i32 0
; CHECK: %aptr.v.r2 = extractelement <2 x double*> %aptr, i32 1
; CHECK: %av = load double* %aptr.v.r1, align 16
entry:
%i0 = load i64** %a, align 8
%i1 = load i64** %b, align 8
- %arrayidx3 = getelementptr inbounds i64** %a, i64 1
+ %arrayidx3 = getelementptr inbounds i64*, i64** %a, i64 1
%i3 = load i64** %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds i64** %b, i64 1
+ %arrayidx4 = getelementptr inbounds i64*, i64** %b, i64 1
%i4 = load i64** %arrayidx4, align 8
%o1 = load i64* %i1, align 8
%o4 = load i64* %i4, align 8
- %ptr0 = getelementptr inbounds i64* %i0, i64 %o1
- %ptr3 = getelementptr inbounds i64* %i3, i64 %o4
+ %ptr0 = getelementptr inbounds i64, i64* %i0, i64 %o1
+ %ptr3 = getelementptr inbounds i64, i64* %i3, i64 %o4
store i64* %ptr0, i64** %c, align 8
- %arrayidx5 = getelementptr inbounds i64** %c, i64 1
+ %arrayidx5 = getelementptr inbounds i64*, i64** %c, i64 1
store i64* %ptr3, i64** %arrayidx5, align 8
ret void
; CHECK-LABEL: @test2(
; CHECK: %i0.v.i0 = bitcast i64** %a to <2 x i64*>*
; CHECK: %i1 = load i64** %b, align 8
; CHECK: %i0 = load <2 x i64*>* %i0.v.i0, align 8
-; CHECK: %arrayidx4 = getelementptr inbounds i64** %b, i64 1
+; CHECK: %arrayidx4 = getelementptr inbounds i64*, i64** %b, i64 1
; CHECK: %i4 = load i64** %arrayidx4, align 8
; CHECK: %o1 = load i64* %i1, align 8
; CHECK: %o4 = load i64* %i4, align 8
; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
-; CHECK: %ptr0 = getelementptr inbounds <2 x i64*> %i0, <2 x i64> %ptr0.v.i1.2
+; CHECK: %ptr0 = getelementptr inbounds i64, <2 x i64*> %i0, <2 x i64> %ptr0.v.i1.2
; CHECK: %0 = bitcast i64** %c to <2 x i64*>*
; CHECK: store <2 x i64*> %ptr0, <2 x i64*>* %0, align 8
; CHECK: ret void
entry:
%i0 = load <2 x i64*>* %a, align 8
%i1 = load <2 x i64*>* %b, align 8
- %arrayidx3 = getelementptr inbounds <2 x i64*>* %a, i64 1
+ %arrayidx3 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %a, i64 1
%i3 = load <2 x i64*>* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds <2 x i64*>* %b, i64 1
+ %arrayidx4 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %b, i64 1
%i4 = load <2 x i64*>* %arrayidx4, align 8
%j1 = extractelement <2 x i64*> %i1, i32 0
%j4 = extractelement <2 x i64*> %i4, i32 0
%o4 = load i64* %j4, align 8
%j0 = extractelement <2 x i64*> %i0, i32 0
%j3 = extractelement <2 x i64*> %i3, i32 0
- %ptr0 = getelementptr inbounds i64* %j0, i64 %o1
- %ptr3 = getelementptr inbounds i64* %j3, i64 %o4
+ %ptr0 = getelementptr inbounds i64, i64* %j0, i64 %o1
+ %ptr3 = getelementptr inbounds i64, i64* %j3, i64 %o4
%qtr0 = insertelement <2 x i64*> undef, i64* %ptr0, i32 0
%rtr0 = insertelement <2 x i64*> %qtr0, i64* %ptr0, i32 1
%qtr3 = insertelement <2 x i64*> undef, i64* %ptr3, i32 0
%rtr3 = insertelement <2 x i64*> %qtr3, i64* %ptr3, i32 1
store <2 x i64*> %rtr0, <2 x i64*>* %c, align 8
- %arrayidx5 = getelementptr inbounds <2 x i64*>* %c, i64 1
+ %arrayidx5 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %c, i64 1
store <2 x i64*> %rtr3, <2 x i64*>* %arrayidx5, align 8
ret void
; CHECK-LABEL: @test3(
; CHECK: %i0.v.i0 = bitcast <2 x i64*>* %a to <4 x i64*>*
; CHECK: %i1 = load <2 x i64*>* %b, align 8
; CHECK: %i0 = load <4 x i64*>* %i0.v.i0, align 8
-; CHECK: %arrayidx4 = getelementptr inbounds <2 x i64*>* %b, i64 1
+; CHECK: %arrayidx4 = getelementptr inbounds <2 x i64*>, <2 x i64*>* %b, i64 1
; CHECK: %i4 = load <2 x i64*>* %arrayidx4, align 8
; CHECK: %j1 = extractelement <2 x i64*> %i1, i32 0
; CHECK: %j4 = extractelement <2 x i64*> %i4, i32 0
; CHECK: %ptr0.v.i1.1 = insertelement <2 x i64> undef, i64 %o1, i32 0
; CHECK: %ptr0.v.i1.2 = insertelement <2 x i64> %ptr0.v.i1.1, i64 %o4, i32 1
; CHECK: %ptr0.v.i0 = shufflevector <4 x i64*> %i0, <4 x i64*> undef, <2 x i32> <i32 0, i32 2>
-; CHECK: %ptr0 = getelementptr inbounds <2 x i64*> %ptr0.v.i0, <2 x i64> %ptr0.v.i1.2
+; CHECK: %ptr0 = getelementptr inbounds i64, <2 x i64*> %ptr0.v.i0, <2 x i64> %ptr0.v.i1.2
; CHECK: %rtr0 = shufflevector <2 x i64*> %ptr0, <2 x i64*> undef, <2 x i32> zeroinitializer
; CHECK: %rtr3 = shufflevector <2 x i64*> %ptr0, <2 x i64*> undef, <2 x i32> <i32 1, i32 1>
; CHECK: %0 = bitcast <2 x i64*>* %c to <4 x i64*>*
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
; CHECK-LABEL: @test1(
%i1f = load float* %b, align 4
%i1 = fpext float %i1f to double
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds float* %a, i64 1
+ %arrayidx3 = getelementptr inbounds float, float* %a, i64 1
%i3f = load float* %arrayidx3, align 4
%i3 = fpext float %i3f to double
- %arrayidx4 = getelementptr inbounds float* %b, i64 1
+ %arrayidx4 = getelementptr inbounds float, float* %b, i64 1
%i4f = load float* %arrayidx4, align 4
%i4 = fpext float %i4f to double
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
; CHECK-LABEL: @test2(
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
%mulf = fptrunc double %mul to float
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%mul5f = fptrunc double %mul5 to float
store float %mulf, float* %c, align 8
- %arrayidx5 = getelementptr inbounds float* %c, i64 1
+ %arrayidx5 = getelementptr inbounds float, float* %c, i64 1
store float %mul5f, float* %arrayidx5, align 4
ret void
; CHECK-LABEL: @test3(
; CHECK-AO-LABEL: @test3(
; CHECK-AO: %i0 = load double* %a, align 8
; CHECK-AO: %i1 = load double* %b, align 8
-; CHECK-AO: %arrayidx3 = getelementptr inbounds double* %a, i64 1
+; CHECK-AO: %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
; CHECK-AO: %i3 = load double* %arrayidx3, align 8
-; CHECK-AO: %arrayidx4 = getelementptr inbounds double* %b, i64 1
+; CHECK-AO: %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
; CHECK-AO: %i4 = load double* %arrayidx4, align 8
; CHECK-AO: %mul.v.i1.1 = insertelement <2 x double> undef, double %i1, i32 0
; CHECK-AO: %mul.v.i1.2 = insertelement <2 x double> %mul.v.i1.1, double %i4, i32 1
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
br label %if.end
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
store double %mul, double* %c, align 4
ret void
; CHECK: add i64 %sunkaddr, 40
define void @load_cast_gep(i1 %cond, i64* %base) {
entry:
- %addr = getelementptr inbounds i64* %base, i64 5
+ %addr = getelementptr inbounds i64, i64* %base, i64 5
%casted = addrspacecast i64* %addr to i32 addrspace(1)*
br i1 %cond, label %if.then, label %fallthrough
define void @store_gep_cast(i1 %cond, i64* %base) {
entry:
%casted = addrspacecast i64* %base to i32 addrspace(1)*
- %addr = getelementptr inbounds i32 addrspace(1)* %casted, i64 5
+ %addr = getelementptr inbounds i32, i32 addrspace(1)* %casted, i64 5
br i1 %cond, label %if.then, label %fallthrough
if.then:
declare zeroext i1 @return_i1()
define i32 @test_sor_basic(i32* %base) {
-; CHECK: getelementptr i32* %base, i32 15
-; CHECK: getelementptr i32* %base-new, i32 15
+; CHECK: getelementptr i32, i32* %base, i32 15
+; CHECK: getelementptr i32, i32* %base-new, i32 15
entry:
- %ptr = getelementptr i32* %base, i32 15
+ %ptr = getelementptr i32, i32* %base, i32 15
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr)
%base-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
}
define i32 @test_sor_two_derived(i32* %base) {
-; CHECK: getelementptr i32* %base, i32 15
-; CHECK: getelementptr i32* %base, i32 12
-; CHECK: getelementptr i32* %base-new, i32 15
-; CHECK: getelementptr i32* %base-new, i32 12
+; CHECK: getelementptr i32, i32* %base, i32 15
+; CHECK: getelementptr i32, i32* %base, i32 12
+; CHECK: getelementptr i32, i32* %base-new, i32 15
+; CHECK: getelementptr i32, i32* %base-new, i32 12
entry:
- %ptr = getelementptr i32* %base, i32 15
- %ptr2 = getelementptr i32* %base, i32 12
+ %ptr = getelementptr i32, i32* %base, i32 15
+ %ptr2 = getelementptr i32, i32* %base, i32 12
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr, i32* %ptr2)
%base-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
}
define i32 @test_sor_ooo(i32* %base) {
-; CHECK: getelementptr i32* %base, i32 15
-; CHECK: getelementptr i32* %base-new, i32 15
+; CHECK: getelementptr i32, i32* %base, i32 15
+; CHECK: getelementptr i32, i32* %base-new, i32 15
entry:
- %ptr = getelementptr i32* %base, i32 15
+ %ptr = getelementptr i32, i32* %base, i32 15
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
%base-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 4)
}
define i32 @test_sor_gep_smallint([3 x i32]* %base) {
-; CHECK: getelementptr [3 x i32]* %base, i32 0, i32 2
-; CHECK: getelementptr [3 x i32]* %base-new, i32 0, i32 2
+; CHECK: getelementptr [3 x i32], [3 x i32]* %base, i32 0, i32 2
+; CHECK: getelementptr [3 x i32], [3 x i32]* %base-new, i32 0, i32 2
entry:
- %ptr = getelementptr [3 x i32]* %base, i32 0, i32 2
+ %ptr = getelementptr [3 x i32], [3 x i32]* %base, i32 0, i32 2
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, [3 x i32]* %base, i32* %ptr)
%base-new = call [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
}
define i32 @test_sor_gep_largeint([3 x i32]* %base) {
-; CHECK: getelementptr [3 x i32]* %base, i32 0, i32 21
-; CHECK-NOT: getelementptr [3 x i32]* %base-new, i32 0, i32 21
+; CHECK: getelementptr [3 x i32], [3 x i32]* %base, i32 0, i32 21
+; CHECK-NOT: getelementptr [3 x i32], [3 x i32]* %base-new, i32 0, i32 21
entry:
- %ptr = getelementptr [3 x i32]* %base, i32 0, i32 21
+ %ptr = getelementptr [3 x i32], [3 x i32]* %base, i32 0, i32 21
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, [3 x i32]* %base, i32* %ptr)
%base-new = call [3 x i32]* @llvm.experimental.gc.relocate.p0a3i32(i32 %tok, i32 4, i32 4)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
}
define i32 @test_sor_noop(i32* %base) {
-; CHECK: getelementptr i32* %base, i32 15
+; CHECK: getelementptr i32, i32* %base, i32 15
; CHECK: call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
; CHECK: call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 6)
entry:
- %ptr = getelementptr i32* %base, i32 15
- %ptr2 = getelementptr i32* %base, i32 12
+ %ptr = getelementptr i32, i32* %base, i32 15
+ %ptr2 = getelementptr i32, i32* %base, i32 12
%tok = call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32* %base, i32* %ptr, i32* %ptr2)
%ptr-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 5)
%ptr2-new = call i32* @llvm.experimental.gc.relocate.p0i32(i32 %tok, i32 4, i32 6)
define void @init_hrtimers_cpu(i32 %cpu) nounwind noredzone section ".cpuinit.text" {
entry:
- %tmp3 = getelementptr %struct.hrtimer_cpu_base* bitcast (%1* @per_cpu__hrtimer_bases to %struct.hrtimer_cpu_base*), i32 0, i32 0 ; <%0*> [#uses=1]
+ %tmp3 = getelementptr %struct.hrtimer_cpu_base, %struct.hrtimer_cpu_base* bitcast (%1* @per_cpu__hrtimer_bases to %struct.hrtimer_cpu_base*), i32 0, i32 0 ; <%0*> [#uses=1]
%tmp5 = bitcast %0* %tmp3 to i8* ; <i8*> [#uses=0]
unreachable
}
; CHECK-LABEL: test1
; CHECK: %const = bitcast i64 68141056 to i64
; CHECK: %1 = inttoptr i64 %const to %T*
-; CHECK: %o1 = getelementptr %T* %1, i32 0, i32 1
-; CHECK: %o2 = getelementptr %T* %1, i32 0, i32 2
-; CHECK: %o3 = getelementptr %T* %1, i32 0, i32 3
+; CHECK: %o1 = getelementptr %T, %T* %1, i32 0, i32 1
+; CHECK: %o2 = getelementptr %T, %T* %1, i32 0, i32 2
+; CHECK: %o3 = getelementptr %T, %T* %1, i32 0, i32 3
%at = inttoptr i64 68141056 to %T*
- %o1 = getelementptr %T* %at, i32 0, i32 1
+ %o1 = getelementptr %T, %T* %at, i32 0, i32 1
%t1 = load i32* %o1
- %o2 = getelementptr %T* %at, i32 0, i32 2
+ %o2 = getelementptr %T, %T* %at, i32 0, i32 2
%t2 = load i32* %o2
%a1 = add i32 %t1, %t2
- %o3 = getelementptr %T* %at, i32 0, i32 3
+ %o3 = getelementptr %T, %T* %at, i32 0, i32 3
%t3 = load i32* %o3
%a2 = add i32 %a1, %t3
ret i32 %a2
; CHECK-LABEL: @test1
; CHECK: %const = bitcast i32 12345678 to i32
; CHECK: %1 = inttoptr i32 %const to %T*
-; CHECK: %addr1 = getelementptr %T* %1, i32 0, i32 1
- %addr1 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
+; CHECK: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
+ %addr1 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
%tmp1 = load i32* %addr1
- %addr2 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
+ %addr2 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
%tmp2 = load i32* %addr2
- %addr3 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
+ %addr3 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
%tmp3 = load i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
; CHECK-LABEL: @test1
; CHECK: %const = bitcast i32 12345678 to i32
; CHECK: %1 = inttoptr i32 %const to %T*
-; CHECK: %addr1 = getelementptr %T* %1, i32 0, i32 1
- %addr1 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
+; CHECK: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
+ %addr1 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 1
%tmp1 = load i32* %addr1
- %addr2 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
+ %addr2 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 2
%tmp2 = load i32* %addr2
- %addr3 = getelementptr %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
+ %addr3 = getelementptr %T, %T* inttoptr (i32 12345678 to %T*), i32 0, i32 3
%tmp3 = load i32* %addr3
%tmp4 = add i32 %tmp1, %tmp2
%tmp5 = add i32 %tmp3, %tmp4
; CHECK: %const = bitcast i32 12345678 to i32
; CHECK-NOT: %base = inttoptr i32 12345678 to %T*
; CHECK-NEXT: %1 = inttoptr i32 %const to %T*
-; CHECK-NEXT: %addr1 = getelementptr %T* %1, i32 0, i32 1
-; CHECK-NEXT: %addr2 = getelementptr %T* %1, i32 0, i32 2
-; CHECK-NEXT: %addr3 = getelementptr %T* %1, i32 0, i32 3
+; CHECK-NEXT: %addr1 = getelementptr %T, %T* %1, i32 0, i32 1
+; CHECK-NEXT: %addr2 = getelementptr %T, %T* %1, i32 0, i32 2
+; CHECK-NEXT: %addr3 = getelementptr %T, %T* %1, i32 0, i32 3
%base = inttoptr i32 12345678 to %T*
- %addr1 = getelementptr %T* %base, i32 0, i32 1
- %addr2 = getelementptr %T* %base, i32 0, i32 2
- %addr3 = getelementptr %T* %base, i32 0, i32 3
+ %addr1 = getelementptr %T, %T* %base, i32 0, i32 1
+ %addr2 = getelementptr %T, %T* %base, i32 0, i32 2
+ %addr3 = getelementptr %T, %T* %base, i32 0, i32 3
ret i32 12345678
}
define i32 @main() {
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %tmp = getelementptr [4 x %struct.point]* @pts, i32 0, i32 0 ; <%struct.point*> [#uses=1]
+ %tmp = getelementptr [4 x %struct.point], [4 x %struct.point]* @pts, i32 0, i32 0 ; <%struct.point*> [#uses=1]
%tmp1 = call i32 (i32, ...)* @va1( i32 1, %struct.point* byval %tmp ) nounwind ; <i32> [#uses=0]
call void @exit( i32 0 ) noreturn nounwind
unreachable
entry:
store i32 add (i32 ptrtoint ([0 x i32]* @A to i32), i32 1), i32* %Arg2
; CHECK: store i32 add (i32 ptrtoint ([0 x i32]* @A to i32), i32 1), i32* %Arg2
- %ln2gz = getelementptr i32* %Arg1, i32 14
+ %ln2gz = getelementptr i32, i32* %Arg1, i32 14
%ln2gA = bitcast i32* %ln2gz to double*
%ln2gB = load double* %ln2gA
- %ln2gD = getelementptr i32* %Arg2, i32 -3
+ %ln2gD = getelementptr i32, i32* %Arg2, i32 -3
%ln2gE = bitcast i32* %ln2gD to double*
store double %ln2gB, double* %ln2gE
; CHECK: store double %ln2gB, double* %ln2gE
_ZNSt8auto_ptrIiED1Ev.exit:
%temp.lvalue = alloca %"class.std::auto_ptr", align 8
call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
- %_M_ptr.i.i = getelementptr inbounds %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
+ %_M_ptr.i.i = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
%tmp.i.i = load i32** %_M_ptr.i.i, align 8
; CHECK-NOT: store i32* null
store i32* null, i32** %_M_ptr.i.i, align 8
- %_M_ptr.i.i4 = getelementptr inbounds %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
+ %_M_ptr.i.i4 = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
store i32* %tmp.i.i, i32** %_M_ptr.i.i4, align 8
; CHECK: ret void
ret void
define void @_ZSt9iter_swapIPSt4pairIPN4llvm10BasicBlockEjES5_EvT_T0_(%struct.pair.162* %__a, %struct.pair.162* %__b) nounwind uwtable inlinehint {
entry:
%memtmp = alloca %struct.pair.162, align 8
- %0 = getelementptr inbounds %struct.pair.162* %memtmp, i64 0, i32 0
- %1 = getelementptr inbounds %struct.pair.162* %__a, i64 0, i32 0
+ %0 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %memtmp, i64 0, i32 0
+ %1 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %__a, i64 0, i32 0
%2 = load %struct.BasicBlock** %1, align 8
store %struct.BasicBlock* %2, %struct.BasicBlock** %0, align 8
- %3 = getelementptr inbounds %struct.pair.162* %memtmp, i64 0, i32 1
- %4 = getelementptr inbounds %struct.pair.162* %__a, i64 0, i32 1
+ %3 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %memtmp, i64 0, i32 1
+ %4 = getelementptr inbounds %struct.pair.162, %struct.pair.162* %__a, i64 0, i32 1
%5 = load i32* %4, align 4
store i32 %5, i32* %3, align 8
%6 = bitcast %struct.pair.162* %__a to i8*
define void @write24to28(i32* nocapture %p) nounwind uwtable ssp {
; CHECK-LABEL: @write24to28(
entry:
- %arrayidx0 = getelementptr inbounds i32* %p, i64 1
+ %arrayidx0 = getelementptr inbounds i32, i32* %p, i64 1
%p3 = bitcast i32* %arrayidx0 to i8*
; CHECK: call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 24, i32 4, i1 false)
call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
- %arrayidx1 = getelementptr inbounds i32* %p, i64 7
+ %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
store i32 1, i32* %arrayidx1, align 4
ret void
}
%p3 = bitcast i32* %p to i8*
; CHECK: call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 28, i32 4, i1 false)
call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 32, i32 4, i1 false)
- %arrayidx1 = getelementptr inbounds i32* %p, i64 7
+ %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
store i32 1, i32* %arrayidx1, align 4
ret void
}
%p3 = bitcast i32* %p to i8*
; CHECK: call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 32, i32 16, i1 false)
call void @llvm.memset.p0i8.i64(i8* %p3, i8 0, i64 32, i32 16, i1 false)
- %arrayidx1 = getelementptr inbounds i32* %p, i64 7
+ %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 7
store i32 1, i32* %arrayidx1, align 4
ret void
}
%0 = bitcast %struct.vec2plusi* %p to i8*
; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.vec2plusi* @glob2 to i8*), i64 32, i32 16, i1 false)
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.vec2plusi* @glob2 to i8*), i64 36, i32 16, i1 false)
- %c = getelementptr inbounds %struct.vec2plusi* %p, i64 0, i32 2
+ %c = getelementptr inbounds %struct.vec2plusi, %struct.vec2plusi* %p, i64 0, i32 2
store i32 1, i32* %c, align 4
ret void
}
%0 = bitcast %struct.vec2* %p to i8*
; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.vec2* @glob1 to i8*), i64 16, i32 16, i1 false)
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 16, i1 false)
- %c = getelementptr inbounds %struct.vec2* %p, i64 0, i32 1
+ %c = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 1
store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32>* %c, align 4
ret void
}
%0 = bitcast %struct.vec2* %p to i8*
; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 16, i1 false)
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* bitcast (%struct.vec2* @glob1 to i8*), i64 32, i32 16, i1 false)
- %arrayidx1 = getelementptr inbounds %struct.vec2* %p, i64 0, i32 0, i64 7
+ %arrayidx1 = getelementptr inbounds %struct.vec2, %struct.vec2* %p, i64 0, i32 0, i64 7
store i32 1, i32* %arrayidx1, align 4
ret void
}
define void @cpu_lwp_fork(%struct.trapframe* %md_regs, i64 %pcb_rsp0) nounwind uwtable noinline ssp {
entry:
%0 = inttoptr i64 %pcb_rsp0 to %struct.trapframe*
- %add.ptr = getelementptr inbounds %struct.trapframe* %0, i64 -1
+ %add.ptr = getelementptr inbounds %struct.trapframe, %struct.trapframe* %0, i64 -1
%1 = bitcast %struct.trapframe* %add.ptr to i8*
%2 = bitcast %struct.trapframe* %md_regs to i8*
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 24, i32 1, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %2, i64 24, i32 1, i1 false)
- %tf_trapno = getelementptr inbounds %struct.trapframe* %0, i64 -1, i32 1
+ %tf_trapno = getelementptr inbounds %struct.trapframe, %struct.trapframe* %0, i64 -1, i32 1
store i64 3, i64* %tf_trapno, align 8
ret void
}
%__u = alloca { [3 x i32] }
%tmp.1 = bitcast { [3 x i32] }* %__u to double*
store double %__x, double* %tmp.1
- %tmp.4 = getelementptr { [3 x i32] }* %__u, i32 0, i32 0, i32 1
+ %tmp.4 = getelementptr { [3 x i32] }, { [3 x i32] }* %__u, i32 0, i32 0, i32 1
%tmp.5 = load i32* %tmp.4
%tmp.6 = icmp slt i32 %tmp.5, 0
%tmp.7 = zext i1 %tmp.6 to i32
; CHECK-NEXT: store double
store i8 19, i8* %P ;; dead
- %A = getelementptr i8* %P, i32 3
+ %A = getelementptr i8, i8* %P, i32 3
store i8 42, i8* %A ;; dead
define void @test5(i32 %i) nounwind ssp {
%A = alloca i32
%B = bitcast i32* %A to i8*
- %C = getelementptr i8* %B, i32 %i
+ %C = getelementptr i8, i8* %B, i32 %i
store i8 10, i8* %C ;; Dead store to variable index.
store i32 20, i32* %A
define void @test5_addrspacecast(i32 %i) nounwind ssp {
%A = alloca i32
%B = addrspacecast i32* %A to i8 addrspace(1)*
- %C = getelementptr i8 addrspace(1)* %B, i32 %i
+ %C = getelementptr i8, i8 addrspace(1)* %B, i32 %i
store i8 10, i8 addrspace(1)* %C ;; Dead store to variable index.
store i32 20, i32* %A
@g = global i32 42
define void @test1(%t* noalias %pp) {
- %p = getelementptr inbounds %t* %pp, i32 0, i32 0
+ %p = getelementptr inbounds %t, %t* %pp, i32 0, i32 0
store i32 1, i32* %p; <-- This is dead
%x = load i32* inttoptr (i32 12345 to i32*)
define fastcc i32 @test2() nounwind ssp {
bb14: ; preds = %bb4
%0 = bitcast i8* undef to i8** ; <i8**> [#uses=1]
- %1 = getelementptr inbounds i8** %0, i64 undef ; <i8**> [#uses=1]
+ %1 = getelementptr inbounds i8*, i8** %0, i64 undef ; <i8**> [#uses=1]
%2 = bitcast i8** %1 to i16* ; <i16*> [#uses=2]
- %3 = getelementptr inbounds i16* %2, i64 undef ; <i16*> [#uses=1]
+ %3 = getelementptr inbounds i16, i16* %2, i64 undef ; <i16*> [#uses=1]
%4 = bitcast i16* %3 to i8* ; <i8*> [#uses=1]
- %5 = getelementptr inbounds i8* %4, i64 undef ; <i8*> [#uses=1]
- %6 = getelementptr inbounds i16* %2, i64 undef ; <i16*> [#uses=1]
+ %5 = getelementptr inbounds i8, i8* %4, i64 undef ; <i8*> [#uses=1]
+ %6 = getelementptr inbounds i16, i16* %2, i64 undef ; <i16*> [#uses=1]
store i16 undef, i16* %6, align 2
- %7 = getelementptr inbounds i8* %5, i64 undef ; <i8*> [#uses=1]
+ %7 = getelementptr inbounds i8, i8* %5, i64 undef ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %7, i8* undef, i64 undef, i32 1, i1 false)
unreachable
}
ret i32 0
dead:
- %P2 = getelementptr i32 *%P2, i32 52
- %Q2 = getelementptr i32 *%Q2, i32 52
+ %P2 = getelementptr i32, i32 *%P2, i32 52
+ %Q2 = getelementptr i32, i32 *%Q2, i32 52
store i32 4, i32* %P2
store i32 4, i32* %Q2
br label %dead
define void @test4(%struct.f393a00_2__windmill* %a, %struct.f393a00_2__windmill* %b) {
entry:
%t = alloca %struct.f393a00_2__windmill ; <%struct.f393a00_2__windmill*> [#uses=1]
- %0 = getelementptr %struct.f393a00_2__windmill* %t, i32 0, i32 0, i32 0 ; <%struct.ada__tags__dispatch_table**> [#uses=1]
+ %0 = getelementptr %struct.f393a00_2__windmill, %struct.f393a00_2__windmill* %t, i32 0, i32 0, i32 0 ; <%struct.ada__tags__dispatch_table**> [#uses=1]
%1 = load %struct.ada__tags__dispatch_table** null, align 4 ; <%struct.ada__tags__dispatch_table*> [#uses=1]
%2 = load %struct.ada__tags__dispatch_table** %0, align 8 ; <%struct.ada__tags__dispatch_table*> [#uses=1]
store %struct.ada__tags__dispatch_table* %2, %struct.ada__tags__dispatch_table** null, align 4
bb:
%tmp = alloca %class.basic_string, align 8
%tmp1 = alloca %class.basic_string, align 8
- %tmp3 = getelementptr inbounds %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 2
+ %tmp3 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 2
%tmp4 = bitcast %union.anon* %tmp3 to i8*
- %tmp5 = getelementptr inbounds %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 0, i32 0
- %tmp6 = getelementptr inbounds %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 1
- %tmp7 = getelementptr inbounds i8* %tmp4, i64 1
+ %tmp5 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 0, i32 0
+ %tmp6 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp, i64 0, i32 0, i32 0, i32 1
+ %tmp7 = getelementptr inbounds i8, i8* %tmp4, i64 1
%tmp8 = bitcast %class.basic_string* %tmp to i8*
%tmp9 = bitcast i64 0 to i64
- %tmp10 = getelementptr inbounds %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 2
+ %tmp10 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 2
%tmp11 = bitcast %union.anon* %tmp10 to i8*
- %tmp12 = getelementptr inbounds %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 0, i32 0
- %tmp13 = getelementptr inbounds %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 1
- %tmp14 = getelementptr inbounds i8* %tmp11, i64 1
+ %tmp12 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 0, i32 0
+ %tmp13 = getelementptr inbounds %class.basic_string, %class.basic_string* %tmp1, i64 0, i32 0, i32 0, i32 1
+ %tmp14 = getelementptr inbounds i8, i8* %tmp11, i64 1
%tmp15 = bitcast %class.basic_string* %tmp1 to i8*
br label %_ZN12basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS2_.exit
define void @foo() {
bb1:
%memtmp3.i = alloca [21 x i8], align 1
- %0 = getelementptr inbounds [21 x i8]* %memtmp3.i, i64 0, i64 0
+ %0 = getelementptr inbounds [21 x i8], [21 x i8]* %memtmp3.i, i64 0, i64 0
br label %bb3
bb2:
; CHECK-NEXT: @free
; CHECK-NEXT: ret void
define void @test2({i32, i32}* %P) {
- %Q = getelementptr {i32, i32} *%P, i32 0, i32 1
+ %Q = getelementptr {i32, i32}, {i32, i32} *%P, i32 0, i32 1
store i32 4, i32* %Q
%1 = bitcast {i32, i32}* %P to i8*
tail call void @free(i8* %1)
define void @test3() {
%m = call i8* @malloc(i64 24)
store i8 0, i8* %m
- %m1 = getelementptr i8* %m, i64 1
+ %m1 = getelementptr i8, i8* %m, i64 1
store i8 1, i8* %m1
call void @free(i8* %m)
ret void
define void @test1(i8* %src) {
; CHECK-LABEL: @test1(
%B = alloca [16 x i8]
- %dest = getelementptr inbounds [16 x i8]* %B, i64 0, i64 0
+ %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
; CHECK-NOT: @strcpy
%call = call i8* @strcpy(i8* %dest, i8* %src)
; CHECK: ret void
define void @test2(i8* %src) {
; CHECK-LABEL: @test2(
%B = alloca [16 x i8]
- %dest = getelementptr inbounds [16 x i8]* %B, i64 0, i64 0
+ %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
; CHECK-NOT: @strncpy
%call = call i8* @strncpy(i8* %dest, i8* %src, i32 12)
; CHECK: ret void
define void @test3(i8* %src) {
; CHECK-LABEL: @test3(
%B = alloca [16 x i8]
- %dest = getelementptr inbounds [16 x i8]* %B, i64 0, i64 0
+ %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
; CHECK-NOT: @strcat
%call = call i8* @strcat(i8* %dest, i8* %src)
; CHECK: ret void
define void @test4(i8* %src) {
; CHECK-LABEL: @test4(
%B = alloca [16 x i8]
- %dest = getelementptr inbounds [16 x i8]* %B, i64 0, i64 0
+ %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
; CHECK-NOT: @strncat
%call = call i8* @strncat(i8* %dest, i8* %src, i32 12)
; CHECK: ret void
define void @test5(i8* nocapture %src) {
; CHECK-LABEL: @test5(
%dest = alloca [100 x i8], align 16
- %arraydecay = getelementptr inbounds [100 x i8]* %dest, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [100 x i8], [100 x i8]* %dest, i64 0, i64 0
%call = call i8* @strcpy(i8* %arraydecay, i8* %src)
; CHECK: %call = call i8* @strcpy
- %arrayidx = getelementptr inbounds i8* %call, i64 10
+ %arrayidx = getelementptr inbounds i8, i8* %call, i64 10
store i8 97, i8* %arrayidx, align 1
ret void
}
define void @test6(i8* %src) {
; CHECK-LABEL: @test6(
%B = alloca [16 x i8]
- %dest = getelementptr inbounds [16 x i8]* %B, i64 0, i64 0
+ %dest = getelementptr inbounds [16 x i8], [16 x i8]* %B, i64 0, i64 0
; CHECK: @strcpy
%call = call i8* @strcpy(i8* %dest, i8* %src)
; CHECK: @user
define void @test2(i32* %P) {
; CHECK: test2
- %Q = getelementptr i32* %P, i32 1
+ %Q = getelementptr i32, i32* %P, i32 1
%R = bitcast i32* %Q to i8*
call void @llvm.lifetime.start(i64 4, i8* %R)
; CHECK: lifetime.start
; CHECK: tail call void @llvm.memcpy.p0i8.p0i8.i64
; CHECK: ret void
- %arrayidx = getelementptr i8* %buf, i64 18
+ %arrayidx = getelementptr i8, i8* %buf, i64 18
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arrayidx, i8* %buf, i64 18, i32 1, i1 false)
store i8 1, i8* %arrayidx, align 1
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %buf, i8* %arrayidx, i64 18, i32 1, i1 false)
if.end: ; preds = %entry
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %call4, i8* %name, i64 %call, i32 1, i1 false)
- %arrayidx = getelementptr inbounds i8* %call4, i64 %call
+ %arrayidx = getelementptr inbounds i8, i8* %call4, i64 %call
store i8 46, i8* %arrayidx, align 1
; CHECK: store i8 46
- %add.ptr5 = getelementptr inbounds i8* %call4, i64 %add
+ %add.ptr5 = getelementptr inbounds i8, i8* %call4, i64 %add
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %add.ptr5, i8* %domain, i64 %call1, i32 1, i1 false)
- %arrayidx8 = getelementptr inbounds i8* %call4, i64 %add2
+ %arrayidx8 = getelementptr inbounds i8, i8* %call4, i64 %add2
store i8 0, i8* %arrayidx8, align 1
br label %return
; Test for byval handling.
%struct.x = type { i32, i32, i32, i32 }
define void @test9(%struct.x* byval %a) nounwind {
- %tmp2 = getelementptr %struct.x* %a, i32 0, i32 0
+ %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0
store i32 1, i32* %tmp2, align 4
ret void
; CHECK-LABEL: @test9(
; Test for inalloca handling.
define void @test9_2(%struct.x* inalloca %a) nounwind {
- %tmp2 = getelementptr %struct.x* %a, i32 0, i32 0
+ %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0
store i32 1, i32* %tmp2, align 4
ret void
; CHECK-LABEL: @test9_2(
; CHECK-LABEL: @test11(
%storage = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
; CHECK-NOT: alloca
- %cast = getelementptr [10 x i8]* %storage, i32 0, i32 0 ; <i8*> [#uses=1]
+ %cast = getelementptr [10 x i8], [10 x i8]* %storage, i32 0, i32 0 ; <i8*> [#uses=1]
%tramp = call i8* @llvm.init.trampoline( i8* %cast, i8* bitcast (void ()* @test11f to i8*), i8* null ) ; <i8*> [#uses=1]
; CHECK-NOT: trampoline
ret void
; PR2599 - load -> store to same address.
define void @test12({ i32, i32 }* %x) nounwind {
- %tmp4 = getelementptr { i32, i32 }* %x, i32 0, i32 0
+ %tmp4 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 0
%tmp5 = load i32* %tmp4, align 4
- %tmp7 = getelementptr { i32, i32 }* %x, i32 0, i32 1
+ %tmp7 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 1
%tmp8 = load i32* %tmp7, align 4
%tmp17 = sub i32 0, %tmp8
store i32 %tmp5, i32* %tmp4, align 4
define void @test19({i32} * nocapture byval align 4 %arg5) nounwind ssp {
bb:
- %tmp7 = getelementptr inbounds {i32}* %arg5, i32 0, i32 0
+ %tmp7 = getelementptr inbounds {i32}, {i32}* %arg5, i32 0, i32 0
store i32 912, i32* %tmp7
call void @test19f({i32}* byval align 4 %arg5)
ret void
declare noalias i8* @strdup(i8* nocapture) nounwind
define noalias i8* @test23() nounwind uwtable ssp {
%x = alloca [2 x i8], align 1
- %arrayidx = getelementptr inbounds [2 x i8]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %x, i64 0, i64 0
store i8 97, i8* %arrayidx, align 1
- %arrayidx1 = getelementptr inbounds [2 x i8]* %x, i64 0, i64 1
+ %arrayidx1 = getelementptr inbounds [2 x i8], [2 x i8]* %x, i64 0, i64 1
store i8 0, i8* %arrayidx1, align 1
%call = call i8* @strdup(i8* %arrayidx) nounwind
ret i8* %call
; CHECK: store i32 %c
; CHECK: ret void
define void @test24([2 x i32]* %a, i32 %b, i32 %c) nounwind {
- %1 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 0
+ %1 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 0
store i32 0, i32* %1, align 4
- %2 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 1
+ %2 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 1
store i32 0, i32* %2, align 4
- %3 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 0
+ %3 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 0
store i32 %b, i32* %3, align 4
- %4 = getelementptr inbounds [2 x i32]* %a, i64 0, i64 1
+ %4 = getelementptr inbounds [2 x i32], [2 x i32]* %a, i64 0, i64 1
store i32 %c, i32* %4, align 4
ret void
}
; CHECK: store i8 0
; CHECK: store i8 %tmp
define i8* @test25(i8* %p) nounwind {
- %p.4 = getelementptr i8* %p, i64 4
+ %p.4 = getelementptr i8, i8* %p, i64 4
%tmp = load i8* %p.4, align 1
store i8 0, i8* %p.4, align 1
%q = call i8* @strdup(i8* %p) nounwind optsize
%tmp2 = lshr i32 %tmp, %bitno
%bit = and i32 %tmp2, 1
; subtle escape mechanism follows
- %lookup = getelementptr [2 x i1]* @lookup_table, i32 0, i32 %bit
+ %lookup = getelementptr [2 x i1], [2 x i1]* @lookup_table, i32 0, i32 %bit
%val = load i1* %lookup
ret i1 %val
}
%tmp = ptrtoint i32* %q to i32
%tmp2 = lshr i32 %tmp, %bitno
%bit = and i32 %tmp2, 1
- %lookup = getelementptr [2 x i1]* @lookup_table, i32 0, i32 %bit
+ %lookup = getelementptr [2 x i1], [2 x i1]* @lookup_table, i32 0, i32 %bit
ret i1* %lookup
}
for.inc: ; preds = %for.body
%7 = load i8** %__begin, align 8, !dbg !40
- %incdec.ptr = getelementptr inbounds i8* %7, i32 1, !dbg !40
+ %incdec.ptr = getelementptr inbounds i8, i8* %7, i32 1, !dbg !40
store i8* %incdec.ptr, i8** %__begin, align 8, !dbg !40
br label %for.cond, !dbg !40
ret %struct.TypHeader* null
bb556.preheader: ; preds = %entry
- %tmp56119 = getelementptr %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp56119 = getelementptr %struct.TypHeader, %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp56220 = load i32* %tmp56119 ; <i32> [#uses=0]
br i1 false, label %bb.nph23, label %bb675.preheader
br i1 false, label %bb684, label %bb656
bb675.preheader: ; preds = %bb556.preheader
- %tmp67711 = getelementptr %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp677 = getelementptr %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp67711 = getelementptr %struct.TypHeader, %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp677 = getelementptr %struct.TypHeader, %struct.TypHeader* %hdR, i32 0, i32 0 ; <i32*> [#uses=1]
br label %bb675.outer
bb924.preheader: ; preds = %bb684
define i32 @_ZNK21mrZEllipticalCylinder10viewingHitERK6ggRay3dddR18mrViewingHitRecordR16ggMaterialRecord(%struct.mrXEllipticalCylinder* %this, %struct.ggBox3* %ray, double %unnamed_arg, double %tmin, double %tmax, %struct.mrViewingHitRecord* %VHR, %struct.ggMaterialRecord* %unnamed_arg2) {
entry:
- %tmp80.i = getelementptr %struct.mrViewingHitRecord* %VHR, i32 0, i32 1, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp80.i = getelementptr %struct.mrViewingHitRecord, %struct.mrViewingHitRecord* %VHR, i32 0, i32 1, i32 0, i32 0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %tmp80.i
br i1 false, label %return, label %cond_next.i
br i1 false, label %return, label %cond_true
cond_true: ; preds = %cond_next.i
- %tmp3.i8 = getelementptr %struct.mrViewingHitRecord* %VHR, i32 0, i32 1, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp3.i8 = getelementptr %struct.mrViewingHitRecord, %struct.mrViewingHitRecord* %VHR, i32 0, i32 1, i32 0, i32 0 ; <double*> [#uses=1]
%tmp46 = load double* %tmp3.i8 ; <double> [#uses=0]
ret i32 1
bb: ; preds = %cond_next97
%tmp1 = load i32* @numi ; <i32> [#uses=1]
- %tmp2 = getelementptr [44 x i8]* @.str43, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp2 = getelementptr [44 x i8], [44 x i8]* @.str43, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp3 = call i32 (i8*, ...)* @printf( i8* %tmp2, i32 %tmp1 ) ; <i32> [#uses=0]
store i32 0, i32* %i
br label %bb13
bb4: ; preds = %bb13
%tmp5 = load i32* %i ; <i32> [#uses=1]
%tmp6 = load i32* %i ; <i32> [#uses=1]
- %tmp7 = getelementptr [17 x i32]* @trialx, i32 0, i32 %tmp6 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr [17 x i32], [17 x i32]* @trialx, i32 0, i32 %tmp6 ; <i32*> [#uses=1]
%tmp8 = load i32* %tmp7 ; <i32> [#uses=1]
%tmp9 = call i32 @userfun( i32 %tmp8 ) ; <i32> [#uses=1]
- %tmp10 = getelementptr [17 x i32]* @correct_result, i32 0, i32 %tmp5 ; <i32*> [#uses=1]
+ %tmp10 = getelementptr [17 x i32], [17 x i32]* @correct_result, i32 0, i32 %tmp5 ; <i32*> [#uses=1]
store i32 %tmp9, i32* %tmp10
%tmp11 = load i32* %i ; <i32> [#uses=1]
%tmp12 = add i32 %tmp11, 1 ; <i32> [#uses=1]
bb18: ; preds = %bb49
%tmp19 = load i32* %i ; <i32> [#uses=1]
- %tmp20 = getelementptr [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp19 ; <{ i32, [3 x i32] }*> [#uses=1]
- %tmp21 = getelementptr { i32, [3 x i32] }* %tmp20, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp20 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp19 ; <{ i32, [3 x i32] }*> [#uses=1]
+ %tmp21 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp20, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp21
%tmp22 = load i32* %i ; <i32> [#uses=1]
- %tmp23 = getelementptr [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
- %tmp24 = getelementptr %struct.anon* %tmp23, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
- %tmp25 = getelementptr [3 x i32]* %tmp24, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp23 = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
+ %tmp24 = getelementptr %struct.anon, %struct.anon* %tmp23, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
+ %tmp25 = getelementptr [3 x i32], [3 x i32]* %tmp24, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp26 = load i32* %tmp25 ; <i32> [#uses=1]
- %tmp27 = getelementptr [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp22 ; <{ i32, [3 x i32] }*> [#uses=1]
- %tmp28 = getelementptr { i32, [3 x i32] }* %tmp27, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
- %tmp29 = getelementptr [3 x i32]* %tmp28, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp27 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp22 ; <{ i32, [3 x i32] }*> [#uses=1]
+ %tmp28 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp27, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
+ %tmp29 = getelementptr [3 x i32], [3 x i32]* %tmp28, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 %tmp26, i32* %tmp29
%tmp30 = load i32* %i ; <i32> [#uses=1]
- %tmp31 = getelementptr [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
- %tmp32 = getelementptr %struct.anon* %tmp31, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
- %tmp33 = getelementptr [3 x i32]* %tmp32, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp31 = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
+ %tmp32 = getelementptr %struct.anon, %struct.anon* %tmp31, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
+ %tmp33 = getelementptr [3 x i32], [3 x i32]* %tmp32, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp34 = load i32* %tmp33 ; <i32> [#uses=1]
- %tmp35 = getelementptr [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp30 ; <{ i32, [3 x i32] }*> [#uses=1]
- %tmp36 = getelementptr { i32, [3 x i32] }* %tmp35, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
- %tmp37 = getelementptr [3 x i32]* %tmp36, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp35 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp30 ; <{ i32, [3 x i32] }*> [#uses=1]
+ %tmp36 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp35, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
+ %tmp37 = getelementptr [3 x i32], [3 x i32]* %tmp36, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 %tmp34, i32* %tmp37
%tmp38 = load i32* %i ; <i32> [#uses=1]
- %tmp39 = getelementptr [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
- %tmp40 = getelementptr %struct.anon* %tmp39, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
- %tmp41 = getelementptr [3 x i32]* %tmp40, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp39 = getelementptr [13 x %struct.anon], [13 x %struct.anon]* @isa, i32 0, i32 0 ; <%struct.anon*> [#uses=1]
+ %tmp40 = getelementptr %struct.anon, %struct.anon* %tmp39, i32 0, i32 3 ; <[3 x i32]*> [#uses=1]
+ %tmp41 = getelementptr [3 x i32], [3 x i32]* %tmp40, i32 0, i32 2 ; <i32*> [#uses=1]
%tmp42 = load i32* %tmp41 ; <i32> [#uses=1]
- %tmp43 = getelementptr [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp38 ; <{ i32, [3 x i32] }*> [#uses=1]
- %tmp44 = getelementptr { i32, [3 x i32] }* %tmp43, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
- %tmp45 = getelementptr [3 x i32]* %tmp44, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp43 = getelementptr [5 x { i32, [3 x i32] }], [5 x { i32, [3 x i32] }]* @pgm, i32 0, i32 %tmp38 ; <{ i32, [3 x i32] }*> [#uses=1]
+ %tmp44 = getelementptr { i32, [3 x i32] }, { i32, [3 x i32] }* %tmp43, i32 0, i32 1 ; <[3 x i32]*> [#uses=1]
+ %tmp45 = getelementptr [3 x i32], [3 x i32]* %tmp44, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 %tmp42, i32* %tmp45
%tmp46 = load i32* %i ; <i32> [#uses=1]
call void @fix_operands( i32 %tmp46 )
bb55: ; preds = %bb49
%tmp56 = call i32 @search( ) ; <i32> [#uses=1]
store i32 %tmp56, i32* %num_sol
- %tmp57 = getelementptr [21 x i8]* @.str44, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp57 = getelementptr [21 x i8], [21 x i8]* @.str44, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp58 = load i32* %num_sol ; <i32> [#uses=1]
%tmp59 = call i32 (i8*, ...)* @printf( i8* %tmp57, i32 %tmp58 ) ; <i32> [#uses=0]
%tmp60 = load i32* @counters ; <i32> [#uses=1]
cond_true: ; preds = %bb55
store i32 0, i32* %total
- %tmp64 = getelementptr [12 x i8]* @.str45, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp64 = getelementptr [12 x i8], [12 x i8]* @.str45, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp65 = call i32 (i8*, ...)* @printf( i8* %tmp64 ) ; <i32> [#uses=0]
store i32 0, i32* %i
br label %bb79
bb66: ; preds = %bb79
%tmp67 = load i32* %i ; <i32> [#uses=1]
- %tmp68 = getelementptr [5 x i32]* @counter, i32 0, i32 %tmp67 ; <i32*> [#uses=1]
+ %tmp68 = getelementptr [5 x i32], [5 x i32]* @counter, i32 0, i32 %tmp67 ; <i32*> [#uses=1]
%tmp69 = load i32* %tmp68 ; <i32> [#uses=1]
- %tmp70 = getelementptr [5 x i8]* @.str46, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp70 = getelementptr [5 x i8], [5 x i8]* @.str46, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp71 = call i32 (i8*, ...)* @printf( i8* %tmp70, i32 %tmp69 ) ; <i32> [#uses=0]
%tmp72 = load i32* %i ; <i32> [#uses=1]
- %tmp73 = getelementptr [5 x i32]* @counter, i32 0, i32 %tmp72 ; <i32*> [#uses=1]
+ %tmp73 = getelementptr [5 x i32], [5 x i32]* @counter, i32 0, i32 %tmp72 ; <i32*> [#uses=1]
%tmp74 = load i32* %tmp73 ; <i32> [#uses=1]
%tmp75 = load i32* %total ; <i32> [#uses=1]
%tmp76 = add i32 %tmp74, %tmp75 ; <i32> [#uses=1]
br i1 %toBool84, label %bb66, label %bb85
bb85: ; preds = %bb79
- %tmp86 = getelementptr [12 x i8]* @.str47, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp86 = getelementptr [12 x i8], [12 x i8]* @.str47, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp87 = load i32* %total ; <i32> [#uses=1]
%tmp88 = call i32 (i8*, ...)* @printf( i8* %tmp86, i32 %tmp87 ) ; <i32> [#uses=0]
br label %cond_next
define i32 @a() {
entry:
%c = alloca %struct.anon ; <%struct.anon*> [#uses=2]
- %tmp = getelementptr %struct.anon* %c, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp1 = getelementptr i32* %tmp, i32 1 ; <i32*> [#uses=2]
+ %tmp = getelementptr %struct.anon, %struct.anon* %c, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr i32, i32* %tmp, i32 1 ; <i32*> [#uses=2]
%tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
%tmp3 = or i32 %tmp2, 11 ; <i32> [#uses=1]
%tmp4 = and i32 %tmp3, -21 ; <i32> [#uses=1]
define void @d_print_mod_list(%struct.d_print_info* %dpi, %struct.d_print_mod* %mods, i32 %suffix) nounwind {
entry:
- %0 = getelementptr %struct.d_print_info* %dpi, i32 0, i32 1 ; <i8**> [#uses=1]
+ %0 = getelementptr %struct.d_print_info, %struct.d_print_info* %dpi, i32 0, i32 1 ; <i8**> [#uses=1]
br i1 false, label %return, label %bb
bb: ; preds = %entry
%1 = load i8** %0, align 4 ; <i8*> [#uses=0]
- %2 = getelementptr %struct.d_print_info* %dpi, i32 0, i32 1 ; <i8**> [#uses=0]
+ %2 = getelementptr %struct.d_print_info, %struct.d_print_info* %dpi, i32 0, i32 1 ; <i8**> [#uses=0]
br label %bb21
bb21: ; preds = %bb21, %bb
bb41: ; preds = %bb82
%tmp = load i8* %opt.0, align 1 ; <i8> [#uses=0]
- %tmp1 = getelementptr i8* %opt.0, i32 1 ; <i8*> [#uses=2]
+ %tmp1 = getelementptr i8, i8* %opt.0, i32 1 ; <i8*> [#uses=2]
switch i32 0, label %bb81 [
i32 102, label %bb82
i32 110, label %bb79
br i1 false, label %bb84, label %bb41
bb84: ; preds = %bb82, %entry
- %tmp3 = getelementptr i8* null, i32 1 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr i8, i8* null, i32 1 ; <i8*> [#uses=1]
br label %bb82
}
bb23: ; preds = %bb23, %bb22
%sortv.233 = phi i32* [ getelementptr ([256 x i32]* @sort_value, i32 0, i32 0), %bb22 ], [ %sortv.2, %bb23 ] ; <i32*> [#uses=1]
%0 = load i32* %sortv.233, align 4 ; <i32> [#uses=0]
- %sortv.2 = getelementptr [256 x i32]* @sort_value, i32 0, i32 0 ; <i32*> [#uses=1]
+ %sortv.2 = getelementptr [256 x i32], [256 x i32]* @sort_value, i32 0, i32 0 ; <i32*> [#uses=1]
br i1 false, label %bb23, label %bb22
}
unreachable
bb203: ; preds = %entry
- %tmp = getelementptr i32* %decl, i32 1 ; <i32*> [#uses=1]
+ %tmp = getelementptr i32, i32* %decl, i32 1 ; <i32*> [#uses=1]
%tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=0]
br i1 false, label %bb207, label %bb204
bb204: ; preds = %bb203
- %tmp2 = getelementptr i32* %decl, i32 1 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr i32, i32* %decl, i32 1 ; <i32*> [#uses=1]
br label %bb208
bb207: ; preds = %bb203
br i1 false, label %bb67, label %bb63.preheader
bb67: ; preds = %bb65
- %2 = getelementptr %struct.IdList* %pColumn, i32 0, i32 0 ; <%struct..4sPragmaType**> [#uses=0]
+ %2 = getelementptr %struct.IdList, %struct.IdList* %pColumn, i32 0, i32 0 ; <%struct..4sPragmaType**> [#uses=0]
unreachable
bb69.loopexit: ; preds = %bb54, %entry
- %3 = getelementptr %struct.IdList* %pColumn, i32 0, i32 0 ; <%struct..4sPragmaType**> [#uses=1]
- %4 = getelementptr %struct.IdList* %pColumn, i32 0, i32 0 ; <%struct..4sPragmaType**> [#uses=1]
+ %3 = getelementptr %struct.IdList, %struct.IdList* %pColumn, i32 0, i32 0 ; <%struct..4sPragmaType**> [#uses=1]
+ %4 = getelementptr %struct.IdList, %struct.IdList* %pColumn, i32 0, i32 0 ; <%struct..4sPragmaType**> [#uses=1]
br label %bb63.preheader
bb63.preheader: ; preds = %bb69.loopexit, %bb65
br i1 false, label %bb77, label %bb84
bb77: ; preds = %bb76
- %3 = getelementptr [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 0 ; <%struct.cgraph_rtl_info*> [#uses=0]
+ %3 = getelementptr [1 x %struct.cgraph_rtl_info], [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 0 ; <%struct.cgraph_rtl_info*> [#uses=0]
unreachable
bb84: ; preds = %bb76
bb96: ; preds = %bb59, %entry
%5 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
- %6 = getelementptr %struct.rtx_def* %5, i32 0, i32 0 ; <i16*> [#uses=1]
+ %6 = getelementptr %struct.rtx_def, %struct.rtx_def* %5, i32 0, i32 0 ; <i16*> [#uses=1]
%7 = load i16* %6, align 2 ; <i16> [#uses=0]
br i1 false, label %bb147, label %bb97
bb147: ; preds = %bb97, %bb96
%9 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=1]
- %10 = getelementptr %struct.rtx_def* %9, i32 0, i32 0 ; <i16*> [#uses=1]
+ %10 = getelementptr %struct.rtx_def, %struct.rtx_def* %9, i32 0, i32 0 ; <i16*> [#uses=1]
%11 = load i16* %10, align 2 ; <i16> [#uses=0]
br i1 false, label %bb164, label %bb148
br label %bb164
bb164: ; preds = %bb152, %bb148, %bb147
- %12 = getelementptr [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 1 ; <%struct.cgraph_rtl_info*> [#uses=0]
+ %12 = getelementptr [1 x %struct.cgraph_rtl_info], [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 1 ; <%struct.cgraph_rtl_info*> [#uses=0]
br i1 false, label %bb165, label %bb166
bb165: ; preds = %bb164
bb211: ; preds = %bb168, %bb167
%14 = load %struct.rtx_def** %addr, align 4 ; <%struct.rtx_def*> [#uses=0]
- %15 = getelementptr [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 0 ; <%struct.cgraph_rtl_info*> [#uses=0]
+ %15 = getelementptr [1 x %struct.cgraph_rtl_info], [1 x %struct.cgraph_rtl_info]* null, i32 0, i32 0 ; <%struct.cgraph_rtl_info*> [#uses=0]
store %struct.rtx_def* null, %struct.rtx_def** null, align 4
br i1 false, label %bb212, label %bb213
bb214: ; preds = %bb213, %bb212
%16 = bitcast %struct.block_symbol* null to [1 x %struct.cgraph_rtl_info]* ; <[1 x %struct.cgraph_rtl_info]*> [#uses=1]
- %17 = getelementptr [1 x %struct.cgraph_rtl_info]* %16, i32 0, i32 1 ; <%struct.cgraph_rtl_info*> [#uses=0]
+ %17 = getelementptr [1 x %struct.cgraph_rtl_info], [1 x %struct.cgraph_rtl_info]* %16, i32 0, i32 1 ; <%struct.cgraph_rtl_info*> [#uses=0]
%18 = load %struct.rtx_def** %iftmp.1532, align 4 ; <%struct.rtx_def*> [#uses=0]
- %19 = getelementptr %struct.rtx_def* null, i32 0, i32 3 ; <%struct.u*> [#uses=1]
- %20 = getelementptr %struct.u* %19, i32 0, i32 0 ; <%struct.block_symbol*> [#uses=1]
+ %19 = getelementptr %struct.rtx_def, %struct.rtx_def* null, i32 0, i32 3 ; <%struct.u*> [#uses=1]
+ %20 = getelementptr %struct.u, %struct.u* %19, i32 0, i32 0 ; <%struct.block_symbol*> [#uses=1]
%21 = bitcast %struct.block_symbol* %20 to [1 x i64]* ; <[1 x i64]*> [#uses=1]
- %22 = getelementptr [1 x i64]* %21, i32 0, i32 0 ; <i64*> [#uses=0]
+ %22 = getelementptr [1 x i64], [1 x i64]* %21, i32 0, i32 0 ; <i64*> [#uses=0]
%23 = call %struct.rtx_def* @plus_constant(%struct.rtx_def* null, i64 0) nounwind ; <%struct.rtx_def*> [#uses=0]
unreachable
}
bb2: ; preds = %bb1
%2 = sub i32 %len.0, %13 ; <i32> [#uses=1]
- %3 = getelementptr %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
+ %3 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 0, i32* %3, align 4
- %4 = getelementptr %struct.mbuf* %m.0.ph, i32 0, i32 0 ; <%struct.mbuf**> [#uses=1]
+ %4 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 0 ; <%struct.mbuf**> [#uses=1]
%5 = load %struct.mbuf** %4, align 4 ; <%struct.mbuf*> [#uses=1]
br label %bb4.outer
%m.0.ph = phi %struct.mbuf* [ %5, %bb2 ], [ %mp, %bb4.preheader ] ; <%struct.mbuf*> [#uses=7]
%len.0.ph = phi i32 [ %2, %bb2 ], [ %req_len, %bb4.preheader ] ; <i32> [#uses=1]
%6 = icmp ne %struct.mbuf* %m.0.ph, null ; <i1> [#uses=1]
- %7 = getelementptr %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
- %8 = getelementptr %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
- %9 = getelementptr %struct.mbuf* %m.0.ph, i32 0, i32 3 ; <i8**> [#uses=1]
- %10 = getelementptr %struct.mbuf* %m.0.ph, i32 0, i32 3 ; <i8**> [#uses=1]
+ %7 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
+ %8 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 2 ; <i32*> [#uses=1]
+ %9 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 3 ; <i8**> [#uses=1]
+ %10 = getelementptr %struct.mbuf, %struct.mbuf* %m.0.ph, i32 0, i32 3 ; <i8**> [#uses=1]
br label %bb4
bb4: ; preds = %bb4.outer, %bb3
%15 = sub i32 %13, %len.0 ; <i32> [#uses=1]
store i32 %15, i32* %8, align 4
%16 = load i8** %9, align 4 ; <i8*> [#uses=1]
- %17 = getelementptr i8* %16, i32 %len.0 ; <i8*> [#uses=1]
+ %17 = getelementptr i8, i8* %16, i32 %len.0 ; <i8*> [#uses=1]
store i8* %17, i8** %10, align 4
br label %bb4
bb7: ; preds = %bb4
- %18 = getelementptr %struct.mbuf* %mp, i32 0, i32 5 ; <i16*> [#uses=1]
+ %18 = getelementptr %struct.mbuf, %struct.mbuf* %mp, i32 0, i32 5 ; <i16*> [#uses=1]
%19 = load i16* %18, align 2 ; <i16> [#uses=1]
%20 = zext i16 %19 to i32 ; <i32> [#uses=1]
%21 = and i32 %20, 2 ; <i32> [#uses=1]
bb8: ; preds = %bb7
%23 = sub i32 %req_len, %len.0 ; <i32> [#uses=1]
- %24 = getelementptr %struct.mbuf* %mp, i32 0, i32 6 ; <i32*> [#uses=1]
+ %24 = getelementptr %struct.mbuf, %struct.mbuf* %mp, i32 0, i32 6 ; <i32*> [#uses=1]
store i32 %23, i32* %24, align 4
ret void
define i32 @main(i32 %argc, i8** nocapture %argv) {
entry:
- %0 = getelementptr inbounds i8* undef, i64 5 ; <i8*> [#uses=1]
+ %0 = getelementptr inbounds i8, i8* undef, i64 5 ; <i8*> [#uses=1]
%1 = bitcast i8* %0 to i32* ; <i32*> [#uses=1]
store i32 undef, i32* %1, align 1
br i1 undef, label %k121.i.i, label %l117.i.i
unreachable
k133.i.i: ; preds = %k121.i.i
- %2 = getelementptr i8* undef, i64 5 ; <i8*> [#uses=1]
+ %2 = getelementptr i8, i8* undef, i64 5 ; <i8*> [#uses=1]
%3 = bitcast i8* %2 to i1* ; <i1*> [#uses=1]
%4 = load i1* %3 ; <i1> [#uses=1]
br i1 %4, label %k151.i.i, label %l147.i.i
%tmp11 = load i8** %tmp, align 8
%tmp12 = load i8* %tmp11, align 1
%tmp13 = zext i8 %tmp12 to i64
- %tmp14 = getelementptr inbounds i8* null, i64 undef
+ %tmp14 = getelementptr inbounds i8, i8* null, i64 undef
store i8* %tmp14, i8** %tmp, align 8
br label %bb1
%4 = tail call i32 @strlen(i8* %P) ; <i32> [#uses=1]
%5 = add i32 %x_addr.0, %0 ; <i32> [#uses=1]
%.sum = sub i32 %5, %4 ; <i32> [#uses=1]
- %6 = getelementptr i8* %3, i32 %.sum ; <i8*> [#uses=1]
+ %6 = getelementptr i8, i8* %3, i32 %.sum ; <i8*> [#uses=1]
ret i8* %6
}
%sv = alloca %"class.llvm::SmallVector", align 16
%0 = bitcast %"class.llvm::SmallVector"* %sv to i8*
call void @llvm.lifetime.start(i64 64, i8* %0) #1
- %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
- %FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
+ %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
+ %FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
%1 = bitcast %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i to i8*
store i8* %1, i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !4
- %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
+ %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
store i8* %1, i8** %EndX.i.i.i.i.i.i, align 8, !tbaa !4
- %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
- %add.ptr.i.i.i.i2.i.i = getelementptr inbounds %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i, i64 2
+ %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
+ %add.ptr.i.i.i.i2.i.i = getelementptr inbounds %"union.llvm::SmallVectorBase::U", %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i, i64 2
%add.ptr.i.i.i.i.i.i = bitcast %"union.llvm::SmallVectorBase::U"* %add.ptr.i.i.i.i2.i.i to i8*
store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 16, !tbaa !4
- %EndX.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
+ %EndX.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
%2 = load i8** %EndX.i, align 8, !tbaa !4
- %CapacityX.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
+ %CapacityX.i = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
%cmp.i = icmp ult i8* %2, %add.ptr.i.i.i.i.i.i
br i1 %cmp.i, label %Retry.i, label %if.end.i
br label %invoke.cont
if.end.i: ; preds = %entry
- %5 = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0
+ %5 = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0
invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"* %5, i64 0, i64 4)
to label %.noexc unwind label %lpad
br label %Retry.i
invoke.cont: ; preds = %new.notnull.i, %Retry.i
- %add.ptr.i = getelementptr inbounds i8* %3, i64 4
+ %add.ptr.i = getelementptr inbounds i8, i8* %3, i64 4
store i8* %add.ptr.i, i8** %EndX.i, align 8, !tbaa !4
%6 = load i8** %CapacityX.i, align 16, !tbaa !4
%cmp.i8 = icmp ult i8* %add.ptr.i, %6
br label %invoke.cont2
if.end.i14: ; preds = %invoke.cont
- %9 = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0
+ %9 = getelementptr inbounds %"class.llvm::SmallVector", %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0
invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"* %9, i64 0, i64 4)
to label %Retry.i10 unwind label %lpad
invoke.cont2: ; preds = %new.notnull.i11, %Retry.i10
%10 = phi i8* [ null, %Retry.i10 ], [ %7, %new.notnull.i11 ]
- %add.ptr.i12 = getelementptr inbounds i8* %10, i64 4
+ %add.ptr.i12 = getelementptr inbounds i8, i8* %10, i64 4
store i8* %add.ptr.i12, i8** %EndX.i, align 8, !tbaa !4
invoke void @_Z1gRN4llvm11SmallVectorIiLj8EEE(%"class.llvm::SmallVector"* %sv)
to label %invoke.cont3 unwind label %lpad
; PR5744
define i32 @test1({i16, i32} *%P) {
- %P2 = getelementptr {i16, i32} *%P, i32 0, i32 0
+ %P2 = getelementptr {i16, i32}, {i16, i32} *%P, i32 0, i32 0
store i16 42, i16* %P2
- %P3 = getelementptr {i16, i32} *%P, i32 0, i32 1
+ %P3 = getelementptr {i16, i32}, {i16, i32} *%P, i32 0, i32 1
%V = load i32* %P3
ret i32 %V
}
br label %bb69.i
bb69.i: ; preds = %bb57.i.preheader
- %tmp4 = getelementptr inbounds [4 x %struct.attribute_spec*]* @attribute_tables, i32 0, i32 undef ; <%struct.attribute_spec**> [#uses=1]
+ %tmp4 = getelementptr inbounds [4 x %struct.attribute_spec*], [4 x %struct.attribute_spec*]* @attribute_tables, i32 0, i32 undef ; <%struct.attribute_spec**> [#uses=1]
%tmp3 = load %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
br label %bb65.i
bb65.i: ; preds = %bb65.i.preheader, %bb64.i
%storemerge6.i = phi i32 [ 1, %bb64.i ], [ 0, %bb69.i ] ; <i32> [#uses=3]
- %scevgep14 = getelementptr inbounds %struct.attribute_spec* %tmp3, i32 %storemerge6.i, i32 0 ; <i8**> [#uses=1]
+ %scevgep14 = getelementptr inbounds %struct.attribute_spec, %struct.attribute_spec* %tmp3, i32 %storemerge6.i, i32 0 ; <i8**> [#uses=1]
%tmp2 = load i8** %scevgep14, align 4 ; <i8*> [#uses=0]
%tmp = load %struct.attribute_spec** %tmp4, align 4 ; <%struct.attribute_spec*> [#uses=1]
- %scevgep1516 = getelementptr inbounds %struct.attribute_spec* %tmp, i32 %storemerge6.i, i32 0 ; <i8**> [#uses=0]
+ %scevgep1516 = getelementptr inbounds %struct.attribute_spec, %struct.attribute_spec* %tmp, i32 %storemerge6.i, i32 0 ; <i8**> [#uses=0]
unreachable
bb64.i: ; Unreachable
%ivar38.i = load i64* @g
%tmp3 = bitcast i7* %tmp18.i to i8*
%add.ptr39.sum.i = add i64 %ivar38.i, 8
- %tmp40.i = getelementptr inbounds i8* %tmp3, i64 %add.ptr39.sum.i
+ %tmp40.i = getelementptr inbounds i8, i8* %tmp3, i64 %add.ptr39.sum.i
%tmp4 = bitcast i8* %tmp40.i to i64*
%tmp41.i = load i64* %tmp4
br i1 undef, label %if.then48.i, label %do.body57.i
%ivar59.i = load i64* @g
%tmp5 = bitcast i7* %tmp58.i to i8*
%add.ptr65.sum.i = add i64 %ivar59.i, 8
- %tmp66.i = getelementptr inbounds i8* %tmp5, i64 %add.ptr65.sum.i
+ %tmp66.i = getelementptr inbounds i8, i8* %tmp5, i64 %add.ptr65.sum.i
%tmp6 = bitcast i8* %tmp66.i to i64*
%tmp67.i = load i64* %tmp6
ret i32* undef
ret i32 0
dead:
- %P2 = getelementptr i32 *%P2, i32 52
- %Q2 = getelementptr i32 *%Q2, i32 52
+ %P2 = getelementptr i32, i32 *%P2, i32 52
+ %Q2 = getelementptr i32, i32 *%Q2, i32 52
store i32 4, i32* %P2
%A = load i32* %Q2
br i1 true, label %dead, label %dead2
entry:
%0 = load i8* %P, align 2
- %Q = getelementptr i8* %P, i32 1
+ %Q = getelementptr i8, i8* %P, i32 1
%1 = load i8* %Q, align 1
ret i8 %1
}
;; Unreachable code.
unreachable.bb:
- %gep.val = getelementptr inbounds %struct.type* %gep.val, i64 1
+ %gep.val = getelementptr inbounds %struct.type, %struct.type* %gep.val, i64 1
br i1 undef, label %u2.bb, label %u1.bb
u1.bb:
- %tmp1 = getelementptr inbounds %struct.type* %gep.val, i64 0, i32 0
+ %tmp1 = getelementptr inbounds %struct.type, %struct.type* %gep.val, i64 0, i32 0
store i64 -1, i64* %tmp1, align 8
br label %unreachable.bb
define i32 @test(i8* %p, i32 %i) nounwind {
entry:
- %P = getelementptr [4 x i32]* @G, i32 0, i32 %i
+ %P = getelementptr [4 x i32], [4 x i32]* @G, i32 0, i32 %i
%A = load i32* %P
store i8 4, i8* %p
%B = load i32* %P
while.body5:
%indvar = phi i32 [ 0, %entry ], [ %tmp6, %if.end ]
%tmp5 = add i32 %indvar, 2
- %arrayidx9 = getelementptr [5001 x i32]* @sortlist, i32 0, i32 %tmp5
+ %arrayidx9 = getelementptr [5001 x i32], [5001 x i32]* @sortlist, i32 0, i32 %tmp5
%tmp6 = add i32 %indvar, 1
- %arrayidx = getelementptr [5001 x i32]* @sortlist, i32 0, i32 %tmp6
+ %arrayidx = getelementptr [5001 x i32], [5001 x i32]* @sortlist, i32 0, i32 %tmp6
%tmp7 = load i32* %arrayidx, align 4
%tmp10 = load i32* %arrayidx9, align 4
%cmp11 = icmp sgt i32 %tmp7, %tmp10
%s.09 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
%p.08 = phi i32* [ %0, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
%2 = load i32* %p.08, align 4, !tbaa !5
- %arrayidx = getelementptr inbounds i32* %1, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
store i32 %2, i32* %arrayidx, align 4, !tbaa !5
%3 = load volatile i32* %p.08, align 4, !tbaa !5
%add = add nsw i32 %3, %s.09
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %incdec.ptr = getelementptr inbounds i32* %p.08, i64 1
+ %incdec.ptr = getelementptr inbounds i32, i32* %p.08, i64 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, %n
br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
br label %if.end
if.else:
- %arrayidx = getelementptr inbounds i32* %b, i64 2
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 2
store i32 10, i32* %arrayidx, align 4, !tbaa !5
br label %if.end
if.end:
%i.0 = phi i32 [ %0, %if.then ], [ 0, %if.else ]
%p.0 = phi i32* [ getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), %if.then ], [ %b, %if.else ]
- %add.ptr = getelementptr inbounds i32* %p.0, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %p.0, i64 1
%1 = load i32* %add.ptr, align 4, !tbaa !5
%add1 = add nsw i32 %1, %i.0
ret i32 %add1
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%0 = load i32* @outcnt, align 4 ; <i32> [#uses=1]
- %1 = getelementptr i8* %outbuf, i32 %0 ; <i8*> [#uses=1]
+ %1 = getelementptr i8, i8* %outbuf, i32 %0 ; <i8*> [#uses=1]
store i8 %bi_buf, i8* %1, align 1
%2 = load i32* @outcnt, align 4 ; <i32> [#uses=1]
%3 = icmp eq i32 %2, 16384 ; <i1> [#uses=1]
; CHECK-NEXT: phi
; CHECK-NEXT: getelementptr
%4 = load i32* @outcnt, align 4 ; <i32> [#uses=1]
- %5 = getelementptr i8* %outbuf, i32 %4 ; <i8*> [#uses=1]
+ %5 = getelementptr i8, i8* %outbuf, i32 %4 ; <i8*> [#uses=1]
store i8 %bi_buf, i8* %5, align 1
ret void
}
define void @_Z12testfunctionR1A(%struct.A* %iter) {
entry:
- %0 = getelementptr %struct.A* %iter, i32 0, i32 0 ; <i32*> [#uses=3]
+ %0 = getelementptr %struct.A, %struct.A* %iter, i32 0, i32 0 ; <i32*> [#uses=3]
%1 = load i32* %0, align 4 ; <i32> [#uses=2]
%2 = icmp eq i32 %1, 0 ; <i1> [#uses=1]
br i1 %2, label %return, label %bb.nph
bb.nph: ; preds = %entry
- %3 = getelementptr %struct.A* %iter, i32 0, i32 1 ; <i32*> [#uses=1]
+ %3 = getelementptr %struct.A, %struct.A* %iter, i32 0, i32 1 ; <i32*> [#uses=1]
br label %bb
bb: ; preds = %bb3.backedge, %bb.nph
define void @yes(i1 %c, i32* %p, i32* %q) nounwind {
entry:
store i32 0, i32* %p
- %p1 = getelementptr inbounds i32* %p, i64 1
+ %p1 = getelementptr inbounds i32, i32* %p, i64 1
store i32 1, i32* %p1
br i1 %c, label %if.else, label %if.then
define void @watch_out_for_size_change(i1 %c, i32* %p, i32* %q) nounwind {
entry:
store i32 0, i32* %p
- %p1 = getelementptr inbounds i32* %p, i64 1
+ %p1 = getelementptr inbounds i32, i32* %p, i64 1
store i32 1, i32* %p1
br i1 %c, label %if.else, label %if.then
define %"struct.llvm::StringMapEntry<void*>"* @_Z3fooRN4llvm9StringMapIPvNS_15MallocAllocatorEEEPKc(%"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %X, i8* %P) ssp {
entry:
%tmp = alloca %"struct.llvm::StringRef", align 8
- %tmp.i = getelementptr inbounds %"struct.llvm::StringRef"* %tmp, i64 0, i32 0
+ %tmp.i = getelementptr inbounds %"struct.llvm::StringRef", %"struct.llvm::StringRef"* %tmp, i64 0, i32 0
store i8* %P, i8** %tmp.i, align 8
%tmp1.i = call i64 @strlen(i8* %P) nounwind readonly
- %tmp2.i = getelementptr inbounds %"struct.llvm::StringRef"* %tmp, i64 0, i32 1
+ %tmp2.i = getelementptr inbounds %"struct.llvm::StringRef", %"struct.llvm::StringRef"* %tmp, i64 0, i32 1
store i64 %tmp1.i, i64* %tmp2.i, align 8
%tmp1 = call %"struct.llvm::StringMapEntry<void*>"* @_ZN4llvm9StringMapIPvNS_15MallocAllocatorEE16GetOrCreateValueERKNS_9StringRefE(%"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %X, %"struct.llvm::StringRef"* %tmp) ssp
ret %"struct.llvm::StringMapEntry<void*>"* %tmp1
entry:
%elt = bitcast %"struct.llvm::StringRef"* %Key to i64*
%val = load i64* %elt
- %tmp = getelementptr inbounds %"struct.llvm::StringRef"* %Key, i64 0, i32 1
+ %tmp = getelementptr inbounds %"struct.llvm::StringRef", %"struct.llvm::StringRef"* %Key, i64 0, i32 1
%val2 = load i64* %tmp
- %tmp2.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0
+ %tmp2.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>", %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0
%tmp3.i = tail call i32 @_ZN4llvm13StringMapImpl15LookupBucketForENS_9StringRefE(%"struct.llvm::StringMapImpl"* %tmp2.i, i64 %val, i64 %val2)
- %tmp4.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0, i32 0
+ %tmp4.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>", %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0, i32 0
%tmp5.i = load %"struct.llvm::StringMapImpl::ItemBucket"** %tmp4.i, align 8
%tmp6.i = zext i32 %tmp3.i to i64
- %tmp7.i = getelementptr inbounds %"struct.llvm::StringMapImpl::ItemBucket"* %tmp5.i, i64 %tmp6.i, i32 1
+ %tmp7.i = getelementptr inbounds %"struct.llvm::StringMapImpl::ItemBucket", %"struct.llvm::StringMapImpl::ItemBucket"* %tmp5.i, i64 %tmp6.i, i32 1
%tmp8.i = load %"struct.llvm::StringMapEntryBase"** %tmp7.i, align 8
%tmp9.i = icmp eq %"struct.llvm::StringMapEntryBase"* %tmp8.i, null
%tmp13.i = icmp eq %"struct.llvm::StringMapEntryBase"* %tmp8.i, inttoptr (i64 -1 to %"struct.llvm::StringMapEntryBase"*)
bb4.i: ; preds = %entry
%tmp41.i = inttoptr i64 %val to i8*
- %tmp4.i35.i = getelementptr inbounds i8* %tmp41.i, i64 %val2
+ %tmp4.i35.i = getelementptr inbounds i8, i8* %tmp41.i, i64 %val2
%tmp.i.i = ptrtoint i8* %tmp4.i35.i to i64
%tmp1.i.i = trunc i64 %tmp.i.i to i32
%tmp3.i.i = trunc i64 %val to i32
bb.i.i: ; preds = %bb4.i
%tmp.i.i.i.i = bitcast i8* %tmp.i20.i.i to i32*
store i32 %tmp4.i.i, i32* %tmp.i.i.i.i, align 4
- %tmp1.i19.i.i = getelementptr inbounds i8* %tmp.i20.i.i, i64 8
+ %tmp1.i19.i.i = getelementptr inbounds i8, i8* %tmp.i20.i.i, i64 8
%0 = bitcast i8* %tmp1.i19.i.i to i8**
store i8* null, i8** %0, align 8
br label %_ZN4llvm14StringMapEntryIPvE6CreateINS_15MallocAllocatorES1_EEPS2_PKcS7_RT_T0_.exit.i
_ZN4llvm14StringMapEntryIPvE6CreateINS_15MallocAllocatorES1_EEPS2_PKcS7_RT_T0_.exit.i: ; preds = %bb.i.i, %bb4.i
- %tmp.i18.i.i = getelementptr inbounds i8* %tmp.i20.i.i, i64 16
+ %tmp.i18.i.i = getelementptr inbounds i8, i8* %tmp.i20.i.i, i64 16
%tmp15.i.i = zext i32 %tmp4.i.i to i64
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp.i18.i.i, i8* %tmp41.i, i64 %tmp15.i.i, i32 1, i1 false)
%tmp.i18.sum.i.i = add i64 %tmp15.i.i, 16
- %tmp17.i.i = getelementptr inbounds i8* %tmp.i20.i.i, i64 %tmp.i18.sum.i.i
+ %tmp17.i.i = getelementptr inbounds i8, i8* %tmp.i20.i.i, i64 %tmp.i18.sum.i.i
store i8 0, i8* %tmp17.i.i, align 1
- %tmp.i.i.i = getelementptr inbounds i8* %tmp.i20.i.i, i64 8
+ %tmp.i.i.i = getelementptr inbounds i8, i8* %tmp.i20.i.i, i64 8
%1 = bitcast i8* %tmp.i.i.i to i8**
store i8* null, i8** %1, align 8
%tmp22.i = load %"struct.llvm::StringMapEntryBase"** %tmp7.i, align 8
ret %"struct.llvm::StringMapEntry<void*>"* %tmp16.i
bb9.i: ; preds = %_ZN4llvm14StringMapEntryIPvE6CreateINS_15MallocAllocatorES1_EEPS2_PKcS7_RT_T0_.exit.i
- %tmp25.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0, i32 3
+ %tmp25.i = getelementptr inbounds %"struct.llvm::StringMap<void*,llvm::MallocAllocator>", %"struct.llvm::StringMap<void*,llvm::MallocAllocator>"* %this, i64 0, i32 0, i32 3
%tmp26.i = load i32* %tmp25.i, align 8
%tmp27.i = add i32 %tmp26.i, -1
store i32 %tmp27.i, i32* %tmp25.i, align 8
declare void @test1f(i8*)
define void @test1(%t* noalias %stuff ) {
- %p = getelementptr inbounds %t* %stuff, i32 0, i32 0
+ %p = getelementptr inbounds %t, %t* %stuff, i32 0, i32 0
%before = load i32* %p
call void @test1f(i8* null)
%l0 = load i8* %phi
call void @bar(i8 %l0)
%l1 = load i8* %phi
- %next = getelementptr inbounds i8* %phi, i8 %l1
+ %next = getelementptr inbounds i8, i8* %phi, i8 %l1
br label %loop
}
then:
%i = sext i32 %x to i64
- %p = getelementptr [100 x i32]* @G, i64 0, i64 %i
+ %p = getelementptr [100 x i32], [100 x i32]* @G, i64 0, i64 %i
store i32 %z, i32* %p
br label %end
end:
%j = sext i32 %x to i64
- %q = getelementptr [100 x i32]* @G, i64 0, i64 %j
+ %q = getelementptr [100 x i32], [100 x i32]* @G, i64 0, i64 %j
%n = load i32* %q
ret i32 %n
}
br label %for.cond
for.cond: ; preds = %1, %0
br label %for.end
- %f2 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 2
- %f9 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 7
+ %f2 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 2
+ %f9 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 7
br label %for.cond
for.end: ; preds = %for.cond
br i1 true, label %if.else, label %if.then
if.then: ; preds = %for.end
- %f22 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 2
- %f7 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 5
+ %f22 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 2
+ %f7 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 5
%tmp7 = load i32* %f7, align 8
br label %if.end40
if.else: ; preds = %for.end
br i1 false, label %for.cond18, label %if.then6
if.then6: ; preds = %if.else
- %f3 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 2
+ %f3 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 2
%tmp10 = bitcast %struct.S0* %p1 to i16*
- %f5 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 3
+ %f5 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp11 = bitcast [2 x i8]* %f5 to i16*
%bf.load13 = load i16* %tmp11, align 8
br label %if.end36
call void @fn4()
br i1 true, label %if.end, label %if.end36
if.end: ; preds = %for.cond18
- %f321 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 2
- %f925 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 7
- %f526 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 3
+ %f321 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 2
+ %f925 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 7
+ %f526 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp15 = bitcast [2 x i8]* %f526 to i16*
%bf.load27 = load i16* %tmp15, align 8
%tmp16 = bitcast %struct.S0* %p1 to i16*
br label %if.end36
if.end36: ; preds = %if.end, %for.cond18, %if.then6
- %f537 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 3
+ %f537 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp17 = bitcast [2 x i8]* %f537 to i16*
%bf.load38 = load i16* %tmp17, align 8
%bf.clear39 = and i16 %bf.load38, -16384
br label %if.end40
if.end40: ; preds = %if.end36, %if.then
- %f6 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 4
+ %f6 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 4
%tmp18 = load i32* %f6, align 4
call void @fn2(i32 %tmp18)
- %f8 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 6
+ %f8 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 6
%tmp19 = load i32* %f8, align 4
%tobool41 = icmp eq i32 %tmp19, 0
br i1 true, label %if.end50, label %if.then42
if.then42: ; preds = %if.end40
%tmp20 = bitcast %struct.S0* %p1 to i16*
- %f547 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 3
+ %f547 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp21 = bitcast [2 x i8]* %f547 to i16*
%bf.load48 = load i16* %tmp21, align 8
br label %if.end50
if.end50: ; preds = %if.then42, %if.end40
- %f551 = getelementptr inbounds %struct.S0* %p1, i64 0, i32 3
+ %f551 = getelementptr inbounds %struct.S0, %struct.S0* %p1, i64 0, i32 3
%tmp22 = bitcast [2 x i8]* %f551 to i16*
%bf.load52 = load i16* %tmp22, align 8
%bf.clear53 = and i16 %bf.load52, -16384
sw.bb: ; preds = %entry, %entry
%idxprom = sext i32 %i to i64
- %arrayidx = getelementptr inbounds double** %p, i64 0
+ %arrayidx = getelementptr inbounds double*, double** %p, i64 0
%0 = load double** %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds double* %0, i64 %idxprom
+ %arrayidx1 = getelementptr inbounds double, double* %0, i64 %idxprom
%1 = load double* %arrayidx1, align 8
%sub = fsub double %1, 1.000000e+00
%cmp = fcmp olt double %sub, 0.000000e+00
sw.bb2: ; preds = %if.end, %entry
%idxprom3 = sext i32 %i to i64
- %arrayidx4 = getelementptr inbounds double** %p, i64 0
+ %arrayidx4 = getelementptr inbounds double*, double** %p, i64 0
%2 = load double** %arrayidx4, align 8
- %arrayidx5 = getelementptr inbounds double* %2, i64 %idxprom3
+ %arrayidx5 = getelementptr inbounds double, double* %2, i64 %idxprom3
%3 = load double* %arrayidx5, align 8
; CHECK: sw.bb2:
; CHECK-NEXT-NOT: sext
define i32 @test3(i32* %p, i32* %q, i32** %Hack, i1 %C) {
; CHECK-LABEL: @test3(
block1:
- %B = getelementptr i32* %q, i32 1
+ %B = getelementptr i32, i32* %q, i32 1
store i32* %B, i32** %Hack
br i1 %C, label %block2, label %block3
; CHECK-NEXT: load i32* %B
block3:
- %A = getelementptr i32* %p, i32 1
+ %A = getelementptr i32, i32* %p, i32 1
store i32 0, i32* %A
br label %block4
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
- %P3 = getelementptr i32* %P2, i32 1
+ %P3 = getelementptr i32, i32* %P2, i32 1
%PRE = load i32* %P3
ret i32 %PRE
; CHECK: block4:
; CHECK: br label %block4
block3:
- %B = getelementptr i32* %q, i32 1
+ %B = getelementptr i32, i32* %q, i32 1
store i32* %B, i32** %Hack
- %A = getelementptr i32* %p, i32 1
+ %A = getelementptr i32, i32* %p, i32 1
store i32 0, i32* %A
br label %block4
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
- %P3 = getelementptr i32* %P2, i32 1
+ %P3 = getelementptr i32, i32* %P2, i32 1
%PRE = load i32* %P3
ret i32 %PRE
; CHECK: block4:
bb:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp6, %bb ]
%tmp6 = add i64 %indvar, 1
- %scevgep = getelementptr double* %G, i64 %tmp6
- %scevgep7 = getelementptr double* %G, i64 %indvar
+ %scevgep = getelementptr double, double* %G, i64 %tmp6
+ %scevgep7 = getelementptr double, double* %G, i64 %indvar
%2 = load double* %scevgep7, align 8
%3 = load double* %scevgep, align 8
%4 = fadd double %2, %3
bb:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp6, %bb ]
%tmp6 = add i64 %indvar, 1
- %scevgep = getelementptr double* %G, i64 %tmp6
- %scevgep7 = getelementptr double* %G, i64 %indvar
+ %scevgep = getelementptr double, double* %G, i64 %tmp6
+ %scevgep7 = getelementptr double, double* %G, i64 %indvar
%2 = load double* %scevgep7, align 8
%3 = load double* %scevgep, align 8
%4 = fadd double %2, %3
; This requires phi translation of the adds.
define void @test7(i32 %N, double* nocapture %G) nounwind ssp {
entry:
- %0 = getelementptr inbounds double* %G, i64 1
+ %0 = getelementptr inbounds double, double* %G, i64 1
store double 1.000000e+00, double* %0, align 8
%1 = add i32 %N, -1
%2 = icmp sgt i32 %1, 1
bb:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp9, %bb ]
%tmp8 = add i64 %indvar, 2
- %scevgep = getelementptr double* %G, i64 %tmp8
+ %scevgep = getelementptr double, double* %G, i64 %tmp8
%tmp9 = add i64 %indvar, 1
- %scevgep10 = getelementptr double* %G, i64 %tmp9
+ %scevgep10 = getelementptr double, double* %G, i64 %tmp9
%3 = load double* %scevgep10, align 8
%4 = load double* %scevgep, align 8
%5 = fadd double %3, %4
; CHECK: br label %block4
block3:
- %A = getelementptr i32* %p, i32 1
+ %A = getelementptr i32, i32* %p, i32 1
store i32 0, i32* %A
br label %block4
block4:
%P2 = phi i32* [%p, %block3], [%q, %block2]
- %P3 = getelementptr i32* %P2, i32 1
+ %P3 = getelementptr i32, i32* %P2, i32 1
%PRE = load i32* %P3
ret i32 %PRE
; CHECK: block4:
bb:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp9, %bb ]
%tmp8 = add i64 %indvar, 2
- %scevgep = getelementptr double* %G, i64 %tmp8
+ %scevgep = getelementptr double, double* %G, i64 %tmp8
%tmp9 = add i64 %indvar, 1
- %scevgep10 = getelementptr double* %G, i64 %tmp9
+ %scevgep10 = getelementptr double, double* %G, i64 %tmp9
%3 = load double* %scevgep10, align 8
%4 = load double* %scevgep, align 8
%5 = fadd double %3, %4
bb:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp11, %bb ]
- %scevgep = getelementptr double* %G, i64 %indvar
+ %scevgep = getelementptr double, double* %G, i64 %indvar
%tmp9 = add i64 %indvar, 2
- %scevgep10 = getelementptr double* %G, i64 %tmp9
+ %scevgep10 = getelementptr double, double* %G, i64 %tmp9
%tmp11 = add i64 %indvar, 1
- %scevgep12 = getelementptr double* %G, i64 %tmp11
+ %scevgep12 = getelementptr double, double* %G, i64 %tmp11
%2 = load double* %scevgep12, align 8
%3 = load double* %scevgep10, align 8
%4 = fadd double %2, %3
bb: ; preds = %entry
%2 = tail call i32 (...)* @bar() nounwind ; <i32> [#uses=0]
- %3 = getelementptr [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
+ %3 = getelementptr [100 x i32], [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
%4 = load i32* %3, align 4 ; <i32> [#uses=1]
store i32 %4, i32* @G, align 4
br label %bb3
bb1: ; preds = %entry
%5 = tail call i32 (...)* @baz() nounwind ; <i32> [#uses=0]
- %6 = getelementptr [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
+ %6 = getelementptr [100 x i32], [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
%7 = load i32* %6, align 4 ; <i32> [#uses=2]
store i32 %7, i32* @G, align 4
%8 = icmp eq i32 %7, 0 ; <i1> [#uses=1]
br i1 %8, label %bb3, label %bb4
bb3: ; preds = %bb1, %bb
- %9 = getelementptr [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
+ %9 = getelementptr [100 x i32], [100 x i32]* @H, i32 0, i32 %i ; <i32*> [#uses=1]
%DEAD = load i32* %9, align 4 ; <i32> [#uses=1]
ret i32 %DEAD
br i1 %cond, label %bb, label %bb1
bb:
- %b1 = getelementptr i32* %b, i32 17
+ %b1 = getelementptr i32, i32* %b, i32 17
store i32 4, i32* %b1
br label %bb2
bb1:
- %c1 = getelementptr i32* %c, i32 7
+ %c1 = getelementptr i32, i32* %c, i32 7
store i32 82, i32* %c1
br label %bb2
bb2:
%d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
%i = phi i32 [ 7, %bb1 ], [ 17, %bb ]
- %d1 = getelementptr i32* %d, i32 %i
+ %d1 = getelementptr i32, i32* %d, i32 %i
%dv = load i32* %d1
; CHECK: %dv = phi i32 [ 82, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
br label %bb2
bb1:
- %c1 = getelementptr i32* %c, i32 7
+ %c1 = getelementptr i32, i32* %c, i32 7
store i32 82, i32* %c1
br label %bb2
bb2:
%d = phi i32* [ %c, %bb1 ], [ %b, %bb ]
%i = phi i32 [ 7, %bb1 ], [ 0, %bb ]
- %d1 = getelementptr i32* %d, i32 %i
+ %d1 = getelementptr i32, i32* %d, i32 %i
%dv = load i32* %d1
; CHECK: %dv = phi i32 [ 82, %bb1 ], [ 4, %bb ]
; CHECK-NOT: load
for.body:
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp, %for.body ]
- %arrayidx6 = getelementptr double* %G, i64 %indvar
+ %arrayidx6 = getelementptr double, double* %G, i64 %indvar
%tmp = add i64 %indvar, 1
- %arrayidx = getelementptr double* %G, i64 %tmp
+ %arrayidx = getelementptr double, double* %G, i64 %tmp
%tmp3 = load double* %arrayidx
%tmp7 = load double* %arrayidx6
%add = fadd double %tmp3, %tmp7
entry:
%conv = bitcast i16* %A to i8*
tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 1, i64 200, i32 1, i1 false)
- %arrayidx = getelementptr inbounds i16* %A, i64 42
+ %arrayidx = getelementptr inbounds i16, i16* %A, i64 42
%tmp2 = load i16* %arrayidx
ret i16 %tmp2
; CHECK-LABEL: @memset_to_i16_local(
entry:
%conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
tail call void @llvm.memset.p0i8.i64(i8* %conv, i8 %Val, i64 400, i32 1, i1 false)
- %arrayidx = getelementptr inbounds float* %A, i64 42 ; <float*> [#uses=1]
+ %arrayidx = getelementptr inbounds float, float* %A, i64 42 ; <float*> [#uses=1]
%tmp2 = load float* %arrayidx ; <float> [#uses=1]
ret float %tmp2
; CHECK-LABEL: @memset_to_float_local(
br label %Cont
Cont:
- %P2 = getelementptr i16* %P, i32 4
+ %P2 = getelementptr i16, i16* %P, i32 4
%A = load i16* %P2
ret i16 %A
entry:
%conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %conv, i8* bitcast ({i32, float, i32 }* @GCst to i8*), i64 12, i32 1, i1 false)
- %arrayidx = getelementptr inbounds float* %A, i64 1 ; <float*> [#uses=1]
+ %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
%tmp2 = load float* %arrayidx ; <float> [#uses=1]
ret float %tmp2
; CHECK-LABEL: @memcpy_to_float_local(
entry:
%conv = bitcast float* %A to i8* ; <i8*> [#uses=1]
tail call void @llvm.memcpy.p0i8.p1i8.i64(i8* %conv, i8 addrspace(1)* bitcast ({i32, float, i32 } addrspace(1)* @GCst_as1 to i8 addrspace(1)*), i64 12, i32 1, i1 false)
- %arrayidx = getelementptr inbounds float* %A, i64 1 ; <float*> [#uses=1]
+ %arrayidx = getelementptr inbounds float, float* %A, i64 1 ; <float*> [#uses=1]
%tmp2 = load float* %arrayidx ; <float> [#uses=1]
ret float %tmp2
; CHECK-LABEL: @memcpy_to_float_local_as1(
store i32 %V, i32* %P
%P2 = bitcast i32* %P to i8*
- %P3 = getelementptr i8* %P2, i32 2
+ %P3 = getelementptr i8, i8* %P2, i32 2
%A = load i8* %P3
ret i8 %A
store i32 %V, i32* %P
%P2 = addrspacecast i32* %P to i8 addrspace(1)*
- %P3 = getelementptr i8 addrspace(1)* %P2, i32 2
+ %P3 = getelementptr i8, i8 addrspace(1)* %P2, i32 2
%A = load i8 addrspace(1)* %P3
ret i8 %A
define i8 @coerce_offset_nonlocal0(i32* %P, i1 %cond) {
%P2 = bitcast i32* %P to float*
%P3 = bitcast i32* %P to i8*
- %P4 = getelementptr i8* %P3, i32 2
+ %P4 = getelementptr i8, i8* %P3, i32 2
br i1 %cond, label %T, label %F
T:
store i32 57005, i32* %P
;; non-local i32 -> i8 partial redundancy load forwarding.
define i8 @coerce_offset_pre0(i32* %P, i1 %cond) {
%P3 = bitcast i32* %P to i8*
- %P4 = getelementptr i8* %P3, i32 2
+ %P4 = getelementptr i8, i8* %P3, i32 2
br i1 %cond, label %T, label %F
T:
store i32 42, i32* %P
br i1 %cond2, label %T1, label %TY
T1:
- %P2 = getelementptr i32* %P, i32 %A
+ %P2 = getelementptr i32, i32* %P, i32 %A
%x = load i32* %P2
%cond = call i1 @cond2()
br i1 %cond, label %TX, label %F
F:
- %P3 = getelementptr i32* %P, i32 2
+ %P3 = getelementptr i32, i32* %P, i32 2
store i32 17, i32* %P3
store i32 42, i32* %P2 ; Provides "P[A]".
br label %block4
block3:
- %p2 = getelementptr i32* %p, i32 43
+ %p2 = getelementptr i32, i32* %p, i32 43
store i32 97, i32* %p2
br label %block4
br i1 %cmpxy, label %block6, label %exit
block6:
- %C = getelementptr i32* %p, i32 %B
+ %C = getelementptr i32, i32* %p, i32 %B
br i1 %cmpxy, label %block7, label %exit
block7:
define i8 @phi_trans4(i8* %p) {
; CHECK-LABEL: @phi_trans4(
entry:
- %X3 = getelementptr i8* %p, i32 192
+ %X3 = getelementptr i8, i8* %p, i32 192
store i8 192, i8* %X3
- %X = getelementptr i8* %p, i32 4
+ %X = getelementptr i8, i8* %p, i32 4
%Y = load i8* %X
br label %loop
loop:
%i = phi i32 [4, %entry], [192, %loop]
- %X2 = getelementptr i8* %p, i32 %i
+ %X2 = getelementptr i8, i8* %p, i32 %i
%Y2 = load i8* %X2
; CHECK: loop:
; CHECK-LABEL: @phi_trans5(
entry:
- %X4 = getelementptr i8* %p, i32 2
+ %X4 = getelementptr i8, i8* %p, i32 2
store i8 19, i8* %X4
- %X = getelementptr i8* %p, i32 4
+ %X = getelementptr i8, i8* %p, i32 4
%Y = load i8* %X
br label %loop
loop:
%i = phi i32 [4, %entry], [3, %cont]
- %X2 = getelementptr i8* %p, i32 %i
+ %X2 = getelementptr i8, i8* %p, i32 %i
%Y2 = load i8* %X2 ; Ensure this load is not being incorrectly replaced.
%cond = call i1 @cond2()
br i1 %cond, label %cont, label %out
cont:
- %Z = getelementptr i8* %X2, i32 -1
+ %Z = getelementptr i8, i8* %X2, i32 -1
%Z2 = bitcast i8 *%Z to i32*
store i32 50462976, i32* %Z2 ;; (1 << 8) | (2 << 16) | (3 << 24)
; CHECK: store i32
-; CHECK-NEXT: getelementptr i8* %p, i32 3
+; CHECK-NEXT: getelementptr i8, i8* %p, i32 3
; CHECK-NEXT: load i8*
br label %loop
%x = alloca [256 x i32], align 4 ; <[256 x i32]*> [#uses=2]
%tmp = bitcast [256 x i32]* %x to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 1024, i32 4, i1 false)
- %arraydecay = getelementptr inbounds [256 x i32]* %x, i32 0, i32 0 ; <i32*>
+ %arraydecay = getelementptr inbounds [256 x i32], [256 x i32]* %x, i32 0, i32 0 ; <i32*>
%tmp1 = load i32* %arraydecay ; <i32> [#uses=1]
ret i32 %tmp1
; CHECK-LABEL: @memset_to_load(
entry:
%0 = bitcast i8* %P to i32*
%tmp2 = load i32* %0
- %add.ptr = getelementptr inbounds i8* %P, i64 1
+ %add.ptr = getelementptr inbounds i8, i8* %P, i64 1
%tmp5 = load i8* %add.ptr
%conv = zext i8 %tmp5 to i32
%add = add nsw i32 %tmp2, %conv
br i1 %cmp, label %land.lhs.true, label %if.end
land.lhs.true: ; preds = %entry
- %arrayidx4 = getelementptr inbounds i8* %P, i64 1
+ %arrayidx4 = getelementptr inbounds i8, i8* %P, i64 1
%tmp5 = load i8* %arrayidx4, align 1
%conv6 = zext i8 %tmp5 to i32
ret i32 %conv6
br label %L1
L1:
- %arrayidx = getelementptr inbounds [3 x i8*]* @L, i32 0, i32 %idx
+ %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @L, i32 0, i32 %idx
%l = load i8** %arrayidx
indirectbr i8* %l, [label %L1, label %L2]
define fastcc void @pypy_array_constant() {
block0:
- %tmp.9 = getelementptr %structtype.test* bitcast ({ i32, { i32, [2 x i32] } }* @structinstance.test to %structtype.test*), i32 0, i32 0 ; <i32*> [#uses=0]
+ %tmp.9 = getelementptr %structtype.test, %structtype.test* bitcast ({ i32, { i32, [2 x i32] } }* @structinstance.test to %structtype.test*), i32 0, i32 0 ; <i32*> [#uses=0]
ret void
}
define %struct.__CFDictionary* @_ZN18SFLMutableListItem18GetPrefsDictionaryEv(%struct.SFLMutableListItem* %this) {
entry:
- %tmp4 = getelementptr %struct.SFLMutableListItem* %this, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp4 = getelementptr %struct.SFLMutableListItem, %struct.SFLMutableListItem* %this, i32 0, i32 0 ; <i16*> [#uses=1]
%tmp5 = load i16* %tmp4 ; <i16> [#uses=1]
%tmp6 = icmp eq i16 %tmp5, 0 ; <i1> [#uses=1]
br i1 %tmp6, label %cond_next22, label %cond_true
define i8 @func() {
entry:
- %tmp10 = getelementptr [2 x i32]* getelementptr ([6 x [2 x i32]]* @aaui1, i32 0, i32 0), i32 5, i32 1 ; <i32*> [#uses=1]
+ %tmp10 = getelementptr [2 x i32], [2 x i32]* getelementptr ([6 x [2 x i32]]* @aaui1, i32 0, i32 0), i32 5, i32 1 ; <i32*> [#uses=1]
%tmp11 = load i32* %tmp10, align 4 ; <i32> [#uses=1]
%tmp12 = call i32 (...)* @func3( i32* null, i32 0, i32 %tmp11 ) ; <i32> [#uses=0]
ret i8 undef
@mm = internal global [16 x [31 x double]] zeroinitializer, align 32
define void @test(i32 %X) {
- %P = getelementptr [16 x [31 x double]]* @mm, i32 0, i32 0, i32 %X
+ %P = getelementptr [16 x [31 x double]], [16 x [31 x double]]* @mm, i32 0, i32 0, i32 %X
store double 1.0, double* %P
ret void
}
define double @get(i32 %X) {
- %P = getelementptr [16 x [31 x double]]* @mm, i32 0, i32 0, i32 %X
+ %P = getelementptr [16 x [31 x double]], [16 x [31 x double]]* @mm, i32 0, i32 0, i32 %X
%V = load double* %P
ret double %V
}
entry:
%malloccall = tail call i8* @malloc(i32 trunc (i64 mul (i64 ptrtoint (i32* getelementptr (i32* null, i32 1) to i64), i64 2000000) to i32))
%tmp = bitcast i8* %malloccall to [1000000 x %struct.foo]*
- %.sub = getelementptr [1000000 x %struct.foo]* %tmp, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
+ %.sub = getelementptr [1000000 x %struct.foo], [1000000 x %struct.foo]* %tmp, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
store %struct.foo* %.sub, %struct.foo** @X, align 4
ret void
}
bb1: ; preds = %bb1, %bb1.thread
%tmp = phi %struct.foo* [ %tmpLD1, %bb1.thread ], [ %tmpLD1, %bb1 ] ; <%struct.foo*> [#uses=1]
- %0 = getelementptr %struct.foo* %tmp, i32 1 ; <%struct.foo*> [#uses=0]
+ %0 = getelementptr %struct.foo, %struct.foo* %tmp, i32 1 ; <%struct.foo*> [#uses=0]
br label %bb1
}
entry:
%malloccall = tail call i8* @malloc(i32 trunc (i64 mul (i64 ptrtoint (i32* getelementptr (i32* null, i32 1) to i64), i64 2000000) to i32))
%tmp = bitcast i8* %malloccall to [1000000 x %struct.foo]*
- %.sub = getelementptr [1000000 x %struct.foo]* %tmp, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
+ %.sub = getelementptr [1000000 x %struct.foo], [1000000 x %struct.foo]* %tmp, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
store %struct.foo* %.sub, %struct.foo** @X, align 4
ret void
}
br label %bb1
bb: ; preds = %bb1
- %0 = getelementptr %struct.node* %t.0, i64 0, i32 1 ; <i32*> [#uses=1]
+ %0 = getelementptr %struct.node, %struct.node* %t.0, i64 0, i32 1 ; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=1]
- %2 = getelementptr %struct.node* %t.0, i64 0, i32 0 ; <%struct.node**> [#uses=1]
+ %2 = getelementptr %struct.node, %struct.node* %t.0, i64 0, i32 0 ; <%struct.node**> [#uses=1]
br label %bb1
bb1: ; preds = %bb, %entry
%5 = bitcast i8* %4 to i64* ; <i64*> [#uses=1]
store i64* %5, i64** @TOP, align 8
%6 = load i64** @TOP, align 8 ; <i64*> [#uses=1]
- %7 = getelementptr inbounds i64* %6, i64 13 ; <i64*> [#uses=1]
+ %7 = getelementptr inbounds i64, i64* %6, i64 13 ; <i64*> [#uses=1]
store i64 0, i64* %7, align 8
ret void
}
define internal i32 @test2_helper(%closure* %this, i32 %b) {
entry:
- %0 = getelementptr inbounds %closure* %this, i32 0, i32 0
+ %0 = getelementptr inbounds %closure, %closure* %this, i32 0, i32 0
%1 = load i32* %0, align 4
%add = add nsw i32 %1, %b
ret i32 %add
unreachable
bb.nph.i:
- %scevgep.i539 = getelementptr i8* %C, i64 4
+ %scevgep.i539 = getelementptr i8, i8* %C, i64 4
unreachable
xx:
; arbitrary constant expression, the code generator can't handle it.
define internal void @init1() {
entry:
- %tmp = getelementptr inbounds %struct.foo* @X, i32 0, i32 0
+ %tmp = getelementptr inbounds %struct.foo, %struct.foo* @X, i32 0, i32 0
store i32* inttoptr (i64 sdiv (i64 ptrtoint (i32* @G to i64), i64 ptrtoint (i32* @H to i64)) to i32*), i32** %tmp, align 8
ret void
}
; PR11705 - ptrtoint isn't safe in general in global initializers.
define internal void @init2() {
entry:
- %tmp = getelementptr inbounds %struct.bar* @X2, i32 0, i32 0
+ %tmp = getelementptr inbounds %struct.bar, %struct.bar* @X2, i32 0, i32 0
store i128 ptrtoint (i32* @G to i128), i128* %tmp, align 16
ret void
}
}
define internal void @CTOR5() {
- %X.2p = getelementptr inbounds { i32, [2 x i32] }* @X, i32 0, i32 1, i32 0 ; <i32*> [#uses=2]
+ %X.2p = getelementptr inbounds { i32, [2 x i32] }, { i32, [2 x i32] }* @X, i32 0, i32 1, i32 0 ; <i32*> [#uses=2]
%X.2 = load i32* %X.2p ; <i32> [#uses=1]
- %X.1p = getelementptr inbounds { i32, [2 x i32] }* @X, i32 0, i32 0 ; <i32*> [#uses=1]
+ %X.1p = getelementptr inbounds { i32, [2 x i32] }, { i32, [2 x i32] }* @X, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 %X.2, i32* %X.1p
store i32 42, i32* %X.2p
ret void
define internal void @CTOR9() {
entry:
%0 = bitcast %struct.B* @GV1 to i8*
- %1 = getelementptr inbounds i8* %0, i64 16
+ %1 = getelementptr inbounds i8, i8* %0, i64 16
%2 = bitcast i8* %1 to %struct.A*
%3 = bitcast %struct.B* @GV1 to i8***
store i8** getelementptr inbounds ([3 x i8*]* @GV2, i64 1, i64 0), i8*** %3
define internal void @test(i32 %n) nounwind noinline {
entry:
%idxprom = sext i32 %n to i64
- %arrayidx = getelementptr inbounds [3 x i8*]* @test.x, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [3 x i8*], [3 x i8*]* @test.x, i64 0, i64 %idxprom
%0 = load i8** %arrayidx, align 8
indirectbr i8* %0, [label %a, label %b, label %c]
@G = internal global [4 x i32] zeroinitializer
define void @foo(i32 %X) {
- %Ptr = getelementptr [4 x i32]* @G, i32 0, i32 %X
+ %Ptr = getelementptr [4 x i32], [4 x i32]* @G, i32 0, i32 %X
store i32 1, i32* %Ptr
ret void
}
}
define void @storeinit(i32 %i) {
- %Ptr = getelementptr { i32, [4 x float] }* @G, i32 0, i32 1, i32 %i ; <float*> [#uses=1]
+ %Ptr = getelementptr { i32, [4 x float] }, { i32, [4 x float] }* @G, i32 0, i32 1, i32 %i ; <float*> [#uses=1]
store float 1.000000e+00, float* %Ptr
ret void
}
define float @readval(i32 %i) {
- %Ptr = getelementptr { i32, [4 x float] }* @G, i32 0, i32 1, i32 %i ; <float*> [#uses=1]
+ %Ptr = getelementptr { i32, [4 x float] }, { i32, [4 x float] }* @G, i32 0, i32 1, i32 %i ; <float*> [#uses=1]
%V = load float* %Ptr ; <float> [#uses=1]
ret float %V
}
ret void
}
define i32 @borf(i64 %i, i64 %j) {
- %p = getelementptr inbounds [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0
+ %p = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 0
%a = load i32* %p
- %q = getelementptr inbounds [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0
+ %q = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 0, i32 1, i64 0
%b = load i32* %q
%c = add i32 %a, %b
ret i32 %c
}
define i32 @borg(i64 %i, i64 %j) {
- %p = getelementptr inbounds [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1
+ %p = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 0, i64 1
%a = load i32* %p
- %q = getelementptr inbounds [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1
+ %q = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 1, i32 1, i64 1
%b = load i32* %q
%c = add i32 %a, %b
ret i32 %c
}
define i32 @borh(i64 %i, i64 %j) {
- %p = getelementptr inbounds [3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 2
+ %p = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 0, i64 2
%a = load i32* %p
- %q = getelementptr inbounds [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2
+ %q = getelementptr inbounds [3 x %struct.X], [3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2
%b = load i32* %q
%c = add i32 %a, %b
ret i32 %c
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ]
- %1 = getelementptr %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
+ %1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
%2 = load i32* %1, align 4
%3 = add i32 %2, %sum.0.reg2mem.0
%indvar.next = add i32 %i.0.reg2mem.0, 1
entry:
%malloccall = tail call i8* @malloc(i64 8000000) ; <i8*> [#uses=1]
%0 = bitcast i8* %malloccall to [1000000 x %struct.foo]* ; <[1000000 x %struct.foo]*> [#uses=1]
- %.sub = getelementptr [1000000 x %struct.foo]* %0, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
+ %.sub = getelementptr [1000000 x %struct.foo], [1000000 x %struct.foo]* %0, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
store %struct.foo* %.sub, %struct.foo** @X, align 4
ret void
}
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=2]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ] ; <i32> [#uses=1]
- %1 = getelementptr %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0 ; <i32*> [#uses=1]
+ %1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0 ; <i32*> [#uses=1]
%2 = load i32* %1, align 4 ; <i32> [#uses=1]
%3 = add i32 %2, %sum.0.reg2mem.0 ; <i32> [#uses=2]
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ]
- %1 = getelementptr %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
+ %1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
%2 = load i32* %1, align 4
%3 = add i32 %2, %sum.0.reg2mem.0
%indvar.next = add i32 %i.0.reg2mem.0, 1
bb1: ; preds = %bb1, %bb1.thread
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %3, %bb1 ]
- %1 = getelementptr %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
+ %1 = getelementptr %struct.foo, %struct.foo* %0, i32 %i.0.reg2mem.0, i32 0
%2 = load i32* %1, align 4
%3 = add i32 %2, %sum.0.reg2mem.0
%indvar.next = add i32 %i.0.reg2mem.0, 1
entry:
%malloccall = tail call i8* @malloc(i64 8000000) ; <i8*> [#uses=1]
%tmp = bitcast i8* %malloccall to [1000000 x %struct.foo]* ; <[1000000 x %struct.foo]*> [#uses=1]
- %.sub = getelementptr [1000000 x %struct.foo]* %tmp, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
+ %.sub = getelementptr [1000000 x %struct.foo], [1000000 x %struct.foo]* %tmp, i32 0, i32 0 ; <%struct.foo*> [#uses=1]
store %struct.foo* %.sub, %struct.foo** @X, align 4
ret void
}
%tmp = phi %struct.foo* [%tmpLD1, %bb1.thread ], [ %tmpLD2, %bb1 ] ; <i32> [#uses=2]
%i.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=2]
%sum.0.reg2mem.0 = phi i32 [ 0, %bb1.thread ], [ %tmp3, %bb1 ] ; <i32> [#uses=1]
- %tmp1 = getelementptr %struct.foo* %tmp, i32 %i.0.reg2mem.0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.foo, %struct.foo* %tmp, i32 %i.0.reg2mem.0, i32 0 ; <i32*> [#uses=1]
%tmp2 = load i32* %tmp1, align 4 ; <i32> [#uses=1]
%tmp6 = add i32 %tmp2, %sum.0.reg2mem.0 ; <i32> [#uses=2]
- %tmp4 = getelementptr %struct.foo* %tmp, i32 %i.0.reg2mem.0, i32 1 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr %struct.foo, %struct.foo* %tmp, i32 %i.0.reg2mem.0, i32 1 ; <i32*> [#uses=1]
%tmp5 = load i32 * %tmp4
%tmp3 = add i32 %tmp5, %tmp6
%indvar.next = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
; PR13968
define void @qux() nounwind {
%b = bitcast i64** @a to i8*
- %g = getelementptr i64** @a, i32 1
+ %g = getelementptr i64*, i64** @a, i32 1
%cmp = icmp ne i8* null, %b
%cmp2 = icmp eq i8* null, %b
%cmp3 = icmp eq i64** null, %g
%P = bitcast i8* %malloccall to i32*
store i32* %P, i32** @G
%GV = load i32** @G
- %GVe = getelementptr i32* %GV, i32 40
+ %GVe = getelementptr i32, i32* %GV, i32 40
store i32 20, i32* %GVe
ret void
}
%P = bitcast i8* %malloccall to i32*
store i32* %P, i32** @G
%GV = load i32** @G
- %GVe = getelementptr i32* %GV, i32 40
+ %GVe = getelementptr i32, i32* %GV, i32 40
store i32 20, i32* %GVe
ret void
}
define void @foo() {
%Blah = alloca [58 x i8]
- %tmp.0 = getelementptr [58 x i8]* %Blah, i32 0, i32 0
+ %tmp.0 = getelementptr [58 x i8], [58 x i8]* %Blah, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp.0, i8* getelementptr inbounds ([58 x i8]* @G1, i32 0, i32 0), i32 58, i32 1, i1 false)
ret void
}
define i1 @bah(i64 %i) nounwind readonly optsize ssp {
entry:
- %arrayidx4 = getelementptr inbounds [4 x i8]* @d, i64 0, i64 %i
+ %arrayidx4 = getelementptr inbounds [4 x i8], [4 x i8]* @d, i64 0, i64 %i
%tmp5 = load i8* %arrayidx4, align 1
%array0 = bitcast [4 x i8]* @d to i8*
%tmp6 = load i8* %array0, align 1
@zero = internal global [10 x i32] zeroinitializer
define i32 @test1(i64 %idx) nounwind {
- %arrayidx = getelementptr inbounds [10 x i32]* @zero, i64 0, i64 %idx
+ %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* @zero, i64 0, i64 %idx
%l = load i32* %arrayidx
ret i32 %l
; CHECK-LABEL: @test1(
@mystr = internal global %struct.MYstr zeroinitializer ; <%struct.MYstr*> [#uses=3]
define internal void @vfu1(%struct.MYstr* byval align 4 %u) nounwind {
entry:
- %0 = getelementptr %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
+ %0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 99, i32* %0, align 4
; CHECK: %struct.MYstr* %u
- %1 = getelementptr %struct.MYstr* %u, i32 0, i32 0 ; <i8*> [#uses=1]
+ %1 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 97, i8* %1, align 4
; CHECK: %struct.MYstr* %u
br label %return
define internal i32 @vfu2(%struct.MYstr* byval align 4 %u) nounwind readonly {
entry:
- %0 = getelementptr %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
+ %0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; <i32*> [#uses=1]
%1 = load i32* %0
; CHECK: load i32* getelementptr inbounds (%struct.MYstr* @mystr, i32 0, i32 1)
- %2 = getelementptr %struct.MYstr* %u, i32 0, i32 0 ; <i8*> [#uses=1]
+ %2 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 0 ; <i8*> [#uses=1]
%3 = load i8* %2
; CHECK: load i8* getelementptr inbounds (%struct.MYstr* @mystr, i32 0, i32 0)
%4 = zext i8 %3 to i32
indirectgoto: ; preds = %lab0, %entry
%indvar = phi i32 [ %indvar.next, %lab0 ], [ 0, %entry ] ; <i32> [#uses=2]
- %pc.addr.0 = getelementptr i32* %pc, i32 %indvar ; <i32*> [#uses=1]
+ %pc.addr.0 = getelementptr i32, i32* %pc, i32 %indvar ; <i32*> [#uses=1]
%tmp1.pn = load i32* %pc.addr.0 ; <i32> [#uses=1]
- %indirect.goto.dest.in = getelementptr inbounds [2 x i8*]* @bar.l, i32 0, i32 %tmp1.pn ; <i8**> [#uses=1]
+ %indirect.goto.dest.in = getelementptr inbounds [2 x i8*], [2 x i8*]* @bar.l, i32 0, i32 %tmp1.pn ; <i8**> [#uses=1]
%indirect.goto.dest = load i8** %indirect.goto.dest.in ; <i8*> [#uses=1]
indirectbr i8* %indirect.goto.dest, [label %lab0, label %end]
}
br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
in.bounds:
- %addr = getelementptr i32* %arr, i32 %idx
+ %addr = getelementptr i32, i32* %arr, i32 %idx
store i32 0, i32* %addr
%next = icmp sgt i32 %idx.dec, -1
br i1 %next, label %loop, label %exit
br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
in.bounds:
- %addr = getelementptr i32* %arr, i32 %idx
+ %addr = getelementptr i32, i32* %arr, i32 %idx
store i32 0, i32* %addr
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit, !prof !2
br i1 %abc.a, label %in.bounds.a, label %out.of.bounds, !prof !1
in.bounds.a:
- %addr.a = getelementptr i32* %arr_a, i32 %idx
+ %addr.a = getelementptr i32, i32* %arr_a, i32 %idx
store i32 0, i32* %addr.a
%abc.b = icmp slt i32 %idx, %len.b
br i1 %abc.b, label %in.bounds.b, label %out.of.bounds, !prof !1
in.bounds.b:
- %addr.b = getelementptr i32* %arr_b, i32 %idx
+ %addr.b = getelementptr i32, i32* %arr_b, i32 %idx
store i32 -1, i32* %addr.b
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
br i1 %abc.a, label %in.bounds.a, label %out.of.bounds, !prof !1
in.bounds.a:
- %addr.a = getelementptr i32* %arr_a, i32 %idx
+ %addr.a = getelementptr i32, i32* %arr_a, i32 %idx
store i32 0, i32* %addr.a
%abc.b = icmp slt i32 %idx, %len.b
br i1 %abc.b, label %in.bounds.b, label %out.of.bounds, !prof !1
in.bounds.b:
- %addr.b = getelementptr i32* %arr_b, i32 %idx
+ %addr.b = getelementptr i32, i32* %arr_b, i32 %idx
store i32 -1, i32* %addr.b
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
in.bounds:
- %addr = getelementptr i32* %arr, i32 %idx
+ %addr = getelementptr i32, i32* %arr, i32 %idx
store i32 0, i32* %addr
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
; CHECK-NEXT: br i1 %abc.postloop, label %in.bounds.postloop, label %out.of.bounds
; CHECK-LABEL: in.bounds.postloop:
-; CHECK-NEXT: %addr.postloop = getelementptr i32* %arr, i32 %idx.postloop
+; CHECK-NEXT: %addr.postloop = getelementptr i32, i32* %arr, i32 %idx.postloop
; CHECK-NEXT: store i32 0, i32* %addr.postloop
; CHECK-NEXT: %next.postloop = icmp slt i32 %idx.next.postloop, %n
; CHECK-NEXT: br i1 %next.postloop, label %loop.postloop, label %exit.loopexit
br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
in.bounds:
- %addr = getelementptr i32* %arr, i32 %idx.for.abc
+ %addr = getelementptr i32, i32* %arr, i32 %idx.for.abc
store i32 0, i32* %addr
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
in.bounds:
- %addr = getelementptr i32* %arr, i32 %array.idx
+ %addr = getelementptr i32, i32* %arr, i32 %array.idx
store i32 0, i32* %addr
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
br i1 %abc, label %in.bounds, label %out.of.bounds
in.bounds:
- %addr = getelementptr i32* %arr, i32 %array.idx
+ %addr = getelementptr i32, i32* %arr, i32 %array.idx
store i32 0, i32* %addr
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
br i1 %abc, label %in.bounds, label %out.of.bounds, !prof !1
in.bounds: ; preds = %loop
- %addr = getelementptr i32* %arr, i32 %idx
+ %addr = getelementptr i32, i32* %arr, i32 %idx
store i32 0, i32* %addr
%next = icmp slt i32 %idx.next, %n
br i1 %next, label %loop, label %exit
br i1 %abc.i, label %in.bounds.i, label %out.of.bounds.i, !prof !1
in.bounds.i: ; preds = %loop.i
- %addr.i = getelementptr i32* %arr, i32 %idx.i
+ %addr.i = getelementptr i32, i32* %arr, i32 %idx.i
store i32 0, i32* %addr.i
%next.i = icmp slt i32 %idx.next.i, %n
br i1 %next.i, label %loop.i, label %exit.i
br i1 %abc.i.i, label %in.bounds.i.i, label %out.of.bounds.i.i, !prof !1
in.bounds.i.i: ; preds = %loop.i.i
- %addr.i.i = getelementptr i32* %arr, i32 %idx.i.i
+ %addr.i.i = getelementptr i32, i32* %arr, i32 %idx.i.i
store i32 0, i32* %addr.i.i
%next.i.i = icmp slt i32 %idx.next.i.i, %n
br i1 %next.i.i, label %loop.i.i, label %exit.i.i
br i1 %abc.i, label %in.bounds.i, label %out.of.bounds.i, !prof !1
in.bounds.i: ; preds = %loop.i
- %addr.i = getelementptr i32* %arr, i32 %idx.i
+ %addr.i = getelementptr i32, i32* %arr, i32 %idx.i
store i32 0, i32* %addr.i
%next.i = icmp slt i32 %idx.next.i, %n
br i1 %next.i, label %loop.i, label %exit.i
br i1 %abc.i5, label %in.bounds.i9, label %out.of.bounds.i10, !prof !1
in.bounds.i9: ; preds = %loop.i6
- %addr.i7 = getelementptr i32* %arr, i32 %idx.i3
+ %addr.i7 = getelementptr i32, i32* %arr, i32 %idx.i3
store i32 0, i32* %addr.i7
%next.i8 = icmp slt i32 %idx.next.i4, %n
br i1 %next.i8, label %loop.i6, label %exit.i11
br i1 %abc.i.i, label %in.bounds.i.i, label %out.of.bounds.i.i, !prof !1
in.bounds.i.i: ; preds = %loop.i.i
- %addr.i.i = getelementptr i32* %arr, i32 %idx.i.i
+ %addr.i.i = getelementptr i32, i32* %arr, i32 %idx.i.i
store i32 0, i32* %addr.i.i
%next.i.i = icmp slt i32 %idx.next.i.i, %n
br i1 %next.i.i, label %loop.i.i, label %exit.i.i
br i1 %abc.i.i9, label %in.bounds.i.i13, label %out.of.bounds.i.i14, !prof !1
in.bounds.i.i13: ; preds = %loop.i.i10
- %addr.i.i11 = getelementptr i32* %arr, i32 %idx.i.i7
+ %addr.i.i11 = getelementptr i32, i32* %arr, i32 %idx.i.i7
store i32 0, i32* %addr.i.i11
%next.i.i12 = icmp slt i32 %idx.next.i.i8, %n
br i1 %next.i.i12, label %loop.i.i10, label %exit.i.i15
br i1 %abc.i, label %in.bounds.i, label %out.of.bounds.i, !prof !1
in.bounds.i: ; preds = %loop.i
- %addr.i = getelementptr i32* %arr, i32 %idx.i
+ %addr.i = getelementptr i32, i32* %arr, i32 %idx.i
store i32 0, i32* %addr.i
%next.i = icmp slt i32 %idx.next.i, %n
br i1 %next.i, label %loop.i, label %exit.i
br i1 %abc.i.i, label %in.bounds.i.i, label %out.of.bounds.i.i, !prof !1
in.bounds.i.i: ; preds = %loop.i.i
- %addr.i.i = getelementptr i32* %arr, i32 %idx.i.i
+ %addr.i.i = getelementptr i32, i32* %arr, i32 %idx.i.i
store i32 0, i32* %addr.i.i
%next.i.i = icmp slt i32 %idx.next.i.i, %n
br i1 %next.i.i, label %loop.i.i, label %exit.i.i
no_exit.0: ; preds = %no_exit.0, %entry
%p.0.0 = phi i32* [ getelementptr ([29 x [29 x [2 x i32]]]* @fixtab, i32 0, i32 0, i32 0, i32 0), %entry ], [ %inc.0, %no_exit.0 ] ; <i32*> [#uses=1]
- %inc.0 = getelementptr i32* %p.0.0, i32 1 ; <i32*> [#uses=1]
+ %inc.0 = getelementptr i32, i32* %p.0.0, i32 1 ; <i32*> [#uses=1]
br i1 undef, label %no_exit.0, label %no_exit.1
no_exit.1: ; preds = %no_exit.0
br label %bb
cond_next: ; preds = %bb2
- %tmp2 = getelementptr [5 x i8]* @foo, i32 0, i32 %i.0 ; <i8*> [#uses=1]
+ %tmp2 = getelementptr [5 x i8], [5 x i8]* @foo, i32 0, i32 %i.0 ; <i8*> [#uses=1]
%tmp3 = load i8* %tmp2 ; <i8> [#uses=1]
%tmp5 = icmp eq i8 %tmp3, 0 ; <i1> [#uses=1]
br i1 %tmp5, label %bb6, label %bb
define signext i16 @ExtractBufferedBlocksIgnored(%struct.JPEGGlobals* %globp) nounwind {
entry:
- %tmp4311 = getelementptr %struct.JPEGGlobals* %globp, i32 0, i32 70 ; <i32*> [#uses=1]
+ %tmp4311 = getelementptr %struct.JPEGGlobals, %struct.JPEGGlobals* %globp, i32 0, i32 70 ; <i32*> [#uses=1]
%tmp4412 = load i32* %tmp4311, align 16 ; <i32> [#uses=2]
%tmp4613 = icmp sgt i32 %tmp4412, 0 ; <i1> [#uses=1]
br i1 %tmp4613, label %bb, label %bb49
bb: ; preds = %bb28, %entry
%component.09 = phi i16 [ 0, %entry ], [ %tmp37, %bb28 ] ; <i16> [#uses=2]
%tmp12 = sext i16 %component.09 to i32 ; <i32> [#uses=2]
- %tmp6 = getelementptr %struct.JPEGGlobals* %globp, i32 0, i32 77, i32 %tmp12 ; <i16**> [#uses=2]
+ %tmp6 = getelementptr %struct.JPEGGlobals, %struct.JPEGGlobals* %globp, i32 0, i32 77, i32 %tmp12 ; <i16**> [#uses=2]
%tmp7 = load i16** %tmp6, align 4 ; <i16*> [#uses=2]
- %tmp235 = getelementptr %struct.JPEGGlobals* %globp, i32 0, i32 71, i32 %tmp12 ; <i32*> [#uses=1]
+ %tmp235 = getelementptr %struct.JPEGGlobals, %struct.JPEGGlobals* %globp, i32 0, i32 71, i32 %tmp12 ; <i32*> [#uses=1]
%tmp246 = load i32* %tmp235, align 4 ; <i32> [#uses=2]
%tmp267 = icmp sgt i32 %tmp246, 0 ; <i1> [#uses=1]
br i1 %tmp267, label %bb8, label %bb28
%indvar = phi i32 [ 0, %bb ], [ %indvar.next2, %bb8 ] ; <i32> [#uses=3]
%theDCTBufferIter.01.rec = shl i32 %indvar, 6 ; <i32> [#uses=1]
%tmp10.rec = add i32 %theDCTBufferIter.01.rec, 64 ; <i32> [#uses=1]
- %tmp10 = getelementptr i16* %tmp7, i32 %tmp10.rec ; <i16*> [#uses=1]
+ %tmp10 = getelementptr i16, i16* %tmp7, i32 %tmp10.rec ; <i16*> [#uses=1]
%i.02 = trunc i32 %indvar to i16 ; <i16> [#uses=1]
%tmp13 = add i16 %i.02, 1 ; <i16> [#uses=1]
%phitmp = sext i16 %tmp13 to i32 ; <i32> [#uses=1]
%0 = load i32** @a, align 8 ; <i32*> [#uses=1]
%1 = load i32** @b, align 8 ; <i32*> [#uses=1]
%2 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32* %1, i64 %2 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32* %1, i64 %2 ; <i32*> [#uses=1]
%4 = load i32* %3, align 1 ; <i32> [#uses=1]
%5 = load i32** @c, align 8 ; <i32*> [#uses=1]
%6 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %7 = getelementptr i32* %5, i64 %6 ; <i32*> [#uses=1]
+ %7 = getelementptr i32, i32* %5, i64 %6 ; <i32*> [#uses=1]
%8 = load i32* %7, align 1 ; <i32> [#uses=1]
%9 = add i32 %8, %4 ; <i32> [#uses=1]
%10 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %11 = getelementptr i32* %0, i64 %10 ; <i32*> [#uses=1]
+ %11 = getelementptr i32, i32* %0, i64 %10 ; <i32*> [#uses=1]
store i32 %9, i32* %11, align 1
%12 = load i32** @a, align 8 ; <i32*> [#uses=1]
%13 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%14 = load i32** @b, align 8 ; <i32*> [#uses=1]
%15 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%16 = sext i32 %15 to i64 ; <i64> [#uses=1]
- %17 = getelementptr i32* %14, i64 %16 ; <i32*> [#uses=1]
+ %17 = getelementptr i32, i32* %14, i64 %16 ; <i32*> [#uses=1]
%18 = load i32* %17, align 1 ; <i32> [#uses=1]
%19 = load i32** @c, align 8 ; <i32*> [#uses=1]
%20 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%21 = sext i32 %20 to i64 ; <i64> [#uses=1]
- %22 = getelementptr i32* %19, i64 %21 ; <i32*> [#uses=1]
+ %22 = getelementptr i32, i32* %19, i64 %21 ; <i32*> [#uses=1]
%23 = load i32* %22, align 1 ; <i32> [#uses=1]
%24 = add i32 %23, %18 ; <i32> [#uses=1]
%25 = sext i32 %13 to i64 ; <i64> [#uses=1]
- %26 = getelementptr i32* %12, i64 %25 ; <i32*> [#uses=1]
+ %26 = getelementptr i32, i32* %12, i64 %25 ; <i32*> [#uses=1]
store i32 %24, i32* %26, align 1
%27 = load i32** @a, align 8 ; <i32*> [#uses=1]
%28 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%29 = load i32** @b, align 8 ; <i32*> [#uses=1]
%30 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%31 = sext i32 %30 to i64 ; <i64> [#uses=1]
- %32 = getelementptr i32* %29, i64 %31 ; <i32*> [#uses=1]
+ %32 = getelementptr i32, i32* %29, i64 %31 ; <i32*> [#uses=1]
%33 = load i32* %32, align 1 ; <i32> [#uses=1]
%34 = load i32** @c, align 8 ; <i32*> [#uses=1]
%35 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%36 = sext i32 %35 to i64 ; <i64> [#uses=1]
- %37 = getelementptr i32* %34, i64 %36 ; <i32*> [#uses=1]
+ %37 = getelementptr i32, i32* %34, i64 %36 ; <i32*> [#uses=1]
%38 = load i32* %37, align 1 ; <i32> [#uses=1]
%39 = add i32 %38, %33 ; <i32> [#uses=1]
%40 = sext i32 %28 to i64 ; <i64> [#uses=1]
- %41 = getelementptr i32* %27, i64 %40 ; <i32*> [#uses=1]
+ %41 = getelementptr i32, i32* %27, i64 %40 ; <i32*> [#uses=1]
store i32 %39, i32* %41, align 1
%42 = load i32** @d, align 8 ; <i32*> [#uses=1]
%43 = load i32** @e, align 8 ; <i32*> [#uses=1]
%44 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %45 = getelementptr i32* %43, i64 %44 ; <i32*> [#uses=1]
+ %45 = getelementptr i32, i32* %43, i64 %44 ; <i32*> [#uses=1]
%46 = load i32* %45, align 1 ; <i32> [#uses=1]
%47 = load i32** @f, align 8 ; <i32*> [#uses=1]
%48 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %49 = getelementptr i32* %47, i64 %48 ; <i32*> [#uses=1]
+ %49 = getelementptr i32, i32* %47, i64 %48 ; <i32*> [#uses=1]
%50 = load i32* %49, align 1 ; <i32> [#uses=1]
%51 = add i32 %50, %46 ; <i32> [#uses=1]
%52 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %53 = getelementptr i32* %42, i64 %52 ; <i32*> [#uses=1]
+ %53 = getelementptr i32, i32* %42, i64 %52 ; <i32*> [#uses=1]
store i32 %51, i32* %53, align 1
%54 = load i32** @d, align 8 ; <i32*> [#uses=1]
%55 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%56 = load i32** @e, align 8 ; <i32*> [#uses=1]
%57 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%58 = sext i32 %57 to i64 ; <i64> [#uses=1]
- %59 = getelementptr i32* %56, i64 %58 ; <i32*> [#uses=1]
+ %59 = getelementptr i32, i32* %56, i64 %58 ; <i32*> [#uses=1]
%60 = load i32* %59, align 1 ; <i32> [#uses=1]
%61 = load i32** @f, align 8 ; <i32*> [#uses=1]
%62 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%63 = sext i32 %62 to i64 ; <i64> [#uses=1]
- %64 = getelementptr i32* %61, i64 %63 ; <i32*> [#uses=1]
+ %64 = getelementptr i32, i32* %61, i64 %63 ; <i32*> [#uses=1]
%65 = load i32* %64, align 1 ; <i32> [#uses=1]
%66 = add i32 %65, %60 ; <i32> [#uses=1]
%67 = sext i32 %55 to i64 ; <i64> [#uses=1]
- %68 = getelementptr i32* %54, i64 %67 ; <i32*> [#uses=1]
+ %68 = getelementptr i32, i32* %54, i64 %67 ; <i32*> [#uses=1]
store i32 %66, i32* %68, align 1
%69 = load i32** @d, align 8 ; <i32*> [#uses=1]
%70 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%71 = load i32** @e, align 8 ; <i32*> [#uses=1]
%72 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%73 = sext i32 %72 to i64 ; <i64> [#uses=1]
- %74 = getelementptr i32* %71, i64 %73 ; <i32*> [#uses=1]
+ %74 = getelementptr i32, i32* %71, i64 %73 ; <i32*> [#uses=1]
%75 = load i32* %74, align 1 ; <i32> [#uses=1]
%76 = load i32** @f, align 8 ; <i32*> [#uses=1]
%77 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%78 = sext i32 %77 to i64 ; <i64> [#uses=1]
- %79 = getelementptr i32* %76, i64 %78 ; <i32*> [#uses=1]
+ %79 = getelementptr i32, i32* %76, i64 %78 ; <i32*> [#uses=1]
%80 = load i32* %79, align 1 ; <i32> [#uses=1]
%81 = add i32 %80, %75 ; <i32> [#uses=1]
%82 = sext i32 %70 to i64 ; <i64> [#uses=1]
- %83 = getelementptr i32* %69, i64 %82 ; <i32*> [#uses=1]
+ %83 = getelementptr i32, i32* %69, i64 %82 ; <i32*> [#uses=1]
store i32 %81, i32* %83, align 1
%84 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
%85 = icmp sgt i32 %84, 23646 ; <i1> [#uses=1]
%2 = load i32** @b, align 8 ; <i32*> [#uses=1]
%3 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%4 = zext i32 %3 to i64 ; <i64> [#uses=1]
- %5 = getelementptr i32* %2, i64 %4 ; <i32*> [#uses=1]
+ %5 = getelementptr i32, i32* %2, i64 %4 ; <i32*> [#uses=1]
%6 = load i32* %5, align 1 ; <i32> [#uses=1]
%7 = load i32** @c, align 8 ; <i32*> [#uses=1]
%8 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%9 = zext i32 %8 to i64 ; <i64> [#uses=1]
- %10 = getelementptr i32* %7, i64 %9 ; <i32*> [#uses=1]
+ %10 = getelementptr i32, i32* %7, i64 %9 ; <i32*> [#uses=1]
%11 = load i32* %10, align 1 ; <i32> [#uses=1]
%12 = add i32 %11, %6 ; <i32> [#uses=1]
%13 = zext i32 %1 to i64 ; <i64> [#uses=1]
- %14 = getelementptr i32* %0, i64 %13 ; <i32*> [#uses=1]
+ %14 = getelementptr i32, i32* %0, i64 %13 ; <i32*> [#uses=1]
store i32 %12, i32* %14, align 1
%15 = load i32** @a, align 8 ; <i32*> [#uses=1]
%16 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%19 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%20 = and i32 %19, 15 ; <i32> [#uses=1]
%21 = zext i32 %20 to i64 ; <i64> [#uses=1]
- %22 = getelementptr i32* %18, i64 %21 ; <i32*> [#uses=1]
+ %22 = getelementptr i32, i32* %18, i64 %21 ; <i32*> [#uses=1]
%23 = load i32* %22, align 1 ; <i32> [#uses=1]
%24 = load i32** @c, align 8 ; <i32*> [#uses=1]
%25 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%26 = and i32 %25, 15 ; <i32> [#uses=1]
%27 = zext i32 %26 to i64 ; <i64> [#uses=1]
- %28 = getelementptr i32* %24, i64 %27 ; <i32*> [#uses=1]
+ %28 = getelementptr i32, i32* %24, i64 %27 ; <i32*> [#uses=1]
%29 = load i32* %28, align 1 ; <i32> [#uses=1]
%30 = add i32 %29, %23 ; <i32> [#uses=1]
%31 = zext i32 %17 to i64 ; <i64> [#uses=1]
- %32 = getelementptr i32* %15, i64 %31 ; <i32*> [#uses=1]
+ %32 = getelementptr i32, i32* %15, i64 %31 ; <i32*> [#uses=1]
store i32 %30, i32* %32, align 1
%33 = load i32** @a, align 8 ; <i32*> [#uses=1]
%34 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%37 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%38 = and i32 %37, 15 ; <i32> [#uses=1]
%39 = zext i32 %38 to i64 ; <i64> [#uses=1]
- %40 = getelementptr i32* %36, i64 %39 ; <i32*> [#uses=1]
+ %40 = getelementptr i32, i32* %36, i64 %39 ; <i32*> [#uses=1]
%41 = load i32* %40, align 1 ; <i32> [#uses=1]
%42 = load i32** @c, align 8 ; <i32*> [#uses=1]
%43 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%44 = and i32 %43, 15 ; <i32> [#uses=1]
%45 = zext i32 %44 to i64 ; <i64> [#uses=1]
- %46 = getelementptr i32* %42, i64 %45 ; <i32*> [#uses=1]
+ %46 = getelementptr i32, i32* %42, i64 %45 ; <i32*> [#uses=1]
%47 = load i32* %46, align 1 ; <i32> [#uses=1]
%48 = add i32 %47, %41 ; <i32> [#uses=1]
%49 = zext i32 %35 to i64 ; <i64> [#uses=1]
- %50 = getelementptr i32* %33, i64 %49 ; <i32*> [#uses=1]
+ %50 = getelementptr i32, i32* %33, i64 %49 ; <i32*> [#uses=1]
store i32 %48, i32* %50, align 1
%51 = load i32** @d, align 8 ; <i32*> [#uses=1]
%52 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%53 = load i32** @e, align 8 ; <i32*> [#uses=1]
%54 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%55 = zext i32 %54 to i64 ; <i64> [#uses=1]
- %56 = getelementptr i32* %53, i64 %55 ; <i32*> [#uses=1]
+ %56 = getelementptr i32, i32* %53, i64 %55 ; <i32*> [#uses=1]
%57 = load i32* %56, align 1 ; <i32> [#uses=1]
%58 = load i32** @f, align 8 ; <i32*> [#uses=1]
%59 = and i32 %i.0.reg2mem.0, 15 ; <i32> [#uses=1]
%60 = zext i32 %59 to i64 ; <i64> [#uses=1]
- %61 = getelementptr i32* %58, i64 %60 ; <i32*> [#uses=1]
+ %61 = getelementptr i32, i32* %58, i64 %60 ; <i32*> [#uses=1]
%62 = load i32* %61, align 1 ; <i32> [#uses=1]
%63 = sext i32 %i.0.reg2mem.0 to i64 ; <i64> [#uses=1]
- %64 = getelementptr [256 x i32]* @K, i64 0, i64 %63 ; <i32*> [#uses=1]
+ %64 = getelementptr [256 x i32], [256 x i32]* @K, i64 0, i64 %63 ; <i32*> [#uses=1]
%65 = load i32* %64, align 4 ; <i32> [#uses=1]
%66 = add i32 %62, %57 ; <i32> [#uses=1]
%67 = add i32 %66, %65 ; <i32> [#uses=1]
%68 = zext i32 %52 to i64 ; <i64> [#uses=1]
- %69 = getelementptr i32* %51, i64 %68 ; <i32*> [#uses=1]
+ %69 = getelementptr i32, i32* %51, i64 %68 ; <i32*> [#uses=1]
store i32 %67, i32* %69, align 1
%70 = load i32** @d, align 8 ; <i32*> [#uses=1]
%71 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%74 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%75 = and i32 %74, 15 ; <i32> [#uses=1]
%76 = zext i32 %75 to i64 ; <i64> [#uses=1]
- %77 = getelementptr i32* %73, i64 %76 ; <i32*> [#uses=1]
+ %77 = getelementptr i32, i32* %73, i64 %76 ; <i32*> [#uses=1]
%78 = load i32* %77, align 1 ; <i32> [#uses=1]
%79 = load i32** @f, align 8 ; <i32*> [#uses=1]
%80 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%81 = and i32 %80, 15 ; <i32> [#uses=1]
%82 = zext i32 %81 to i64 ; <i64> [#uses=1]
- %83 = getelementptr i32* %79, i64 %82 ; <i32*> [#uses=1]
+ %83 = getelementptr i32, i32* %79, i64 %82 ; <i32*> [#uses=1]
%84 = load i32* %83, align 1 ; <i32> [#uses=1]
%85 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=1]
%86 = sext i32 %85 to i64 ; <i64> [#uses=1]
- %87 = getelementptr [256 x i32]* @K, i64 0, i64 %86 ; <i32*> [#uses=1]
+ %87 = getelementptr [256 x i32], [256 x i32]* @K, i64 0, i64 %86 ; <i32*> [#uses=1]
%88 = load i32* %87, align 4 ; <i32> [#uses=1]
%89 = add i32 %84, %78 ; <i32> [#uses=1]
%90 = add i32 %89, %88 ; <i32> [#uses=1]
%91 = zext i32 %72 to i64 ; <i64> [#uses=1]
- %92 = getelementptr i32* %70, i64 %91 ; <i32*> [#uses=1]
+ %92 = getelementptr i32, i32* %70, i64 %91 ; <i32*> [#uses=1]
store i32 %90, i32* %92, align 1
%93 = load i32** @d, align 8 ; <i32*> [#uses=1]
%94 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%97 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%98 = and i32 %97, 15 ; <i32> [#uses=1]
%99 = zext i32 %98 to i64 ; <i64> [#uses=1]
- %100 = getelementptr i32* %96, i64 %99 ; <i32*> [#uses=1]
+ %100 = getelementptr i32, i32* %96, i64 %99 ; <i32*> [#uses=1]
%101 = load i32* %100, align 1 ; <i32> [#uses=1]
%102 = load i32** @f, align 8 ; <i32*> [#uses=1]
%103 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%104 = and i32 %103, 15 ; <i32> [#uses=1]
%105 = zext i32 %104 to i64 ; <i64> [#uses=1]
- %106 = getelementptr i32* %102, i64 %105 ; <i32*> [#uses=1]
+ %106 = getelementptr i32, i32* %102, i64 %105 ; <i32*> [#uses=1]
%107 = load i32* %106, align 1 ; <i32> [#uses=1]
%108 = add i32 %i.0.reg2mem.0, 2 ; <i32> [#uses=1]
%109 = sext i32 %108 to i64 ; <i64> [#uses=1]
- %110 = getelementptr [256 x i32]* @K, i64 0, i64 %109 ; <i32*> [#uses=1]
+ %110 = getelementptr [256 x i32], [256 x i32]* @K, i64 0, i64 %109 ; <i32*> [#uses=1]
%111 = load i32* %110, align 4 ; <i32> [#uses=1]
%112 = add i32 %107, %101 ; <i32> [#uses=1]
%113 = add i32 %112, %111 ; <i32> [#uses=1]
%114 = zext i32 %95 to i64 ; <i64> [#uses=1]
- %115 = getelementptr i32* %93, i64 %114 ; <i32*> [#uses=1]
+ %115 = getelementptr i32, i32* %93, i64 %114 ; <i32*> [#uses=1]
store i32 %113, i32* %115, align 1
%116 = add i32 %i.0.reg2mem.0, 1 ; <i32> [#uses=2]
%117 = icmp sgt i32 %116, 23646 ; <i1> [#uses=1]
%i2.115 = phi i32 [ 0, %entry ], [ %add249, %for.body ]
%add174 = add nsw i32 %i2.115, %x
%idxprom177 = sext i32 %add174 to i64
- %arrayidx179 = getelementptr inbounds double* %data, i64 %idxprom177
+ %arrayidx179 = getelementptr inbounds double, double* %data, i64 %idxprom177
%tmp180 = load double* %arrayidx179, align 8
%add249 = add nsw i32 %i2.115, %y
%cmp168 = icmp sgt i32 %add249, %n
br label %if.end.i126
if.end.i126: ; preds = %if.else.i124, %for.body21.i
- %incdec.ptr.i = getelementptr inbounds i8* %destYPixelPtr.010.i, i32 1
+ %incdec.ptr.i = getelementptr inbounds i8, i8* %destYPixelPtr.010.i, i32 1
%inc.i125 = add i32 %x.09.i, 1
%cmp19.i = icmp ult i32 %inc.i125, undef
br i1 %cmp19.i, label %for.body21.i, label %for.end.i129
loop:
%p.01.us.us = phi i8* [ null, %preheader ], [ %gep, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
- %gep = getelementptr inbounds i8* %p.01.us.us, i64 1
+ %gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
%snext = load i8* %gep
%cmp = icmp ult i8* %gep, %end
br i1 %cmp, label %loop, label %exit
loop:
%p.01.us.us = phi i8* [ %buf, %preheader ], [ %gep, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
- %gep = getelementptr inbounds i8* %p.01.us.us, i64 1
+ %gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
%snext = load i8* %gep
%cmp = icmp ult i8* %gep, %end
br i1 %cmp, label %loop, label %exit
%p.01.us.us = phi i8* [ null, %preheader ], [ %gep, %loop ]
%iv = phi i32 [ 0, %preheader ], [ %ivnext, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
- %gep = getelementptr inbounds i8* %p.01.us.us, i64 1
+ %gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
%snext = load i8* %gep
%ivnext = add i32 %iv, 1
%cmp = icmp ult i32 %ivnext, %cnt
%p.01.us.us = phi i8* [ %buf, %preheader ], [ %gep, %loop ]
%iv = phi i32 [ %bi, %preheader ], [ %ivnext, %loop ]
%s = phi i8 [0, %preheader], [%snext, %loop]
- %gep = getelementptr inbounds i8* %p.01.us.us, i64 1
+ %gep = getelementptr inbounds i8, i8* %p.01.us.us, i64 1
%snext = load i8* %gep
%ivnext = add i32 %iv, 1
%cmp = icmp ult i32 %ivnext, %cnt
; IV and BECount have two different pointer types here.
define void @testnullptr([512 x i8]* %base) nounwind {
entry:
- %add.ptr1603 = getelementptr [512 x i8]* %base, i64 0, i64 512
+ %add.ptr1603 = getelementptr [512 x i8], [512 x i8]* %base, i64 0, i64 512
br label %preheader
preheader:
for.body:
%r.17193 = phi i8* [ %incdec.ptr1608, %for.body ], [ null, %preheader ]
- %incdec.ptr1608 = getelementptr i8* %r.17193, i64 1
+ %incdec.ptr1608 = getelementptr i8, i8* %r.17193, i64 1
%cmp1604 = icmp ult i8* %incdec.ptr1608, %add.ptr1603
br i1 %cmp1604, label %for.body, label %for.end1609
do.body: ; preds = %if.else, %if.then
%firstIV = phi i32* [ %incdec.ptr2, %if.else ], [ %first, %if.then ]
- %incdec.ptr1 = getelementptr inbounds i32* %firstIV, i64 1
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %firstIV, i64 1
%cmp1 = icmp eq i32* %incdec.ptr1, %last
br i1 %cmp1, label %early.exit, label %if.else
if.else: ; preds = %do.body
- %incdec.ptr2 = getelementptr inbounds i32* %firstIV, i64 2
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %firstIV, i64 2
%cmp2 = icmp eq i32* %incdec.ptr2, %last
br i1 %cmp2, label %if.end, label %do.body
; CHECK: phi i32
%mul = mul nsw i32 %i.06, %i.06
%0 = sext i32 %i.06 to i64
- %arrayidx = getelementptr inbounds i32* %output, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %output, i64 %0
store i32 %mul, i32* %arrayidx, align 4
%add = add nsw i32 %i.06, 3
%cmp = icmp slt i32 %add, %n
define void @kinds__sbytezero([256 x i32]* nocapture %a) nounwind {
bb.thread:
- %tmp46 = getelementptr [256 x i32]* %a, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp46 = getelementptr [256 x i32], [256 x i32]* %a, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp46
br label %bb
%tmp8 = add i8 %i.0.reg2mem.0, 1 ; <i8> [#uses=3]
%tmp1 = sext i8 %tmp8 to i32 ; <i32> [#uses=1]
%tmp3 = add i32 %tmp1, 128 ; <i32> [#uses=1]
- %tmp4 = getelementptr [256 x i32]* %a, i32 0, i32 %tmp3 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr [256 x i32], [256 x i32]* %a, i32 0, i32 %tmp3 ; <i32*> [#uses=1]
store i32 0, i32* %tmp4
%0 = icmp eq i8 %tmp8, 127 ; <i1> [#uses=1]
br i1 %0, label %return, label %bb
define void @kinds__ubytezero([256 x i32]* nocapture %a) nounwind {
bb.thread:
- %tmp35 = getelementptr [256 x i32]* %a, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp35 = getelementptr [256 x i32], [256 x i32]* %a, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp35
br label %bb
%i.0.reg2mem.0 = phi i8 [ 0, %bb.thread ], [ %tmp7, %bb ] ; <i8> [#uses=1]
%tmp7 = add i8 %i.0.reg2mem.0, 1 ; <i8> [#uses=3]
%tmp1 = zext i8 %tmp7 to i32 ; <i32> [#uses=1]
- %tmp3 = getelementptr [256 x i32]* %a, i32 0, i32 %tmp1 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr [256 x i32], [256 x i32]* %a, i32 0, i32 %tmp1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp3
%0 = icmp eq i8 %tmp7, -1 ; <i1> [#uses=1]
br i1 %0, label %return, label %bb
%i.0.reg2mem.0 = phi i8 [ -10, %bb.thread ], [ %tmp7, %bb ] ; <i8> [#uses=2]
%tmp12 = sext i8 %i.0.reg2mem.0 to i32 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp12, 10 ; <i32> [#uses=1]
- %tmp5 = getelementptr [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
+ %tmp5 = getelementptr [21 x i32], [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
store i32 0, i32* %tmp5
%tmp7 = add i8 %i.0.reg2mem.0, 1 ; <i8> [#uses=2]
%0 = icmp sgt i8 %tmp7, 10 ; <i1> [#uses=1]
%i.0.reg2mem.0 = phi i8 [ 10, %bb.thread ], [ %tmp7, %bb ] ; <i8> [#uses=2]
%tmp12 = sext i8 %i.0.reg2mem.0 to i32 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp12, -10 ; <i32> [#uses=1]
- %tmp5 = getelementptr [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
+ %tmp5 = getelementptr [21 x i32], [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
store i32 0, i32* %tmp5
%tmp7 = add i8 %i.0.reg2mem.0, 1 ; <i8> [#uses=2]
%0 = icmp sgt i8 %tmp7, 30 ; <i1> [#uses=1]
%.pn2.in = and i32 %.pn2.in.in, 3 ; <i32> [#uses=1]
%.pn3 = sext i32 %.pn3.in to i64 ; <i64> [#uses=1]
%.pn2 = zext i32 %.pn2.in to i64 ; <i64> [#uses=1]
- %.pn.in = getelementptr [0 x float]* %pow_2_tab.pn, i64 0, i64 %.pn3 ; <float*> [#uses=1]
- %.pn1.in = getelementptr [0 x float]* %pow_2_025_tab.pn, i64 0, i64 %.pn2 ; <float*> [#uses=1]
+ %.pn.in = getelementptr [0 x float], [0 x float]* %pow_2_tab.pn, i64 0, i64 %.pn3 ; <float*> [#uses=1]
+ %.pn1.in = getelementptr [0 x float], [0 x float]* %pow_2_025_tab.pn, i64 0, i64 %.pn2 ; <float*> [#uses=1]
%.pn = load float* %.pn.in ; <float> [#uses=1]
%.pn1 = load float* %.pn1.in ; <float> [#uses=1]
%invQuantizer.0 = fmul float %.pn, %.pn1 ; <float> [#uses=4]
%i.05 = phi i32 [ %t49, %bb4 ], [ 0, %bb.nph ] ; <i32> [#uses=9]
%k.04 = phi i32 [ %t48, %bb4 ], [ 0, %bb.nph ] ; <i32> [#uses=1]
%t6 = sext i32 %i.05 to i64 ; <i64> [#uses=1]
- %t7 = getelementptr i32* %quaSpectrum, i64 %t6 ; <i32*> [#uses=1]
+ %t7 = getelementptr i32, i32* %quaSpectrum, i64 %t6 ; <i32*> [#uses=1]
%t8 = load i32* %t7, align 4 ; <i32> [#uses=1]
%t9 = zext i32 %t8 to i64 ; <i64> [#uses=1]
- %t10 = getelementptr float* %pow4_3_tab_ptr, i64 %t9 ; <float*> [#uses=1]
+ %t10 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t9 ; <float*> [#uses=1]
%t11 = load float* %t10, align 4 ; <float> [#uses=1]
%t12 = or i32 %i.05, 1 ; <i32> [#uses=1]
%t13 = sext i32 %t12 to i64 ; <i64> [#uses=1]
- %t14 = getelementptr i32* %quaSpectrum, i64 %t13 ; <i32*> [#uses=1]
+ %t14 = getelementptr i32, i32* %quaSpectrum, i64 %t13 ; <i32*> [#uses=1]
%t15 = load i32* %t14, align 4 ; <i32> [#uses=1]
%t16 = zext i32 %t15 to i64 ; <i64> [#uses=1]
- %t17 = getelementptr float* %pow4_3_tab_ptr, i64 %t16 ; <float*> [#uses=1]
+ %t17 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t16 ; <float*> [#uses=1]
%t18 = load float* %t17, align 4 ; <float> [#uses=1]
%t19 = or i32 %i.05, 2 ; <i32> [#uses=1]
%t20 = sext i32 %t19 to i64 ; <i64> [#uses=1]
- %t21 = getelementptr i32* %quaSpectrum, i64 %t20 ; <i32*> [#uses=1]
+ %t21 = getelementptr i32, i32* %quaSpectrum, i64 %t20 ; <i32*> [#uses=1]
%t22 = load i32* %t21, align 4 ; <i32> [#uses=1]
%t23 = zext i32 %t22 to i64 ; <i64> [#uses=1]
- %t24 = getelementptr float* %pow4_3_tab_ptr, i64 %t23 ; <float*> [#uses=1]
+ %t24 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t23 ; <float*> [#uses=1]
%t25 = load float* %t24, align 4 ; <float> [#uses=1]
%t26 = or i32 %i.05, 3 ; <i32> [#uses=1]
%t27 = sext i32 %t26 to i64 ; <i64> [#uses=1]
- %t28 = getelementptr i32* %quaSpectrum, i64 %t27 ; <i32*> [#uses=1]
+ %t28 = getelementptr i32, i32* %quaSpectrum, i64 %t27 ; <i32*> [#uses=1]
%t29 = load i32* %t28, align 4 ; <i32> [#uses=1]
%t30 = zext i32 %t29 to i64 ; <i64> [#uses=1]
- %t31 = getelementptr float* %pow4_3_tab_ptr, i64 %t30 ; <float*> [#uses=1]
+ %t31 = getelementptr float, float* %pow4_3_tab_ptr, i64 %t30 ; <float*> [#uses=1]
%t32 = load float* %t31, align 4 ; <float> [#uses=1]
%t33 = fmul float %t11, %invQuantizer.0 ; <float> [#uses=1]
%t34 = sext i32 %i.05 to i64 ; <i64> [#uses=1]
- %t35 = getelementptr float* %iquaSpectrum, i64 %t34 ; <float*> [#uses=1]
+ %t35 = getelementptr float, float* %iquaSpectrum, i64 %t34 ; <float*> [#uses=1]
store float %t33, float* %t35, align 4
%t36 = or i32 %i.05, 1 ; <i32> [#uses=1]
%t37 = fmul float %t18, %invQuantizer.0 ; <float> [#uses=1]
%t38 = sext i32 %t36 to i64 ; <i64> [#uses=1]
- %t39 = getelementptr float* %iquaSpectrum, i64 %t38 ; <float*> [#uses=1]
+ %t39 = getelementptr float, float* %iquaSpectrum, i64 %t38 ; <float*> [#uses=1]
store float %t37, float* %t39, align 4
%t40 = or i32 %i.05, 2 ; <i32> [#uses=1]
%t41 = fmul float %t25, %invQuantizer.0 ; <float> [#uses=1]
%t42 = sext i32 %t40 to i64 ; <i64> [#uses=1]
- %t43 = getelementptr float* %iquaSpectrum, i64 %t42 ; <float*> [#uses=1]
+ %t43 = getelementptr float, float* %iquaSpectrum, i64 %t42 ; <float*> [#uses=1]
store float %t41, float* %t43, align 4
%t44 = or i32 %i.05, 3 ; <i32> [#uses=1]
%t45 = fmul float %t32, %invQuantizer.0 ; <float> [#uses=1]
%t46 = sext i32 %t44 to i64 ; <i64> [#uses=1]
- %t47 = getelementptr float* %iquaSpectrum, i64 %t46 ; <float*> [#uses=1]
+ %t47 = getelementptr float, float* %iquaSpectrum, i64 %t46 ; <float*> [#uses=1]
store float %t45, float* %t47, align 4
%t48 = add i32 %k.04, 1 ; <i32> [#uses=2]
%t49 = add i32 %i.05, 4 ; <i32> [#uses=1]
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
; CHECK: br i1 true, label %ok, label %latch
ok:
- %addr = getelementptr i32* %a, i32 %idx
+ %addr = getelementptr i32, i32* %a, i32 %idx
store i32 %idx, i32* %addr
br label %latch
%pn = phi i8* [ %ge, %loop ], [ null, %entry ] ; <i8*> [#uses=1]
%cp = ptrtoint i8* %to to i32 ; <i32> [#uses=1]
%su = sub i32 0, %cp ; <i32> [#uses=1]
- %ge = getelementptr i8* %pn, i32 %su ; <i8*> [#uses=2]
+ %ge = getelementptr i8, i8* %pn, i32 %su ; <i8*> [#uses=2]
tail call void @bcopy(i8* %ge) nounwind
br label %loop
}
%sub.ptr.rhs.cast46.pn = ptrtoint i8* %from to i32 ; <i32> [#uses=1]
%sub.ptr.lhs.cast45.pn = ptrtoint i8* %to to i32 ; <i32> [#uses=1]
%sub.ptr.sub47.pn = sub i32 %sub.ptr.rhs.cast46.pn, %sub.ptr.lhs.cast45.pn ; <i32> [#uses=1]
- %sub.ptr4912 = getelementptr i8* %sub.ptr4912.pn, i32 %sub.ptr.sub47.pn ; <i8*> [#uses=2]
+ %sub.ptr4912 = getelementptr i8, i8* %sub.ptr4912.pn, i32 %sub.ptr.sub47.pn ; <i8*> [#uses=2]
tail call void @bcopy_4038(i8* %sub.ptr4912, i8* %sub.ptr7, i32 0) nounwind
- %sub.ptr = getelementptr i8* %sub.ptr7, i32 %sub.ptr.rhs.cast40 ; <i8*> [#uses=1]
+ %sub.ptr = getelementptr i8, i8* %sub.ptr7, i32 %sub.ptr.rhs.cast40 ; <i8*> [#uses=1]
br label %if.end54
}
bb10: ; preds = %bb49
%tmp326 = mul nsw i32 %tmp1, %tmp2 ; <i32> [#uses=1]
- %tmp351 = getelementptr inbounds i8* %x_addr.0, i32 %tmp326 ; <i8*> [#uses=1]
+ %tmp351 = getelementptr inbounds i8, i8* %x_addr.0, i32 %tmp326 ; <i8*> [#uses=1]
br i1 false, label %bb.nph, label %bb48
bb.nph: ; preds = %bb10
%pOriginHi.01 = phi i8* [ %tmp351, %bb.nph ], [ %pOriginHi.0, %bb28 ] ; <i8*> [#uses=2]
%tmp378 = bitcast i8* %pOriginHi.01 to i8* ; <i8*> [#uses=1]
store i8* %tmp378, i8** null
- %tmp385 = getelementptr inbounds i8* %pOriginHi.01, i32 %tmp2 ; <i8*> [#uses=1]
+ %tmp385 = getelementptr inbounds i8, i8* %pOriginHi.01, i32 %tmp2 ; <i8*> [#uses=1]
br label %bb28
bb28: ; preds = %bb23
br label %bb48
bb48: ; preds = %bb28.bb48_crit_edge, %bb10
- %tmp481 = getelementptr inbounds i8* %x_addr.0, i32 1 ; <i8*> [#uses=1]
+ %tmp481 = getelementptr inbounds i8, i8* %x_addr.0, i32 1 ; <i8*> [#uses=1]
br label %bb49
}
%iv = phi i32 [ %postiv, %loop ], [ 0, %entry ]
%ivnsw = phi i32 [ %postivnsw, %loop ], [ 0, %entry ]
%preofs = sext i32 %iv to i64
- %preadr = getelementptr i8* %base, i64 %preofs
+ %preadr = getelementptr i8, i8* %base, i64 %preofs
store i8 0, i8* %preadr
%postiv = add i32 %iv, 1
%postofs = sext i32 %postiv to i64
- %postadr = getelementptr i8* %base, i64 %postofs
+ %postadr = getelementptr i8, i8* %base, i64 %postofs
store i8 0, i8* %postadr
%postivnsw = add nsw i32 %ivnsw, 1
%postofsnsw = sext i32 %postivnsw to i64
- %postadrnsw = getelementptr i8* %base, i64 %postofsnsw
+ %postadrnsw = getelementptr i8, i8* %base, i64 %postofsnsw
store i8 0, i8* %postadrnsw
%cond = icmp sgt i32 %limit, %iv
br i1 %cond, label %loop, label %exit
%iv = phi i32 [ %postiv, %loop ], [ %init, %entry ]
%ivnsw = phi i32 [ %postivnsw, %loop ], [ %init, %entry ]
%preofs = sext i32 %iv to i64
- %preadr = getelementptr i8* %base, i64 %preofs
+ %preadr = getelementptr i8, i8* %base, i64 %preofs
store i8 0, i8* %preadr
%postiv = add i32 %iv, 1
%postofs = sext i32 %postiv to i64
- %postadr = getelementptr i8* %base, i64 %postofs
+ %postadr = getelementptr i8, i8* %base, i64 %postofs
store i8 0, i8* %postadr
%postivnsw = add nsw i32 %ivnsw, 1
%postofsnsw = sext i32 %postivnsw to i64
- %postadrnsw = getelementptr i8* %base, i64 %postofsnsw
+ %postadrnsw = getelementptr i8, i8* %base, i64 %postofsnsw
store i8 0, i8* %postadrnsw
%cond = icmp sgt i32 %limit, %postiv
br i1 %cond, label %loop, label %exit
%outercountdec = add i32 %outercount, -1
%ofs1 = sext i32 %outercountdec to i64
- %adr1 = getelementptr i8* %address, i64 %ofs1
+ %adr1 = getelementptr i8, i8* %address, i64 %ofs1
store i8 0, i8* %adr1
br label %innerpreheader
%innerpostiv = add i32 %inneriv, 1
%ofs2 = sext i32 %inneriv to i64
- %adr2 = getelementptr i8* %address, i64 %ofs2
+ %adr2 = getelementptr i8, i8* %address, i64 %ofs2
store i8 0, i8* %adr2
%ofs3 = sext i32 %innerpostiv to i64
- %adr3 = getelementptr i8* %address, i64 %ofs3
+ %adr3 = getelementptr i8, i8* %address, i64 %ofs3
store i8 0, i8* %adr3
%innercmp = icmp sgt i32 %limitdec, %innerpostiv
%innercount.merge = phi i32 [ %innercount.lcssa, %innerexit ], [ %innercount, %innerpreheader ]
%ofs4 = sext i32 %outercount to i64
- %adr4 = getelementptr i8* %address, i64 %ofs4
+ %adr4 = getelementptr i8, i8* %address, i64 %ofs4
store i8 0, i8* %adr4
%ofs5 = sext i32 %innercount.merge to i64
- %adr5 = getelementptr i8* %address, i64 %ofs5
+ %adr5 = getelementptr i8, i8* %address, i64 %ofs5
store i8 0, i8* %adr5
%outerpostcount = add i32 %outercount, 1
br i1 %cond, label %if.then, label %for.inc
if.then:
- %arrayidx = getelementptr [0 x double]* @X, i64 0, i64 %i
+ %arrayidx = getelementptr [0 x double], [0 x double]* @X, i64 0, i64 %i
store double 3.200000e+00, double* %arrayidx
br label %for.inc
bb13:
%tmp66 = load i64** %tmp65, align 4
- %tmp68 = getelementptr inbounds i64* %tmp66, i32 %i
+ %tmp68 = getelementptr inbounds i64, i64* %tmp66, i32 %i
%tmp69 = load i64* %tmp68, align 4
%tmp74 = load i64** %tmp73, align 4
- %tmp76 = getelementptr inbounds i64* %tmp74, i32 %i
+ %tmp76 = getelementptr inbounds i64, i64* %tmp74, i32 %i
%tmp77 = load i64* %tmp76, align 4
%tmp78 = icmp ugt i64 %tmp69, %tmp77
br i1 %tmp78, label %bb20.loopexit, label %bb15
bb15:
%tmp83 = load i64** %tmp82, align 4
- %tmp85 = getelementptr inbounds i64* %tmp83, i32 %i
+ %tmp85 = getelementptr inbounds i64, i64* %tmp83, i32 %i
%tmp86 = load i64* %tmp85, align 4
%tmp91 = load i64** %tmp90, align 4
- %tmp93 = getelementptr inbounds i64* %tmp91, i32 %i
+ %tmp93 = getelementptr inbounds i64, i64* %tmp91, i32 %i
%tmp94 = load i64* %tmp93, align 4
%tmp95 = icmp ult i64 %tmp86, %tmp94
br i1 %tmp95, label %bb20.loopexit, label %bb17
bb5: ; preds = %bb4, %bb5
%t6 = phi i64 [ %t9, %bb5 ], [ 0, %bb4 ] ; <i64> [#uses=2]
%t7 = srem i64 %t6, %arg ; <i64> [#uses=1]
- %t8 = getelementptr inbounds double* %arg3, i64 %t7 ; <double*> [#uses=1]
+ %t8 = getelementptr inbounds double, double* %arg3, i64 %t7 ; <double*> [#uses=1]
store double 0.000000e+00, double* %t8
%t9 = add nsw i64 %t6, 1 ; <i64> [#uses=2]
%t10 = icmp slt i64 %t9, %arg ; <i1> [#uses=1]
%t26 = add nsw i64 %t24, %t22 ; <i64> [#uses=1]
%t27 = mul i64 %t11, %arg1 ; <i64> [#uses=1]
%t28 = add nsw i64 %t25, %t22 ; <i64> [#uses=1]
- %t29 = getelementptr inbounds i64* %arg, i64 %t26 ; <i64*> [#uses=1]
+ %t29 = getelementptr inbounds i64, i64* %arg, i64 %t26 ; <i64*> [#uses=1]
%t30 = add nsw i64 %t27, %t22 ; <i64> [#uses=1]
- %t31 = getelementptr inbounds i64* %arg, i64 %t28 ; <i64*> [#uses=1]
+ %t31 = getelementptr inbounds i64, i64* %arg, i64 %t28 ; <i64*> [#uses=1]
%t32 = zext i32 %t23 to i64 ; <i64> [#uses=1]
%t33 = load i64* %t29 ; <i64> [#uses=1]
- %t34 = getelementptr inbounds i64* %arg, i64 %t30 ; <i64*> [#uses=1]
+ %t34 = getelementptr inbounds i64, i64* %arg, i64 %t30 ; <i64*> [#uses=1]
%t35 = load i64* %t31 ; <i64> [#uses=1]
%t36 = add nsw i64 %t32, %t33 ; <i64> [#uses=1]
%t37 = add nsw i64 %t36, %t35 ; <i64> [#uses=1]
bb16: ; preds = %bb16, %bb14, %bb7.preheader
%S.31.0 = phi i64 [ %3, %bb16 ], [ 1, %bb7.preheader ], [ 1, %bb14 ] ; <i64> [#uses=2]
%0 = add nsw i64 %S.31.0, -1 ; <i64> [#uses=1]
- %1 = getelementptr inbounds [3 x double]* undef, i64 0, i64 %0 ; <double*> [#uses=1]
+ %1 = getelementptr inbounds [3 x double], [3 x double]* undef, i64 0, i64 %0 ; <double*> [#uses=1]
%2 = load double* %1, align 8 ; <double> [#uses=0]
%3 = add nsw i64 %S.31.0, 1 ; <i64> [#uses=1]
br label %bb16
while.body:
%0 = phi i32 [ 0, %entry ], [ %inc.2, %while.body ]
%shr = lshr i32 %0, 5
- %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+ %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
%tmp6 = load i32* %arrayidx, align 4
%inc.1 = add i32 %0, 1
%shr.1 = lshr i32 %inc.1, 5
- %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+ %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
%tmp6.1 = load i32* %arrayidx.1, align 4
%inc.2 = add i32 %inc.1, 1
%exitcond.3 = icmp eq i32 %inc.2, 128
while.body:
%0 = phi i32 [ 0, %entry ], [ %inc.3, %while.body ]
%shr = lshr i32 %0, 5
- %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+ %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
%tmp6 = load i32* %arrayidx, align 4
%inc.1 = add i32 %0, 1
%shr.1 = lshr i32 %inc.1, 5
- %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+ %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
%tmp6.1 = load i32* %arrayidx.1, align 4
%inc.3 = add i32 %inc.1, 2
%exitcond.3 = icmp eq i32 %inc.3, 96
bb1: ; preds = %bb
%tmp5 = add i32 %part.016, -1 ; <i32> [#uses=1]
%tmp6 = sext i32 %tmp5 to i64 ; <i64> [#uses=1]
- %tmp7 = getelementptr float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
+ %tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
%tmp8 = load float* %tmp7, align 4 ; <float> [#uses=1]
%tmp9 = fadd float %tmp8, %distERBlo.120 ; <float> [#uses=1]
%tmp10 = add i32 %part.016, -1 ; <i32> [#uses=1]
%tmp11 = sext i32 %tmp10 to i64 ; <i64> [#uses=1]
- %tmp12 = getelementptr float* %pTmp1, i64 %tmp11 ; <float*> [#uses=1]
+ %tmp12 = getelementptr float, float* %pTmp1, i64 %tmp11 ; <float*> [#uses=1]
%tmp13 = load float* %tmp12, align 4 ; <float> [#uses=1]
%tmp14 = fsub float %distERBhi.121, %tmp13 ; <float> [#uses=1]
br label %bb3.preheader
%loPart.02 = phi i32 [ %tmp24, %bb3 ], [ %loPart.118, %bb.nph ] ; <i32> [#uses=3]
%peakCount.01 = phi float [ %tmp23, %bb3 ], [ %peakCount.117, %bb.nph ] ; <float> [#uses=1]
%tmp16 = sext i32 %loPart.02 to i64 ; <i64> [#uses=1]
- %tmp17 = getelementptr float* %pTmp1, i64 %tmp16 ; <float*> [#uses=1]
+ %tmp17 = getelementptr float, float* %pTmp1, i64 %tmp16 ; <float*> [#uses=1]
%tmp18 = load float* %tmp17, align 4 ; <float> [#uses=1]
%tmp19 = fsub float %distERBlo.03, %tmp18 ; <float> [#uses=3]
%tmp20 = sext i32 %loPart.02 to i64 ; <i64> [#uses=1]
- %tmp21 = getelementptr float* %peakWeight, i64 %tmp20 ; <float*> [#uses=1]
+ %tmp21 = getelementptr float, float* %peakWeight, i64 %tmp20 ; <float*> [#uses=1]
%tmp22 = load float* %tmp21, align 4 ; <float> [#uses=1]
%tmp23 = fsub float %peakCount.01, %tmp22 ; <float> [#uses=2]
%tmp24 = add i32 %loPart.02, 1 ; <i32> [#uses=2]
%hiPart.08 = phi i32 [ %tmp31, %bb5 ], [ %hiPart.119, %bb.nph12 ] ; <i32> [#uses=2]
%peakCount.27 = phi float [ %tmp35, %bb5 ], [ %peakCount.0.lcssa, %bb.nph12 ] ; <float> [#uses=1]
%tmp27 = sext i32 %hiPart.08 to i64 ; <i64> [#uses=1]
- %tmp28 = getelementptr float* %pTmp1, i64 %tmp27 ; <float*> [#uses=1]
+ %tmp28 = getelementptr float, float* %pTmp1, i64 %tmp27 ; <float*> [#uses=1]
%tmp29 = load float* %tmp28, align 4 ; <float> [#uses=1]
%tmp30 = fadd float %tmp29, %distERBhi.29 ; <float> [#uses=3]
%tmp31 = add i32 %hiPart.08, 1 ; <i32> [#uses=4]
%tmp32 = sext i32 %tmp31 to i64 ; <i64> [#uses=1]
- %tmp33 = getelementptr float* %peakWeight, i64 %tmp32 ; <float*> [#uses=1]
+ %tmp33 = getelementptr float, float* %peakWeight, i64 %tmp32 ; <float*> [#uses=1]
%tmp34 = load float* %tmp33, align 4 ; <float> [#uses=1]
%tmp35 = fadd float %tmp34, %peakCount.27 ; <float> [#uses=2]
br label %bb5
%tmp42 = fadd float %tmp41, 1.000000e+00 ; <float> [#uses=1]
%tmp43 = fdiv float 1.000000e+00, %tmp42 ; <float> [#uses=1]
%tmp44 = sext i32 %part.016 to i64 ; <i64> [#uses=1]
- %tmp45 = getelementptr float* %nrgReducePeakrate, i64 %tmp44 ; <float*> [#uses=1]
+ %tmp45 = getelementptr float, float* %nrgReducePeakrate, i64 %tmp44 ; <float*> [#uses=1]
store float %tmp43, float* %tmp45, align 4
%tmp46 = add i32 %part.016, 1 ; <i32> [#uses=2]
br label %bb8
%.02 = phi i32 [ 0, %Prologue ], [ %tmp33, %B24 ]
%tmp23 = zext i32 %.02 to i64
%tmp33 = add i32 %.02, 1
- %o = getelementptr i32* %a, i32 %.02
+ %o = getelementptr i32, i32* %a, i32 %.02
%v = load i32* %o
%t = icmp eq i32 %v, 0
br i1 %t, label %exit24, label %B24
loop:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
%indvar.i8 = and i64 %indvar, 255
- %t0 = getelementptr double* %d, i64 %indvar.i8
+ %t0 = getelementptr double, double* %d, i64 %indvar.i8
%t1 = load double* %t0
%t2 = fmul double %t1, 0.1
store double %t2, double* %t0
%indvar.i24 = and i64 %indvar, 16777215
- %t3 = getelementptr double* %d, i64 %indvar.i24
+ %t3 = getelementptr double, double* %d, i64 %indvar.i24
%t4 = load double* %t3
%t5 = fmul double %t4, 2.3
store double %t5, double* %t3
- %t6 = getelementptr double* %d, i64 %indvar
+ %t6 = getelementptr double, double* %d, i64 %indvar
%t7 = load double* %t6
%t8 = fmul double %t7, 4.5
store double %t8, double* %t6
; CHECK-LABEL: @ptriv_as2(
entry:
%idx.trunc = trunc i32 %n to i8
- %add.ptr = getelementptr inbounds i8 addrspace(2)* %base, i8 %idx.trunc
+ %add.ptr = getelementptr inbounds i8, i8 addrspace(2)* %base, i8 %idx.trunc
%cmp1 = icmp ult i8 addrspace(2)* %base, %add.ptr
br i1 %cmp1, label %for.body, label %for.end
; Make sure the added GEP has the right index type
-; CHECK: %lftr.limit = getelementptr i8 addrspace(2)* %base, i8
+; CHECK: %lftr.limit = getelementptr i8, i8 addrspace(2)* %base, i8
; CHECK: for.body:
; CHECK: phi i8 addrspace(2)*
%sub.ptr.rhs.cast = ptrtoint i8 addrspace(2)* %base to i8
%sub.ptr.sub = sub i8 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
store i8 %sub.ptr.sub, i8 addrspace(2)* %p.02
- %incdec.ptr = getelementptr inbounds i8 addrspace(2)* %p.02, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8 addrspace(2)* %p.02, i32 1
%cmp = icmp ult i8 addrspace(2)* %incdec.ptr, %add.ptr
br i1 %cmp, label %for.body, label %for.end
; CHECK-LABEL: @ptriv_as3(
entry:
%idx.trunc = trunc i32 %n to i16
- %add.ptr = getelementptr inbounds i8 addrspace(3)* %base, i16 %idx.trunc
+ %add.ptr = getelementptr inbounds i8, i8 addrspace(3)* %base, i16 %idx.trunc
%cmp1 = icmp ult i8 addrspace(3)* %base, %add.ptr
br i1 %cmp1, label %for.body, label %for.end
; Make sure the added GEP has the right index type
-; CHECK: %lftr.limit = getelementptr i8 addrspace(3)* %base, i16
+; CHECK: %lftr.limit = getelementptr i8, i8 addrspace(3)* %base, i16
; CHECK: for.body:
; CHECK: phi i8 addrspace(3)*
%sub.ptr.sub = sub i16 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
%conv = trunc i16 %sub.ptr.sub to i8
store i8 %conv, i8 addrspace(3)* %p.02
- %incdec.ptr = getelementptr inbounds i8 addrspace(3)* %p.02, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8 addrspace(3)* %p.02, i32 1
%cmp = icmp ult i8 addrspace(3)* %incdec.ptr, %add.ptr
br i1 %cmp, label %for.body, label %for.end
bb2: ; preds = %bb3, %bb.nph
%i.01 = phi i32 [ %7, %bb3 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %2 = getelementptr double* %p, i64 %1 ; <double*> [#uses=1]
+ %2 = getelementptr double, double* %p, i64 %1 ; <double*> [#uses=1]
%3 = load double* %2, align 8 ; <double> [#uses=1]
%4 = fmul double %3, 1.100000e+00 ; <double> [#uses=1]
%5 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %6 = getelementptr double* %p, i64 %5 ; <double*> [#uses=1]
+ %6 = getelementptr double, double* %p, i64 %5 ; <double*> [#uses=1]
store double %4, double* %6, align 8
%7 = add i32 %i.01, 1 ; <i32> [#uses=2]
br label %bb3
define void @ptriv(i8* %base, i32 %n) nounwind {
entry:
%idx.ext = sext i32 %n to i64
- %add.ptr = getelementptr inbounds i8* %base, i64 %idx.ext
+ %add.ptr = getelementptr inbounds i8, i8* %base, i64 %idx.ext
%cmp1 = icmp ult i8* %base, %add.ptr
br i1 %cmp1, label %for.body, label %for.end
%sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
%conv = trunc i64 %sub.ptr.sub to i8
store i8 %conv, i8* %p.02
- %incdec.ptr = getelementptr inbounds i8* %p.02, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %p.02, i32 1
%cmp = icmp ult i8* %incdec.ptr, %add.ptr
br i1 %cmp, label %for.body, label %for.end
%i = phi i32 [ 0, %entry ], [ %i.inc, %loop ]
%diagidx = add nsw i32 %rowidx, %i
%diagidxw = sext i32 %diagidx to i64
- %matrixp = getelementptr inbounds [0 x double]* %matrix, i32 0, i64 %diagidxw
+ %matrixp = getelementptr inbounds [0 x double], [0 x double]* %matrix, i32 0, i64 %diagidxw
%v1 = load double* %matrixp
%iw = sext i32 %i to i64
- %vectorp = getelementptr inbounds [0 x double]* %vector, i32 0, i64 %iw
+ %vectorp = getelementptr inbounds [0 x double], [0 x double]* %vector, i32 0, i64 %iw
%v2 = load double* %vectorp
%row.inc = add nsw i32 %rowidx, %ilead
%i.inc = add nsw i32 %i, 1
%i = phi i32 [ 0, %entry ], [ %i.inc, %loop ]
%diagidx = add nsw i32 %rowidx, %i
%diagidxw = sext i32 %diagidx to i64
- %matrixp = getelementptr inbounds [0 x double]* %matrix, i32 0, i64 %diagidxw
+ %matrixp = getelementptr inbounds [0 x double], [0 x double]* %matrix, i32 0, i64 %diagidxw
%v1 = load double* %matrixp
%iw = sext i32 %i to i64
- %vectorp = getelementptr inbounds [0 x double]* %vector, i32 0, i64 %iw
+ %vectorp = getelementptr inbounds [0 x double], [0 x double]* %vector, i32 0, i64 %iw
%v2 = load double* %vectorp
%row.inc = add nsw i32 %rowidx, %ilead
%i.inc = add nsw i32 %i, 1
define void @geplftr(i8* %base, i32 %x, i32 %y, i32 %n) nounwind {
entry:
%x.ext = sext i32 %x to i64
- %add.ptr = getelementptr inbounds i8* %base, i64 %x.ext
+ %add.ptr = getelementptr inbounds i8, i8* %base, i64 %x.ext
%y.ext = sext i32 %y to i64
- %add.ptr10 = getelementptr inbounds i8* %add.ptr, i64 %y.ext
+ %add.ptr10 = getelementptr inbounds i8, i8* %add.ptr, i64 %y.ext
%lim = add i32 %x, %n
%cmp.ph = icmp ult i32 %x, %lim
br i1 %cmp.ph, label %loop, label %exit
loop:
%i = phi i32 [ %x, %entry ], [ %inc, %loop ]
%aptr = phi i8* [ %add.ptr10, %entry ], [ %incdec.ptr, %loop ]
- %incdec.ptr = getelementptr inbounds i8* %aptr, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %aptr, i32 1
store i8 3, i8* %aptr
%inc = add i32 %i, 1
%cmp = icmp ult i32 %inc, %lim
; Test LFTR on an IV whose recurrence start is a non-unit pointer type.
define void @aryptriv([256 x i8]* %base, i32 %n) nounwind {
entry:
- %ivstart = getelementptr inbounds [256 x i8]* %base, i32 0, i32 0
- %ivend = getelementptr inbounds [256 x i8]* %base, i32 0, i32 %n
+ %ivstart = getelementptr inbounds [256 x i8], [256 x i8]* %base, i32 0, i32 0
+ %ivend = getelementptr inbounds [256 x i8], [256 x i8]* %base, i32 0, i32 %n
%cmp.ph = icmp ult i8* %ivstart, %ivend
br i1 %cmp.ph, label %loop, label %exit
; CHECK: br i1
loop:
%aptr = phi i8* [ %ivstart, %entry ], [ %incdec.ptr, %loop ]
- %incdec.ptr = getelementptr inbounds i8* %aptr, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %aptr, i32 1
store i8 3, i8* %aptr
%cmp = icmp ult i8* %incdec.ptr, %ivend
br i1 %cmp, label %loop, label %exit
%i.0 = phi i8 [ 0, %0 ], [ %5, %1 ]
%p.0 = phi i8* [ getelementptr inbounds ([240 x i8]* @data, i64 0, i64 0), %0 ], [ %4, %1 ]
%.0 = phi i8* [ %a, %0 ], [ %2, %1 ]
- %2 = getelementptr inbounds i8* %.0, i64 1
+ %2 = getelementptr inbounds i8, i8* %.0, i64 1
%3 = load i8* %.0, align 1
- %4 = getelementptr inbounds i8* %p.0, i64 1
+ %4 = getelementptr inbounds i8, i8* %p.0, i64 1
store i8 %3, i8* %p.0, align 1
%5 = add i8 %i.0, 1
%6 = icmp ult i8 %5, -16
br i1 %2, label %bb22, label %overflow2
bb22: ; preds = %bb21
- %3 = getelementptr i8* %q.0, i32 1 ; <i8*> [#uses=1]
+ %3 = getelementptr i8, i8* %q.0, i32 1 ; <i8*> [#uses=1]
br label %bb23
bb23: ; preds = %bb22, %bb20
br i1 %2, label %bb22, label %overflow2
bb22: ; preds = %bb21
- %3 = getelementptr i8* %q.0, i32 1 ; <i8*> [#uses=1]
+ %3 = getelementptr i8, i8* %q.0, i32 1 ; <i8*> [#uses=1]
br label %bb23
bb23: ; preds = %bb22, %bb20
bb: ; preds = %bb, %bb.preheader
%i.01 = phi i64 [ %t6, %bb ], [ %a, %bb.preheader ] ; <i64> [#uses=3]
%t1 = and i64 %i.01, 255 ; <i64> [#uses=1]
- %t2 = getelementptr i64* %A, i64 %t1 ; <i64*> [#uses=1]
+ %t2 = getelementptr i64, i64* %A, i64 %t1 ; <i64*> [#uses=1]
store i64 %i.01, i64* %t2, align 8
%t6 = add i64 %i.01, %s ; <i64> [#uses=1]
br label %bb
%i.02 = phi i32 [ 0, %ph ], [ %iinc, %loop ]
%s.01 = phi i32 [ 0, %ph ], [ %sinc, %loop ]
%ofs = sext i32 %i.02 to i64
- %adr = getelementptr inbounds i32* %arr, i64 %ofs
+ %adr = getelementptr inbounds i32, i32* %arr, i64 %ofs
%val = load i32* %adr
%sinc = add nsw i32 %s.01, %val
%iinc = add nsw i32 %i.02, 1
%i.02 = phi i32 [ 0, %ph ], [ %iinc, %loop ]
%s.01 = phi i64 [ 0, %ph ], [ %sinc, %loop ]
%ofs = sext i32 %i.02 to i64
- %adr = getelementptr inbounds i32* %arr, i64 %ofs
+ %adr = getelementptr inbounds i32, i32* %arr, i64 %ofs
%val = load i32* %adr
%vall = sext i32 %val to i64
%sinc = add nsw i64 %s.01, %vall
; CHECK-NOT: add
;
; Preserve gep inboundsness, and don't factor it.
-; CHECK: getelementptr inbounds i32* %ptriv, i32 1
+; CHECK: getelementptr inbounds i32, i32* %ptriv, i32 1
; CHECK-NOT: add
; CHECK: exit:
loop:
%ptriv = phi i32* [ %first, %ph ], [ %ptrpost, %loop ]
%ofs = sext i32 %idx to i64
- %adr = getelementptr inbounds i32* %ptriv, i64 %ofs
+ %adr = getelementptr inbounds i32, i32* %ptriv, i64 %ofs
store i32 3, i32* %adr
- %ptrpost = getelementptr inbounds i32* %ptriv, i32 1
+ %ptrpost = getelementptr inbounds i32, i32* %ptriv, i32 1
%cond = icmp ne i32* %ptrpost, %last
br i1 %cond, label %loop, label %exit
loop:
%iv = phi i32 [%start, %entry], [%next, %loop]
%p = phi %structI* [%base, %entry], [%pinc, %loop]
- %adr = getelementptr %structI* %p, i32 0, i32 0
+ %adr = getelementptr %structI, %structI* %p, i32 0, i32 0
store i32 3, i32* %adr
%pp = bitcast %structI* %p to i32*
store i32 4, i32* %pp
- %pinc = getelementptr %structI* %p, i32 1
+ %pinc = getelementptr %structI, %structI* %p, i32 1
%next = add i32 %iv, 1
%cond = icmp ne i32 %next, %limit
br i1 %cond, label %loop, label %exit
%idx = phi i32 [ 0, %entry ], [ %idx.next, %loop.inc ]
%max = phi i32 [ 0, %entry ], [ %max.next, %loop.inc ]
%idxprom = sext i32 %idx to i64
- %adr = getelementptr inbounds i32* %base, i64 %idxprom
+ %adr = getelementptr inbounds i32, i32* %base, i64 %idxprom
%val = load i32* %adr
%cmp19 = icmp sgt i32 %val, %max
br i1 %cmp19, label %if.then, label %if.else
loop:
%iv = phi i32 [ 0, %entry], [ %iv.next, %loop ]
%t1 = sext i32 %iv to i64
- %adr = getelementptr i64* %base, i64 %t1
+ %adr = getelementptr i64, i64* %base, i64 %t1
%val = load i64* %adr
%t2 = or i32 %iv, 1
%t3 = sext i32 %t2 to i64
define void @congruentgepiv(%structIF* %base) nounwind uwtable ssp {
entry:
- %first = getelementptr inbounds %structIF* %base, i64 0, i32 0
+ %first = getelementptr inbounds %structIF, %structIF* %base, i64 0, i32 0
br label %loop
; CHECK: loop:
br i1 undef, label %latch, label %exit
latch: ; preds = %for.inc50.i
- %ptr.inc = getelementptr inbounds %structIF* %ptr.iv, i64 1
- %next.inc = getelementptr inbounds %structIF* %ptr.inc, i64 0, i32 0
+ %ptr.inc = getelementptr inbounds %structIF, %structIF* %ptr.iv, i64 1
+ %next.inc = getelementptr inbounds %structIF, %structIF* %ptr.inc, i64 0, i32 0
br label %loop
exit:
loop1:
%zxt = zext i32 %i to i64
%ofs = shl nuw nsw i64 %zxt, 3
- %gep = getelementptr i64* %a, i64 %zxt
+ %gep = getelementptr i64, i64* %a, i64 %zxt
%v = load i64* %gep, align 8
%truncv = trunc i64 %v to i32
%adds = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %s, i32 %truncv)
%tmp31 = phi i32 [ %tmp39, %bb30 ], [ %tmp28, %bb24 ] ; <i32> [#uses=2]
%tmp32 = phi i32 [ %tmp37, %bb30 ], [ %tmp27, %bb24 ] ; <i32> [#uses=2]
%tmp33 = sext i32 %tmp32 to i64 ; <i64> [#uses=1]
- %tmp35 = getelementptr float* %tmp4, i64 %tmp33 ; <%0*> [#uses=1]
+ %tmp35 = getelementptr float, float* %tmp4, i64 %tmp33 ; <%0*> [#uses=1]
%tmp36 = load float* %tmp35, align 4 ; <%0> [#uses=0]
%tmp37 = add nsw i32 %tmp32, -1 ; <i32> [#uses=1]
%tmp39 = add nsw i32 %tmp31, -1 ; <i32> [#uses=1]
%p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
%1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
%2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
- %3 = getelementptr i32* %d, i64 %2 ; <i32*> [#uses=1]
+ %3 = getelementptr i32, i32* %d, i64 %2 ; <i32*> [#uses=1]
store i32 %1, i32* %3, align 4
%4 = add i8 %p.01, 1 ; <i8> [#uses=1]
%5 = add i32 %i.02, 1 ; <i32> [#uses=2]
%result.02 = phi i64 [ %t5, %bb1 ], [ 0, %bb.nph ] ; <i64> [#uses=1]
%n.01 = phi i32 [ %t6, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=2]
%t1 = sext i32 %n.01 to i64 ; <i64> [#uses=1]
- %t2 = getelementptr i64* %first, i64 %t1 ; <i64*> [#uses=1]
+ %t2 = getelementptr i64, i64* %first, i64 %t1 ; <i64*> [#uses=1]
%t3 = load i64* %t2, align 8 ; <i64> [#uses=1]
%t4 = lshr i64 %t3, 4 ; <i64> [#uses=1]
%t5 = add i64 %t4, %result.02 ; <i64> [#uses=2]
bb: ; preds = %bb1, %bb.nph
%i.01 = phi i16 [ %t3, %bb1 ], [ 0, %bb.nph ] ; <i16> [#uses=2]
%t1 = sext i16 %i.01 to i64 ; <i64> [#uses=1]
- %t2 = getelementptr i32* %P, i64 %t1 ; <i32*> [#uses=1]
+ %t2 = getelementptr i32, i32* %P, i64 %t1 ; <i32*> [#uses=1]
store i32 123, i32* %t2, align 4
%t3 = add i16 %i.01, 1 ; <i16> [#uses=2]
br label %bb1
%i.0.reg2mem.0 = phi i8 [ -10, %bb.thread ], [ %tmp7, %bb ] ; <i8> [#uses=2]
%tmp12 = sext i8 %i.0.reg2mem.0 to i32 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp12, 10 ; <i32> [#uses=1]
- %tmp5 = getelementptr [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
+ %tmp5 = getelementptr [21 x i32], [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
store i32 0, i32* %tmp5
%tmp7 = add i8 %i.0.reg2mem.0, 1 ; <i8> [#uses=2]
%0 = icmp sgt i8 %tmp7, 10 ; <i1> [#uses=1]
%i.0.reg2mem.0 = phi i8 [ 10, %bb.thread ], [ %tmp7, %bb ] ; <i8> [#uses=2]
%tmp12 = sext i8 %i.0.reg2mem.0 to i32 ; <i32> [#uses=1]
%tmp4 = add i32 %tmp12, -10 ; <i32> [#uses=1]
- %tmp5 = getelementptr [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
+ %tmp5 = getelementptr [21 x i32], [21 x i32]* %a, i32 0, i32 %tmp4 ; <i32*> [#uses=1]
store i32 0, i32* %tmp5
%tmp7 = add i8 %i.0.reg2mem.0, 1 ; <i8> [#uses=2]
%0 = icmp sgt i8 %tmp7, 30 ; <i1> [#uses=1]
loop:
; CHECK: loop
%.sum = add i64 %i.01, -2
- %v = getelementptr inbounds i8* null, i64 %.sum
+ %v = getelementptr inbounds i8, i8* null, i64 %.sum
%r = tail call i32 @check(i8* %v)
%c = icmp eq i32 %r, 0
br i1 %c, label %loop.end, label %abort.now
bb: ; preds = %bb7, %bb.nph
%i.01 = phi i32 [ %tmp6, %bb7 ], [ 0, %bb.nph ] ; <i32> [#uses=3]
%tmp1 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
- %tmp4 = getelementptr i64* %x, i32 %i.01 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr i64, i64* %x, i32 %i.01 ; <i64*> [#uses=1]
store i64 %tmp1, i64* %tmp4, align 8
%tmp6 = add i32 %i.01, 1 ; <i32> [#uses=2]
br label %bb7
for.body.i:
%indvars.iv37.i = phi i64 [ %indvars.iv.next38.i, %for.body.i ], [ 0, %entry ]
%call.i = call i8* (...)* @a() nounwind
- %arrayidx.i = getelementptr inbounds i8** %vla.i, i64 %indvars.iv37.i
+ %arrayidx.i = getelementptr inbounds i8*, i8** %vla.i, i64 %indvars.iv37.i
store i8* %call.i, i8** %arrayidx.i, align 8
%indvars.iv.next38.i = add i64 %indvars.iv37.i, 1
%exitcond5 = icmp eq i64 %indvars.iv.next38.i, %n
br i1 %cmp, label %cond.true, label %while.cond.preheader
cond.true: ; preds = %entry
- %arrayidx = getelementptr inbounds i8** %argv, i64 1 ; <i8**> [#uses=1]
+ %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1 ; <i8**> [#uses=1]
%tmp2 = load i8** %arrayidx ; <i8*> [#uses=1]
%call = tail call i32 @atoi(i8* %tmp2) nounwind readonly ; <i32> [#uses=1]
br label %while.cond.preheader
for.body: ; preds = %bb.nph, %for.cond
%i.02 = phi i64 [ 2, %bb.nph ], [ %inc, %for.cond ] ; <i64> [#uses=2]
- %arrayidx10 = getelementptr inbounds [8193 x i8]* @main.flags, i64 0, i64 %i.02 ; <i8*> [#uses=1]
+ %arrayidx10 = getelementptr inbounds [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %i.02 ; <i8*> [#uses=1]
store i8 1, i8* %arrayidx10
%inc = add nsw i64 %i.02, 1 ; <i64> [#uses=2]
br label %for.cond
for.body15: ; preds = %bb.nph16, %for.cond12
%count.212 = phi i32 [ 0, %bb.nph16 ], [ %count.1, %for.cond12 ] ; <i32> [#uses=2]
%i.17 = phi i64 [ 2, %bb.nph16 ], [ %inc37, %for.cond12 ] ; <i64> [#uses=4]
- %arrayidx17 = getelementptr inbounds [8193 x i8]* @main.flags, i64 0, i64 %i.17 ; <i8*> [#uses=1]
+ %arrayidx17 = getelementptr inbounds [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %i.17 ; <i8*> [#uses=1]
%tmp18 = load i8* %arrayidx17 ; <i8> [#uses=1]
%tobool19 = icmp eq i8 %tmp18, 0 ; <i1> [#uses=1]
br i1 %tobool19, label %for.inc35, label %if.then
for.body25: ; preds = %bb.nph5, %for.cond22
%k.04 = phi i64 [ %add, %bb.nph5 ], [ %add31, %for.cond22 ] ; <i64> [#uses=2]
- %arrayidx27 = getelementptr inbounds [8193 x i8]* @main.flags, i64 0, i64 %k.04 ; <i8*> [#uses=1]
+ %arrayidx27 = getelementptr inbounds [8193 x i8], [8193 x i8]* @main.flags, i64 0, i64 %k.04 ; <i8*> [#uses=1]
store i8 0, i8* %arrayidx27
%add31 = add nsw i64 %k.04, %i.17 ; <i64> [#uses=2]
br label %for.cond22
for.body: ; preds = %for.body.preheader, %for.body
%i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ] ; <i64> [#uses=2]
- %arrayidx = getelementptr inbounds double* %p, i64 %i.03 ; <double*> [#uses=1]
+ %arrayidx = getelementptr inbounds double, double* %p, i64 %i.03 ; <double*> [#uses=1]
store double 0.000000e+00, double* %arrayidx
%inc = add i64 %i.03, 1 ; <i64> [#uses=2]
%divx = udiv i64 %n, 7 ; <i64> [#uses=1]
bb1: ; preds = %bb2
%tmp = load double*** @tds, align 8 ; <double**> [#uses=1]
%tmp1 = sext i32 %i.0 to i64 ; <i64> [#uses=1]
- %tmp2 = getelementptr inbounds double** %tmp, i64 %tmp1 ; <double**> [#uses=1]
+ %tmp2 = getelementptr inbounds double*, double** %tmp, i64 %tmp1 ; <double**> [#uses=1]
%tmp3 = load double** %tmp2, align 1 ; <double*> [#uses=1]
%tmp6 = add nsw i32 %j.0, 1 ; <i32> [#uses=1]
br label %bb2
%0 = trunc i64 %indvars.iv to i32
%add = add i32 %0, %sample
%idxprom = zext i32 %add to i64
- %arrayidx = getelementptr inbounds float* %data, i64 %idxprom
+ %arrayidx = getelementptr inbounds float, float* %data, i64 %idxprom
%1 = load float* %arrayidx, align 4
%mul = fmul float %1, %d
- %arrayidx2 = getelementptr inbounds float* %autoc, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %autoc, i64 %indvars.iv
%2 = load float* %arrayidx2, align 4
%add3 = fadd float %2, %mul
store float %add3, float* %arrayidx2, align 4
define void @vnum_test8(i32* %data) {
entry:
- %tmp.1 = getelementptr i32* %data, i32 3 ; <i32*> [#uses=1]
+ %tmp.1 = getelementptr i32, i32* %data, i32 3 ; <i32*> [#uses=1]
%tmp.2 = load i32* %tmp.1 ; <i32> [#uses=2]
- %tmp.4 = getelementptr i32* %data, i32 4 ; <i32*> [#uses=1]
+ %tmp.4 = getelementptr i32, i32* %data, i32 4 ; <i32*> [#uses=1]
%tmp.5 = load i32* %tmp.4 ; <i32> [#uses=2]
- %tmp.8 = getelementptr i32* %data, i32 2 ; <i32*> [#uses=1]
+ %tmp.8 = getelementptr i32, i32* %data, i32 2 ; <i32*> [#uses=1]
%tmp.9 = load i32* %tmp.8 ; <i32> [#uses=3]
%tmp.125 = icmp sgt i32 %tmp.2, 0 ; <i1> [#uses=1]
br i1 %tmp.125, label %no_exit.preheader, label %return
no_exit.preheader: ; preds = %entry
- %tmp.16 = getelementptr i32* %data, i32 %tmp.9 ; <i32*> [#uses=1]
+ %tmp.16 = getelementptr i32, i32* %data, i32 %tmp.9 ; <i32*> [#uses=1]
br label %no_exit
; CHECK: store i32 0
for.body:
%i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.cond ]
%idxprom = sext i32 %i.05 to i64
- %arrayidx = getelementptr inbounds i32* %1, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %1, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %3, 0
br i1 %tobool, label %if.then, label %for.cond
for.body4.us:
%storemerge14.us = phi i32 [ 0, %for.body4.lr.ph.us ], [ %inc.us, %for.body4.us ]
%idxprom.us = sext i32 %storemerge14.us to i64
- %arrayidx6.us = getelementptr inbounds [8 x i8]* %a, i64 %idxprom5.us, i64 %idxprom.us
+ %arrayidx6.us = getelementptr inbounds [8 x i8], [8 x i8]* %a, i64 %idxprom5.us, i64 %idxprom.us
%0 = load i8* %arrayidx6.us, align 1
%idxprom7.us = zext i8 %0 to i64
- %arrayidx8.us = getelementptr inbounds i8* %b, i64 %idxprom7.us
+ %arrayidx8.us = getelementptr inbounds i8, i8* %b, i64 %idxprom7.us
%1 = load i8* %arrayidx8.us, align 1
store i8 %1, i8* %arrayidx6.us, align 1
%inc.us = add nsw i32 %storemerge14.us, 1
for.body:
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds i32* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
for.body:
%idxprom = zext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds i32* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
for.body: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds i32* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
]
bb: ; preds = %entry
- %0 = getelementptr %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1]
+ %0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1]
%1 = load %struct.quad_struct** %0, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %1
bb1: ; preds = %entry
- %2 = getelementptr %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1]
+ %2 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1]
%3 = load %struct.quad_struct** %2, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %3
bb2: ; preds = %entry
- %4 = getelementptr %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1]
+ %4 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1]
%5 = load %struct.quad_struct** %4, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %5
bb3: ; preds = %entry
- %6 = getelementptr %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1]
+ %6 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1]
%7 = load %struct.quad_struct** %6, align 4 ; <%struct.quad_struct*> [#uses=1]
ret %struct.quad_struct* %7
define internal fastcc %struct.quad_struct* @gtequal_adj_neighbor(%struct.quad_struct* nocapture %tree, i32 %d) nounwind readonly {
entry:
- %0 = getelementptr %struct.quad_struct* %tree, i32 0, i32 6 ; <%struct.quad_struct**> [#uses=1]
+ %0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 6 ; <%struct.quad_struct**> [#uses=1]
%1 = load %struct.quad_struct** %0, align 4 ; <%struct.quad_struct*> [#uses=4]
- %2 = getelementptr %struct.quad_struct* %tree, i32 0, i32 1 ; <i32*> [#uses=1]
+ %2 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 1 ; <i32*> [#uses=1]
%3 = load i32* %2, align 4 ; <i32> [#uses=2]
%4 = icmp eq %struct.quad_struct* %1, null ; <i1> [#uses=1]
br i1 %4, label %bb3, label %bb
br i1 %8, label %bb7, label %bb4
bb4: ; preds = %bb3
- %9 = getelementptr %struct.quad_struct* %q.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %9 = getelementptr %struct.quad_struct, %struct.quad_struct* %q.0, i32 0, i32 0 ; <i32*> [#uses=1]
%10 = load i32* %9, align 4 ; <i32> [#uses=1]
%11 = icmp eq i32 %10, 2 ; <i1> [#uses=1]
br i1 %11, label %bb5, label %bb7
define i32 @perimeter(%struct.quad_struct* nocapture %tree, i32 %size) nounwind readonly {
entry:
- %0 = getelementptr %struct.quad_struct* %tree, i32 0, i32 0 ; <i32*> [#uses=1]
+ %0 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 0 ; <i32*> [#uses=1]
%1 = load i32* %0, align 4 ; <i32> [#uses=1]
%2 = icmp eq i32 %1, 2 ; <i1> [#uses=1]
br i1 %2, label %bb, label %bb2
bb: ; preds = %entry
- %3 = getelementptr %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1]
+ %3 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 4 ; <%struct.quad_struct**> [#uses=1]
%4 = load %struct.quad_struct** %3, align 4 ; <%struct.quad_struct*> [#uses=1]
%5 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%6 = call i32 @perimeter(%struct.quad_struct* %4, i32 %5) nounwind ; <i32> [#uses=1]
- %7 = getelementptr %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1]
+ %7 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 5 ; <%struct.quad_struct**> [#uses=1]
%8 = load %struct.quad_struct** %7, align 4 ; <%struct.quad_struct*> [#uses=1]
%9 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%10 = call i32 @perimeter(%struct.quad_struct* %8, i32 %9) nounwind ; <i32> [#uses=1]
%11 = add i32 %10, %6 ; <i32> [#uses=1]
- %12 = getelementptr %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1]
+ %12 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 3 ; <%struct.quad_struct**> [#uses=1]
%13 = load %struct.quad_struct** %12, align 4 ; <%struct.quad_struct*> [#uses=1]
%14 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%15 = call i32 @perimeter(%struct.quad_struct* %13, i32 %14) nounwind ; <i32> [#uses=1]
%16 = add i32 %15, %11 ; <i32> [#uses=1]
- %17 = getelementptr %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1]
+ %17 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 2 ; <%struct.quad_struct**> [#uses=1]
%18 = load %struct.quad_struct** %17, align 4 ; <%struct.quad_struct*> [#uses=1]
%19 = sdiv i32 %size, 2 ; <i32> [#uses=1]
%20 = call i32 @perimeter(%struct.quad_struct* %18, i32 %19) nounwind ; <i32> [#uses=1]
ret i32 %21
bb2: ; preds = %entry
- %22 = getelementptr %struct.quad_struct* %tree, i32 0, i32 0 ; <i32*> [#uses=1]
+ %22 = getelementptr %struct.quad_struct, %struct.quad_struct* %tree, i32 0, i32 0 ; <i32*> [#uses=1]
%23 = load i32* %22, align 4 ; <i32> [#uses=1]
%24 = icmp eq i32 %23, 0 ; <i1> [#uses=1]
br i1 %24, label %bb3, label %bb23
br i1 %26, label %bb8, label %bb4
bb4: ; preds = %bb3
- %27 = getelementptr %struct.quad_struct* %25, i32 0, i32 0 ; <i32*> [#uses=1]
+ %27 = getelementptr %struct.quad_struct, %struct.quad_struct* %25, i32 0, i32 0 ; <i32*> [#uses=1]
%28 = load i32* %27, align 4 ; <i32> [#uses=1]
%29 = icmp eq i32 %28, 1 ; <i1> [#uses=1]
br i1 %29, label %bb8, label %bb6
bb6: ; preds = %bb4
- %30 = getelementptr %struct.quad_struct* %25, i32 0, i32 0 ; <i32*> [#uses=1]
+ %30 = getelementptr %struct.quad_struct, %struct.quad_struct* %25, i32 0, i32 0 ; <i32*> [#uses=1]
%31 = load i32* %30, align 4 ; <i32> [#uses=1]
%32 = icmp eq i32 %31, 2 ; <i1> [#uses=1]
br i1 %32, label %bb7, label %bb8
br i1 %35, label %bb10, label %bb9
bb9: ; preds = %bb8
- %36 = getelementptr %struct.quad_struct* %34, i32 0, i32 0 ; <i32*> [#uses=1]
+ %36 = getelementptr %struct.quad_struct, %struct.quad_struct* %34, i32 0, i32 0 ; <i32*> [#uses=1]
%37 = load i32* %36, align 4 ; <i32> [#uses=1]
%38 = icmp eq i32 %37, 1 ; <i1> [#uses=1]
br i1 %38, label %bb10, label %bb11
br label %bb13
bb11: ; preds = %bb9
- %40 = getelementptr %struct.quad_struct* %34, i32 0, i32 0 ; <i32*> [#uses=1]
+ %40 = getelementptr %struct.quad_struct, %struct.quad_struct* %34, i32 0, i32 0 ; <i32*> [#uses=1]
%41 = load i32* %40, align 4 ; <i32> [#uses=1]
%42 = icmp eq i32 %41, 2 ; <i1> [#uses=1]
br i1 %42, label %bb12, label %bb13
br i1 %46, label %bb15, label %bb14
bb14: ; preds = %bb13
- %47 = getelementptr %struct.quad_struct* %45, i32 0, i32 0 ; <i32*> [#uses=1]
+ %47 = getelementptr %struct.quad_struct, %struct.quad_struct* %45, i32 0, i32 0 ; <i32*> [#uses=1]
%48 = load i32* %47, align 4 ; <i32> [#uses=1]
%49 = icmp eq i32 %48, 1 ; <i1> [#uses=1]
br i1 %49, label %bb15, label %bb16
br label %bb18
bb16: ; preds = %bb14
- %51 = getelementptr %struct.quad_struct* %45, i32 0, i32 0 ; <i32*> [#uses=1]
+ %51 = getelementptr %struct.quad_struct, %struct.quad_struct* %45, i32 0, i32 0 ; <i32*> [#uses=1]
%52 = load i32* %51, align 4 ; <i32> [#uses=1]
%53 = icmp eq i32 %52, 2 ; <i1> [#uses=1]
br i1 %53, label %bb17, label %bb18
br i1 %57, label %bb20, label %bb19
bb19: ; preds = %bb18
- %58 = getelementptr %struct.quad_struct* %56, i32 0, i32 0 ; <i32*> [#uses=1]
+ %58 = getelementptr %struct.quad_struct, %struct.quad_struct* %56, i32 0, i32 0 ; <i32*> [#uses=1]
%59 = load i32* %58, align 4 ; <i32> [#uses=1]
%60 = icmp eq i32 %59, 1 ; <i1> [#uses=1]
br i1 %60, label %bb20, label %bb21
ret i32 %61
bb21: ; preds = %bb19
- %62 = getelementptr %struct.quad_struct* %56, i32 0, i32 0 ; <i32*> [#uses=1]
+ %62 = getelementptr %struct.quad_struct, %struct.quad_struct* %56, i32 0, i32 0 ; <i32*> [#uses=1]
%63 = load i32* %62, align 4 ; <i32> [#uses=1]
%64 = icmp eq i32 %63, 2 ; <i1> [#uses=1]
br i1 %64, label %bb22, label %bb23
define void @hello(float* align 128 nocapture %a, float* nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 5
+ %arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
ret void
}
entry:
tail call void @hello(float* %a, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: %maskcond = icmp eq i64 %maskedptr, 0
; CHECK: call void @llvm.assume(i1 %maskcond)
; CHECK: %0 = load float* %c, align 4
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4
; CHECK: %1 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
entry:
tail call void @hello(float* %a, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
; CHECK: entry:
; CHECK: %0 = load float* %c, align 4
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4
; CHECK: %1 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
define void @hello2(float* align 128 nocapture %a, float* align 128 nocapture %b, float* nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 5
+ %arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds float* %b, i64 8
+ %arrayidx1 = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1, align 4
ret void
}
entry:
tail call void @hello2(float* %a, float* %b, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: %maskcond3 = icmp eq i64 %maskedptr2, 0
; CHECK: call void @llvm.assume(i1 %maskcond3)
; CHECK: %0 = load float* %c, align 4
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4
-; CHECK: %arrayidx1.i = getelementptr inbounds float* %b, i64 8
+; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %0, float* %arrayidx1.i, align 4
; CHECK: %1 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
define void @inner1(i32 *%ptr) {
%A = load i32* %ptr
store i32 0, i32* %ptr
- %C = getelementptr inbounds i32* %ptr, i32 0
- %D = getelementptr inbounds i32* %ptr, i32 1
+ %C = getelementptr inbounds i32, i32* %ptr, i32 0
+ %D = getelementptr inbounds i32, i32* %ptr, i32 1
%E = bitcast i32* %ptr to i8*
%F = select i1 false, i32* %ptr, i32* @glbl
call void @llvm.lifetime.start(i64 0, i8* %E)
define void @inner2(i32 *%ptr) {
%A = load i32* %ptr
store i32 0, i32* %ptr
- %C = getelementptr inbounds i32* %ptr, i32 0
- %D = getelementptr inbounds i32* %ptr, i32 %A
+ %C = getelementptr inbounds i32, i32* %ptr, i32 0
+ %D = getelementptr inbounds i32, i32* %ptr, i32 %A
%E = bitcast i32* %ptr to i8*
%F = select i1 false, i32* %ptr, i32* @glbl
call void @llvm.lifetime.start(i64 0, i8* %E)
; %B poisons this call, scalar-repl can't handle that instruction. However, we
; still want to detect that the icmp and branch *can* be handled.
define void @inner4(i32 *%ptr, i32 %A) {
- %B = getelementptr inbounds i32* %ptr, i32 %A
+ %B = getelementptr inbounds i32, i32* %ptr, i32 %A
%C = icmp eq i32* %ptr, null
br i1 %C, label %bb.true, label %bb.false
bb.true:
define void @inner5(i1 %flag, i32 *%ptr) {
%A = load i32* %ptr
store i32 0, i32* %ptr
- %C = getelementptr inbounds i32* %ptr, i32 0
+ %C = getelementptr inbounds i32, i32* %ptr, i32 0
br i1 %flag, label %if.then, label %exit
if.then:
- %D = getelementptr inbounds i32* %ptr, i32 %A
+ %D = getelementptr inbounds i32, i32* %ptr, i32 %A
%E = bitcast i32* %ptr to i8*
%F = select i1 false, i32* %ptr, i32* @glbl
call void @llvm.lifetime.start(i64 0, i8* %E)
tail call void @llvm.dbg.declare(metadata [20 x i8]* %agg.tmp.sroa.3, metadata !46, metadata !48), !dbg !49
%agg.tmp.sroa.0.0.copyload = load i32* getelementptr inbounds (%struct.A* @b, i64 0, i32 0), align 8, !dbg !50
tail call void @llvm.dbg.value(metadata i32 %agg.tmp.sroa.0.0.copyload, i64 0, metadata !46, metadata !51), !dbg !49
- %agg.tmp.sroa.3.0..sroa_idx = getelementptr inbounds [20 x i8]* %agg.tmp.sroa.3, i64 0, i64 0, !dbg !50
+ %agg.tmp.sroa.3.0..sroa_idx = getelementptr inbounds [20 x i8], [20 x i8]* %agg.tmp.sroa.3, i64 0, i64 0, !dbg !50
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %agg.tmp.sroa.3.0..sroa_idx, i8* getelementptr (i8* bitcast (%struct.A* @b to i8*), i64 4), i64 20, i32 4, i1 false), !dbg !50
tail call void @llvm.dbg.declare(metadata %struct.A* undef, metadata !46, metadata !31) #2, !dbg !49
%tobool.i = icmp eq i32 %agg.tmp.sroa.0.0.copyload, 0, !dbg !52
define internal fastcc void @a() nounwind ssp {
entry:
%al = alloca [3 x i32], align 4
- %0 = getelementptr inbounds [3 x i32]* %al, i32 0, i32 2
+ %0 = getelementptr inbounds [3 x i32], [3 x i32]* %al, i32 0, i32 2
call fastcc void @c() nounwind
unreachable
define void @foo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 4
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
define void @foo0(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32]
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
define void @foo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 4
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
define void @foo0(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32]
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
define void @foo1(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 1
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 4
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
define void @goo(%struct.s* byval nocapture readonly %a) {
entry:
%x = alloca [2 x i32], align 32
- %a1 = getelementptr inbounds %struct.s* %a, i64 0, i32 0
+ %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0
%0 = load i32* %a1, align 4
- %arrayidx = getelementptr inbounds [2 x i32]* %x, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 0
store i32 %0, i32* %arrayidx, align 32
- %b = getelementptr inbounds %struct.s* %a, i64 0, i32 1
+ %b = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 1
%1 = load i32* %b, align 4
- %arrayidx2 = getelementptr inbounds [2 x i32]* %x, i64 0, i64 1
+ %arrayidx2 = getelementptr inbounds [2 x i32], [2 x i32]* %x, i64 0, i64 1
store i32 %1, i32* %arrayidx2, align 4
call void @bar(i32* %arrayidx) #2
ret void
br i1 %cond, label %T, label %F
T:
- %A = getelementptr %T* %P, i32 0, i32 0
+ %A = getelementptr %T, %T* %P, i32 0, i32 0
store i32 42, i32* %A
ret %T* %P
%A = alloca %T
%B = call %T* @test2f(i1 %cond, %T* %A)
- %C = getelementptr %T* %B, i32 0, i32 0
+ %C = getelementptr %T, %T* %B, i32 0, i32 0
%D = load i32* %C
ret i32 %D
define internal void @f(%struct.ss* byval %b) nounwind {
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
store i32 %tmp2, i32* %tmp, align 4
define i32 @test1() nounwind {
entry:
%S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 2, i64* %tmp4, align 4
call void @f( %struct.ss* byval %S ) nounwind
ret i32 0
define internal i32 @f2(%struct.ss* byval %b) nounwind readonly {
entry:
- %tmp = getelementptr %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
+ %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp1 = load i32* %tmp, align 4 ; <i32> [#uses=1]
%tmp2 = add i32 %tmp1, 1 ; <i32> [#uses=1]
ret i32 %tmp2
define i32 @test2() nounwind {
entry:
%S = alloca %struct.ss ; <%struct.ss*> [#uses=4]
- %tmp1 = getelementptr %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp1, align 8
- %tmp4 = getelementptr %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; <i64*> [#uses=1]
store i64 2, i64* %tmp4, align 4
%X = call i32 @f2( %struct.ss* byval %S ) nounwind
ret i32 %X
define internal void @f5(%struct.S0* byval nocapture readonly align 4 %p) {
entry:
store i32 0, i32* getelementptr inbounds (%struct.S0* @b, i64 0, i32 0), align 4
- %f2 = getelementptr inbounds %struct.S0* %p, i64 0, i32 0
+ %f2 = getelementptr inbounds %struct.S0, %struct.S0* %p, i64 0, i32 0
%0 = load i32* %f2, align 4
store i32 %0, i32* @a, align 4
ret void
define i32 @foo(%struct.foo* byval align 8 %f, i32 %a) {
entry:
- %a1 = getelementptr inbounds %struct.foo* %f, i32 0, i32 1
- %arrayidx = getelementptr inbounds [16 x i32]* %a1, i32 0, i32 %a
+ %a1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i32 0, i32 1
+ %arrayidx = getelementptr inbounds [16 x i32], [16 x i32]* %a1, i32 0, i32 %a
%tmp2 = load i32* %arrayidx, align 1
ret i32 %tmp2
}
define i32 @main() nounwind ssp {
entry:
%cont = alloca %struct.cont_t, align 8 ; <%struct.cont_t*> [#uses=4]
- %tmp = getelementptr inbounds %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
- %tmp1 = getelementptr inbounds %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=2]
+ %tmp = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
+ %tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=2]
store void (i8*, i32)* bitcast (void (%struct.cont_t*, i32)* @quit to void (i8*, i32)*), void (i8*, i32)** %tmp1
%tmp2 = load void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
store void (i8*, i32)* %tmp2, void (i8*, i32)** %tmp
- %tmp3 = getelementptr inbounds %struct.cont_t* %cont, i32 0, i32 1 ; <i8**> [#uses=1]
+ %tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %cont, i32 0, i32 1 ; <i8**> [#uses=1]
store i8* null, i8** %tmp3
call void @foo(%struct.cont_t* %cont)
ret i32 0
entry:
%sf = alloca %struct.foo_sf_t, align 8 ; <%struct.foo_sf_t*> [#uses=3]
%next = alloca %struct.cont_t, align 8 ; <%struct.cont_t*> [#uses=3]
- %tmp = getelementptr inbounds %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
+ %tmp = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
store %struct.cont_t* %c, %struct.cont_t** %tmp
- %tmp2 = getelementptr inbounds %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 2, i32* %tmp2
- %tmp4 = getelementptr inbounds %struct.cont_t* %next, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
+ %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
store void (i8*, i32)* bitcast (void (%struct.foo_sf_t*, i32)* @foo2 to void (i8*, i32)*), void (i8*, i32)** %tmp4
- %tmp5 = getelementptr inbounds %struct.cont_t* %next, i32 0, i32 1 ; <i8**> [#uses=1]
+ %tmp5 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %next, i32 0, i32 1 ; <i8**> [#uses=1]
%conv = bitcast %struct.foo_sf_t* %sf to i8* ; <i8*> [#uses=1]
store i8* %conv, i8** %tmp5
call void @bar(%struct.cont_t* %next, i32 14)
define internal void @foo2(%struct.foo_sf_t* %sf, i32 %y) nounwind ssp {
entry:
- %tmp1 = getelementptr inbounds %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
+ %tmp1 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
%tmp2 = load %struct.cont_t** %tmp1 ; <%struct.cont_t*> [#uses=1]
- %tmp3 = getelementptr inbounds %struct.cont_t* %tmp2, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
+ %tmp3 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %tmp2, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
%tmp4 = load void (i8*, i32)** %tmp3 ; <void (i8*, i32)*> [#uses=1]
- %tmp6 = getelementptr inbounds %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
+ %tmp6 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 0 ; <%struct.cont_t**> [#uses=1]
%tmp7 = load %struct.cont_t** %tmp6 ; <%struct.cont_t*> [#uses=1]
%conv = bitcast %struct.cont_t* %tmp7 to i8* ; <i8*> [#uses=1]
- %tmp9 = getelementptr inbounds %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp9 = getelementptr inbounds %struct.foo_sf_t, %struct.foo_sf_t* %sf, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp10 = load i32* %tmp9 ; <i32> [#uses=1]
%mul = mul i32 %tmp10, %y ; <i32> [#uses=1]
call void %tmp4(i8* %conv, i32 %mul)
define internal void @bar(%struct.cont_t* %c, i32 %y) nounwind ssp {
entry:
- %tmp1 = getelementptr inbounds %struct.cont_t* %c, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
+ %tmp1 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 0 ; <void (i8*, i32)**> [#uses=1]
%tmp2 = load void (i8*, i32)** %tmp1 ; <void (i8*, i32)*> [#uses=1]
- %tmp4 = getelementptr inbounds %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
+ %tmp4 = getelementptr inbounds %struct.cont_t, %struct.cont_t* %c, i32 0, i32 1 ; <i8**> [#uses=1]
%tmp5 = load i8** %tmp4 ; <i8*> [#uses=1]
%add = add nsw i32 %y, 5 ; <i32> [#uses=1]
call void %tmp2(i8* %tmp5, i32 %add)
cast.notnull: ; preds = %entry
%1 = bitcast %struct.C* %d to i8* ; <i8*> [#uses=1]
- %add.ptr = getelementptr i8* %1, i64 8 ; <i8*> [#uses=1]
+ %add.ptr = getelementptr i8, i8* %1, i64 8 ; <i8*> [#uses=1]
%2 = bitcast i8* %add.ptr to %struct.A* ; <%struct.A*> [#uses=1]
br label %cast.end
%3 = phi %struct.A* [ %2, %cast.notnull ], [ null, %entry ] ; <%struct.A*> [#uses=2]
%4 = bitcast %struct.A* %3 to i32 (%struct.A*)*** ; <i32 (%struct.A*)***> [#uses=1]
%5 = load i32 (%struct.A*)*** %4 ; <i32 (%struct.A*)**> [#uses=1]
- %vfn = getelementptr inbounds i32 (%struct.A*)** %5, i64 0 ; <i32 (%struct.A*)**> [#uses=1]
+ %vfn = getelementptr inbounds i32 (%struct.A*)*, i32 (%struct.A*)** %5, i64 0 ; <i32 (%struct.A*)**> [#uses=1]
%6 = load i32 (%struct.A*)** %vfn ; <i32 (%struct.A*)*> [#uses=1]
%call = call i32 %6(%struct.A* %3) ; <i32> [#uses=1]
ret i32 %call
define linkonce_odr i32 @_ZThn8_N1D1fEv(%struct.C* %this) {
entry:
%0 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %1 = getelementptr inbounds i8* %0, i64 -8 ; <i8*> [#uses=1]
+ %1 = getelementptr inbounds i8, i8* %0, i64 -8 ; <i8*> [#uses=1]
%2 = bitcast i8* %1 to %struct.C* ; <%struct.C*> [#uses=1]
%call = call i32 @_ZN1D1fEv(%struct.C* %2) ; <i32> [#uses=1]
ret i32 %call
entry:
call void @_ZN1CC2Ev(%struct.C* %this)
%0 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %1 = getelementptr inbounds i8* %0, i64 0 ; <i8*> [#uses=1]
+ %1 = getelementptr inbounds i8, i8* %0, i64 0 ; <i8*> [#uses=1]
%2 = bitcast i8* %1 to i8*** ; <i8***> [#uses=1]
store i8** getelementptr inbounds ([6 x i8*]* @_ZTV1D, i64 0, i64 2), i8*** %2
%3 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %4 = getelementptr inbounds i8* %3, i64 8 ; <i8*> [#uses=1]
+ %4 = getelementptr inbounds i8, i8* %3, i64 8 ; <i8*> [#uses=1]
%5 = bitcast i8* %4 to i8*** ; <i8***> [#uses=1]
store i8** getelementptr inbounds ([6 x i8*]* @_ZTV1D, i64 0, i64 5), i8*** %5
ret void
%0 = bitcast %struct.C* %this to %struct.A* ; <%struct.A*> [#uses=1]
call void @_ZN1AC2Ev(%struct.A* %0)
%1 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %2 = getelementptr inbounds i8* %1, i64 8 ; <i8*> [#uses=1]
+ %2 = getelementptr inbounds i8, i8* %1, i64 8 ; <i8*> [#uses=1]
%3 = bitcast i8* %2 to %struct.A* ; <%struct.A*> [#uses=1]
call void @_ZN1BC2Ev(%struct.A* %3)
%4 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %5 = getelementptr inbounds i8* %4, i64 0 ; <i8*> [#uses=1]
+ %5 = getelementptr inbounds i8, i8* %4, i64 0 ; <i8*> [#uses=1]
%6 = bitcast i8* %5 to i8*** ; <i8***> [#uses=1]
store i8** getelementptr inbounds ([6 x i8*]* @_ZTV1C, i64 0, i64 2), i8*** %6
%7 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %8 = getelementptr inbounds i8* %7, i64 8 ; <i8*> [#uses=1]
+ %8 = getelementptr inbounds i8, i8* %7, i64 8 ; <i8*> [#uses=1]
%9 = bitcast i8* %8 to i8*** ; <i8***> [#uses=1]
store i8** getelementptr inbounds ([6 x i8*]* @_ZTV1C, i64 0, i64 5), i8*** %9
ret void
define linkonce_odr i32 @_ZThn8_N1C1fEv(%struct.C* %this) {
entry:
%0 = bitcast %struct.C* %this to i8* ; <i8*> [#uses=1]
- %1 = getelementptr inbounds i8* %0, i64 -8 ; <i8*> [#uses=1]
+ %1 = getelementptr inbounds i8, i8* %0, i64 -8 ; <i8*> [#uses=1]
%2 = bitcast i8* %1 to %struct.C* ; <%struct.C*> [#uses=1]
%call = call i32 @_ZN1C1fEv(%struct.C* %2) ; <i32> [#uses=1]
ret i32 %call
define linkonce_odr void @_ZN1AC2Ev(%struct.A* %this) inlinehint ssp align 2 {
entry:
%0 = bitcast %struct.A* %this to i8* ; <i8*> [#uses=1]
- %1 = getelementptr inbounds i8* %0, i64 0 ; <i8*> [#uses=1]
+ %1 = getelementptr inbounds i8, i8* %0, i64 0 ; <i8*> [#uses=1]
%2 = bitcast i8* %1 to i8*** ; <i8***> [#uses=1]
store i8** getelementptr inbounds ([3 x i8*]* @_ZTV1A, i64 0, i64 2), i8*** %2
ret void
define linkonce_odr void @_ZN1BC2Ev(%struct.A* %this) inlinehint ssp align 2 {
entry:
%0 = bitcast %struct.A* %this to i8* ; <i8*> [#uses=1]
- %1 = getelementptr inbounds i8* %0, i64 0 ; <i8*> [#uses=1]
+ %1 = getelementptr inbounds i8, i8* %0, i64 0 ; <i8*> [#uses=1]
%2 = bitcast i8* %1 to i8*** ; <i8***> [#uses=1]
store i8** getelementptr inbounds ([3 x i8*]* @_ZTV1B, i64 0, i64 2), i8*** %2
ret void
declare void @fix(%struct.ray*)
define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp {
- %1 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 1, i32 0
+ %1 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 0
%2 = load double* %1, align 8
%3 = fmul double %2, %2
- %4 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 1, i32 1
+ %4 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 1
%5 = load double* %4, align 8
%6 = fmul double %5, %5
%7 = fadd double %3, %6
- %8 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 1, i32 2
+ %8 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 2
%9 = load double* %8, align 8
%10 = fmul double %9, %9
%11 = fadd double %7, %10
%12 = fmul double %2, 2.000000e+00
- %13 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 0, i32 0
+ %13 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 0
%14 = load double* %13, align 8
- %15 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 0, i32 0
+ %15 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 0
%16 = load double* %15, align 8
%17 = fsub double %14, %16
%18 = fmul double %12, %17
%19 = fmul double %5, 2.000000e+00
- %20 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 0, i32 1
+ %20 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 1
%21 = load double* %20, align 8
- %22 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 0, i32 1
+ %22 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 1
%23 = load double* %22, align 8
%24 = fsub double %21, %23
%25 = fmul double %19, %24
%26 = fadd double %18, %25
%27 = fmul double %9, 2.000000e+00
- %28 = getelementptr inbounds %struct.ray* %ray, i64 0, i32 0, i32 2
+ %28 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 0, i32 2
%29 = load double* %28, align 8
- %30 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 0, i32 2
+ %30 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 0, i32 2
%31 = load double* %30, align 8
%32 = fsub double %29, %31
%33 = fmul double %27, %32
%51 = fsub double %49, %50
%52 = fmul double %51, 2.000000e+00
%53 = fadd double %52, %45
- %54 = getelementptr inbounds %struct.sphere* %sph, i64 0, i32 1
+ %54 = getelementptr inbounds %struct.sphere, %struct.sphere* %sph, i64 0, i32 1
%55 = load double* %54, align 8
%56 = fmul double %55, %55
%57 = fsub double %53, %56
%t2.0 = select i1 %72, double %t1.0, double %70
%79 = fcmp olt double %t1.0, %t2.0
%80 = select i1 %79, double %t1.0, double %t2.0
- %81 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 3
+ %81 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 3
store double %80, double* %81, align 8
%82 = fmul double %80, %2
%83 = fadd double %14, %82
- %84 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 0, i32 0
+ %84 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 0
store double %83, double* %84, align 8
%85 = fmul double %5, %80
%86 = fadd double %21, %85
- %87 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 0, i32 1
+ %87 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 1
store double %86, double* %87, align 8
%88 = fmul double %9, %80
%89 = fadd double %29, %88
- %90 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 0, i32 2
+ %90 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 0, i32 2
store double %89, double* %90, align 8
%91 = load double* %15, align 8
%92 = fsub double %83, %91
%93 = load double* %54, align 8
%94 = fdiv double %92, %93
- %95 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 1, i32 0
+ %95 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 0
store double %94, double* %95, align 8
%96 = load double* %22, align 8
%97 = fsub double %86, %96
%98 = load double* %54, align 8
%99 = fdiv double %97, %98
- %100 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 1, i32 1
+ %100 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 1
store double %99, double* %100, align 8
%101 = load double* %30, align 8
%102 = fsub double %89, %101
%103 = load double* %54, align 8
%104 = fdiv double %102, %103
- %105 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 1, i32 2
+ %105 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 1, i32 2
store double %104, double* %105, align 8
%106 = fmul double %2, %94
%107 = fmul double %5, %99
%118 = fmul double %104, %111
%119 = fsub double %118, %9
%120 = fsub double -0.000000e+00, %119
- %.06 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 2, i32 0
- %.18 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 2, i32 1
- %.210 = getelementptr inbounds %struct.spoint* %sp, i64 0, i32 2, i32 2
+ %.06 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 2, i32 0
+ %.18 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 2, i32 1
+ %.210 = getelementptr inbounds %struct.spoint, %struct.spoint* %sp, i64 0, i32 2, i32 2
%121 = fmul double %113, %113
%122 = fmul double %116, %116
%123 = fadd double %121, %122
if.then: ; preds = %entry
%a1 = load float* %a, align 8
- %arrayidx1 = getelementptr inbounds float* %a, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %a, i64 1
%a2 = load float* %arrayidx1, align 4
%add = fadd fast float %a1, %a2
br label %if.end
declare void @ext_method(i8*, i32)
define linkonce_odr void @thunk(i8* %this, ...) {
- %this_adj = getelementptr i8* %this, i32 4
+ %this_adj = getelementptr i8, i8* %this, i32 4
musttail call void (i8*, ...)* bitcast (void (i8*, i32)* @ext_method to void (i8*, ...)*)(i8* %this_adj, ...)
ret void
}
define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 {
entry:
%data = alloca [2 x i8], align 1
- %arraydecay = getelementptr inbounds [2 x i8]* %data, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [2 x i8], [2 x i8]* %data, i64 0, i64 0
call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([2 x i8]* @.str, i64 0, i64 0), i64 1)
call fastcc void @memcpy2(i8* %arraydecay, i8* getelementptr inbounds ([3 x i8]* @.str1, i64 0, i64 0), i64 2)
ret i32 0
store float* %dst, float** %dst.addr, align 4\r
call void @llvm.dbg.declare(metadata float** %dst.addr, metadata !20, metadata !17), !dbg !21\r
%0 = load float** %dst.addr, align 4, !dbg !22\r
- %arrayidx = getelementptr inbounds float* %0, i32 0, !dbg !22\r
+ %arrayidx = getelementptr inbounds float, float* %0, i32 0, !dbg !22\r
%1 = load float* %arrayidx, align 4, !dbg !22\r
%call = call float @foo(float %1), !dbg !22\r
\r
; CHECK: void @llvm.dbg.declare(metadata float* [[x_addr_i]], metadata [[m23:![0-9]+]], metadata !17), !dbg [[m24:![0-9]+]]\r
\r
%2 = load float** %dst.addr, align 4, !dbg !22\r
- %arrayidx1 = getelementptr inbounds float* %2, i32 0, !dbg !22\r
+ %arrayidx1 = getelementptr inbounds float, float* %2, i32 0, !dbg !22\r
store float %call, float* %arrayidx1, align 4, !dbg !22\r
ret void, !dbg !23\r
}\r
%tmp = load i32* %a.addr, align 4
%idxprom = sext i32 %tmp to i64
%tmp1 = load i32** @data, align 8
- %arrayidx = getelementptr inbounds i32* %tmp1, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %tmp1, i64 %idxprom
%tmp2 = load i32* %arrayidx, align 4
%tmp3 = load i32* %a.addr, align 4
%add = add nsw i32 %tmp3, 1
%idxprom1 = sext i32 %add to i64
%tmp4 = load i32** @data, align 8
- %arrayidx2 = getelementptr inbounds i32* %tmp4, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom1
%tmp5 = load i32* %arrayidx2, align 4
%mul = mul nsw i32 %tmp2, %tmp5
store i32 %mul, i32* %res, align 4
%tmp8 = load i32* %i, align 4
%idxprom3 = sext i32 %tmp8 to i64
%tmp9 = load i32** @data, align 8
- %arrayidx4 = getelementptr inbounds i32* %tmp9, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds i32, i32* %tmp9, i64 %idxprom3
call void @fct0(i32* %arrayidx4)
br label %for.inc
%tmp13 = load i32* %i, align 4
%idxprom8 = sext i32 %tmp13 to i64
%tmp14 = load i32** @data, align 8
- %arrayidx9 = getelementptr inbounds i32* %tmp14, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom8
call void @fct0(i32* %arrayidx9)
br label %for.inc10
%tmp18 = load i32* %i, align 4
%idxprom16 = sext i32 %tmp18 to i64
%tmp19 = load i32** @data, align 8
- %arrayidx17 = getelementptr inbounds i32* %tmp19, i64 %idxprom16
+ %arrayidx17 = getelementptr inbounds i32, i32* %tmp19, i64 %idxprom16
call void @fct0(i32* %arrayidx17)
br label %for.inc18
%shl = shl i32 %tmp, 1
%idxprom = sext i32 %shl to i64
%tmp1 = load i32** @data, align 8
- %arrayidx = getelementptr inbounds i32* %tmp1, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %tmp1, i64 %idxprom
%tmp2 = load i32* %arrayidx, align 4
%tmp3 = load i32* %a.addr, align 4
%shl1 = shl i32 %tmp3, 1
%add = add nsw i32 %shl1, 13
%idxprom2 = sext i32 %add to i64
%tmp4 = load i32** @data, align 8
- %arrayidx3 = getelementptr inbounds i32* %tmp4, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom2
%tmp5 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %tmp2, %tmp5
store i32 %mul, i32* %res, align 4
%tmp8 = load i32* %i, align 4
%idxprom4 = sext i32 %tmp8 to i64
%tmp9 = load i32** @data, align 8
- %arrayidx5 = getelementptr inbounds i32* %tmp9, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %tmp9, i64 %idxprom4
call void @fct0(i32* %arrayidx5)
br label %for.inc
%tmp13 = load i32* %i, align 4
%idxprom9 = sext i32 %tmp13 to i64
%tmp14 = load i32** @data, align 8
- %arrayidx10 = getelementptr inbounds i32* %tmp14, i64 %idxprom9
+ %arrayidx10 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom9
call void @fct0(i32* %arrayidx10)
br label %for.inc11
%tmp18 = load i32* %i, align 4
%idxprom17 = sext i32 %tmp18 to i64
%tmp19 = load i32** @data, align 8
- %arrayidx18 = getelementptr inbounds i32* %tmp19, i64 %idxprom17
+ %arrayidx18 = getelementptr inbounds i32, i32* %tmp19, i64 %idxprom17
call void @fct0(i32* %arrayidx18)
br label %for.inc19
define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4, !noalias !3
- %arrayidx.i = getelementptr inbounds float* %a, i64 5
+ %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !alias.scope !7, !noalias !8
- %arrayidx1.i = getelementptr inbounds float* %b, i64 8
+ %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1.i, align 4, !alias.scope !8, !noalias !7
%1 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
}
; CHECK: define void @foo(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
; CHECK: entry:
; CHECK: %0 = load float* %c, align 4, !noalias !6
-; CHECK: %arrayidx.i.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i.i, align 4, !alias.scope !12, !noalias !13
-; CHECK: %arrayidx1.i.i = getelementptr inbounds float* %b, i64 8
+; CHECK: %arrayidx1.i.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %0, float* %arrayidx1.i.i, align 4, !alias.scope !14, !noalias !15
; CHECK: %1 = load float* %c, align 4, !noalias !16
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx.i, align 4, !noalias !16
; CHECK: %2 = load float* %a, align 4, !alias.scope !16, !noalias !17
-; CHECK: %arrayidx.i.i1 = getelementptr inbounds float* %b, i64 5
+; CHECK: %arrayidx.i.i1 = getelementptr inbounds float, float* %b, i64 5
; CHECK: store float %2, float* %arrayidx.i.i1, align 4, !alias.scope !21, !noalias !22
-; CHECK: %arrayidx1.i.i2 = getelementptr inbounds float* %b, i64 8
+; CHECK: %arrayidx1.i.i2 = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %2, float* %arrayidx1.i.i2, align 4, !alias.scope !23, !noalias !24
; CHECK: %3 = load float* %a, align 4, !alias.scope !16
-; CHECK: %arrayidx.i3 = getelementptr inbounds float* %b, i64 7
+; CHECK: %arrayidx.i3 = getelementptr inbounds float, float* %b, i64 7
; CHECK: store float %3, float* %arrayidx.i3, align 4, !alias.scope !16
; CHECK: ret void
; CHECK: }
define void @hello(float* noalias nocapture %a, float* nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 5
+ %arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
ret void
}
entry:
tail call void @hello(float* %a, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
; CHECK: entry:
; CHECK: %0 = load float* %c, align 4, !noalias !0
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4, !alias.scope !0
; CHECK: %1 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
define void @hello2(float* noalias nocapture %a, float* noalias nocapture %b, float* nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 5
+ %arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds float* %b, i64 8
+ %arrayidx1 = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1, align 4
ret void
}
entry:
tail call void @hello2(float* %a, float* %b, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
; CHECK: entry:
; CHECK: %0 = load float* %c, align 4, !noalias !3
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4, !alias.scope !7, !noalias !8
-; CHECK: %arrayidx1.i = getelementptr inbounds float* %b, i64 8
+; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %0, float* %arrayidx1.i, align 4, !alias.scope !8, !noalias !7
; CHECK: %1 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
define void @hello(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 5
+ %arrayidx = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx, align 4
ret void
}
entry:
tail call void @hello(float* %a, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: define void @foo(float* noalias nocapture %a, float* noalias nocapture readonly %c) #0 {
; CHECK: entry:
; CHECK: %0 = load float* %c, align 4, !alias.scope !0, !noalias !3
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i, align 4, !alias.scope !3, !noalias !0
; CHECK: %1 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
define void @hello2(float* noalias nocapture %a, float* noalias nocapture %b, float* nocapture readonly %c) #0 {
entry:
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 6
+ %arrayidx = getelementptr inbounds float, float* %a, i64 6
store float %0, float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds float* %b, i64 8
+ %arrayidx1 = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1, align 4
ret void
}
tail call void @foo(float* %a, float* %c)
tail call void @hello2(float* %a, float* %b, float* %c)
%0 = load float* %c, align 4
- %arrayidx = getelementptr inbounds float* %a, i64 7
+ %arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %0, float* %arrayidx, align 4
ret void
}
; CHECK: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
; CHECK: entry:
; CHECK: %0 = load float* %c, align 4, !alias.scope !5, !noalias !10
-; CHECK: %arrayidx.i.i = getelementptr inbounds float* %a, i64 5
+; CHECK: %arrayidx.i.i = getelementptr inbounds float, float* %a, i64 5
; CHECK: store float %0, float* %arrayidx.i.i, align 4, !alias.scope !10, !noalias !5
; CHECK: %1 = load float* %c, align 4, !alias.scope !13, !noalias !14
-; CHECK: %arrayidx.i = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx.i = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %1, float* %arrayidx.i, align 4, !alias.scope !14, !noalias !13
; CHECK: %2 = load float* %c, align 4, !noalias !15
-; CHECK: %arrayidx.i1 = getelementptr inbounds float* %a, i64 6
+; CHECK: %arrayidx.i1 = getelementptr inbounds float, float* %a, i64 6
; CHECK: store float %2, float* %arrayidx.i1, align 4, !alias.scope !19, !noalias !20
-; CHECK: %arrayidx1.i = getelementptr inbounds float* %b, i64 8
+; CHECK: %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
; CHECK: store float %2, float* %arrayidx1.i, align 4, !alias.scope !20, !noalias !19
; CHECK: %3 = load float* %c, align 4
-; CHECK: %arrayidx = getelementptr inbounds float* %a, i64 7
+; CHECK: %arrayidx = getelementptr inbounds float, float* %a, i64 7
; CHECK: store float %3, float* %arrayidx, align 4
; CHECK: ret void
; CHECK: }
; CHECK: ret i32
%ptr = alloca i32
- %ptr1 = getelementptr inbounds i32* %ptr, i32 0
- %ptr2 = getelementptr inbounds i32* %ptr, i32 42
+ %ptr1 = getelementptr inbounds i32, i32* %ptr, i32 0
+ %ptr2 = getelementptr inbounds i32, i32* %ptr, i32 42
%result = call i32 @inner1(i32* %ptr1, i32* %ptr2)
ret i32 %result
}
; CHECK: call i32 @inner2
; CHECK: ret i32
- %ptr1 = getelementptr i32* %ptr, i32 0
- %ptr2 = getelementptr i32* %ptr, i32 42
+ %ptr1 = getelementptr i32, i32* %ptr, i32 0
+ %ptr2 = getelementptr i32, i32* %ptr, i32 42
%result = call i32 @inner2(i32* %ptr1, i32* %ptr2)
ret i32 %result
}
%p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp.upgrd.1 = getelementptr { i64 }* %tmp, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr { i64 }, { i64 }* %tmp, i64 0, i32 0 ; <i64*> [#uses=1]
store i64 %p1.0, i64* %tmp.upgrd.1
%tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp2 = getelementptr { i64 }* %tmp1, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp2 = getelementptr { i64 }, { i64 }* %tmp1, i64 0, i32 0 ; <i64*> [#uses=1]
store i64 %p2.0, i64* %tmp2
%tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp4 = getelementptr { i64 }* %tmp3, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr { i64 }, { i64 }* %tmp3, i64 0, i32 0 ; <i64*> [#uses=1]
store i64 %p3.0, i64* %tmp4
%tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
%tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp6 = getelementptr { i64 }* %tmp5, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp6 = getelementptr { i64 }, { i64 }* %tmp5, i64 0, i32 0 ; <i64*> [#uses=1]
%tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
%tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp8 = getelementptr { i64 }* %tmp7, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i64 0, i32 0 ; <i64*> [#uses=1]
%tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
%tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp11 = getelementptr { i64 }* %tmp10, i64 0, i32 0 ; <i64*> [#uses=1]
+ %tmp11 = getelementptr { i64 }, { i64 }* %tmp10, i64 0, i32 0 ; <i64*> [#uses=1]
%tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
%tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
br i1 %tmp.upgrd.2, label %cond_true, label %cond_false
%p2_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
%p3_addr = alloca %struct.point ; <%struct.point*> [#uses=2]
%tmp = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp.upgrd.1 = getelementptr { i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr { i64 }, { i64 }* %tmp, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %p1.0, i64* %tmp.upgrd.1
%tmp1 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp2 = getelementptr { i64 }* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp2 = getelementptr { i64 }, { i64 }* %tmp1, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %p2.0, i64* %tmp2
%tmp3 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp4 = getelementptr { i64 }* %tmp3, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp4 = getelementptr { i64 }, { i64 }* %tmp3, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %p3.0, i64* %tmp4
%tmp.upgrd.2 = icmp eq i32 %direction, 0 ; <i1> [#uses=1]
%tmp5 = bitcast %struct.point* %p1_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp6 = getelementptr { i64 }* %tmp5, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp6 = getelementptr { i64 }, { i64 }* %tmp5, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp.upgrd.3 = load i64* %tmp6 ; <i64> [#uses=1]
%tmp7 = bitcast %struct.point* %p2_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp8 = getelementptr { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
%tmp10 = bitcast %struct.point* %p3_addr to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp11 = getelementptr { i64 }* %tmp10, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp11 = getelementptr { i64 }, { i64 }* %tmp10, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp12 = load i64* %tmp11 ; <i64> [#uses=1]
%tmp13 = call i32 @determinant( i64 %tmp.upgrd.3, i64 %tmp9, i64 %tmp12 ) ; <i32> [#uses=2]
%tmp14 = icmp slt i32 %tmp13, 0 ; <i1> [#uses=1]
ret i1 false
cond_true: ; preds = %newFuncRoot
- %tmp15 = getelementptr [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
+ %tmp15 = getelementptr [17 x i32], [17 x i32]* @r, i32 0, i32 %tmp12.reload ; <i32*> [#uses=1]
%tmp16 = load i32* %tmp15 ; <i32> [#uses=4]
%tmp18 = icmp slt i32 %tmp16, -31 ; <i1> [#uses=1]
%tmp21 = icmp sgt i32 %tmp16, 31 ; <i1> [#uses=1]
define i32 @main(i32 %x, i8** %a) {
entry:
- %tmp = getelementptr [6 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp = getelementptr [6 x i8], [6 x i8]* @str, i32 0, i64 0 ; <i8*> [#uses=1]
%tmp1 = load i8** %a ; <i8*> [#uses=1]
%tmp2 = ptrtoint i8* %tmp1 to i32 ; <i32> [#uses=1]
%tmp3 = zext i32 %tmp2 to i64 ; <i64> [#uses=1]
define i32 @main() {
entry:
%u = alloca %struct..1anon, align 8 ; <%struct..1anon*> [#uses=4]
- %tmp1 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp1 = getelementptr %struct..1anon, %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
store double 0x7FF0000000000000, double* %tmp1
- %tmp3 = getelementptr %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp3 = getelementptr %struct..1anon, %struct..1anon* %u, i32 0, i32 0 ; <double*> [#uses=1]
%tmp34 = bitcast double* %tmp3 to %struct..0anon* ; <%struct..0anon*> [#uses=1]
- %tmp5 = getelementptr %struct..0anon* %tmp34, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp5 = getelementptr %struct..0anon, %struct..0anon* %tmp34, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp6 = load i32* %tmp5 ; <i32> [#uses=1]
%tmp7 = shl i32 %tmp6, 1 ; <i32> [#uses=1]
%tmp8 = lshr i32 %tmp7, 21 ; <i32> [#uses=1]
define i8* @foo(%struct.anon* %deviceRef, %struct.abc* %pCap) {
entry:
%tmp1 = bitcast %struct.anon* %deviceRef to %struct.def*
- %tmp3 = getelementptr %struct.def* %tmp1, i32 0, i32 1
+ %tmp3 = getelementptr %struct.def, %struct.def* %tmp1, i32 0, i32 1
%tmp35 = bitcast %struct.abc* %tmp3 to i8*
ret i8* %tmp35
}
%P_addr = alloca i8*
store i8* %P, i8** %P_addr
%tmp = load i8** %P_addr, align 4
- %tmp1 = getelementptr [4 x i8]* @.str, i32 0, i32 0
+ %tmp1 = getelementptr [4 x i8], [4 x i8]* @.str, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* %tmp1, i32 4, i32 1, i1 false)
br label %return
bb29: ; preds = %bb62
%tmp322 = bitcast %struct.Ray* %tmp3 to %struct.Vec* ; <%struct.Vec*> [#uses=1]
- %tmp322.0 = getelementptr %struct.Vec* %tmp322, i32 0, i32 0 ; <double*> [#uses=1]
+ %tmp322.0 = getelementptr %struct.Vec, %struct.Vec* %tmp322, i32 0, i32 0 ; <double*> [#uses=1]
store double 0.000000e+00, double* %tmp322.0
%tmp57 = call double @_Z9ray_traceRK3VecRK3RayRK5Scene( %struct.Vec* null, %struct.Ray* %tmp3, %struct.Scene* null ) ; <double> [#uses=0]
br label %bb62
%tmp17 = mul i32 %tmp5, 4 ; <i32> [#uses=1]
%tmp18 = alloca i8, i32 %tmp17 ; <i8*> [#uses=1]
%tmp1819 = bitcast i8* %tmp18 to i32* ; <i32*> [#uses=2]
- %tmp21 = getelementptr i32* %tmp1819, i32 0 ; <i32*> [#uses=1]
+ %tmp21 = getelementptr i32, i32* %tmp1819, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp21, align 4
%tmp2223 = bitcast i32* %tmp1819 to i8* ; <i8*> [#uses=1]
store volatile i8* %tmp2223, i8** @p, align 4
define i8* @foo([100 x {i8,i8,i8}]* %x) {
entry:
%p = bitcast [100 x {i8,i8,i8}]* %x to i8*
- %q = getelementptr i8* %p, i32 -4
+ %q = getelementptr i8, i8* %p, i32 -4
ret i8* %q
}
entry:
%FRAME.0 = alloca %struct.FRAME.nest, align 8 ; <%struct.FRAME.nest*> [#uses=3]
%TRAMP.216 = alloca [10 x i8], align 16 ; <[10 x i8]*> [#uses=1]
- %TRAMP.216.sub = getelementptr [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
- %tmp3 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %TRAMP.216.sub = getelementptr [10 x i8], [10 x i8]* %TRAMP.216, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr %struct.FRAME.nest, %struct.FRAME.nest* %FRAME.0, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 %n, i32* %tmp3, align 8
%FRAME.06 = bitcast %struct.FRAME.nest* %FRAME.0 to i8* ; <i8*> [#uses=1]
call void @llvm.init.trampoline( i8* %TRAMP.216.sub, i8* bitcast (i32 (%struct.FRAME.nest*, ...)* @f to i8*), i8* %FRAME.06 ) ; <i8*> [#uses=1]
%tramp = call i8* @llvm.adjust.trampoline( i8* %TRAMP.216.sub)
- %tmp7 = getelementptr %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (...)**> [#uses=1]
+ %tmp7 = getelementptr %struct.FRAME.nest, %struct.FRAME.nest* %FRAME.0, i32 0, i32 1 ; <i32 (...)**> [#uses=1]
%tmp89 = bitcast i8* %tramp to i32 (...)* ; <i32 (...)*> [#uses=2]
store i32 (...)* %tmp89, i32 (...)** %tmp7, align 8
%tmp2.i = call i32 (...)* %tmp89( i32 zeroext 0 ) ; <i32> [#uses=1]
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
- %tmp3 = getelementptr i8* %tmp1, i32 1 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr i8, i8* %tmp1, i32 1 ; <i8*> [#uses=1]
store i8 0, i8* %tmp3, align 1
- %tmp5 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ %tmp5 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
store i8 1, i8* %tmp5, align 1
%tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
- %tmp9 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ %tmp9 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
store i8 0, i8* %tmp9, align 1
%tmp11 = call i32 (...)* @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
ret i32 %tmp7
entry:
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
%tmp1 = call i8* @malloc( i32 10 ) nounwind ; <i8*> [#uses=5]
- %tmp3 = getelementptr i8* %tmp1, i32 1 ; <i8*> [#uses=1]
+ %tmp3 = getelementptr i8, i8* %tmp1, i32 1 ; <i8*> [#uses=1]
store i8 0, i8* %tmp3, align 1
- %tmp5 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ %tmp5 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
store i8 1, i8* %tmp5, align 1
; CHECK: store
; CHECK: store
; CHECK-NEXT: strlen
; CHECK-NEXT: store
%tmp7 = call i32 @strlen( i8* %tmp1 ) nounwind readonly ; <i32> [#uses=1]
- %tmp9 = getelementptr i8* %tmp1, i32 0 ; <i8*> [#uses=1]
+ %tmp9 = getelementptr i8, i8* %tmp1, i32 0 ; <i8*> [#uses=1]
store i8 0, i8* %tmp9, align 1
%tmp11 = call i32 (...)* @b( i8* %tmp1 ) nounwind ; <i32> [#uses=0]
br label %return
invcont37: ; preds = %invcont31
%tmp39 = load i32** %tmp38, align 8 ; <i32*> [#uses=1]
- %tmp41 = getelementptr %"struct.std::ctype<char>"* %this, i32 0, i32 4 ; <i32**> [#uses=1]
+ %tmp41 = getelementptr %"struct.std::ctype<char>", %"struct.std::ctype<char>"* %this, i32 0, i32 4 ; <i32**> [#uses=1]
store i32* %tmp39, i32** %tmp41, align 8
ret void
%tmp27 = alloca i32, i32 %tmp5 ; <i32*> [#uses=3]
%tmpcast = bitcast i32* %tmp27 to i8* ; <i8*> [#uses=1]
store i32 1, i32* %tmp27, align 4
- %tmp34 = getelementptr i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]
+ %tmp34 = getelementptr i32, i32* %tmp27, i32 %tmp4 ; <i32*> [#uses=1]
store i32 2, i32* %tmp34, align 4
store volatile i8* %tmpcast, i8** @p, align 4
%exitcond = icmp eq i32 %tmp3857, 999999 ; <i1> [#uses=1]
br label %bb
bb:
- %g1 = getelementptr i8* %x, i32 0
+ %g1 = getelementptr i8, i8* %x, i32 0
%l1 = load i8* %g1, align 1
%s1 = sub i8 %l1, 6
%c1 = icmp ugt i8 %s1, 2
%iospec = alloca %struct.Key ; <%struct.Key*> [#uses=3]
%ret = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
- %0 = getelementptr %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=2]
- %1 = getelementptr { i32, i32 }* %0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %0 = getelementptr %struct.Key, %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=2]
+ %1 = getelementptr { i32, i32 }, { i32, i32 }* %0, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %1, align 4
- %2 = getelementptr { i32, i32 }* %0, i32 0, i32 1 ; <i32*> [#uses=1]
+ %2 = getelementptr { i32, i32 }, { i32, i32 }* %0, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 0, i32* %2, align 4
- %3 = getelementptr %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=1]
+ %3 = getelementptr %struct.Key, %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=1]
%4 = bitcast { i32, i32 }* %3 to i64* ; <i64*> [#uses=1]
store i64 %key_token2, i64* %4, align 4
%5 = call i32 (...)* @foo(%struct.Key* byval align 4 %iospec, i32* %ret) nounwind ; <i32> [#uses=0]
define void @handle_event(%struct.inode* %bar) nounwind {
entry:
- %0 = getelementptr %struct.inode* %bar, i64 -1, i32 1, i32 1 ; <%struct.rwlock_t*> [#uses=1]
+ %0 = getelementptr %struct.inode, %struct.inode* %bar, i64 -1, i32 1, i32 1 ; <%struct.rwlock_t*> [#uses=1]
%1 = bitcast %struct.rwlock_t* %0 to i32* ; <i32*> [#uses=1]
store i32 1, i32* %1, align 4
ret void
%4 = alloca i32
%"alloca point" = bitcast i32 0 to i32
store i32 42, i32* %4, align 4
- %5 = getelementptr %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
- %6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >"* %5, i32 0, i32 0
- %7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %6, i32 0, i32 1
+ %5 = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
+ %6 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", %"struct.std::_Vector_base<int,std::allocator<int> >"* %5, i32 0, i32 0
+ %7 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %6, i32 0, i32 1
%8 = load i32** %7, align 4
- %9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
+ %9 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
store i32* %8, i32** %9, align 4
- %10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
+ %10 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %3, i32 0, i32 0
%11 = load i32** %10, align 4
%tmp2.i = ptrtoint i32* %11 to i32
%tmp1.i = inttoptr i32 %tmp2.i to i32*
%tmp3 = ptrtoint i32* %tmp1.i to i32
%tmp2 = inttoptr i32 %tmp3 to i32*
- %12 = getelementptr %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
- %13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >"* %12, i32 0, i32 0
- %14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %13, i32 0, i32 0
+ %12 = getelementptr %"struct.std::vector<int,std::allocator<int> >", %"struct.std::vector<int,std::allocator<int> >"* %X, i32 0, i32 0
+ %13 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >", %"struct.std::_Vector_base<int,std::allocator<int> >"* %12, i32 0, i32 0
+ %14 = getelementptr %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl", %"struct.std::_Vector_base<int,std::allocator<int> >::_Vector_impl"* %13, i32 0, i32 0
%15 = load i32** %14, align 4
- %16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
+ %16 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
store i32* %15, i32** %16, align 4
- %17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
+ %17 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %0, i32 0, i32 0
%18 = load i32** %17, align 4
%tmp2.i17 = ptrtoint i32* %18 to i32
%tmp1.i18 = inttoptr i32 %tmp2.i17 to i32*
%tmp8 = ptrtoint i32* %tmp1.i18 to i32
%tmp6 = inttoptr i32 %tmp8 to i32*
- %19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
+ %19 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
store i32* %tmp6, i32** %19
- %20 = getelementptr %"struct.std::bidirectional_iterator_tag"* %1, i32 0, i32 0
+ %20 = getelementptr %"struct.std::bidirectional_iterator_tag", %"struct.std::bidirectional_iterator_tag"* %1, i32 0, i32 0
%21 = load i8* %20, align 1
%22 = or i8 %21, 0
%23 = or i8 %22, 0
%24 = or i8 %23, 0
- %25 = getelementptr %"struct.std::bidirectional_iterator_tag"* %2, i32 0, i32 0
+ %25 = getelementptr %"struct.std::bidirectional_iterator_tag", %"struct.std::bidirectional_iterator_tag"* %2, i32 0, i32 0
store i8 0, i8* %25, align 1
- %elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
+ %elt.i = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i, i32 0, i32 0
%val.i = load i32** %elt.i
%tmp.i = bitcast %"struct.std::bidirectional_iterator_tag"* %unnamed_arg.i to i8*
%tmp9.i = bitcast %"struct.std::bidirectional_iterator_tag"* %2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp.i, i8* %tmp9.i, i64 1, i32 1, i1 false)
- %26 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %26 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %val.i, i32** %26
- %27 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %27 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
store i32* %tmp2, i32** %27
- %28 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %28 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
%29 = load i32** %28, align 4
%30 = ptrtoint i32* %29 to i32
- %31 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %31 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%32 = load i32** %31, align 4
%33 = ptrtoint i32* %32 to i32
%34 = sub i32 %30, %33
br label %bb12.i.i
bb.i.i: ; preds = %bb12.i.i
- %37 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %37 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%38 = load i32** %37, align 4
%39 = load i32* %38, align 4
%40 = load i32* %4, align 4
br i1 %toBool.i.i, label %bb1.i.i, label %bb2.i.i
bb1.i.i: ; preds = %bb.i.i
- %43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %43 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%44 = load i32** %43, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb2.i.i: ; preds = %bb.i.i
- %45 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %45 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%46 = load i32** %45, align 4
- %47 = getelementptr i32* %46, i64 1
- %48 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %47 = getelementptr i32, i32* %46, i64 1
+ %48 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %47, i32** %48, align 4
- %49 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %49 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%50 = load i32** %49, align 4
%51 = load i32* %50, align 4
%52 = load i32* %4, align 4
br i1 %toBool3.i.i, label %bb4.i.i, label %bb5.i.i
bb4.i.i: ; preds = %bb2.i.i
- %55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %55 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%56 = load i32** %55, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb5.i.i: ; preds = %bb2.i.i
- %57 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %57 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%58 = load i32** %57, align 4
- %59 = getelementptr i32* %58, i64 1
- %60 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %59 = getelementptr i32, i32* %58, i64 1
+ %60 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %59, i32** %60, align 4
- %61 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %61 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%62 = load i32** %61, align 4
%63 = load i32* %62, align 4
%64 = load i32* %4, align 4
br i1 %toBool6.i.i, label %bb7.i.i, label %bb8.i.i
bb7.i.i: ; preds = %bb5.i.i
- %67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %67 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%68 = load i32** %67, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb8.i.i: ; preds = %bb5.i.i
- %69 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %69 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%70 = load i32** %69, align 4
- %71 = getelementptr i32* %70, i64 1
- %72 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %71 = getelementptr i32, i32* %70, i64 1
+ %72 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %71, i32** %72, align 4
- %73 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %73 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%74 = load i32** %73, align 4
%75 = load i32* %74, align 4
%76 = load i32* %4, align 4
br i1 %toBool9.i.i, label %bb10.i.i, label %bb11.i.i
bb10.i.i: ; preds = %bb8.i.i
- %79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %79 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%80 = load i32** %79, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb11.i.i: ; preds = %bb8.i.i
- %81 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %81 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%82 = load i32** %81, align 4
- %83 = getelementptr i32* %82, i64 1
- %84 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %83 = getelementptr i32, i32* %82, i64 1
+ %84 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %83, i32** %84, align 4
%85 = sub i32 %__trip_count.0.i.i, 1
br label %bb12.i.i
br i1 %86, label %bb.i.i, label %bb13.i.i
bb13.i.i: ; preds = %bb12.i.i
- %87 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %87 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
%88 = load i32** %87, align 4
%89 = ptrtoint i32* %88 to i32
- %90 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %90 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%91 = load i32** %90, align 4
%92 = ptrtoint i32* %91 to i32
%93 = sub i32 %89, %92
]
bb14.i.i: ; preds = %bb13.i.i
- %95 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %95 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%96 = load i32** %95, align 4
%97 = load i32* %96, align 4
%98 = load i32* %4, align 4
br i1 %toBool15.i.i, label %bb16.i.i, label %bb17.i.i
bb16.i.i: ; preds = %bb14.i.i
- %101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %101 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%102 = load i32** %101, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb17.i.i: ; preds = %bb14.i.i
- %103 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %103 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%104 = load i32** %103, align 4
- %105 = getelementptr i32* %104, i64 1
- %106 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %105 = getelementptr i32, i32* %104, i64 1
+ %106 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %105, i32** %106, align 4
br label %bb18.i.i
bb18.i.i: ; preds = %bb17.i.i, %bb13.i.i
- %107 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %107 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%108 = load i32** %107, align 4
%109 = load i32* %108, align 4
%110 = load i32* %4, align 4
br i1 %toBool19.i.i, label %bb20.i.i, label %bb21.i.i
bb20.i.i: ; preds = %bb18.i.i
- %113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %113 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%114 = load i32** %113, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb21.i.i: ; preds = %bb18.i.i
- %115 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %115 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%116 = load i32** %115, align 4
- %117 = getelementptr i32* %116, i64 1
- %118 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %117 = getelementptr i32, i32* %116, i64 1
+ %118 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %117, i32** %118, align 4
br label %bb22.i.i
bb22.i.i: ; preds = %bb21.i.i, %bb13.i.i
- %119 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %119 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%120 = load i32** %119, align 4
%121 = load i32* %120, align 4
%122 = load i32* %4, align 4
br i1 %toBool23.i.i, label %bb24.i.i, label %bb25.i.i
bb24.i.i: ; preds = %bb22.i.i
- %125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %125 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%126 = load i32** %125, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
bb25.i.i: ; preds = %bb22.i.i
- %127 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %127 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
%128 = load i32** %127, align 4
- %129 = getelementptr i32* %128, i64 1
- %130 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
+ %129 = getelementptr i32, i32* %128, i64 1
+ %130 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__first_addr.i.i, i32 0, i32 0
store i32* %129, i32** %130, align 4
br label %bb26.i.i
bb26.i.i: ; preds = %bb25.i.i, %bb13.i.i
- %131 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
+ %131 = getelementptr %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >", %"struct.__gnu_cxx::__normal_iterator<int*,std::vector<int, std::allocator<int> > >"* %__last_addr.i.i, i32 0, i32 0
%132 = load i32** %131, align 4
br label %_ZSt4findIN9__gnu_cxx17__normal_iteratorIPiSt6vectorIiSaIiEEEEiET_S7_S7_RKT0_.exit
%tmp15 = bitcast [0 x [0 x i32]]* %tmp8 to i8* ; <i8*> [#uses=1]
%tmp16 = mul i32 %tmp14, 4 ; <i32> [#uses=1]
%tmp17 = mul i32 1, %tmp16 ; <i32> [#uses=1]
- %tmp18 = getelementptr i8* %tmp15, i32 %tmp17 ; <i8*> [#uses=1]
+ %tmp18 = getelementptr i8, i8* %tmp15, i32 %tmp17 ; <i8*> [#uses=1]
%tmp19 = bitcast i8* %tmp18 to [0 x i32]* ; <[0 x i32]*> [#uses=1]
%tmp20 = bitcast [0 x i32]* %tmp19 to i32* ; <i32*> [#uses=1]
- %tmp21 = getelementptr i32* %tmp20, i32 0 ; <i32*> [#uses=1]
+ %tmp21 = getelementptr i32, i32* %tmp20, i32 0 ; <i32*> [#uses=1]
%tmp22 = load i32* %tmp21, align 1 ; <i32> [#uses=1]
%tmp23 = icmp eq i32 %tmp22, 4 ; <i1> [#uses=1]
%tmp24 = zext i1 %tmp23 to i8 ; <i8> [#uses=1]
%tobool272 = icmp ne i32 %isfuncname, 0
%cond276 = select i1 %tobool272, i8* getelementptr inbounds ([2 x i8]* @.str254, i32 0, i32 0), i8* getelementptr inbounds ([3 x i8]* @.str557, i32 0, i32 0) ; <i8*> [#uses=4]
%cmp.i504 = icmp eq i8* %cond276, null
- %rval = getelementptr i8* %cond276, i1 %cmp.i504
+ %rval = getelementptr i8, i8* %cond276, i1 %cmp.i504
ret i8* %rval
}
define {}* @foo({}* %x, i32 %n) {
; CHECK-LABEL: @foo(
; CHECK-NOT: getelementptr
- %p = getelementptr {}* %x, i32 %n
+ %p = getelementptr {}, {}* %x, i32 %n
ret {}* %p
}
define i8* @bar(i64 %n, {{}, [0 x {[0 x i8]}]}* %p) {
; CHECK-LABEL: @bar(
- %g = getelementptr {{}, [0 x {[0 x i8]}]}* %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
+ %g = getelementptr {{}, [0 x {[0 x i8]}]}, {{}, [0 x {[0 x i8]}]}* %p, i64 %n, i32 1, i64 %n, i32 0, i64 %n
; CHECK: %p, i64 0, i32 1, i64 0, i32 0, i64 %n
ret i8* %g
}
%tmp2 = add i32 %argc, 1879048192
%p = alloca i8
; CHECK: getelementptr
- %p1 = getelementptr i8* %p, i32 %tmp1
+ %p1 = getelementptr i8, i8* %p, i32 %tmp1
; CHECK: getelementptr
- %p2 = getelementptr i8* %p, i32 %tmp2
+ %p2 = getelementptr i8, i8* %p, i32 %tmp2
%cmp = icmp ult i8* %p1, %p2
br i1 %cmp, label %bbtrue, label %bbfalse
bbtrue: ; preds = %entry
; Most common case
define i32 @test0(i32 %n) {
%alloca = alloca [10 x i8], align 16
- %gep = getelementptr [10 x i8]* %alloca, i32 0, i32 0
+ %gep = getelementptr [10 x i8], [10 x i8]* %alloca, i32 0, i32 0
call void @llvm.init.trampoline(i8* %gep, i8* bitcast (i32 (i8*, i32)* @f to i8*),
i8* null)
%tramp = call i8* @llvm.adjust.trampoline(i8* %gep)
define i32 @test4(i32 %n) {
%alloca = alloca [10 x i8], align 16
- %gep = getelementptr [10 x i8]* %alloca, i32 0, i32 0
+ %gep = getelementptr [10 x i8], [10 x i8]* %alloca, i32 0, i32 0
call void @llvm.init.trampoline(i8* %gep, i8* bitcast (i32 (i8*, i32)* @f to i8*),
i8* null)
define void @f() {
%1 = alloca [0 x i8], align 1
%2 = alloca [0 x i8], align 1024
- %3 = getelementptr inbounds [0 x i8]* %1, i64 0, i64 0
- %4 = getelementptr inbounds [0 x i8]* %2, i64 0, i64 0
+ %3 = getelementptr inbounds [0 x i8], [0 x i8]* %1, i64 0, i64 0
+ %4 = getelementptr inbounds [0 x i8], [0 x i8]* %2, i64 0, i64 0
store i8* %3, i8** @x, align 8
store i8* %4, i8** @y, align 8
ret void
; CHECK: llvm.memcpy
; CHECK: ret void
%A = alloca [100 x i8]
- %a = getelementptr inbounds [100 x i8]* %A, i64 0, i64 0
+ %a = getelementptr inbounds [100 x i8], [100 x i8]* %A, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* getelementptr inbounds ([100 x i8]* @G, i64 0, i32 0), i64 100, i32 4, i1 false)
call void @bar(i8* %a) readonly
ret void
define void @widget(%struct.hoge* nocapture %arg) nounwind uwtable ssp {
bb:
- %tmp = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 0
+ %tmp = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 0
br i1 undef, label %bb1, label %bb17
bb1: ; preds = %bb
%bf.set10 = or i96 %bf.set5, %bf.clear
%retval.0.cast7 = bitcast %struct._my_struct* %retval to i96*
store i96 %bf.set10, i96* %retval.0.cast7, align 8
- %retval.12.idx8 = getelementptr inbounds %struct._my_struct* %retval, i64 0, i32 1
+ %retval.12.idx8 = getelementptr inbounds %struct._my_struct, %struct._my_struct* %retval, i64 0, i32 1
%retval.12.cast9 = bitcast [4 x i8]* %retval.12.idx8 to i32*
store i32 %k.sroa.1.12.copyload, i32* %retval.12.cast9, align 4
%trunc = trunc i96 %bf.set10 to i64
%.fca.0.insert = insertvalue { i64, i64 } undef, i64 %trunc, 0
- %retval.8.idx12 = getelementptr inbounds %struct._my_struct* %retval, i64 0, i32 0, i64 8
+ %retval.8.idx12 = getelementptr inbounds %struct._my_struct, %struct._my_struct* %retval, i64 0, i32 0, i64 8
%retval.8.cast13 = bitcast i8* %retval.8.idx12 to i64*
%retval.8.load14 = load i64* %retval.8.cast13, align 8
%.fca.1.insert = insertvalue { i64, i64 } %.fca.0.insert, i64 %retval.8.load14, 1
store i32 1, i32* %1
add i32 %.val24, -16
inttoptr i32 %2 to i32*
- getelementptr i32* %3, i32 1
+ getelementptr i32, i32* %3, i32 1
load i32* %4
tail call i32 @callee( i32 %5 )
ret void
define i32 @canonicalize_addrspacecast([16 x i32] addrspace(1)* %arr) {
; CHECK-LABEL: @canonicalize_addrspacecast(
-; CHECK-NEXT: getelementptr inbounds [16 x i32] addrspace(1)* %arr, i32 0, i32 0
+; CHECK-NEXT: getelementptr inbounds [16 x i32], [16 x i32] addrspace(1)* %arr, i32 0, i32 0
; CHECK-NEXT: addrspacecast i32 addrspace(1)* %{{[a-zA-Z0-9]+}} to i32*
; CHECK-NEXT: load i32*
; CHECK-NEXT: ret i32
loop.body:
%i = phi i32 [ 0, %entry ], [ %i.inc, %loop.body ]
%sum = phi i32 [ 0, %entry ], [ %sum.inc, %loop.body]
- %ptr = getelementptr i8* %alloca, i32 %i
+ %ptr = getelementptr i8, i8* %alloca, i32 %i
%load = load i8* %ptr
%ext = zext i8 %load to i32
%sum.inc = add i32 %sum, %ext
bb1:
%j = phi i64 [ 0, %bb7.outer ], [ %indvar.next, %bb1 ]
- %t4 = getelementptr [1001 x [20000 x double]]* @Nice, i64 0, i64 %i, i64 %j
+ %t4 = getelementptr [1001 x [20000 x double]], [1001 x [20000 x double]]* @Nice, i64 0, i64 %i, i64 %j
%q = bitcast double* %t4 to <2 x double>*
store <2 x double><double 0.0, double 0.0>, <2 x double>* %q, align 8
- %s4 = getelementptr [1001 x [20001 x double]]* @Awkward, i64 0, i64 %i, i64 %j
+ %s4 = getelementptr [1001 x [20001 x double]], [1001 x [20001 x double]]* @Awkward, i64 0, i64 %i, i64 %j
%r = bitcast double* %s4 to <2 x double>*
store <2 x double><double 0.0, double 0.0>, <2 x double>* %r, align 8
%i = phi i64 [ %indvar.next, %bb ], [ 20, %entry ]
%j = mul i64 %i, %v
%h = add i64 %j, %z
- %t8 = getelementptr double* %e, i64 %h
+ %t8 = getelementptr double, double* %e, i64 %h
%p = bitcast double* %t8 to <2 x double>*
store <2 x double><double 0.0, double 0.0>, <2 x double>* %p, align 8
%indvar.next = add i64 %i, 1
define <4 x i32> @test1(<4 x i32>* %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
%vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
define <4 x i32> @test1a(<4 x i32>* align 16 %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
%vl = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %hv)
define <4 x i32> @test2(<4 x i32>* %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
define <4 x i32> @test2a(<4 x i32>* align 16 %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvx(<4 x i32> %d, i8* %hv)
define <4 x i32> @test1l(<4 x i32>* %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
%vl = call <4 x i32> @llvm.ppc.altivec.lvxl(i8* %hv)
define <4 x i32> @test1la(<4 x i32>* align 16 %h) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
%vl = call <4 x i32> @llvm.ppc.altivec.lvxl(i8* %hv)
define <4 x i32> @test2l(<4 x i32>* %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvxl(<4 x i32> %d, i8* %hv)
define <4 x i32> @test2la(<4 x i32>* align 16 %h, <4 x i32> %d) #0 {
entry:
- %h1 = getelementptr <4 x i32>* %h, i64 1
+ %h1 = getelementptr <4 x i32>, <4 x i32>* %h, i64 1
%hv = bitcast <4 x i32>* %h1 to i8*
call void @llvm.ppc.altivec.stvxl(<4 x i32> %d, i8* %hv)
define <4 x double> @test1(<4 x float>* %h) #0 {
entry:
- %h1 = getelementptr <4 x float>* %h, i64 1
+ %h1 = getelementptr <4 x float>, <4 x float>* %h, i64 1
%hv = bitcast <4 x float>* %h1 to i8*
%vl = call <4 x double> @llvm.ppc.qpx.qvlfs(i8* %hv)
define <4 x double> @test1a(<4 x float>* align 16 %h) #0 {
entry:
- %h1 = getelementptr <4 x float>* %h, i64 1
+ %h1 = getelementptr <4 x float>, <4 x float>* %h, i64 1
%hv = bitcast <4 x float>* %h1 to i8*
%vl = call <4 x double> @llvm.ppc.qpx.qvlfs(i8* %hv)
define <4 x float> @test2(<4 x float>* %h, <4 x double> %d) #0 {
entry:
- %h1 = getelementptr <4 x float>* %h, i64 1
+ %h1 = getelementptr <4 x float>, <4 x float>* %h, i64 1
%hv = bitcast <4 x float>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
define <4 x float> @test2a(<4 x float>* align 16 %h, <4 x double> %d) #0 {
entry:
- %h1 = getelementptr <4 x float>* %h, i64 1
+ %h1 = getelementptr <4 x float>, <4 x float>* %h, i64 1
%hv = bitcast <4 x float>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfs(<4 x double> %d, i8* %hv)
define <4 x double> @test1l(<4 x double>* %h) #0 {
entry:
- %h1 = getelementptr <4 x double>* %h, i64 1
+ %h1 = getelementptr <4 x double>, <4 x double>* %h, i64 1
%hv = bitcast <4 x double>* %h1 to i8*
%vl = call <4 x double> @llvm.ppc.qpx.qvlfd(i8* %hv)
define <4 x double> @test1ln(<4 x double>* align 16 %h) #0 {
entry:
- %h1 = getelementptr <4 x double>* %h, i64 1
+ %h1 = getelementptr <4 x double>, <4 x double>* %h, i64 1
%hv = bitcast <4 x double>* %h1 to i8*
%vl = call <4 x double> @llvm.ppc.qpx.qvlfd(i8* %hv)
define <4 x double> @test1la(<4 x double>* align 32 %h) #0 {
entry:
- %h1 = getelementptr <4 x double>* %h, i64 1
+ %h1 = getelementptr <4 x double>, <4 x double>* %h, i64 1
%hv = bitcast <4 x double>* %h1 to i8*
%vl = call <4 x double> @llvm.ppc.qpx.qvlfd(i8* %hv)
define <4 x double> @test2l(<4 x double>* %h, <4 x double> %d) #0 {
entry:
- %h1 = getelementptr <4 x double>* %h, i64 1
+ %h1 = getelementptr <4 x double>, <4 x double>* %h, i64 1
%hv = bitcast <4 x double>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfd(<4 x double> %d, i8* %hv)
define <4 x double> @test2ln(<4 x double>* align 16 %h, <4 x double> %d) #0 {
entry:
- %h1 = getelementptr <4 x double>* %h, i64 1
+ %h1 = getelementptr <4 x double>, <4 x double>* %h, i64 1
%hv = bitcast <4 x double>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfd(<4 x double> %d, i8* %hv)
define <4 x double> @test2la(<4 x double>* align 32 %h, <4 x double> %d) #0 {
entry:
- %h1 = getelementptr <4 x double>* %h, i64 1
+ %h1 = getelementptr <4 x double>, <4 x double>* %h, i64 1
%hv = bitcast <4 x double>* %h1 to i8*
call void @llvm.ppc.qpx.qvstfd(<4 x double> %d, i8* %hv)
; CHECK-NOT: alloca
define void @test3() {
%A = alloca { i32 } ; <{ i32 }*> [#uses=1]
- %B = getelementptr { i32 }* %A, i32 0, i32 0 ; <i32*> [#uses=1]
+ %B = getelementptr { i32 }, { i32 }* %A, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 123, i32* %B
ret void
}
entry:
%a = alloca { i32 }
%b = alloca i32*
- %a.1 = getelementptr { i32 }* %a, i32 0, i32 0
+ %a.1 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
store i32 123, i32* %a.1
store i32* %a.1, i32** %b
%b.1 = bitcast i32** %b to i32*
store i32 123, i32* %b.1
- %a.2 = getelementptr { i32 }* %a, i32 0, i32 0
+ %a.2 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
store atomic i32 2, i32* %a.2 unordered, align 4
- %a.3 = getelementptr { i32 }* %a, i32 0, i32 0
+ %a.3 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
store atomic i32 3, i32* %a.3 release, align 4
- %a.4 = getelementptr { i32 }* %a, i32 0, i32 0
+ %a.4 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
store atomic i32 4, i32* %a.4 seq_cst, align 4
ret void
}
entry:
%a = alloca { i32 }
%b = alloca i32
- %a.1 = getelementptr { i32 }* %a, i32 0, i32 0
+ %a.1 = getelementptr { i32 }, { i32 }* %a, i32 0, i32 0
store volatile i32 123, i32* %a.1
tail call void @f(i32* %b)
ret void
define void @test8() {
; CHECK-LABEL: @test8(
; CHECK: alloca [100 x i32]
-; CHECK: getelementptr inbounds [100 x i32]* %x1, i64 0, i64 0
+; CHECK: getelementptr inbounds [100 x i32], [100 x i32]* %x1, i64 0, i64 0
; P32-LABEL: @test8(
; P32: alloca [100 x i32]
-; P32: getelementptr inbounds [100 x i32]* %x1, i32 0, i32 0
+; P32: getelementptr inbounds [100 x i32], [100 x i32]* %x1, i32 0, i32 0
; NODL-LABEL: @test8(
; NODL: alloca [100 x i32]
-; NODL: getelementptr inbounds [100 x i32]* %x1, i64 0, i64 0
+; NODL: getelementptr inbounds [100 x i32], [100 x i32]* %x1, i64 0, i64 0
%x = alloca i32, i32 100
call void (...)* @use(i32* %x)
ret void
%inalloca.save = call i8* @llvm.stacksave()
%argmem = alloca inalloca <{ %struct_type }>
; CHECK: alloca inalloca i64, align 8
- %0 = getelementptr inbounds <{ %struct_type }>* %argmem, i32 0, i32 0
+ %0 = getelementptr inbounds <{ %struct_type }>, <{ %struct_type }>* %argmem, i32 0, i32 0
%1 = bitcast %struct_type* %0 to i8*
%2 = bitcast %struct_type* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %1, i8* %2, i32 8, i32 4, i1 false)
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, 1
- %arrayidx5 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 16
%1 = trunc i64 %indvars.iv.next to i32
; CHECK-NOT: call void @llvm.assume
entry:
- %a = getelementptr inbounds %struct.s* %x, i64 0, i32 0
+ %a = getelementptr inbounds %struct.s, %struct.s* %x, i64 0, i32 0
%0 = load double** %a, align 8
%ptrint = ptrtoint double* %0 to i64
%maskedptr = and i64 %ptrint, 31
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next.1, %for.body ]
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds double* %0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %0, i64 %indvars.iv
%1 = load double* %arrayidx, align 16
%add = fadd double %1, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond)
store double %mul, double* %arrayidx, align 16
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx.1 = getelementptr inbounds double* %0, i64 %indvars.iv.next
+ %arrayidx.1 = getelementptr inbounds double, double* %0, i64 %indvars.iv.next
%2 = load double* %arrayidx.1, align 8
%add.1 = fadd double %2, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond)
}
define i8* @test13(i64 %A) {
- %c = getelementptr [0 x i8]* bitcast ([32832 x i8]* @inbuf to [0 x i8]*), i64 0, i64 %A ; <i8*> [#uses=1]
+ %c = getelementptr [0 x i8], [0 x i8]* bitcast ([32832 x i8]* @inbuf to [0 x i8]*), i64 0, i64 %A ; <i8*> [#uses=1]
ret i8* %c
-; CHECK: %c = getelementptr [32832 x i8]* @inbuf, i64 0, i64 %A
+; CHECK: %c = getelementptr [32832 x i8], [32832 x i8]* @inbuf, i64 0, i64 %A
; CHECK: ret i8* %c
}
define [4 x float]* @test27([9 x [4 x float]]* %A) {
%c = bitcast [9 x [4 x float]]* %A to [4 x float]* ; <[4 x float]*> [#uses=1]
ret [4 x float]* %c
-; CHECK: %c = getelementptr inbounds [9 x [4 x float]]* %A, i64 0, i64 0
+; CHECK: %c = getelementptr inbounds [9 x [4 x float]], [9 x [4 x float]]* %A, i64 0, i64 0
; CHECK: ret [4 x float]* %c
}
define float* @test28([4 x float]* %A) {
%c = bitcast [4 x float]* %A to float* ; <float*> [#uses=1]
ret float* %c
-; CHECK: %c = getelementptr inbounds [4 x float]* %A, i64 0, i64 0
+; CHECK: %c = getelementptr inbounds [4 x float], [4 x float]* %A, i64 0, i64 0
; CHECK: ret float* %c
}
; PR1263
define i32* @test41(i32* %tmp1) {
%tmp64 = bitcast i32* %tmp1 to { i32 }*
- %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0
+ %tmp65 = getelementptr { i32 }, { i32 }* %tmp64, i32 0, i32 0
ret i32* %tmp65
; CHECK-LABEL: @test41(
; CHECK: ret i32* %tmp1
define i32 addrspace(1)* @test41_addrspacecast_smaller(i32* %tmp1) {
%tmp64 = addrspacecast i32* %tmp1 to { i32 } addrspace(1)*
- %tmp65 = getelementptr { i32 } addrspace(1)* %tmp64, i32 0, i32 0
+ %tmp65 = getelementptr { i32 }, { i32 } addrspace(1)* %tmp64, i32 0, i32 0
ret i32 addrspace(1)* %tmp65
; CHECK-LABEL: @test41_addrspacecast_smaller(
; CHECK: addrspacecast i32* %tmp1 to i32 addrspace(1)*
define i32* @test41_addrspacecast_larger(i32 addrspace(1)* %tmp1) {
%tmp64 = addrspacecast i32 addrspace(1)* %tmp1 to { i32 }*
- %tmp65 = getelementptr { i32 }* %tmp64, i32 0, i32 0
+ %tmp65 = getelementptr { i32 }, { i32 }* %tmp64, i32 0, i32 0
ret i32* %tmp65
; CHECK-LABEL: @test41_addrspacecast_larger(
; CHECK: addrspacecast i32 addrspace(1)* %tmp1 to i32*
; CHECK-LABEL: @test68(
%o = mul i64 %i, 12
%q = bitcast %s* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o
-; CHECK-NEXT: getelementptr %s*
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o
+; CHECK-NEXT: getelementptr %s, %s*
%r = bitcast i8* %pp to %s*
%l = load %s* %r
; CHECK-NEXT: load %s*
; addrspacecasts should be eliminated.
define %s @test68_addrspacecast(%s* %p, i64 %i) {
; CHECK-LABEL: @test68_addrspacecast(
-; CHECK-NEXT: getelementptr %s*
+; CHECK-NEXT: getelementptr %s, %s*
; CHECK-NEXT: load %s*
; CHECK-NEXT: ret %s
%o = mul i64 %i, 12
%q = addrspacecast %s* %p to i8 addrspace(2)*
- %pp = getelementptr inbounds i8 addrspace(2)* %q, i64 %o
+ %pp = getelementptr inbounds i8, i8 addrspace(2)* %q, i64 %o
%r = addrspacecast i8 addrspace(2)* %pp to %s*
%l = load %s* %r
ret %s %l
define %s @test68_addrspacecast_2(%s* %p, i64 %i) {
; CHECK-LABEL: @test68_addrspacecast_2(
-; CHECK-NEXT: getelementptr %s* %p
+; CHECK-NEXT: getelementptr %s, %s* %p
; CHECK-NEXT: addrspacecast
; CHECK-NEXT: load %s addrspace(1)*
; CHECK-NEXT: ret %s
%o = mul i64 %i, 12
%q = addrspacecast %s* %p to i8 addrspace(2)*
- %pp = getelementptr inbounds i8 addrspace(2)* %q, i64 %o
+ %pp = getelementptr inbounds i8, i8 addrspace(2)* %q, i64 %o
%r = addrspacecast i8 addrspace(2)* %pp to %s addrspace(1)*
%l = load %s addrspace(1)* %r
ret %s %l
; CHECK-LABEL: @test68_as1(
%o = mul i32 %i, 12
%q = bitcast %s addrspace(1)* %p to i8 addrspace(1)*
- %pp = getelementptr inbounds i8 addrspace(1)* %q, i32 %o
-; CHECK-NEXT: getelementptr %s addrspace(1)*
+ %pp = getelementptr inbounds i8, i8 addrspace(1)* %q, i32 %o
+; CHECK-NEXT: getelementptr %s, %s addrspace(1)*
%r = bitcast i8 addrspace(1)* %pp to %s addrspace(1)*
%l = load %s addrspace(1)* %r
; CHECK-NEXT: load %s addrspace(1)*
; CHECK-LABEL: @test69(
%o = shl nsw i64 %i, 3
%q = bitcast double* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o
-; CHECK-NEXT: getelementptr inbounds double*
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o
+; CHECK-NEXT: getelementptr inbounds double, double*
%r = bitcast i8* %pp to double*
%l = load double* %r
; CHECK-NEXT: load double*
%o = mul nsw i64 %i, 36
; CHECK-NEXT: mul nsw i64 %i, 3
%q = bitcast %s* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o
-; CHECK-NEXT: getelementptr inbounds %s*
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o
+; CHECK-NEXT: getelementptr inbounds %s, %s*
%r = bitcast i8* %pp to %s*
%l = load %s* %r
; CHECK-NEXT: load %s*
%o = shl i64 %i, 5
; CHECK-NEXT: shl i64 %i, 2
%q = bitcast double* %p to i8*
- %pp = getelementptr i8* %q, i64 %o
-; CHECK-NEXT: getelementptr double*
+ %pp = getelementptr i8, i8* %q, i64 %o
+; CHECK-NEXT: getelementptr double, double*
%r = bitcast i8* %pp to double*
%l = load double* %r
; CHECK-NEXT: load double*
%o = sext i32 %so to i64
; CHECK-NEXT: sext i32 %i to i64
%q = bitcast double* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o
-; CHECK-NEXT: getelementptr inbounds double*
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o
+; CHECK-NEXT: getelementptr inbounds double, double*
%r = bitcast i8* %pp to double*
%l = load double* %r
; CHECK-NEXT: load double*
%o = trunc i128 %lo to i64
; CHECK-NEXT: trunc i128 %i to i64
%q = bitcast double* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o
-; CHECK-NEXT: getelementptr double*
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o
+; CHECK-NEXT: getelementptr double, double*
%r = bitcast i8* %pp to double*
%l = load double* %r
; CHECK-NEXT: load double*
define double @test74(double *%p, i64 %i) {
; CHECK-LABEL: @test74(
%q = bitcast double* %p to i64*
- %pp = getelementptr inbounds i64* %q, i64 %i
-; CHECK-NEXT: getelementptr inbounds double*
+ %pp = getelementptr inbounds i64, i64* %q, i64 %i
+; CHECK-NEXT: getelementptr inbounds double, double*
%r = bitcast i64* %pp to double*
%l = load double* %r
; CHECK-NEXT: load double*
%z = sext i32 %y to i64
; CHECK-NEXT: sext i32 %y to i64
%q = bitcast i32* %p to i8*
- %r = getelementptr i8* %q, i64 %z
+ %r = getelementptr i8, i8* %q, i64 %z
%s = bitcast i8* %r to i32*
ret i32* %s
}
%o2 = mul nsw i64 %o, %j
; CHECK-NEXT: %o2 = mul i64 %i, %j
%q = bitcast %s* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o2
-; CHECK-NEXT: getelementptr %s* %p, i64 %o2
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o2
+; CHECK-NEXT: getelementptr %s, %s* %p, i64 %o2
%r = bitcast i8* %pp to %s*
%l = load %s* %r
; CHECK-NEXT: load %s*
; CHECK-NEXT: %o = mul nsw i64 %i, 3
; CHECK-NEXT: %o2 = mul nsw i64 %o, %j
%q = bitcast %s* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %o2
-; CHECK-NEXT: getelementptr inbounds %s* %p, i64 %o2
+ %pp = getelementptr inbounds i8, i8* %q, i64 %o2
+; CHECK-NEXT: getelementptr inbounds %s, %s* %p, i64 %o2
%r = bitcast i8* %pp to %s*
%l = load %s* %r
; CHECK-NEXT: load %s*
%h = mul nsw i64 %g, %j
; CHECK-NEXT: mul i64 %g, %j
%q = bitcast %s* %p to i8*
- %pp = getelementptr inbounds i8* %q, i64 %h
-; CHECK-NEXT: getelementptr %s* %p, i64 %h
+ %pp = getelementptr inbounds i8, i8* %q, i64 %h
+; CHECK-NEXT: getelementptr %s, %s* %p, i64 %h
%r = bitcast i8* %pp to %s*
%load = load %s* %r
; CHECK-NEXT: load %s*
%c = mul i32 %b, %j
%q = bitcast %s* %p to i8*
; CHECK: bitcast
- %pp = getelementptr inbounds i8* %q, i32 %c
+ %pp = getelementptr inbounds i8, i8* %q, i32 %c
%r = bitcast i8* %pp to %s*
%l = load %s* %r
ret %s %l
%tmp = shl nsw i32 %i, 3
; CHECK-NEXT: sext i32 %i to i64
%q = bitcast [100 x double]* %p to i8*
- %pp = getelementptr i8* %q, i32 %tmp
-; CHECK-NEXT: getelementptr [100 x double]*
+ %pp = getelementptr i8, i8* %q, i32 %tmp
+; CHECK-NEXT: getelementptr [100 x double], [100 x double]*
%r = bitcast i8* %pp to double*
%l = load double* %r
; CHECK-NEXT: load double*
define double @test80_addrspacecast([100 x double] addrspace(1)* %p, i32 %i) {
; CHECK-LABEL: @test80_addrspacecast(
-; CHECK-NEXT: getelementptr [100 x double] addrspace(1)* %p
+; CHECK-NEXT: getelementptr [100 x double], [100 x double] addrspace(1)* %p
; CHECK-NEXT: load double addrspace(1)*
; CHECK-NEXT: ret double
%tmp = shl nsw i32 %i, 3
%q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
- %pp = getelementptr i8 addrspace(2)* %q, i32 %tmp
+ %pp = getelementptr i8, i8 addrspace(2)* %q, i32 %tmp
%r = addrspacecast i8 addrspace(2)* %pp to double addrspace(1)*
%l = load double addrspace(1)* %r
ret double %l
define double @test80_addrspacecast_2([100 x double] addrspace(1)* %p, i32 %i) {
; CHECK-LABEL: @test80_addrspacecast_2(
-; CHECK-NEXT: getelementptr [100 x double] addrspace(1)*
+; CHECK-NEXT: getelementptr [100 x double], [100 x double] addrspace(1)*
; CHECK-NEXT: addrspacecast double addrspace(1)*
; CHECK-NEXT: load double addrspace(3)*
; CHECK-NEXT: ret double
%tmp = shl nsw i32 %i, 3
%q = addrspacecast [100 x double] addrspace(1)* %p to i8 addrspace(2)*
- %pp = getelementptr i8 addrspace(2)* %q, i32 %tmp
+ %pp = getelementptr i8, i8 addrspace(2)* %q, i32 %tmp
%r = addrspacecast i8 addrspace(2)* %pp to double addrspace(3)*
%l = load double addrspace(3)* %r
ret double %l
%tmp = shl nsw i16 %i, 3
; CHECK-NEXT: sext i16 %i to i32
%q = bitcast [100 x double] addrspace(1)* %p to i8 addrspace(1)*
- %pp = getelementptr i8 addrspace(1)* %q, i16 %tmp
-; CHECK-NEXT: getelementptr [100 x double] addrspace(1)*
+ %pp = getelementptr i8, i8 addrspace(1)* %q, i16 %tmp
+; CHECK-NEXT: getelementptr [100 x double], [100 x double] addrspace(1)*
%r = bitcast i8 addrspace(1)* %pp to double addrspace(1)*
%l = load double addrspace(1)* %r
; CHECK-NEXT: load double addrspace(1)*
define double @test81(double *%p, float %f) {
%i = fptosi float %f to i64
%q = bitcast double* %p to i8*
- %pp = getelementptr i8* %q, i64 %i
+ %pp = getelementptr i8, i8* %q, i64 %i
%r = bitcast i8* %pp to double*
%l = load double* %r
ret double %l
define i32 @test_cast_gep_small_indices_as() {
; CHECK-LABEL: @test_cast_gep_small_indices_as(
; CHECK: load i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16
- %p = getelementptr [10 x i32] addrspace(3)* @i32_array_as3, i7 0, i7 0
+ %p = getelementptr [10 x i32], [10 x i32] addrspace(3)* @i32_array_as3, i7 0, i7 0
%x = load i32 addrspace(3)* %p, align 4
ret i32 %x
}
define i32 @test_cast_gep_large_indices_as() {
; CHECK-LABEL: @test_cast_gep_large_indices_as(
; CHECK: load i32 addrspace(3)* getelementptr inbounds ([10 x i32] addrspace(3)* @i32_array_as3, i16 0, i16 0), align 16
- %p = getelementptr [10 x i32] addrspace(3)* @i32_array_as3, i64 0, i64 0
+ %p = getelementptr [10 x i32], [10 x i32] addrspace(3)* @i32_array_as3, i64 0, i64 0
%x = load i32 addrspace(3)* %p, align 4
ret i32 %x
}
define i32 @test_constant_cast_gep_struct_indices_as() {
; CHECK-LABEL: @test_constant_cast_gep_struct_indices_as(
; CHECK: load i32 addrspace(3)* getelementptr inbounds (%struct.foo addrspace(3)* @constant_fold_global_ptr, i16 0, i32 2, i16 2), align 8
- %x = getelementptr %struct.foo addrspace(3)* @constant_fold_global_ptr, i18 0, i32 2, i12 2
+ %x = getelementptr %struct.foo, %struct.foo addrspace(3)* @constant_fold_global_ptr, i18 0, i32 2, i12 2
%y = load i32 addrspace(3)* %x, align 4
ret i32 %y
}
define i32 @test_read_data_from_global_as3() {
; CHECK-LABEL: @test_read_data_from_global_as3(
; CHECK-NEXT: ret i32 2
- %x = getelementptr [5 x i32] addrspace(3)* @constant_data_as3, i32 0, i32 1
+ %x = getelementptr [5 x i32], [5 x i32] addrspace(3)* @constant_data_as3, i32 0, i32 1
%y = load i32 addrspace(3)* %x, align 4
ret i32 %y
}
define float @canonicalize_addrspacecast(i32 %i) {
; CHECK-LABEL: @canonicalize_addrspacecast
-; CHECK-NEXT: getelementptr inbounds float* addrspacecast (float addrspace(3)* bitcast ([0 x i8] addrspace(3)* @shared_mem to float addrspace(3)*) to float*), i32 %i
- %p = getelementptr inbounds float* addrspacecast ([0 x i8] addrspace(3)* @shared_mem to float*), i32 %i
+; CHECK-NEXT: getelementptr inbounds float, float* addrspacecast (float addrspace(3)* bitcast ([0 x i8] addrspace(3)* @shared_mem to float addrspace(3)*) to float*), i32 %i
+ %p = getelementptr inbounds float, float* addrspacecast ([0 x i8] addrspace(3)* @shared_mem to float*), i32 %i
%v = load float* %p
ret float %v
}
%B2 = ptrtoint i8* %B to i64
%C = sub i64 0, %B2
- %D = getelementptr i8* %A, i64 %C
+ %D = getelementptr i8, i8* %A, i64 %C
%E = ptrtoint i8* %D to i64
ret i64 %E
%B2 = ptrtoint i8 addrspace(1)* %B to i16
%C = sub i16 0, %B2
- %D = getelementptr i8 addrspace(1)* %A, i16 %C
+ %D = getelementptr i8, i8 addrspace(1)* %A, i16 %C
%E = ptrtoint i8 addrspace(1)* %D to i16
ret i16 %E
; PR4908
define void @test2(<1 x i16>* nocapture %b, i32* nocapture %c) nounwind ssp {
entry:
- %arrayidx = getelementptr inbounds <1 x i16>* %b, i64 undef ; <<1 x i16>*>
+ %arrayidx = getelementptr inbounds <1 x i16>, <1 x i16>* %b, i64 undef ; <<1 x i16>*>
%tmp2 = load <1 x i16>* %arrayidx ; <<1 x i16>> [#uses=1]
%tmp6 = bitcast <1 x i16> %tmp2 to i16 ; <i16> [#uses=1]
%tmp7 = zext i16 %tmp6 to i32 ; <i32> [#uses=1]
%ins = or i32 0, %tmp7 ; <i32> [#uses=1]
- %arrayidx20 = getelementptr inbounds i32* %c, i64 undef ; <i32*> [#uses=1]
+ %arrayidx20 = getelementptr inbounds i32, i32* %c, i64 undef ; <i32*> [#uses=1]
store i32 %ins, i32* %arrayidx20
ret void
}
br i1 %1, label %10, label %3
; <label>:3 ; preds = %2
- %4 = getelementptr inbounds %t0* null, i64 0, i32 1 ; <i32*> [#uses=0]
- %5 = getelementptr inbounds %t1* null, i64 0, i32 4 ; <i32**> [#uses=1]
+ %4 = getelementptr inbounds %t0, %t0* null, i64 0, i32 1 ; <i32*> [#uses=0]
+ %5 = getelementptr inbounds %t1, %t1* null, i64 0, i32 4 ; <i32**> [#uses=1]
%6 = load i32** %5, align 8 ; <i32*> [#uses=1]
%7 = icmp ne i32* %6, null ; <i1> [#uses=1]
%8 = zext i1 %7 to i32 ; <i32> [#uses=1]
resume { i8*, i32 } %exc1
cond.false: ; preds = %entry
- %tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator* %this, i32 0, i32 0 ; <i64 ()**> [#uses=1]
+ %tmp4 = getelementptr inbounds %class.RuleBasedBreakIterator, %class.RuleBasedBreakIterator* %this, i32 0, i32 0 ; <i64 ()**> [#uses=1]
%tmp5 = load i64 ()** %tmp4 ; <i64 ()*> [#uses=1]
%call = invoke i64 %tmp5()
to label %cond.end unwind label %ehcleanup ; <i64> [#uses=1]
%s2 = type { i64 }
define void @test13() nounwind ssp {
entry:
- %0 = getelementptr inbounds %s1* null, i64 0, i32 2, i64 0, i32 0
+ %0 = getelementptr inbounds %s1, %s1* null, i64 0, i32 2, i64 0, i32 0
%1 = bitcast i64* %0 to i32*
- %2 = getelementptr inbounds %s1* null, i64 0, i32 2, i64 1, i32 0
+ %2 = getelementptr inbounds %s1, %s1* null, i64 0, i32 2, i64 1, i32 0
%.pre = load i32* %1, align 8
%3 = lshr i32 %.pre, 19
%brmerge = or i1 undef, undef
define %struct.basic_ios *@test17() ssp {
entry:
- %add.ptr.i = getelementptr i8* null, i64 undef
+ %add.ptr.i = getelementptr i8, i8* null, i64 undef
%0 = bitcast i8* %add.ptr.i to %struct.basic_ios*
ret %struct.basic_ios* %0
}
%num_times_2 = shl i64 %num, 1
%num_times_2_plus_4 = add i64 %num_times_2, 4
%i8_ptr = bitcast i16* %i16_ptr to i8*
- %i8_ptr_num_times_2_plus_4 = getelementptr i8* %i8_ptr, i64 %num_times_2_plus_4
+ %i8_ptr_num_times_2_plus_4 = getelementptr i8, i8* %i8_ptr, i64 %num_times_2_plus_4
%num_times_neg2 = mul i64 %num, -2
%num_times_neg2_minus_4 = add i64 %num_times_neg2, -4
- %addr = getelementptr i8* %i8_ptr_num_times_2_plus_4, i64 %num_times_neg2_minus_4
+ %addr = getelementptr i8, i8* %i8_ptr_num_times_2_plus_4, i64 %num_times_neg2_minus_4
ret i8* %addr
}
define i8* @t6(i8* %x) {
; CHECK-LABEL: @t6(
- %empty = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %empty = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
%ret = call i8* @strcat(i8* %x, i8* %empty)
ret i8* %ret
; CHECK: call i8* @strcat
define i8* @t7(i8* %x) {
; CHECK-LABEL: @t7(
- %empty = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %empty = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
%ret = call i8* @strncat(i8* %x, i8* %empty, i32 1)
ret i8* %ret
; CHECK: call i8* @strncat
define i8* @t8() {
; CHECK-LABEL: @t8(
- %x = getelementptr inbounds [13 x i8]* @.str1, i32 0, i32 0
+ %x = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
%ret = call i8* @strchr(i8* %x, i32 119)
ret i8* %ret
; CHECK: call i8* @strchr
define i8* @t9() {
; CHECK-LABEL: @t9(
- %x = getelementptr inbounds [13 x i8]* @.str1, i32 0, i32 0
+ %x = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
%ret = call i8* @strrchr(i8* %x, i32 119)
ret i8* %ret
; CHECK: call i8* @strrchr
define i32 @t10() {
; CHECK-LABEL: @t10(
- %x = getelementptr inbounds [4 x i8]* @.str2, i32 0, i32 0
- %y = getelementptr inbounds [4 x i8]* @.str3, i32 0, i32 0
+ %x = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
+ %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str3, i32 0, i32 0
%ret = call i32 @strcmp(i8* %x, i8* %y)
ret i32 %ret
; CHECK: call i32 @strcmp
define i32 @t11() {
; CHECK-LABEL: @t11(
- %x = getelementptr inbounds [4 x i8]* @.str2, i32 0, i32 0
- %y = getelementptr inbounds [4 x i8]* @.str3, i32 0, i32 0
+ %x = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
+ %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str3, i32 0, i32 0
%ret = call i32 @strncmp(i8* %x, i8* %y, i64 3)
ret i32 %ret
; CHECK: call i32 @strncmp
define i8* @t12(i8* %x) {
; CHECK-LABEL: @t12(
- %y = getelementptr inbounds [4 x i8]* @.str2, i32 0, i32 0
+ %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
%ret = call i8* @strcpy(i8* %x, i8* %y)
ret i8* %ret
; CHECK: call i8* @strcpy
define i8* @t13(i8* %x) {
; CHECK-LABEL: @t13(
- %y = getelementptr inbounds [4 x i8]* @.str2, i32 0, i32 0
+ %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
%ret = call i8* @stpcpy(i8* %x, i8* %y)
ret i8* %ret
; CHECK: call i8* @stpcpy
define i8* @t14(i8* %x) {
; CHECK-LABEL: @t14(
- %y = getelementptr inbounds [4 x i8]* @.str2, i32 0, i32 0
+ %y = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
%ret = call i8* @strncpy(i8* %x, i8* %y, i64 3)
ret i8* %ret
; CHECK: call i8* @strncpy
define i64 @t15() {
; CHECK-LABEL: @t15(
- %x = getelementptr inbounds [4 x i8]* @.str2, i32 0, i32 0
+ %x = getelementptr inbounds [4 x i8], [4 x i8]* @.str2, i32 0, i32 0
%ret = call i64 @strlen(i8* %x)
ret i64 %ret
; CHECK: call i64 @strlen
define i8* @t16(i8* %x) {
; CHECK-LABEL: @t16(
- %y = getelementptr inbounds [1 x i8]* @.str, i32 0, i32 0
+ %y = getelementptr inbounds [1 x i8], [1 x i8]* @.str, i32 0, i32 0
%ret = call i8* @strpbrk(i8* %x, i8* %y)
ret i8* %ret
; CHECK: call i8* @strpbrk
define i64 @t17(i8* %x) {
; CHECK-LABEL: @t17(
- %y = getelementptr inbounds [1 x i8]* @.str, i32 0, i32 0
+ %y = getelementptr inbounds [1 x i8], [1 x i8]* @.str, i32 0, i32 0
%ret = call i64 @strspn(i8* %x, i8* %y)
ret i64 %ret
; CHECK: call i64 @strspn
define double @t18(i8** %y) {
; CHECK-LABEL: @t18(
- %x = getelementptr inbounds [6 x i8]* @.str4, i64 0, i64 0
+ %x = getelementptr inbounds [6 x i8], [6 x i8]* @.str4, i64 0, i64 0
%ret = call double @strtod(i8* %x, i8** %y)
ret double %ret
; CHECK: call double @strtod
define float @t19(i8** %y) {
; CHECK-LABEL: @t19(
- %x = getelementptr inbounds [6 x i8]* @.str4, i64 0, i64 0
+ %x = getelementptr inbounds [6 x i8], [6 x i8]* @.str4, i64 0, i64 0
%ret = call float @strtof(i8* %x, i8** %y)
ret float %ret
; CHECK: call float @strtof
define x86_fp80 @t20(i8** %y) {
; CHECK-LABEL: @t20(
- %x = getelementptr inbounds [6 x i8]* @.str4, i64 0, i64 0
+ %x = getelementptr inbounds [6 x i8], [6 x i8]* @.str4, i64 0, i64 0
%ret = call x86_fp80 @strtold(i8* %x, i8** %y)
ret x86_fp80 %ret
; CHECK: call x86_fp80 @strtold
define i64 @t21(i8** %y) {
; CHECK-LABEL: @t21(
- %x = getelementptr inbounds [5 x i8]* @.str5, i64 0, i64 0
+ %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
%ret = call i64 @strtol(i8* %x, i8** %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtol
define i64 @t22(i8** %y) {
; CHECK-LABEL: @t22(
- %x = getelementptr inbounds [5 x i8]* @.str5, i64 0, i64 0
+ %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
%ret = call i64 @strtoll(i8* %x, i8** %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtoll
define i64 @t23(i8** %y) {
; CHECK-LABEL: @t23(
- %x = getelementptr inbounds [5 x i8]* @.str5, i64 0, i64 0
+ %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
%ret = call i64 @strtoul(i8* %x, i8** %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtoul
define i64 @t24(i8** %y) {
; CHECK-LABEL: @t24(
- %x = getelementptr inbounds [5 x i8]* @.str5, i64 0, i64 0
+ %x = getelementptr inbounds [5 x i8], [5 x i8]* @.str5, i64 0, i64 0
%ret = call i64 @strtoull(i8* %x, i8** %y, i32 10)
ret i64 %ret
; CHECK: call i64 @strtoull
define i64 @t25(i8* %y) {
; CHECK-LABEL: @t25(
- %x = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %x = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
%ret = call i64 @strcspn(i8* %x, i8* %y)
ret i64 %ret
; CHECK: call i64 @strcspn
define void @t30() {
; CHECK-LABEL: @t30(
- %x = getelementptr inbounds [13 x i8]* @.str1, i32 0, i32 0
+ %x = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
call i32 @fprintf(i8* null, i8* %x)
ret void
; CHECK: call i32 @fprintf
define void @t36() {
; CHECK-LABEL: @t36(
- %x = getelementptr inbounds [1 x i8]* @empty, i32 0, i32 0
+ %x = getelementptr inbounds [1 x i8], [1 x i8]* @empty, i32 0, i32 0
call i32 @printf(i8* %x)
ret void
; CHECK: call i32 @printf
define void @t37(i8* %x) {
; CHECK-LABEL: @t37(
- %y = getelementptr inbounds [13 x i8]* @.str1, i32 0, i32 0
+ %y = getelementptr inbounds [13 x i8], [13 x i8]* @.str1, i32 0, i32 0
call i32 @sprintf(i8* %x, i8* %y)
ret void
; CHECK: call i32 @sprintf
br label %for.cond.i
for.cond.i: ; preds = %land.lhs.true, %entry
- %0 = getelementptr inbounds %struct.S0.0.1.2.3.4.13.22.31.44.48.53.54.55.56.58.59.60.66.68.70.74.77.106.107.108.109.110.113.117.118.128.129* %l_819.i.i, i64 0, i32 0
+ %0 = getelementptr inbounds %struct.S0.0.1.2.3.4.13.22.31.44.48.53.54.55.56.58.59.60.66.68.70.74.77.106.107.108.109.110.113.117.118.128.129, %struct.S0.0.1.2.3.4.13.22.31.44.48.53.54.55.56.58.59.60.66.68.70.74.77.106.107.108.109.110.113.117.118.128.129* %l_819.i.i, i64 0, i32 0
br label %for.cond.i6.i.i
for.cond.i6.i.i: ; preds = %for.body.i8.i.i, %for.cond.i
; CHECK: alloca
; CHECK: align 16
%2 = alloca [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], align 16 ; <[3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]*> [#uses=1]
- %3 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]* %2, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
- %4 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>* %3, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
- %5 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }* %4, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
+ %3 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>]* %2, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
+ %4 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>, <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>* %3, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
+ %5 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }, { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }* %4, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
%6 = bitcast { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }* %5 to { [8 x i16] }* ; <{ [8 x i16] }*> [#uses=1]
- %7 = getelementptr { [8 x i16] }* %6, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
- %8 = getelementptr [8 x i16]* %7, i32 0, i32 0 ; <i16*> [#uses=1]
+ %7 = getelementptr { [8 x i16] }, { [8 x i16] }* %6, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
+ %8 = getelementptr [8 x i16], [8 x i16]* %7, i32 0, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %8, align 16
call void @bar(i16* %8)
ret void
define void @foo_as1(i32 %a, [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>] addrspace(1)* %b) {
; CHECK-LABEL: @foo_as1(
; CHECK: align 16
- %1 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>] addrspace(1)* %b, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
- %2 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }> addrspace(1)* %1, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
- %3 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } addrspace(1)* %2, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
+ %1 = getelementptr [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>], [3 x <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>] addrspace(1)* %b, i32 0, i32 0 ; <<{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>*> [#uses=1]
+ %2 = getelementptr <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }>, <{ { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } }> addrspace(1)* %1, i32 0, i32 0 ; <{ { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }*> [#uses=1]
+ %3 = getelementptr { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } }, { { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } } addrspace(1)* %2, i32 0, i32 0 ; <{ [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 }*> [#uses=1]
%4 = bitcast { [2 x { { i32 } }], [2 x i8], { i16 }, [2 x i8], i8, i8 } addrspace(1)* %3 to { [8 x i16] } addrspace(1)* ; <{ [8 x i16] }*> [#uses=1]
- %5 = getelementptr { [8 x i16] } addrspace(1)* %4, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
- %6 = getelementptr [8 x i16] addrspace(1)* %5, i32 0, i32 0 ; <i16*> [#uses=1]
+ %5 = getelementptr { [8 x i16] }, { [8 x i16] } addrspace(1)* %4, i32 0, i32 0 ; <[8 x i16]*> [#uses=1]
+ %6 = getelementptr [8 x i16], [8 x i16] addrspace(1)* %5, i32 0, i32 0 ; <i16*> [#uses=1]
store i16 0, i16 addrspace(1)* %6, align 16
call void @bar_as1(i16 addrspace(1)* %6)
ret void
}
; CHECK-LABEL: define i32 @extract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}* %pair, i32 0, i32 1
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %pair, i32 0, i32 1
; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32* [[GEP]]
; CHECK-NEXT: store
; CHECK-NEXT: br label %loop
}
; CHECK-LABEL: define i32 @doubleextract2gep(
-; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}* %arg, i32 0, i32 1, i32 1
+; CHECK-NEXT: [[GEP:%[a-z0-9]+]] = getelementptr inbounds {{.*}}, {{.*}}* %arg, i32 0, i32 1, i32 1
; CHECK-NEXT: [[LOAD:%[A-Za-z0-9]+]] = load i32* [[GEP]]
; CHECK-NEXT: ret i32 [[LOAD]]
define i32 @doubleextract2gep({i32, {i32, i32}}* %arg) {
%color = alloca %struct.NSArray*
%color.466 = alloca %struct.NSObject*
%tmp103 = load %struct.NSArray** %color, align 4
- %tmp103104 = getelementptr %struct.NSArray* %tmp103, i32 0, i32 0
+ %tmp103104 = getelementptr %struct.NSArray, %struct.NSArray* %tmp103, i32 0, i32 0
store %struct.NSObject* %tmp103104, %struct.NSObject** %color.466, align 4
%tmp105 = load %struct.objc_selector** @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
%tmp106 = load %struct.NSObject** %color.466, align 4
define void @test_simplify1(%FILE* %fp) {
; CHECK-LABEL: @test_simplify1(
- %fmt = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt)
; CHECK-NEXT: call i32 @fwrite(i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0), i32 12, i32 1, %FILE* %fp)
ret void
define void @test_simplify2(%FILE* %fp) {
; CHECK-LABEL: @test_simplify2(
- %fmt = getelementptr [3 x i8]* @percent_c, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_c, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, i8 104)
; CHECK-NEXT: call i32 @fputc(i32 104, %FILE* %fp)
ret void
define void @test_simplify3(%FILE* %fp) {
; CHECK-LABEL: @test_simplify3(
- %fmt = getelementptr [3 x i8]* @percent_s, i32 0, i32 0
- %str = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_s, i32 0, i32 0
+ %str = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, i8* %str)
; CHECK-NEXT: call i32 @fwrite(i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0), i32 12, i32 1, %FILE* %fp)
ret void
define void @test_simplify4(%FILE* %fp) {
; CHECK-IPRINTF-LABEL: @test_simplify4(
- %fmt = getelementptr [3 x i8]* @percent_d, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_d, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, i32 187)
; CHECK-IPRINTF-NEXT: call i32 (%FILE*, i8*, ...)* @fiprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
ret void
define void @test_no_simplify1(%FILE* %fp) {
; CHECK-IPRINTF-LABEL: @test_no_simplify1(
- %fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_f, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, double 1.87)
; CHECK-IPRINTF-NEXT: call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
ret void
define void @test_no_simplify2(%FILE* %fp, double %d) {
; CHECK-LABEL: @test_no_simplify2(
- %fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_f, i32 0, i32 0
call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt, double %d)
; CHECK-NEXT: call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double %d)
ret void
define i32 @test_no_simplify3(%FILE* %fp) {
; CHECK-LABEL: @test_no_simplify3(
- %fmt = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
%1 = call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* %fmt)
; CHECK-NEXT: call i32 (%FILE*, i8*, ...)* @fprintf(%FILE* %fp, i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0))
ret i32 %1
define void @test_simplify1(%FILE* %fp) {
; CHECK-LABEL: @test_simplify1(
- %str = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
call i32 @fputs(i8* %str, %FILE* %fp)
ret void
; CHECK-NEXT: ret void
define void @test_simplify2(%FILE* %fp) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr [2 x i8]* @A, i32 0, i32 0
+ %str = getelementptr [2 x i8], [2 x i8]* @A, i32 0, i32 0
call i32 @fputs(i8* %str, %FILE* %fp)
; CHECK-NEXT: call i32 @fputc(i32 65, %FILE* %fp)
ret void
define void @test_simplify3(%FILE* %fp) {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr [7 x i8]* @hello, i32 0, i32 0
+ %str = getelementptr [7 x i8], [7 x i8]* @hello, i32 0, i32 0
call i32 @fputs(i8* %str, %FILE* %fp)
; CHECK-NEXT: call i32 @fwrite(i8* getelementptr inbounds ([7 x i8]* @hello, i32 0, i32 0), i32 6, i32 1, %FILE* %fp)
ret void
define void @test_simplify1(%FILE* %fp) {
; CHECK-LABEL: @test_simplify1(
- %str = getelementptr inbounds [1 x i8]* @str, i64 0, i64 0
+ %str = getelementptr inbounds [1 x i8], [1 x i8]* @str, i64 0, i64 0
call i64 @fwrite(i8* %str, i64 1, i64 1, %FILE* %fp)
; CHECK-NEXT: call i32 @fputc(i32 0, %FILE* %fp)
ret void
define void @test_simplify2(%FILE* %fp) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr inbounds [0 x i8]* @empty, i64 0, i64 0
+ %str = getelementptr inbounds [0 x i8], [0 x i8]* @empty, i64 0, i64 0
call i64 @fwrite(i8* %str, i64 1, i64 0, %FILE* %fp)
ret void
; CHECK-NEXT: ret void
define void @test_simplify3(%FILE* %fp) {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr inbounds [0 x i8]* @empty, i64 0, i64 0
+ %str = getelementptr inbounds [0 x i8], [0 x i8]* @empty, i64 0, i64 0
call i64 @fwrite(i8* %str, i64 0, i64 1, %FILE* %fp)
ret void
; CHECK-NEXT: ret void
define i64 @test_no_simplify1(%FILE* %fp) {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr inbounds [1 x i8]* @str, i64 0, i64 0
+ %str = getelementptr inbounds [1 x i8], [1 x i8]* @str, i64 0, i64 0
%ret = call i64 @fwrite(i8* %str, i64 1, i64 1, %FILE* %fp)
; CHECK-NEXT: call i64 @fwrite
ret i64 %ret
define void @test_no_simplify2(%FILE* %fp, i64 %size) {
; CHECK-LABEL: @test_no_simplify2(
- %str = getelementptr inbounds [1 x i8]* @str, i64 0, i64 0
+ %str = getelementptr inbounds [1 x i8], [1 x i8]* @str, i64 0, i64 0
call i64 @fwrite(i8* %str, i64 %size, i64 1, %FILE* %fp)
; CHECK-NEXT: call i64 @fwrite
ret void
; make sure that we are not crashing when creating an illegal type
define void @func(%myStruct addrspace(1)* nocapture %p) nounwind {
ST:
- %A = getelementptr inbounds %myStruct addrspace(1)* %p, i64 0
+ %A = getelementptr inbounds %myStruct, %myStruct addrspace(1)* %p, i64 0
%B = addrspacecast %myStruct addrspace(1)* %A to %myStruct*
- %C = getelementptr inbounds %myStruct* %B, i32 0, i32 1
- %D = getelementptr inbounds [3 x float]* %C, i32 0, i32 2
+ %C = getelementptr inbounds %myStruct, %myStruct* %B, i32 0, i32 1
+ %D = getelementptr inbounds [3 x float], [3 x float]* %C, i32 0, i32 2
%E = load float* %D, align 4
%F = fsub float %E, undef
ret void
define void @keep_necessary_addrspacecast(i64 %i, float** %out0, float** %out1) {
entry:
; CHECK-LABEL: @keep_necessary_addrspacecast
- %0 = getelementptr [256 x float]* addrspacecast ([256 x float] addrspace(3)* @array to [256 x float]*), i64 0, i64 %i
+ %0 = getelementptr [256 x float], [256 x float]* addrspacecast ([256 x float] addrspace(3)* @array to [256 x float]*), i64 0, i64 %i
; CHECK: addrspacecast float addrspace(3)* %{{[0-9]+}} to float*
- %1 = getelementptr [0 x float]* addrspacecast (float addrspace(3)* @scalar to [0 x float]*), i64 0, i64 %i
+ %1 = getelementptr [0 x float], [0 x float]* addrspacecast (float addrspace(3)* @scalar to [0 x float]*), i64 0, i64 %i
; CHECK: addrspacecast float addrspace(3)* %{{[0-9]+}} to float*
store float* %0, float** %out0, align 4
store float* %1, float** %out1, align 4
define void @test(i32* %p, i32 %index) {
; CHECK-LABEL: @test
; CHECK-NEXT: %1 = sext i32 %index to i64
-; CHECK-NEXT: %addr = getelementptr i32* %p, i64 %1
- %addr = getelementptr i32* %p, i32 %index
+; CHECK-NEXT: %addr = getelementptr i32, i32* %p, i64 %1
+ %addr = getelementptr i32, i32* %p, i32 %index
%val = load i32* %addr
call void @use(i32 %val)
ret void
define void @test2(i32* %p, i32 %index) {
; CHECK-LABEL: @test2
; CHECK-NEXT: %i = zext i32 %index to i64
-; CHECK-NEXT: %addr = getelementptr i32* %p, i64 %i
+; CHECK-NEXT: %addr = getelementptr i32, i32* %p, i64 %i
%i = zext i32 %index to i64
- %addr = getelementptr i32* %p, i64 %i
+ %addr = getelementptr i32, i32* %p, i64 %i
%val = load i32* %addr
call void @use(i32 %val)
ret void
; CHECK-LABEL: @test3
; CHECK: zext
; CHECK-NOT: sext
- %addr_begin = getelementptr i32* %p, i64 40
- %addr_fixed = getelementptr i32* %addr_begin, i64 48
+ %addr_begin = getelementptr i32, i32* %p, i64 40
+ %addr_fixed = getelementptr i32, i32* %addr_begin, i64 48
%val_fixed = load i32* %addr_fixed, !range !0
- %addr = getelementptr i32* %addr_begin, i32 %val_fixed
+ %addr = getelementptr i32, i32* %addr_begin, i32 %val_fixed
%val = load i32* %addr
call void @use(i32 %val)
ret void
; CHECK-LABEL: @test4
; CHECK: zext
; CHECK-NOT: sext
- %addr_begin = getelementptr i32* %p, i64 40
- %addr_fixed = getelementptr i32* %addr_begin, i64 48
+ %addr_begin = getelementptr i32, i32* %p, i64 40
+ %addr_fixed = getelementptr i32, i32* %addr_begin, i64 48
%val_fixed = load i32* %addr_fixed, !range !0
%i = sext i32 %val_fixed to i64
- %addr = getelementptr i32* %addr_begin, i64 %i
+ %addr = getelementptr i32, i32* %addr_begin, i64 %i
%val = load i32* %addr
call void @use(i32 %val)
ret void
define i32 @test1(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
bb:
- %tmp = getelementptr inbounds %struct1* %dm, i64 0, i32 0
+ %tmp = getelementptr inbounds %struct1, %struct1* %dm, i64 0, i32 0
%tmp1 = load %struct2** %tmp, align 8
br i1 %tmp4, label %bb1, label %bb2
bb1:
- %tmp10 = getelementptr inbounds %struct2* %tmp1, i64 %tmp9
- %tmp11 = getelementptr inbounds %struct2* %tmp10, i64 0, i32 0
+ %tmp10 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9
+ %tmp11 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 0
store i32 0, i32* %tmp11, align 4
br label %bb3
bb2:
- %tmp20 = getelementptr inbounds %struct2* %tmp1, i64 %tmp19
- %tmp21 = getelementptr inbounds %struct2* %tmp20, i64 0, i32 0
+ %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
+ %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
store i32 0, i32* %tmp21, align 4
br label %bb3
bb3:
%phi = phi %struct2* [ %tmp10, %bb1 ], [ %tmp20, %bb2 ]
- %tmp24 = getelementptr inbounds %struct2* %phi, i64 0, i32 1
+ %tmp24 = getelementptr inbounds %struct2, %struct2* %phi, i64 0, i32 1
%tmp25 = load i32* %tmp24, align 4
ret i32 %tmp25
; CHECK-LABEL: @test1(
-; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp9, i32 0
-; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp19, i32 0
+; CHECK: getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9, i32 0
+; CHECK: getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19, i32 0
; CHECK: %[[PHI:[0-9A-Za-z]+]] = phi i64 [ %tmp9, %bb1 ], [ %tmp19, %bb2 ]
-; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %[[PHI]], i32 1
+; CHECK: getelementptr inbounds %struct2, %struct2* %tmp1, i64 %[[PHI]], i32 1
}
define i32 @test2(%struct1* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19) {
bb:
- %tmp = getelementptr inbounds %struct1* %dm, i64 0, i32 0
+ %tmp = getelementptr inbounds %struct1, %struct1* %dm, i64 0, i32 0
%tmp1 = load %struct2** %tmp, align 8
- %tmp10 = getelementptr inbounds %struct2* %tmp1, i64 %tmp9
- %tmp11 = getelementptr inbounds %struct2* %tmp10, i64 0, i32 0
+ %tmp10 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9
+ %tmp11 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 0
store i32 0, i32* %tmp11, align 4
- %tmp20 = getelementptr inbounds %struct2* %tmp1, i64 %tmp19
- %tmp21 = getelementptr inbounds %struct2* %tmp20, i64 0, i32 0
+ %tmp20 = getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19
+ %tmp21 = getelementptr inbounds %struct2, %struct2* %tmp20, i64 0, i32 0
store i32 0, i32* %tmp21, align 4
- %tmp24 = getelementptr inbounds %struct2* %tmp10, i64 0, i32 1
+ %tmp24 = getelementptr inbounds %struct2, %struct2* %tmp10, i64 0, i32 1
%tmp25 = load i32* %tmp24, align 4
ret i32 %tmp25
; CHECK-LABEL: @test2(
-; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp9, i32 0
-; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp19, i32 0
-; CHECK: getelementptr inbounds %struct2* %tmp1, i64 %tmp9, i32 1
+; CHECK: getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9, i32 0
+; CHECK: getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp19, i32 0
+; CHECK: getelementptr inbounds %struct2, %struct2* %tmp1, i64 %tmp9, i32 1
}
; Check that instcombine doesn't insert GEPs before landingpad.
define i32 @test3(%struct3* %dm, i1 %tmp4, i64 %tmp9, i64 %tmp19, i64 %tmp20, i64 %tmp21) {
bb:
- %tmp = getelementptr inbounds %struct3* %dm, i64 0
+ %tmp = getelementptr inbounds %struct3, %struct3* %dm, i64 0
br i1 %tmp4, label %bb1, label %bb2
bb1:
- %tmp1 = getelementptr inbounds %struct3* %tmp, i64 %tmp19, i32 1
- %tmp11 = getelementptr inbounds %struct4* %tmp1, i64 0, i32 0, i32 0
+ %tmp1 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp19, i32 1
+ %tmp11 = getelementptr inbounds %struct4, %struct4* %tmp1, i64 0, i32 0, i32 0
store i32 0, i32* %tmp11, align 4
br label %bb3
bb2:
- %tmp2 = getelementptr inbounds %struct3* %tmp, i64 %tmp20, i32 1
- %tmp12 = getelementptr inbounds %struct4* %tmp2, i64 0, i32 0, i32 1
+ %tmp2 = getelementptr inbounds %struct3, %struct3* %tmp, i64 %tmp20, i32 1
+ %tmp12 = getelementptr inbounds %struct4, %struct4* %tmp2, i64 0, i32 0, i32 1
store i32 0, i32* %tmp12, align 4
br label %bb3
bb5:
%tmp27 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) catch i8* bitcast (i8** @_ZTIi to i8*)
- %tmp34 = getelementptr inbounds %struct4* %phi, i64 %tmp21, i32 1
- %tmp35 = getelementptr inbounds %struct2* %tmp34, i64 0, i32 1
+ %tmp34 = getelementptr inbounds %struct4, %struct4* %phi, i64 %tmp21, i32 1
+ %tmp35 = getelementptr inbounds %struct2, %struct2* %tmp34, i64 0, i32 1
%tmp25 = load i32* %tmp35, align 4
ret i32 %tmp25
; Test noop elimination
define i32* @test1(i32* %I) {
- %A = getelementptr i32* %I, i64 0
+ %A = getelementptr i32, i32* %I, i64 0
ret i32* %A
; CHECK-LABEL: @test1(
; CHECK: ret i32* %I
}
define i32 addrspace(1)* @test1_as1(i32 addrspace(1)* %I) {
- %A = getelementptr i32 addrspace(1)* %I, i64 0
+ %A = getelementptr i32, i32 addrspace(1)* %I, i64 0
ret i32 addrspace(1)* %A
; CHECK-LABEL: @test1_as1(
; CHECK: ret i32 addrspace(1)* %I
; Test noop elimination
define i32* @test2(i32* %I) {
- %A = getelementptr i32* %I
+ %A = getelementptr i32, i32* %I
ret i32* %A
; CHECK-LABEL: @test2(
; CHECK: ret i32* %I
; Test that two array indexing geps fold
define i32* @test3(i32* %I) {
- %A = getelementptr i32* %I, i64 17
- %B = getelementptr i32* %A, i64 4
+ %A = getelementptr i32, i32* %I, i64 17
+ %B = getelementptr i32, i32* %A, i64 4
ret i32* %B
; CHECK-LABEL: @test3(
-; CHECK: getelementptr i32* %I, i64 21
+; CHECK: getelementptr i32, i32* %I, i64 21
}
; Test that two getelementptr insts fold
define i32* @test4({ i32 }* %I) {
- %A = getelementptr { i32 }* %I, i64 1
- %B = getelementptr { i32 }* %A, i64 0, i32 0
+ %A = getelementptr { i32 }, { i32 }* %I, i64 1
+ %B = getelementptr { i32 }, { i32 }* %A, i64 0, i32 0
ret i32* %B
; CHECK-LABEL: @test4(
-; CHECK: getelementptr { i32 }* %I, i64 1, i32 0
+; CHECK: getelementptr { i32 }, { i32 }* %I, i64 1, i32 0
}
define void @test5(i8 %B) {
; This should be turned into a constexpr instead of being an instruction
- %A = getelementptr [10 x i8]* @Global, i64 0, i64 4
+ %A = getelementptr [10 x i8], [10 x i8]* @Global, i64 0, i64 4
store i8 %B, i8* %A
ret void
; CHECK-LABEL: @test5(
define void @test5_as1(i8 %B) {
; This should be turned into a constexpr instead of being an instruction
- %A = getelementptr [10 x i8] addrspace(1)* @Global_as1, i16 0, i16 4
+ %A = getelementptr [10 x i8], [10 x i8] addrspace(1)* @Global_as1, i16 0, i16 4
store i8 %B, i8 addrspace(1)* %A
ret void
; CHECK-LABEL: @test5_as1(
; CHECK-LABEL: @test_evaluate_gep_nested_as_ptrs(
; CHECK-NEXT: store i32 addrspace(2)* %B, i32 addrspace(2)* addrspace(1)* getelementptr inbounds (%as2_ptr_struct addrspace(1)* @global_as1_as2_ptr, i16 0, i32 0), align 8
; CHECK-NEXT: ret void
- %A = getelementptr %as2_ptr_struct addrspace(1)* @global_as1_as2_ptr, i16 0, i32 0
+ %A = getelementptr %as2_ptr_struct, %as2_ptr_struct addrspace(1)* @global_as1_as2_ptr, i16 0, i32 0
store i32 addrspace(2)* %B, i32 addrspace(2)* addrspace(1)* %A
ret void
}
; CHECK-NEXT: store i8 addrspace(2)* %B, i8 addrspace(2)* addrspace(1)* getelementptr inbounds ([4 x i8 addrspace(2)*] addrspace(1)* @arst, i16 0, i16 2), align 4
; CHECK-NEXT: ret void
- %A = getelementptr [4 x i8 addrspace(2)*] addrspace(1)* @arst, i16 0, i16 2
+ %A = getelementptr [4 x i8 addrspace(2)*], [4 x i8 addrspace(2)*] addrspace(1)* @arst, i16 0, i16 2
store i8 addrspace(2)* %B, i8 addrspace(2)* addrspace(1)* %A
ret void
}
define i32* @test7(i32* %I, i64 %C, i64 %D) {
- %A = getelementptr i32* %I, i64 %C
- %B = getelementptr i32* %A, i64 %D
+ %A = getelementptr i32, i32* %I, i64 %C
+ %B = getelementptr i32, i32* %A, i64 %D
ret i32* %B
; CHECK-LABEL: @test7(
; CHECK: %A.sum = add i64 %C, %D
-; CHECK: getelementptr i32* %I, i64 %A.sum
+; CHECK: getelementptr i32, i32* %I, i64 %A.sum
}
define i8* @test8([10 x i32]* %X) {
;; Fold into the cast.
- %A = getelementptr [10 x i32]* %X, i64 0, i64 0
+ %A = getelementptr [10 x i32], [10 x i32]* %X, i64 0, i64 0
%B = bitcast i32* %A to i8*
ret i8* %B
; CHECK-LABEL: @test8(
}
define i32 @test9() {
- %A = getelementptr { i32, double }* null, i32 0, i32 1
+ %A = getelementptr { i32, double }, { i32, double }* null, i32 0, i32 1
%B = ptrtoint double* %A to i32
ret i32 %B
; CHECK-LABEL: @test9(
}
define i1 @test10({ i32, i32 }* %x, { i32, i32 }* %y) {
- %tmp.1 = getelementptr { i32, i32 }* %x, i32 0, i32 1
- %tmp.3 = getelementptr { i32, i32 }* %y, i32 0, i32 1
+ %tmp.1 = getelementptr { i32, i32 }, { i32, i32 }* %x, i32 0, i32 1
+ %tmp.3 = getelementptr { i32, i32 }, { i32, i32 }* %y, i32 0, i32 1
;; seteq x, y
%tmp.4 = icmp eq i32* %tmp.1, %tmp.3
ret i1 %tmp.4
}
define i1 @test11({ i32, i32 }* %X) {
- %P = getelementptr { i32, i32 }* %X, i32 0, i32 0
+ %P = getelementptr { i32, i32 }, { i32, i32 }* %X, i32 0, i32 0
%Q = icmp eq i32* %P, null
ret i1 %Q
; CHECK-LABEL: @test11(
; PR4748
define i32 @test12(%struct.A* %a) {
entry:
- %g3 = getelementptr %struct.A* %a, i32 0, i32 1
+ %g3 = getelementptr %struct.A, %struct.A* %a, i32 0, i32 1
store i32 10, i32* %g3, align 4
- %g4 = getelementptr %struct.A* %a, i32 0, i32 0
+ %g4 = getelementptr %struct.A, %struct.A* %a, i32 0, i32 0
%new_a = bitcast %struct.B* %g4 to %struct.A*
- %g5 = getelementptr %struct.A* %new_a, i32 0, i32 1
+ %g5 = getelementptr %struct.A, %struct.A* %new_a, i32 0, i32 1
%a_a = load i32* %g5, align 4
ret i32 %a_a
; CHECK-LABEL: @test12(
-; CHECK: getelementptr %struct.A* %a, i64 0, i32 1
+; CHECK: getelementptr %struct.A, %struct.A* %a, i64 0, i32 1
; CHECK-NEXT: store i32 10, i32* %g3
; CHECK-NEXT: ret i32 10
}
; PR2235
%S = type { i32, [ 100 x i32] }
define i1 @test13(i64 %X, %S* %P) {
- %A = getelementptr inbounds %S* %P, i32 0, i32 1, i64 %X
- %B = getelementptr inbounds %S* %P, i32 0, i32 0
+ %A = getelementptr inbounds %S, %S* %P, i32 0, i32 1, i64 %X
+ %B = getelementptr inbounds %S, %S* %P, i32 0, i32 0
%C = icmp eq i32* %A, %B
ret i1 %C
; CHECK-LABEL: @test13(
; CHECK-NEXT: shl nuw <2 x i64> %X, <i64 2, i64 2>
; CHECK-NEXT: add <2 x i64> %A.idx, <i64 4, i64 4>
; CHECK-NEXT: icmp eq <2 x i64> %A.offs, zeroinitializer
- %A = getelementptr inbounds <2 x %S*> %P, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 1>, <2 x i64> %X
- %B = getelementptr inbounds <2 x %S*> %P, <2 x i64> <i64 0, i64 0>, <2 x i32> <i32 0, i32 0>
+ %A = getelementptr inbounds %S, <2 x %S*> %P, <2 x i64> zeroinitializer, <2 x i32> <i32 1, i32 1>, <2 x i64> %X
+ %B = getelementptr inbounds %S, <2 x %S*> %P, <2 x i64> <i64 0, i64 0>, <2 x i32> <i32 0, i32 0>
%C = icmp eq <2 x i32*> %A, %B
ret <2 x i1> %C
}
; CHECK-LABEL: @test13_as1(
; CHECK-NEXT: %C = icmp eq i16 %X, -1
; CHECK-NEXT: ret i1 %C
- %A = getelementptr inbounds %S addrspace(1)* %P, i16 0, i32 1, i16 %X
- %B = getelementptr inbounds %S addrspace(1)* %P, i16 0, i32 0
+ %A = getelementptr inbounds %S, %S addrspace(1)* %P, i16 0, i32 1, i16 %X
+ %B = getelementptr inbounds %S, %S addrspace(1)* %P, i16 0, i32 0
%C = icmp eq i32 addrspace(1)* %A, %B
ret i1 %C
}
; CHECK-NEXT: add <2 x i16> %A.idx, <i16 4, i16 4>
; CHECK-NEXT: icmp eq <2 x i16> %A.offs, zeroinitializer
; CHECK-NEXT: ret <2 x i1>
- %A = getelementptr inbounds <2 x %S addrspace(1)*> %P, <2 x i16> <i16 0, i16 0>, <2 x i32> <i32 1, i32 1>, <2 x i16> %X
- %B = getelementptr inbounds <2 x %S addrspace(1)*> %P, <2 x i16> <i16 0, i16 0>, <2 x i32> <i32 0, i32 0>
+ %A = getelementptr inbounds %S, <2 x %S addrspace(1)*> %P, <2 x i16> <i16 0, i16 0>, <2 x i32> <i32 1, i32 1>, <2 x i16> %X
+ %B = getelementptr inbounds %S, <2 x %S addrspace(1)*> %P, <2 x i16> <i16 0, i16 0>, <2 x i32> <i32 0, i32 0>
%C = icmp eq <2 x i32 addrspace(1)*> %A, %B
ret <2 x i1> %C
}
define i1 @test13_i32(i32 %X, %S* %P) {
; CHECK-LABEL: @test13_i32(
; CHECK: %C = icmp eq i32 %X, -1
- %A = getelementptr inbounds %S* %P, i32 0, i32 1, i32 %X
- %B = getelementptr inbounds %S* %P, i32 0, i32 0
+ %A = getelementptr inbounds %S, %S* %P, i32 0, i32 1, i32 %X
+ %B = getelementptr inbounds %S, %S* %P, i32 0, i32 0
%C = icmp eq i32* %A, %B
ret i1 %C
}
define i1 @test13_i16(i16 %X, %S* %P) {
; CHECK-LABEL: @test13_i16(
; CHECK: %C = icmp eq i16 %X, -1
- %A = getelementptr inbounds %S* %P, i16 0, i32 1, i16 %X
- %B = getelementptr inbounds %S* %P, i16 0, i32 0
+ %A = getelementptr inbounds %S, %S* %P, i16 0, i32 1, i16 %X
+ %B = getelementptr inbounds %S, %S* %P, i16 0, i32 0
%C = icmp eq i32* %A, %B
ret i1 %C
}
define i1 @test13_i128(i128 %X, %S* %P) {
; CHECK-LABEL: @test13_i128(
; CHECK: %C = icmp eq i64 %1, -1
- %A = getelementptr inbounds %S* %P, i128 0, i32 1, i128 %X
- %B = getelementptr inbounds %S* %P, i128 0, i32 0
+ %A = getelementptr inbounds %S, %S* %P, i128 0, i32 1, i128 %X
+ %B = getelementptr inbounds %S, %S* %P, i128 0, i32 0
%C = icmp eq i32* %A, %B
ret i1 %C
}
@G = external global [3 x i8]
define i8* @test14(i32 %Idx) {
%idx = zext i32 %Idx to i64
- %tmp = getelementptr i8* getelementptr ([3 x i8]* @G, i32 0, i32 0), i64 %idx
+ %tmp = getelementptr i8, i8* getelementptr ([3 x i8]* @G, i32 0, i32 0), i64 %idx
ret i8* %tmp
; CHECK-LABEL: @test14(
-; CHECK: getelementptr [3 x i8]* @G, i64 0, i64 %idx
+; CHECK: getelementptr [3 x i8], [3 x i8]* @G, i64 0, i64 %idx
}
; Test folding of constantexpr geps into normal geps.
@Array = external global [40 x i32]
define i32 *@test15(i64 %X) {
- %A = getelementptr i32* getelementptr ([40 x i32]* @Array, i64 0, i64 0), i64 %X
+ %A = getelementptr i32, i32* getelementptr ([40 x i32]* @Array, i64 0, i64 0), i64 %X
ret i32* %A
; CHECK-LABEL: @test15(
-; CHECK: getelementptr [40 x i32]* @Array, i64 0, i64 %X
+; CHECK: getelementptr [40 x i32], [40 x i32]* @Array, i64 0, i64 %X
}
define i32* @test16(i32* %X, i32 %Idx) {
- %R = getelementptr i32* %X, i32 %Idx
+ %R = getelementptr i32, i32* %X, i32 %Idx
ret i32* %R
; CHECK-LABEL: @test16(
; CHECK: sext i32 %Idx to i64
define i1 @test17(i16* %P, i32 %I, i32 %J) {
- %X = getelementptr inbounds i16* %P, i32 %I
- %Y = getelementptr inbounds i16* %P, i32 %J
+ %X = getelementptr inbounds i16, i16* %P, i32 %I
+ %Y = getelementptr inbounds i16, i16* %P, i32 %J
%C = icmp ult i16* %X, %Y
ret i1 %C
; CHECK-LABEL: @test17(
}
define i1 @test18(i16* %P, i32 %I) {
- %X = getelementptr inbounds i16* %P, i32 %I
+ %X = getelementptr inbounds i16, i16* %P, i32 %I
%C = icmp ult i16* %X, %P
ret i1 %C
; CHECK-LABEL: @test18(
; CHECK-NEXT: %1 = trunc i32 %I to i16
; CHECK-NEXT: %C = icmp slt i16 %1, 0
; CHECK-NEXT: ret i1 %C
- %X = getelementptr inbounds i16 addrspace(1)* %P, i32 %I
+ %X = getelementptr inbounds i16, i16 addrspace(1)* %P, i32 %I
%C = icmp ult i16 addrspace(1)* %X, %P
ret i1 %C
}
; CHECK-NEXT: %1 = trunc i32 %I to i16
; CHECK-NEXT: %C = icmp slt i16 %1, 0
; CHECK-NEXT: ret i1 %C
- %X = getelementptr inbounds i16 addrspace(1)* %P, i32 %I
+ %X = getelementptr inbounds i16, i16 addrspace(1)* %P, i32 %I
%C = icmp ult i16 addrspace(1)* %X, %P
ret i1 %C
}
define i1 @test18_i16(i16* %P, i16 %I) {
; CHECK-LABEL: @test18_i16(
; CHECK: %C = icmp slt i16 %I, 0
- %X = getelementptr inbounds i16* %P, i16 %I
+ %X = getelementptr inbounds i16, i16* %P, i16 %I
%C = icmp ult i16* %X, %P
ret i1 %C
}
define i1 @test18_i64(i16* %P, i64 %I) {
; CHECK-LABEL: @test18_i64(
; CHECK: %C = icmp slt i64 %I, 0
- %X = getelementptr inbounds i16* %P, i64 %I
+ %X = getelementptr inbounds i16, i16* %P, i64 %I
%C = icmp ult i16* %X, %P
ret i1 %C
}
define i1 @test18_i128(i16* %P, i128 %I) {
; CHECK-LABEL: @test18_i128(
; CHECK: %C = icmp slt i64 %1, 0
- %X = getelementptr inbounds i16* %P, i128 %I
+ %X = getelementptr inbounds i16, i16* %P, i128 %I
%C = icmp ult i16* %X, %P
ret i1 %C
}
define i32 @test19(i32* %P, i32 %A, i32 %B) {
- %tmp.4 = getelementptr inbounds i32* %P, i32 %A
- %tmp.9 = getelementptr inbounds i32* %P, i32 %B
+ %tmp.4 = getelementptr inbounds i32, i32* %P, i32 %A
+ %tmp.9 = getelementptr inbounds i32, i32* %P, i32 %B
%tmp.10 = icmp eq i32* %tmp.4, %tmp.9
%tmp.11 = zext i1 %tmp.10 to i32
ret i32 %tmp.11
}
define i32 @test20(i32* %P, i32 %A, i32 %B) {
- %tmp.4 = getelementptr inbounds i32* %P, i32 %A
+ %tmp.4 = getelementptr inbounds i32, i32* %P, i32 %A
%tmp.6 = icmp eq i32* %tmp.4, %P
%tmp.7 = zext i1 %tmp.6 to i32
ret i32 %tmp.7
}
define i32 @test20_as1(i32 addrspace(1)* %P, i32 %A, i32 %B) {
- %tmp.4 = getelementptr inbounds i32 addrspace(1)* %P, i32 %A
+ %tmp.4 = getelementptr inbounds i32, i32 addrspace(1)* %P, i32 %A
%tmp.6 = icmp eq i32 addrspace(1)* %tmp.4, %P
%tmp.7 = zext i1 %tmp.6 to i32
ret i32 %tmp.7
define i32 @test21() {
%pbob1 = alloca %intstruct
- %pbob2 = getelementptr %intstruct* %pbob1
- %pbobel = getelementptr %intstruct* %pbob2, i64 0, i32 0
+ %pbob2 = getelementptr %intstruct, %intstruct* %pbob1
+ %pbobel = getelementptr %intstruct, %intstruct* %pbob2, i64 0, i32 0
%rval = load i32* %pbobel
ret i32 %rval
; CHECK-LABEL: @test21(
-; CHECK: getelementptr %intstruct* %pbob1, i64 0, i32 0
+; CHECK: getelementptr %intstruct, %intstruct* %pbob1, i64 0, i32 0
}
%X = type { [10 x i32], float }
define i1 @test23() {
- %A = getelementptr %X* null, i64 0, i32 0, i64 0 ; <i32*> [#uses=1]
+ %A = getelementptr %X, %X* null, i64 0, i32 0, i64 0 ; <i32*> [#uses=1]
%B = icmp ne i32* %A, null ; <i1> [#uses=1]
ret i1 %B
; CHECK-LABEL: @test23(
define void @test25() {
entry:
- %tmp = getelementptr { i64, i64, i64, i64 }* null, i32 0, i32 3 ; <i64*> [#uses=1]
+ %tmp = getelementptr { i64, i64, i64, i64 }, { i64, i64, i64, i64 }* null, i32 0, i32 3 ; <i64*> [#uses=1]
%tmp.upgrd.1 = load i64* %tmp ; <i64> [#uses=1]
%tmp8.ui = load i64* null ; <i64> [#uses=1]
%tmp8 = bitcast i64 %tmp8.ui to i64 ; <i64> [#uses=1]
; PR1637
define i1 @test26(i8* %arr) {
- %X = getelementptr i8* %arr, i32 1
- %Y = getelementptr i8* %arr, i32 1
+ %X = getelementptr i8, i8* %arr, i32 1
+ %Y = getelementptr i8, i8* %arr, i32 1
%test = icmp uge i8* %X, %Y
ret i1 %test
; CHECK-LABEL: @test26(
entry:
%from_addr = alloca %struct.siginfo_t*
%tmp344 = load %struct.siginfo_t** %from_addr, align 8
- %tmp345 = getelementptr %struct.siginfo_t* %tmp344, i32 0, i32 3
- %tmp346 = getelementptr { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }* %tmp345, i32 0, i32 0
+ %tmp345 = getelementptr %struct.siginfo_t, %struct.siginfo_t* %tmp344, i32 0, i32 3
+ %tmp346 = getelementptr { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }, { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }* %tmp345, i32 0, i32 0
%tmp346347 = bitcast { i32, i32, [0 x i8], %struct.sigval_t, i32 }* %tmp346 to { i32, i32, %struct.sigval_t }*
- %tmp348 = getelementptr { i32, i32, %struct.sigval_t }* %tmp346347, i32 0, i32 2
- %tmp349 = getelementptr %struct.sigval_t* %tmp348, i32 0, i32 0
+ %tmp348 = getelementptr { i32, i32, %struct.sigval_t }, { i32, i32, %struct.sigval_t }* %tmp346347, i32 0, i32 2
+ %tmp349 = getelementptr %struct.sigval_t, %struct.sigval_t* %tmp348, i32 0, i32 0
%tmp349350 = bitcast i8** %tmp349 to i32*
%tmp351 = load i32* %tmp349350, align 8
%tmp360 = call i32 asm sideeffect "...",
entry:
%orientations = alloca [1 x [1 x %struct.x]]
%tmp3 = call i32 @puts( i8* getelementptr ([6 x i8]* @.str, i32 0, i32 0) ) nounwind
- %tmp45 = getelementptr inbounds [1 x [1 x %struct.x]]* %orientations, i32 1, i32 0, i32 0
- %orientations62 = getelementptr [1 x [1 x %struct.x]]* %orientations, i32 0, i32 0, i32 0
+ %tmp45 = getelementptr inbounds [1 x [1 x %struct.x]], [1 x [1 x %struct.x]]* %orientations, i32 1, i32 0, i32 0
+ %orientations62 = getelementptr [1 x [1 x %struct.x]], [1 x [1 x %struct.x]]* %orientations, i32 0, i32 0, i32 0
br label %bb10
bb10:
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %bb10 ]
%tmp.0.reg2mem.0.rec = mul i32 %indvar, -1
%tmp12.rec = add i32 %tmp.0.reg2mem.0.rec, -1
- %tmp12 = getelementptr inbounds %struct.x* %tmp45, i32 %tmp12.rec
+ %tmp12 = getelementptr inbounds %struct.x, %struct.x* %tmp45, i32 %tmp12.rec
%tmp16 = call i32 (i8*, ...)* @printf( i8* getelementptr ([12 x i8]* @.str1, i32 0, i32 0), %struct.x* %tmp12 ) nounwind
%tmp84 = icmp eq %struct.x* %tmp12, %orientations62
%indvar.next = add i32 %indvar, 1
define i32 @test29(i8* %start, i32 %X) nounwind {
entry:
%tmp3 = load i64* null
- %add.ptr = getelementptr i8* %start, i64 %tmp3
+ %add.ptr = getelementptr i8, i8* %start, i64 %tmp3
%tmp158 = load i32* null
- %add.ptr159 = getelementptr %T* null, i32 %tmp158
- %add.ptr209 = getelementptr i8* %start, i64 0
- %add.ptr212 = getelementptr i8* %add.ptr209, i32 %X
+ %add.ptr159 = getelementptr %T, %T* null, i32 %tmp158
+ %add.ptr209 = getelementptr i8, i8* %start, i64 0
+ %add.ptr212 = getelementptr i8, i8* %add.ptr209, i32 %X
%cmp214 = icmp ugt i8* %add.ptr212, %add.ptr
br i1 %cmp214, label %if.then216, label %if.end363
%0 = alloca i32, i32 %n, align 4
%1 = bitcast i32* %0 to [0 x i32]*
call void @test30f(i32* %0) nounwind
- %2 = getelementptr [0 x i32]* %1, i32 0, i32 %m
+ %2 = getelementptr [0 x i32], [0 x i32]* %1, i32 0, i32 %m
%3 = load i32* %2, align 4
ret i32 %3
; CHECK-LABEL: @test30(
define i1 @test31(i32* %A) {
- %B = getelementptr i32* %A, i32 1
- %C = getelementptr i32* %A, i64 1
+ %B = getelementptr i32, i32* %A, i32 1
+ %C = getelementptr i32, i32* %A, i64 1
%V = icmp eq i32* %B, %C
ret i1 %V
; CHECK-LABEL: @test31(
; PR1345
define i8* @test32(i8* %v) {
%A = alloca [4 x i8*], align 16
- %B = getelementptr [4 x i8*]* %A, i32 0, i32 0
+ %B = getelementptr [4 x i8*], [4 x i8*]* %A, i32 0, i32 0
store i8* null, i8** %B
%C = bitcast [4 x i8*]* %A to { [16 x i8] }*
- %D = getelementptr { [16 x i8] }* %C, i32 0, i32 0, i32 8
+ %D = getelementptr { [16 x i8] }, { [16 x i8] }* %C, i32 0, i32 0, i32 8
%E = bitcast i8* %D to i8**
store i8* %v, i8** %E
- %F = getelementptr [4 x i8*]* %A, i32 0, i32 2
+ %F = getelementptr [4 x i8*], [4 x i8*]* %A, i32 0, i32 2
%G = load i8** %F
ret i8* %G
; CHECK-LABEL: @test32(
-; CHECK: %D = getelementptr [4 x i8*]* %A, i64 0, i64 1
-; CHECK: %F = getelementptr [4 x i8*]* %A, i64 0, i64 2
+; CHECK: %D = getelementptr [4 x i8*], [4 x i8*]* %A, i64 0, i64 1
+; CHECK: %F = getelementptr [4 x i8*], [4 x i8*]* %A, i64 0, i64 2
}
; PR3290
define i32* @test33(%struct.Key* %A) {
; CHECK-LABEL: @test33(
-; CHECK: getelementptr %struct.Key* %A, i64 0, i32 0, i32 1
+; CHECK: getelementptr %struct.Key, %struct.Key* %A, i64 0, i32 0, i32 1
%B = bitcast %struct.Key* %A to %struct.anon*
- %C = getelementptr %struct.anon* %B, i32 0, i32 2
+ %C = getelementptr %struct.anon, %struct.anon* %B, i32 0, i32 2
ret i32* %C
}
define i32 addrspace(1)* @test33_as1(%struct.Key addrspace(1)* %A) {
; CHECK-LABEL: @test33_as1(
-; CHECK: getelementptr %struct.Key addrspace(1)* %A, i16 0, i32 0, i32 1
+; CHECK: getelementptr %struct.Key, %struct.Key addrspace(1)* %A, i16 0, i32 0, i32 1
%B = bitcast %struct.Key addrspace(1)* %A to %struct.anon addrspace(1)*
- %C = getelementptr %struct.anon addrspace(1)* %B, i32 0, i32 2
+ %C = getelementptr %struct.anon, %struct.anon addrspace(1)* %B, i32 0, i32 2
ret i32 addrspace(1)* %C
}
define i32 addrspace(1)* @test33_array_as1([10 x i32] addrspace(1)* %A) {
; CHECK-LABEL: @test33_array_as1(
-; CHECK: getelementptr [10 x i32] addrspace(1)* %A, i16 0, i16 2
+; CHECK: getelementptr [10 x i32], [10 x i32] addrspace(1)* %A, i16 0, i16 2
%B = bitcast [10 x i32] addrspace(1)* %A to [5 x i32] addrspace(1)*
- %C = getelementptr [5 x i32] addrspace(1)* %B, i32 0, i32 2
+ %C = getelementptr [5 x i32], [5 x i32] addrspace(1)* %B, i32 0, i32 2
ret i32 addrspace(1)* %C
}
; Make sure the GEP indices use the right pointer sized integer
define i32 addrspace(1)* @test33_array_struct_as1([10 x %struct.Key] addrspace(1)* %A) {
; CHECK-LABEL: @test33_array_struct_as1(
-; CHECK: getelementptr [10 x %struct.Key] addrspace(1)* %A, i16 0, i16 1, i32 0, i32 0
+; CHECK: getelementptr [10 x %struct.Key], [10 x %struct.Key] addrspace(1)* %A, i16 0, i16 1, i32 0, i32 0
%B = bitcast [10 x %struct.Key] addrspace(1)* %A to [20 x i32] addrspace(1)*
- %C = getelementptr [20 x i32] addrspace(1)* %B, i32 0, i32 2
+ %C = getelementptr [20 x i32], [20 x i32] addrspace(1)* %B, i32 0, i32 2
ret i32 addrspace(1)* %C
}
define i32 addrspace(1)* @test33_addrspacecast(%struct.Key* %A) {
; CHECK-LABEL: @test33_addrspacecast(
-; CHECK: %C = getelementptr %struct.Key* %A, i64 0, i32 0, i32 1
+; CHECK: %C = getelementptr %struct.Key, %struct.Key* %A, i64 0, i32 0, i32 1
; CHECK-NEXT: addrspacecast i32* %C to i32 addrspace(1)*
; CHECK-NEXT: ret
%B = addrspacecast %struct.Key* %A to %struct.anon addrspace(1)*
- %C = getelementptr %struct.anon addrspace(1)* %B, i32 0, i32 2
+ %C = getelementptr %struct.anon, %struct.anon addrspace(1)* %B, i32 0, i32 2
ret i32 addrspace(1)* %C
}
entry:
%A = alloca %T2, align 8
%mrv_gep = bitcast %T2* %A to i64*
- %B = getelementptr %T2* %A, i64 0, i32 0
+ %B = getelementptr %T2, %T2* %A, i64 0, i32 0
store i64 %V, i64* %mrv_gep
%C = load i8** %B, align 8
; Test index promotion
define i32* @test38(i32* %I, i32 %n) {
- %A = getelementptr i32* %I, i32 %n
+ %A = getelementptr i32, i32* %I, i32 %n
ret i32* %A
; CHECK-LABEL: @test38(
; CHECK: = sext i32 %n to i64
-; CHECK: %A = getelementptr i32* %I, i64 %
+; CHECK: %A = getelementptr i32, i32* %I, i64 %
}
; Test that we don't duplicate work when the second gep is a "bitcast".
declare void @pr10322_f3(i8**)
define void @pr10322_f1(%pr10322_t* %foo) {
entry:
- %arrayidx8 = getelementptr inbounds %pr10322_t* %foo, i64 2
+ %arrayidx8 = getelementptr inbounds %pr10322_t, %pr10322_t* %foo, i64 2
call void @pr10322_f2(%pr10322_t* %arrayidx8) nounwind
- %tmp2 = getelementptr inbounds %pr10322_t* %arrayidx8, i64 0, i32 0
+ %tmp2 = getelementptr inbounds %pr10322_t, %pr10322_t* %arrayidx8, i64 0, i32 0
call void @pr10322_f3(i8** %tmp2) nounwind
ret void
; CHECK-LABEL: @pr10322_f1(
-; CHECK: %tmp2 = getelementptr inbounds %pr10322_t* %arrayidx8, i64 0, i32 0
+; CHECK: %tmp2 = getelementptr inbounds %pr10322_t, %pr10322_t* %arrayidx8, i64 0, i32 0
}
; Test that we combine the last two geps in this sequence, before we
%three_gep_t2 = type {%three_gep_t}
define void @three_gep_f(%three_gep_t2* %x) {
- %gep1 = getelementptr %three_gep_t2* %x, i64 2
+ %gep1 = getelementptr %three_gep_t2, %three_gep_t2* %x, i64 2
call void @three_gep_h(%three_gep_t2* %gep1)
- %gep2 = getelementptr %three_gep_t2* %gep1, i64 0, i32 0
- %gep3 = getelementptr %three_gep_t* %gep2, i64 0, i32 0
+ %gep2 = getelementptr %three_gep_t2, %three_gep_t2* %gep1, i64 0, i32 0
+ %gep3 = getelementptr %three_gep_t, %three_gep_t* %gep2, i64 0, i32 0
call void @three_gep_g(i32* %gep3)
; CHECK-LABEL: @three_gep_f(
-; CHECK: %gep3 = getelementptr %three_gep_t2* %gep1, i64 0, i32 0, i32 0
+; CHECK: %gep3 = getelementptr %three_gep_t2, %three_gep_t2* %gep1, i64 0, i32 0, i32 0
ret void
}
%struct.zot = type { i64, i8 }
define void @test39(%struct.ham* %arg, i8 %arg1) nounwind {
- %tmp = getelementptr inbounds %struct.ham* %arg, i64 0, i32 2
+ %tmp = getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 2
%tmp2 = load %struct.zot** %tmp, align 8
%tmp3 = bitcast %struct.zot* %tmp2 to i8*
- %tmp4 = getelementptr inbounds i8* %tmp3, i64 -8
+ %tmp4 = getelementptr inbounds i8, i8* %tmp3, i64 -8
store i8 %arg1, i8* %tmp4, align 8
ret void
; CHECK-LABEL: @test39(
-; CHECK: getelementptr inbounds %struct.ham* %arg, i64 0, i32 2
-; CHECK: getelementptr inbounds i8* %{{.+}}, i64 -8
+; CHECK: getelementptr inbounds %struct.ham, %struct.ham* %arg, i64 0, i32 2
+; CHECK: getelementptr inbounds i8, i8* %{{.+}}, i64 -8
}
define i1 @pr16483([1 x i8]* %a, [1 x i8]* %b) {
- %c = getelementptr [1 x i8]* %a, i32 0, i32 0
- %d = getelementptr [1 x i8]* %b, i32 0, i32 0
+ %c = getelementptr [1 x i8], [1 x i8]* %a, i32 0, i32 0
+ %d = getelementptr [1 x i8], [1 x i8]* %b, i32 0, i32 0
%cmp = icmp ult i8* %c, %d
ret i1 %cmp
define i8 @test_gep_bitcast_as1(i32 addrspace(1)* %arr, i16 %N) {
; CHECK-LABEL: @test_gep_bitcast_as1(
-; CHECK: getelementptr i32 addrspace(1)* %arr, i16 %N
+; CHECK: getelementptr i32, i32 addrspace(1)* %arr, i16 %N
; CHECK: bitcast
%cast = bitcast i32 addrspace(1)* %arr to i8 addrspace(1)*
%V = mul i16 %N, 4
- %t = getelementptr i8 addrspace(1)* %cast, i16 %V
+ %t = getelementptr i8, i8 addrspace(1)* %cast, i16 %V
%x = load i8 addrspace(1)* %t
ret i8 %x
}
; The element size of the array matches the element size of the pointer
define i64 @test_gep_bitcast_array_same_size_element([100 x double]* %arr, i64 %N) {
; CHECK-LABEL: @test_gep_bitcast_array_same_size_element(
-; CHECK: getelementptr [100 x double]* %arr, i64 0, i64 %V
+; CHECK: getelementptr [100 x double], [100 x double]* %arr, i64 0, i64 %V
; CHECK: bitcast
%cast = bitcast [100 x double]* %arr to i64*
%V = mul i64 %N, 8
- %t = getelementptr i64* %cast, i64 %V
+ %t = getelementptr i64, i64* %cast, i64 %V
%x = load i64* %t
ret i64 %x
}
; gep should be done in the original address space.
define i64 @test_gep_bitcast_array_same_size_element_addrspacecast([100 x double]* %arr, i64 %N) {
; CHECK-LABEL: @test_gep_bitcast_array_same_size_element_addrspacecast(
-; CHECK: getelementptr [100 x double]* %arr, i64 0, i64 %V
+; CHECK: getelementptr [100 x double], [100 x double]* %arr, i64 0, i64 %V
; CHECK-NEXT: bitcast double*
; CHECK-NEXT: %t = addrspacecast i64*
; CHECK: load i64 addrspace(3)* %t
%cast = addrspacecast [100 x double]* %arr to i64 addrspace(3)*
%V = mul i64 %N, 8
- %t = getelementptr i64 addrspace(3)* %cast, i64 %V
+ %t = getelementptr i64, i64 addrspace(3)* %cast, i64 %V
%x = load i64 addrspace(3)* %t
ret i64 %x
}
; The element size of the array is different the element size of the pointer
define i8 @test_gep_bitcast_array_different_size_element([100 x double]* %arr, i64 %N) {
; CHECK-LABEL: @test_gep_bitcast_array_different_size_element(
-; CHECK: getelementptr [100 x double]* %arr, i64 0, i64 %N
+; CHECK: getelementptr [100 x double], [100 x double]* %arr, i64 0, i64 %N
; CHECK: bitcast
%cast = bitcast [100 x double]* %arr to i8*
%V = mul i64 %N, 8
- %t = getelementptr i8* %cast, i64 %V
+ %t = getelementptr i8, i8* %cast, i64 %V
%x = load i8* %t
ret i8 %x
}
define i64 @test_gep_bitcast_array_same_size_element_as1([100 x double] addrspace(1)* %arr, i16 %N) {
; CHECK-LABEL: @test_gep_bitcast_array_same_size_element_as1(
-; CHECK: getelementptr [100 x double] addrspace(1)* %arr, i16 0, i16 %V
+; CHECK: getelementptr [100 x double], [100 x double] addrspace(1)* %arr, i16 0, i16 %V
; CHECK: bitcast
%cast = bitcast [100 x double] addrspace(1)* %arr to i64 addrspace(1)*
%V = mul i16 %N, 8
- %t = getelementptr i64 addrspace(1)* %cast, i16 %V
+ %t = getelementptr i64, i64 addrspace(1)* %cast, i16 %V
%x = load i64 addrspace(1)* %t
ret i64 %x
}
define i8 @test_gep_bitcast_array_different_size_element_as1([100 x double] addrspace(1)* %arr, i16 %N) {
; CHECK-LABEL: @test_gep_bitcast_array_different_size_element_as1(
-; CHECK: getelementptr [100 x double] addrspace(1)* %arr, i16 0, i16 %N
+; CHECK: getelementptr [100 x double], [100 x double] addrspace(1)* %arr, i16 0, i16 %N
; CHECK: bitcast
%cast = bitcast [100 x double] addrspace(1)* %arr to i8 addrspace(1)*
%V = mul i16 %N, 8
- %t = getelementptr i8 addrspace(1)* %cast, i16 %V
+ %t = getelementptr i8, i8 addrspace(1)* %cast, i16 %V
%x = load i8 addrspace(1)* %t
ret i8 %x
}
define i64 @test40() {
%array = alloca [3 x i32], align 4
- %gep = getelementptr inbounds [3 x i32]* %array, i64 0, i64 2
+ %gep = getelementptr inbounds [3 x i32], [3 x i32]* %array, i64 0, i64 2
%gepi8 = bitcast i32* %gep to i8*
%p = ptrtoint [3 x i32]* %array to i64
%np = sub i64 0, %p
- %gep2 = getelementptr i8* %gepi8, i64 %np
+ %gep2 = getelementptr i8, i8* %gepi8, i64 %np
%ret = ptrtoint i8* %gep2 to i64
ret i64 %ret
}
define i16 @test41([3 x i32] addrspace(1)* %array) {
- %gep = getelementptr inbounds [3 x i32] addrspace(1)* %array, i16 0, i16 2
+ %gep = getelementptr inbounds [3 x i32], [3 x i32] addrspace(1)* %array, i16 0, i16 2
%gepi8 = bitcast i32 addrspace(1)* %gep to i8 addrspace(1)*
%p = ptrtoint [3 x i32] addrspace(1)* %array to i16
%np = sub i16 0, %p
- %gep2 = getelementptr i8 addrspace(1)* %gepi8, i16 %np
+ %gep2 = getelementptr i8, i8 addrspace(1)* %gepi8, i16 %np
%ret = ptrtoint i8 addrspace(1)* %gep2 to i16
ret i16 %ret
define i8* @test42(i8* %c1, i8* %c2) {
%ptrtoint = ptrtoint i8* %c1 to i64
%sub = sub i64 0, %ptrtoint
- %gep = getelementptr inbounds i8* %c2, i64 %sub
+ %gep = getelementptr inbounds i8, i8* %c2, i64 %sub
ret i8* %gep
; CHECK-LABEL: @test42(
%ptrtoint = ptrtoint i16* %c1 to i64
%sub = sub i64 0, %ptrtoint
%shr = ashr i64 %sub, 1
- %gep = getelementptr inbounds i16* %c2, i64 %shr
+ %gep = getelementptr inbounds i16, i16* %c2, i64 %shr
ret i16* %gep
; CHECK-LABEL: @test43(
%ptrtoint = ptrtoint %struct.C* %c1 to i64
%sub = sub i64 0, %ptrtoint
%shr = sdiv i64 %sub, 7
- %gep = getelementptr inbounds %struct.C* %c2, i64 %shr
+ %gep = getelementptr inbounds %struct.C, %struct.C* %c2, i64 %shr
ret %struct.C* %gep
; CHECK-LABEL: @test44(
%ptrtoint2 = ptrtoint %struct.C** %c2 to i64
%sub = sub i64 %ptrtoint2, %ptrtoint1 ; C2 - C1
%shr = sdiv i64 %sub, 7
- %gep = getelementptr inbounds %struct.C* %c1, i64 %shr ; C1 + (C2 - C1)
+ %gep = getelementptr inbounds %struct.C, %struct.C* %c1, i64 %shr ; C1 + (C2 - C1)
ret %struct.C* %gep
; CHECK-LABEL: @test45(
%ptrtoint = ptrtoint %struct.C* %c1 to i64
%sub = sub i64 0, %ptrtoint
%sdiv = sdiv i64 %sub, %N
- %gep = getelementptr inbounds %struct.C* %c2, i64 %sdiv
+ %gep = getelementptr inbounds %struct.C, %struct.C* %c2, i64 %sdiv
ret %struct.C* %gep
; CHECK-LABEL: @test46(
; CHECK-NEXT: [[PTRTOINT:%.*]] = ptrtoint %struct.C* %c1 to i64
; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[PTRTOINT]]
; CHECK-NEXT: [[SDIV:%.*]] = sdiv i64 [[SUB]], %N
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds %struct.C* %c2, i64 %sdiv
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds %struct.C, %struct.C* %c2, i64 %sdiv
; CHECK-NEXT: ret %struct.C* [[GEP]]
}
; CHECK-LABEL: @ascast_0_gep(
; CHECK-NOT: getelementptr
; CHECK: ret
- %gep = getelementptr i32* %p, i32 0
+ %gep = getelementptr i32, i32* %p, i32 0
%x = addrspacecast i32* %gep to i32 addrspace(1)*
ret i32 addrspace(1)* %x
}
; CHECK-NEXT: getelementptr [128 x i32]
; CHECK-NEXT: addrspacecast i32*
; CHECK-NEXT: ret i32 addrspace(1)*
- %gep = getelementptr [128 x i32]* %p, i32 0, i32 0
+ %gep = getelementptr [128 x i32], [128 x i32]* %p, i32 0, i32 0
%x = addrspacecast i32* %gep to i32 addrspace(1)*
ret i32 addrspace(1)* %x
}
; CHECK: %cmp = icmp eq i64 %i, 1000
; CHECK: ret i1 %cmp
define i1 @test24(i64 %i) {
- %p1 = getelementptr inbounds i32* getelementptr inbounds ([1000 x i32]* @X, i64 0, i64 0), i64 %i
+ %p1 = getelementptr inbounds i32, i32* getelementptr inbounds ([1000 x i32]* @X, i64 0, i64 0), i64 %i
%cmp = icmp eq i32* %p1, getelementptr inbounds ([1000 x i32]* @X, i64 1, i64 0)
ret i1 %cmp
}
; CHECK: %cmp = icmp eq i16 %1, 1000
; CHECK: ret i1 %cmp
define i1 @test24_as1(i64 %i) {
- %p1 = getelementptr inbounds i32 addrspace(1)* getelementptr inbounds ([1000 x i32] addrspace(1)* @X_as1, i64 0, i64 0), i64 %i
+ %p1 = getelementptr inbounds i32, i32 addrspace(1)* getelementptr inbounds ([1000 x i32] addrspace(1)* @X_as1, i64 0, i64 0), i64 %i
%cmp = icmp eq i32 addrspace(1)* %p1, getelementptr inbounds ([1000 x i32] addrspace(1)* @X_as1, i64 1, i64 0)
ret i1 %cmp
}
define i1 @test59(i8* %foo) {
%bit = bitcast i8* %foo to i32*
- %gep1 = getelementptr inbounds i32* %bit, i64 2
- %gep2 = getelementptr inbounds i8* %foo, i64 10
+ %gep1 = getelementptr inbounds i32, i32* %bit, i64 2
+ %gep2 = getelementptr inbounds i8, i8* %foo, i64 10
%cast1 = bitcast i32* %gep1 to i8*
%cmp = icmp ult i8* %cast1, %gep2
%use = ptrtoint i8* %cast1 to i64
define i1 @test59_as1(i8 addrspace(1)* %foo) {
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
- %gep1 = getelementptr inbounds i32 addrspace(1)* %bit, i64 2
- %gep2 = getelementptr inbounds i8 addrspace(1)* %foo, i64 10
+ %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 2
+ %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 10
%cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
%cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
%use = ptrtoint i8 addrspace(1)* %cast1 to i64
%call = call i32 @test58_d(i64 %use) nounwind
ret i1 %cmp
; CHECK: @test59_as1
-; CHECK: %[[GEP:.+]] = getelementptr inbounds i8 addrspace(1)* %foo, i16 8
+; CHECK: %[[GEP:.+]] = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 8
; CHECK: ptrtoint i8 addrspace(1)* %[[GEP]] to i16
; CHECK: ret i1 true
}
define i1 @test60(i8* %foo, i64 %i, i64 %j) {
%bit = bitcast i8* %foo to i32*
- %gep1 = getelementptr inbounds i32* %bit, i64 %i
- %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
+ %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
%cast1 = bitcast i32* %gep1 to i8*
%cmp = icmp ult i8* %cast1, %gep2
ret i1 %cmp
define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
- %gep1 = getelementptr inbounds i32 addrspace(1)* %bit, i64 %i
- %gep2 = getelementptr inbounds i8 addrspace(1)* %foo, i64 %j
+ %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 %i
+ %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 %j
%cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
%cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
ret i1 %cmp
; bitcast. This uses the same sized addrspace.
define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
%bit = addrspacecast i8* %foo to i32 addrspace(3)*
- %gep1 = getelementptr inbounds i32 addrspace(3)* %bit, i64 %i
- %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %gep1 = getelementptr inbounds i32, i32 addrspace(3)* %bit, i64 %i
+ %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
%cast1 = addrspacecast i32 addrspace(3)* %gep1 to i8*
%cmp = icmp ult i8* %cast1, %gep2
ret i1 %cmp
define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
%bit = addrspacecast i8* %foo to i32 addrspace(1)*
- %gep1 = getelementptr inbounds i32 addrspace(1)* %bit, i16 %i
- %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
+ %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
%cast1 = addrspacecast i32 addrspace(1)* %gep1 to i8*
%cmp = icmp ult i8* %cast1, %gep2
ret i1 %cmp
define i1 @test60_addrspacecast_larger(i8 addrspace(1)* %foo, i32 %i, i16 %j) {
%bit = addrspacecast i8 addrspace(1)* %foo to i32 addrspace(2)*
- %gep1 = getelementptr inbounds i32 addrspace(2)* %bit, i32 %i
- %gep2 = getelementptr inbounds i8 addrspace(1)* %foo, i16 %j
+ %gep1 = getelementptr inbounds i32, i32 addrspace(2)* %bit, i32 %i
+ %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
%cast1 = addrspacecast i32 addrspace(2)* %gep1 to i8 addrspace(1)*
%cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
ret i1 %cmp
define i1 @test61(i8* %foo, i64 %i, i64 %j) {
%bit = bitcast i8* %foo to i32*
- %gep1 = getelementptr i32* %bit, i64 %i
- %gep2 = getelementptr i8* %foo, i64 %j
+ %gep1 = getelementptr i32, i32* %bit, i64 %i
+ %gep2 = getelementptr i8, i8* %foo, i64 %j
%cast1 = bitcast i32* %gep1 to i8*
%cmp = icmp ult i8* %cast1, %gep2
ret i1 %cmp
define i1 @test61_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
- %gep1 = getelementptr i32 addrspace(1)* %bit, i16 %i
- %gep2 = getelementptr i8 addrspace(1)* %foo, i16 %j
+ %gep1 = getelementptr i32, i32 addrspace(1)* %bit, i16 %i
+ %gep2 = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
%cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
%cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
ret i1 %cmp
}
define i1 @test62(i8* %a) {
- %arrayidx1 = getelementptr inbounds i8* %a, i64 1
- %arrayidx2 = getelementptr inbounds i8* %a, i64 10
+ %arrayidx1 = getelementptr inbounds i8, i8* %a, i64 1
+ %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 10
%cmp = icmp slt i8* %arrayidx1, %arrayidx2
ret i1 %cmp
; CHECK-LABEL: @test62(
define i1 @test62_as1(i8 addrspace(1)* %a) {
; CHECK-LABEL: @test62_as1(
; CHECK-NEXT: ret i1 true
- %arrayidx1 = getelementptr inbounds i8 addrspace(1)* %a, i64 1
- %arrayidx2 = getelementptr inbounds i8 addrspace(1)* %a, i64 10
+ %arrayidx1 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 1
+ %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 10
%cmp = icmp slt i8 addrspace(1)* %arrayidx1, %arrayidx2
ret i1 %cmp
}
; CHECK-LABEL: define i1 @test71(
; CHECK-NEXT: ret i1 false
define i1 @test71(i8* %x) {
- %a = getelementptr i8* %x, i64 8
- %b = getelementptr inbounds i8* %x, i64 8
+ %a = getelementptr i8, i8* %x, i64 8
+ %b = getelementptr inbounds i8, i8* %x, i64 8
%c = icmp ugt i8* %a, %b
ret i1 %c
}
define i1 @test71_as1(i8 addrspace(1)* %x) {
; CHECK-LABEL: @test71_as1(
; CHECK-NEXT: ret i1 false
- %a = getelementptr i8 addrspace(1)* %x, i64 8
- %b = getelementptr inbounds i8 addrspace(1)* %x, i64 8
+ %a = getelementptr i8, i8 addrspace(1)* %x, i64 8
+ %b = getelementptr inbounds i8, i8 addrspace(1)* %x, i64 8
%c = icmp ugt i8 addrspace(1)* %a, %b
ret i1 %c
}
define i1 @test1(i32 %X) {
- %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
%Q = load i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
}
define i1 @test1_noinbounds(i32 %X) {
- %P = getelementptr [10 x i16]* @G16, i32 0, i32 %X
+ %P = getelementptr [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
%Q = load i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
; NODL-LABEL: @test1_noinbounds(
-; NODL-NEXT: %P = getelementptr [10 x i16]* @G16, i32 0, i32 %X
+; NODL-NEXT: %P = getelementptr [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
; P32-LABEL: @test1_noinbounds(
; P32-NEXT: %R = icmp eq i32 %X, 9
}
define i1 @test1_noinbounds_i64(i64 %X) {
- %P = getelementptr [10 x i16]* @G16, i64 0, i64 %X
+ %P = getelementptr [10 x i16], [10 x i16]* @G16, i64 0, i64 %X
%Q = load i16* %P
%R = icmp eq i16 %Q, 0
ret i1 %R
; NODL-LABEL: @test1_noinbounds_i64(
-; NODL-NEXT: %P = getelementptr [10 x i16]* @G16, i64 0, i64 %X
+; NODL-NEXT: %P = getelementptr [10 x i16], [10 x i16]* @G16, i64 0, i64 %X
; P32-LABEL: @test1_noinbounds_i64(
; P32: %R = icmp eq i32 %1, 9
}
define i1 @test1_noinbounds_as1(i32 %x) {
- %p = getelementptr [10 x i16] addrspace(1)* @G16_as1, i16 0, i32 %x
+ %p = getelementptr [10 x i16], [10 x i16] addrspace(1)* @G16_as1, i16 0, i32 %x
%q = load i16 addrspace(1)* %p
%r = icmp eq i16 %q, 0
ret i1 %r
}
define i1 @test2(i32 %X) {
- %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
%Q = load i16* %P
%R = icmp slt i16 %Q, 85
ret i1 %R
}
define i1 @test3(i32 %X) {
- %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
%Q = load double* %P
%R = fcmp oeq double %Q, 1.0
ret i1 %R
}
define i1 @test4(i32 %X) {
- %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
%Q = load i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
}
define i1 @test4_i16(i16 %X) {
- %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i16 %X
+ %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i16 %X
%Q = load i16* %P
%R = icmp sle i16 %Q, 73
ret i1 %R
}
define i1 @test5(i32 %X) {
- %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
%Q = load i16* %P
%R = icmp eq i16 %Q, 69
ret i1 %R
}
define i1 @test6(i32 %X) {
- %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
%Q = load double* %P
%R = fcmp ogt double %Q, 0.0
ret i1 %R
}
define i1 @test7(i32 %X) {
- %P = getelementptr inbounds [6 x double]* @GD, i32 0, i32 %X
+ %P = getelementptr inbounds [6 x double], [6 x double]* @GD, i32 0, i32 %X
%Q = load double* %P
%R = fcmp olt double %Q, 0.0
ret i1 %R
}
define i1 @test8(i32 %X) {
- %P = getelementptr inbounds [10 x i16]* @G16, i32 0, i32 %X
+ %P = getelementptr inbounds [10 x i16], [10 x i16]* @G16, i32 0, i32 %X
%Q = load i16* %P
%R = and i16 %Q, 3
%S = icmp eq i16 %R, 0
]
define i1 @test9(i32 %X) {
- %P = getelementptr inbounds [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
+ %P = getelementptr inbounds [4 x { i32, i32 } ], [4 x { i32, i32 } ]* @GA, i32 0, i32 %X, i32 1
%Q = load i32* %P
%R = icmp eq i32 %Q, 1
ret i1 %R
define i1 @test10_struct(i32 %x) {
; NODL-LABEL: @test10_struct(
-; NODL: getelementptr inbounds %Foo* @GS, i32 %x, i32 0
+; NODL: getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
; P32-LABEL: @test10_struct(
; P32: ret i1 false
- %p = getelementptr inbounds %Foo* @GS, i32 %x, i32 0
+ %p = getelementptr inbounds %Foo, %Foo* @GS, i32 %x, i32 0
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
define i1 @test10_struct_noinbounds(i32 %x) {
; NODL-LABEL: @test10_struct_noinbounds(
-; NODL: getelementptr %Foo* @GS, i32 %x, i32 0
+; NODL: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
; P32-LABEL: @test10_struct_noinbounds(
-; P32: getelementptr %Foo* @GS, i32 %x, i32 0
- %p = getelementptr %Foo* @GS, i32 %x, i32 0
+; P32: getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
+ %p = getelementptr %Foo, %Foo* @GS, i32 %x, i32 0
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
; Index < ptr size
define i1 @test10_struct_i16(i16 %x){
; NODL-LABEL: @test10_struct_i16(
-; NODL: getelementptr inbounds %Foo* @GS, i16 %x, i32 0
+; NODL: getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
; P32-LABEL: @test10_struct_i16(
; P32: ret i1 false
- %p = getelementptr inbounds %Foo* @GS, i16 %x, i32 0
+ %p = getelementptr inbounds %Foo, %Foo* @GS, i16 %x, i32 0
%q = load i32* %p
%r = icmp eq i32 %q, 0
ret i1 %r
; Index > ptr size
define i1 @test10_struct_i64(i64 %x){
; NODL-LABEL: @test10_struct_i64(
-; NODL: getelementptr inbounds %Foo* @GS, i64 %x, i32 0
+; NODL: getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
; P32-LABEL: @test10_struct_i64(
; P32: ret i1 false
- %p = getelementptr inbounds %Foo* @GS, i64 %x, i32 0
+ %p = getelementptr inbounds %Foo, %Foo* @GS, i64 %x, i32 0
%q = load i32* %p
%r = icmp eq i32 %q, 0
ret i1 %r
define i1 @test10_struct_noinbounds_i16(i16 %x) {
; NODL-LABEL: @test10_struct_noinbounds_i16(
-; NODL: getelementptr %Foo* @GS, i16 %x, i32 0
+; NODL: getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
; P32-LABEL: @test10_struct_noinbounds_i16(
; P32: %1 = sext i16 %x to i32
-; P32: getelementptr %Foo* @GS, i32 %1, i32 0
- %p = getelementptr %Foo* @GS, i16 %x, i32 0
+; P32: getelementptr %Foo, %Foo* @GS, i32 %1, i32 0
+ %p = getelementptr %Foo, %Foo* @GS, i16 %x, i32 0
%q = load i32* %p
%r = icmp eq i32 %q, 0
ret i1 %r
; P32-LABEL: @test10_struct_arr(
; P32-NEXT: %r = icmp ne i32 %x, 1
; P32-NEXT: ret i1 %r
- %p = getelementptr inbounds [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
+ %p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
define i1 @test10_struct_arr_noinbounds(i32 %x) {
; NODL-LABEL: @test10_struct_arr_noinbounds(
-; NODL-NEXT %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
+; NODL-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
; P32-LABEL: @test10_struct_arr_noinbounds(
-; P32-NEXT %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
- %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
+; P32-NEXT %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
+ %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i32 %x, i32 2
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
; P32-LABEL: @test10_struct_arr_i16(
; P32-NEXT: %r = icmp ne i16 %x, 1
; P32-NEXT: ret i1 %r
- %p = getelementptr inbounds [4 x %Foo]* @GStructArr, i16 0, i16 %x, i32 2
+ %p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i16 0, i16 %x, i32 2
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
; P32-NEXT: trunc i64 %x to i32
; P32-NEXT: %r = icmp ne i32 %1, 1
; P32-NEXT: ret i1 %r
- %p = getelementptr inbounds [4 x %Foo]* @GStructArr, i64 0, i64 %x, i32 2
+ %p = getelementptr inbounds [4 x %Foo], [4 x %Foo]* @GStructArr, i64 0, i64 %x, i32 2
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
define i1 @test10_struct_arr_noinbounds_i16(i16 %x) {
; NODL-LABEL: @test10_struct_arr_noinbounds_i16(
-; NODL-NEXT: %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
+; NODL-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
; P32-LABEL: @test10_struct_arr_noinbounds_i16(
; P32-NEXT: %r = icmp ne i16 %x, 1
- %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
+ %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i16 %x, i32 2
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
define i1 @test10_struct_arr_noinbounds_i64(i64 %x) {
; FIXME: Should be no trunc?
; NODL-LABEL: @test10_struct_arr_noinbounds_i64(
-; NODL-NEXT: %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
+; NODL-NEXT: %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
; P32-LABEL: @test10_struct_arr_noinbounds_i64(
; P32: %r = icmp ne i32 %1, 1
; P32-NEXT: ret i1 %r
- %p = getelementptr [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
+ %p = getelementptr [4 x %Foo], [4 x %Foo]* @GStructArr, i32 0, i64 %x, i32 2
%q = load i32* %p
%r = icmp eq i32 %q, 9
ret i1 %r
; CHECK-LABEL: @test2(
; CHECK-NOT: load
define float @test2() {
- %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
%B = load float* %A ; <float> [#uses=1]
ret float %B
}
; CHECK-LABEL: @test3(
; CHECK-NOT: load
define i32 @test3() {
- %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
%B = load i32* %A ; <i32> [#uses=1]
ret i32 %B
}
; CHECK-LABEL: @test4(
; CHECK-NOT: load
define i32 @test4() {
- %A = getelementptr [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Z, i64 0, i64 1, i32 0 ; <i32*> [#uses=1]
%B = load i32* %A ; <i32> [#uses=1]
ret i32 %B
}
; CHECK-LABEL: @test7(
; CHECK-NOT: load
define i32 @test7(i32 %X) {
- %V = getelementptr i32* null, i32 %X ; <i32*> [#uses=1]
+ %V = getelementptr i32, i32* null, i32 %X ; <i32*> [#uses=1]
%R = load i32* %V ; <i32> [#uses=1]
ret i32 %R
}
; CHECK-LABEL: @test11(
; CHECK-NOT: load
define double @test11(double* %p) {
- %t0 = getelementptr double* %p, i32 1
+ %t0 = getelementptr double, double* %p, i32 1
store double 2.0, double* %t0
- %t1 = getelementptr double* %p, i32 1
+ %t1 = getelementptr double, double* %p, i32 1
%x = load double* %t1
ret double %x
}
; Instcombine should be able to do trivial CSE of loads.
define i32 @test1(i32* %p) {
- %t0 = getelementptr i32* %p, i32 1
+ %t0 = getelementptr i32, i32* %p, i32 1
%y = load i32* %t0
- %t1 = getelementptr i32* %p, i32 1
+ %t1 = getelementptr i32, i32* %p, i32 1
%x = load i32* %t1
%a = sub i32 %y, %x
ret i32 %a
; CHECK-LABEL: @static_hem(
; CHECK: , align 16
define <2 x i64> @static_hem() {
- %t = getelementptr <2 x i64>* @x, i32 7
+ %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
%tmp1 = load <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
; CHECK-LABEL: @static_hem_addrspacecast(
; CHECK: , align 16
define <2 x i64> @static_hem_addrspacecast() {
- %t = getelementptr <2 x i64>* @x, i32 7
+ %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
%t.asc = addrspacecast <2 x i64>* %t to <2 x i64> addrspace(1)*
%tmp1 = load <2 x i64> addrspace(1)* %t.asc, align 1
ret <2 x i64> %tmp1
; CHECK-LABEL: @static_hem_addrspacecast_smaller_ptr(
; CHECK: , align 16
define <2 x i64> @static_hem_addrspacecast_smaller_ptr() {
- %t = getelementptr <2 x i64>* @x, i32 7
+ %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
%t.asc = addrspacecast <2 x i64>* %t to <2 x i64> addrspace(2)*
%tmp1 = load <2 x i64> addrspace(2)* %t.asc, align 1
ret <2 x i64> %tmp1
; CHECK-LABEL: @static_hem_addrspacecast_larger_ptr(
; CHECK: , align 16
define <2 x i64> @static_hem_addrspacecast_larger_ptr() {
- %t = getelementptr <2 x i64> addrspace(2)* @x.as2, i32 7
+ %t = getelementptr <2 x i64>, <2 x i64> addrspace(2)* @x.as2, i32 7
%t.asc = addrspacecast <2 x i64> addrspace(2)* %t to <2 x i64> addrspace(1)*
%tmp1 = load <2 x i64> addrspace(1)* %t.asc, align 1
ret <2 x i64> %tmp1
; CHECK-LABEL: @hem(
; CHECK: , align 16
define <2 x i64> @hem(i32 %i) {
- %t = getelementptr <2 x i64>* @x, i32 %i
+ %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 %i
%tmp1 = load <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
; CHECK-LABEL: @hem_2d(
; CHECK: , align 16
define <2 x i64> @hem_2d(i32 %i, i32 %j) {
- %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ %t = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i32 %i, i32 %j
%tmp1 = load <2 x i64>* %t, align 1
ret <2 x i64> %tmp1
}
; CHECK-LABEL: @static_hem_store(
; CHECK: , align 16
define void @static_hem_store(<2 x i64> %y) {
- %t = getelementptr <2 x i64>* @x, i32 7
+ %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 7
store <2 x i64> %y, <2 x i64>* %t, align 1
ret void
}
; CHECK-LABEL: @hem_store(
; CHECK: , align 16
define void @hem_store(i32 %i, <2 x i64> %y) {
- %t = getelementptr <2 x i64>* @x, i32 %i
+ %t = getelementptr <2 x i64>, <2 x i64>* @x, i32 %i
store <2 x i64> %y, <2 x i64>* %t, align 1
ret void
}
; CHECK-LABEL: @hem_2d_store(
; CHECK: , align 16
define void @hem_2d_store(i32 %i, i32 %j, <2 x i64> %y) {
- %t = getelementptr [13 x <2 x i64>]* @xx, i32 %i, i32 %j
+ %t = getelementptr [13 x <2 x i64>], [13 x <2 x i64>]* @xx, i32 %i, i32 %j
store <2 x i64> %y, <2 x i64>* %t, align 1
ret void
}
loop:
%i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
- %src.gep = getelementptr inbounds float* %src, i32 %i
- %dst.gep = getelementptr inbounds i32* %dst, i32 %i
+ %src.gep = getelementptr inbounds float, float* %src, i32 %i
+ %dst.gep = getelementptr inbounds i32, i32* %dst, i32 %i
%l = load float* %src.gep, !llvm.mem.parallel_loop_access !1
%c = bitcast float %l to i32
store i32 %c, i32* %dst.gep
; CHECK: store i64 %[[V]], i64*
entry:
%p = load float** %ptr, !nonnull !3
- %gep = getelementptr float** %ptr, i32 42
+ %gep = getelementptr float*, float** %ptr, i32 42
store float* %p, float** %gep
ret void
}
bb: ; preds = %bb, %entry
%indvar = phi i64 [ 0, %entry ], [ %tmp, %bb ] ; <i64> [#uses=2]
%k.04 = phi i32 [ 0, %entry ], [ %t8, %bb ] ; <i32> [#uses=2]
- %cp.05 = getelementptr i8* %key, i64 %indvar ; <i8*> [#uses=1]
+ %cp.05 = getelementptr i8, i8* %key, i64 %indvar ; <i8*> [#uses=1]
%t2 = shl i32 %k.04, 1 ; <i32> [#uses=1]
%t3 = lshr i32 %k.04, 14 ; <i32> [#uses=1]
%t4 = add i32 %t2, %t3 ; <i32> [#uses=1]
%t7 = xor i32 %t6, %t4 ; <i32> [#uses=1]
%t8 = and i32 %t7, 16383 ; <i32> [#uses=2]
%tmp = add i64 %indvar, 1 ; <i64> [#uses=2]
- %scevgep = getelementptr i8* %key, i64 %tmp ; <i8*> [#uses=1]
+ %scevgep = getelementptr i8, i8* %key, i64 %tmp ; <i8*> [#uses=1]
%t9 = load i8* %scevgep, align 1 ; <i8> [#uses=1]
%t10 = icmp eq i8 %t9, 0 ; <i1> [#uses=1]
br i1 %t10, label %bb2, label %bb
define signext i32 @test1(i32 signext %x) #0 {
entry:
%idxprom = sext i32 %x to i64
- %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* @f.a, i64 0, i64 %idxprom
%0 = load i32* %arrayidx, align 4
ret i32 %0
entry:
%p = alloca i64
%idxprom = sext i32 %x to i64
- %arrayidx = getelementptr inbounds i64* %p, i64 %idxprom
+ %arrayidx = getelementptr inbounds i64, i64* %p, i64 %idxprom
store i64 %v, i64* %arrayidx
call void @foo(i64* %p)
ret void
entry:
%idxprom = sext i32 %x to i64
%p = select i1 %y, [1 x i32]* @f.a, [1 x i32]* @f.b
- %arrayidx = getelementptr inbounds [1 x i32]* %p, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %p, i64 0, i64 %idxprom
%0 = load i32* %arrayidx, align 4
ret i32 %0
; CHECK-LABEL: @test3
-; CHECK: getelementptr inbounds [1 x i32]* %p, i64 0, i64 0
+; CHECK: getelementptr inbounds [1 x i32], [1 x i32]* %p, i64 0, i64 0
}
attributes #0 = { nounwind readnone }
define i32 @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
- %mem1 = getelementptr [4 x i8]* @hel, i32 0, i32 0
- %mem2 = getelementptr [8 x i8]* @hello_u, i32 0, i32 0
+ %mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
+ %mem2 = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
ret i32 %ret
; CHECK: ret i32 0
define i32 @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
- %mem1 = getelementptr [4 x i8]* @hel, i32 0, i32 0
- %mem2 = getelementptr [4 x i8]* @foo, i32 0, i32 0
+ %mem1 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
+ %mem2 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
ret i32 %ret
; CHECK: ret i32 1
define i32 @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
- %mem1 = getelementptr [4 x i8]* @foo, i32 0, i32 0
- %mem2 = getelementptr [4 x i8]* @hel, i32 0, i32 0
+ %mem1 = getelementptr [4 x i8], [4 x i8]* @foo, i32 0, i32 0
+ %mem2 = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
%ret = call i32 @memcmp(i8* %mem1, i8* %mem2, i32 3)
ret i32 %ret
; CHECK: ret i32 -1
%tmp3 = shl i32 %hash, 2 ; <i32> [#uses=1]
%tmp5 = and i32 %tmp3, 124 ; <i32> [#uses=4]
- %tmp753 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
+ %tmp753 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp5 ; <float*> [#uses=1]
%tmp9 = load float* %tmp753 ; <float> [#uses=1]
%tmp11 = fmul float %tmp9, %x ; <float> [#uses=1]
%tmp13 = fadd float %tmp11, 0.000000e+00 ; <float> [#uses=1]
%tmp17.sum52 = or i32 %tmp5, 1 ; <i32> [#uses=1]
- %tmp1851 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp17.sum52 ; <float*> [#uses=1]
+ %tmp1851 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp17.sum52 ; <float*> [#uses=1]
%tmp19 = load float* %tmp1851 ; <float> [#uses=1]
%tmp21 = fmul float %tmp19, %y ; <float> [#uses=1]
%tmp23 = fadd float %tmp21, %tmp13 ; <float> [#uses=1]
%tmp27.sum50 = or i32 %tmp5, 2 ; <i32> [#uses=1]
- %tmp2849 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp27.sum50 ; <float*> [#uses=1]
+ %tmp2849 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp27.sum50 ; <float*> [#uses=1]
%tmp29 = load float* %tmp2849 ; <float> [#uses=1]
%tmp31 = fmul float %tmp29, %z ; <float> [#uses=1]
%tmp33 = fadd float %tmp31, %tmp23 ; <float> [#uses=1]
%tmp37.sum48 = or i32 %tmp5, 3 ; <i32> [#uses=1]
- %tmp3847 = getelementptr [128 x float]* %lookupTable, i32 0, i32 %tmp37.sum48 ; <float*> [#uses=1]
+ %tmp3847 = getelementptr [128 x float], [128 x float]* %lookupTable, i32 0, i32 %tmp37.sum48 ; <float*> [#uses=1]
%tmp39 = load float* %tmp3847 ; <float> [#uses=1]
%tmp41 = fmul float %tmp39, %w ; <float> [#uses=1]
%tmp43 = fadd float %tmp41, %tmp33 ; <float> [#uses=1]
; %A alloca is deleted
; CHECK-NEXT: alloca [124 x i8]
-; CHECK-NEXT: getelementptr inbounds [124 x i8]*
+; CHECK-NEXT: getelementptr inbounds [124 x i8], [124 x i8]*
; use @G instead of %A
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.*}}, i8* getelementptr inbounds (%T* @G, i64 0, i32 0)
}
define i32 @test3() {
- %h_p = getelementptr [2 x i8]* @h, i32 0, i32 0 ; <i8*> [#uses=1]
- %hel_p = getelementptr [4 x i8]* @hel, i32 0, i32 0 ; <i8*> [#uses=1]
- %hello_u_p = getelementptr [8 x i8]* @hello_u, i32 0, i32 0 ; <i8*> [#uses=1]
+ %h_p = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hel_p = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0 ; <i8*> [#uses=1]
+ %hello_u_p = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0 ; <i8*> [#uses=1]
%target = alloca [1024 x i8] ; <[1024 x i8]*> [#uses=1]
- %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=3]
+ %target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0 ; <i8*> [#uses=3]
call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %h_p, i32 2, i32 2, i1 false)
call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hel_p, i32 4, i32 4, i1 false)
call void @llvm.memmove.p0i8.p0i8.i32(i8* %target_p, i8* %hello_u_p, i32 8, i32 8, i1 false)
define i32 @main() {
%target = alloca [1024 x i8]
- %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0
+ %target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 0, i32 1, i1 false)
call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 1, i32 1, i1 false)
call void @llvm.memset.p0i8.i32(i8* %target_p, i8 1, i32 2, i32 2, i1 false)
define i32 @test(%struct.Moves addrspace(1)* nocapture %moves) {
entry:
; CHECK: bitcast i8 addrspace(1)* %gep to i64 addrspace(1)*
- %gep = getelementptr inbounds %struct.Moves addrspace(1)* %moves, i32 1, i32 0, i32 9
+ %gep = getelementptr inbounds %struct.Moves, %struct.Moves addrspace(1)* %moves, i32 1, i32 0, i32 9
call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %gep, i8 0, i64 8, i32 1, i1 false)
ret i32 0
}
define i32 @test_as0(i32 addrspace(0)* %a) {
; CHECK-LABEL: @test_as0(
-; CHECK: %arrayidx = getelementptr i32* %a, i32 1
- %arrayidx = getelementptr i32 addrspace(0)* %a, i64 1
+; CHECK: %arrayidx = getelementptr i32, i32* %a, i32 1
+ %arrayidx = getelementptr i32, i32 addrspace(0)* %a, i64 1
%y = load i32 addrspace(0)* %arrayidx, align 4
ret i32 %y
}
define i32 @test_as1(i32 addrspace(1)* %a) {
; CHECK-LABEL: @test_as1(
-; CHECK: %arrayidx = getelementptr i32 addrspace(1)* %a, i64 1
- %arrayidx = getelementptr i32 addrspace(1)* %a, i32 1
+; CHECK: %arrayidx = getelementptr i32, i32 addrspace(1)* %a, i64 1
+ %arrayidx = getelementptr i32, i32 addrspace(1)* %a, i32 1
%y = load i32 addrspace(1)* %arrayidx, align 4
ret i32 %y
}
define i32 @test_as2(i32 addrspace(2)* %a) {
; CHECK-LABEL: @test_as2(
-; CHECK: %arrayidx = getelementptr i32 addrspace(2)* %a, i8 1
- %arrayidx = getelementptr i32 addrspace(2)* %a, i32 1
+; CHECK: %arrayidx = getelementptr i32, i32 addrspace(2)* %a, i8 1
+ %arrayidx = getelementptr i32, i32 addrspace(2)* %a, i32 1
%y = load i32 addrspace(2)* %arrayidx, align 4
ret i32 %y
}
define i32 @test_as3(i32 addrspace(3)* %a) {
; CHECK-LABEL: @test_as3(
-; CHECK: %arrayidx = getelementptr i32 addrspace(3)* %a, i16 1
- %arrayidx = getelementptr i32 addrspace(3)* %a, i32 1
+; CHECK: %arrayidx = getelementptr i32, i32 addrspace(3)* %a, i16 1
+ %arrayidx = getelementptr i32, i32 addrspace(3)* %a, i32 1
%y = load i32 addrspace(3)* %arrayidx, align 4
ret i32 %y
}
; Check that the GEP index is changed to the address space integer type (i64 -> i8)
define i32 addrspace(2)* @shrink_gep_constant_index_64_as2(i32 addrspace(2)* %p) {
; CHECK-LABEL: @shrink_gep_constant_index_64_as2(
-; CHECK-NEXT: getelementptr i32 addrspace(2)* %p, i8 1
- %ret = getelementptr i32 addrspace(2)* %p, i64 1
+; CHECK-NEXT: getelementptr i32, i32 addrspace(2)* %p, i8 1
+ %ret = getelementptr i32, i32 addrspace(2)* %p, i64 1
ret i32 addrspace(2)* %ret
}
define i32 addrspace(2)* @shrink_gep_constant_index_32_as2(i32 addrspace(2)* %p) {
; CHECK-LABEL: @shrink_gep_constant_index_32_as2(
-; CHECK-NEXT: getelementptr i32 addrspace(2)* %p, i8 1
- %ret = getelementptr i32 addrspace(2)* %p, i32 1
+; CHECK-NEXT: getelementptr i32, i32 addrspace(2)* %p, i8 1
+ %ret = getelementptr i32, i32 addrspace(2)* %p, i32 1
ret i32 addrspace(2)* %ret
}
define i32 addrspace(3)* @shrink_gep_constant_index_64_as3(i32 addrspace(3)* %p) {
; CHECK-LABEL: @shrink_gep_constant_index_64_as3(
-; CHECK-NEXT: getelementptr i32 addrspace(3)* %p, i16 1
- %ret = getelementptr i32 addrspace(3)* %p, i64 1
+; CHECK-NEXT: getelementptr i32, i32 addrspace(3)* %p, i16 1
+ %ret = getelementptr i32, i32 addrspace(3)* %p, i64 1
ret i32 addrspace(3)* %ret
}
define i32 addrspace(2)* @shrink_gep_variable_index_64_as2(i32 addrspace(2)* %p, i64 %idx) {
; CHECK-LABEL: @shrink_gep_variable_index_64_as2(
; CHECK-NEXT: %1 = trunc i64 %idx to i8
-; CHECK-NEXT: getelementptr i32 addrspace(2)* %p, i8 %1
- %ret = getelementptr i32 addrspace(2)* %p, i64 %idx
+; CHECK-NEXT: getelementptr i32, i32 addrspace(2)* %p, i8 %1
+ %ret = getelementptr i32, i32 addrspace(2)* %p, i64 %idx
ret i32 addrspace(2)* %ret
}
define i32 addrspace(1)* @grow_gep_variable_index_8_as1(i32 addrspace(1)* %p, i8 %idx) {
; CHECK-LABEL: @grow_gep_variable_index_8_as1(
; CHECK-NEXT: %1 = sext i8 %idx to i64
-; CHECK-NEXT: getelementptr i32 addrspace(1)* %p, i64 %1
- %ret = getelementptr i32 addrspace(1)* %p, i8 %idx
+; CHECK-NEXT: getelementptr i32, i32 addrspace(1)* %p, i64 %1
+ %ret = getelementptr i32, i32 addrspace(1)* %p, i8 %idx
ret i32 addrspace(1)* %ret
}
br i1 undef, label %bb11, label %bb12
bb11:
- %0 = getelementptr inbounds float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
+ %0 = getelementptr inbounds float, float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
%1 = bitcast float* %0 to i8* ; <i8*> [#uses=1]
%2 = call i32 @llvm.objectsize.i32.p0i8(i8* %1, i1 false) ; <i32> [#uses=1]
%3 = call i8* @__memcpy_chk(i8* undef, i8* undef, i32 512, i32 %2) nounwind ; <i8*> [#uses=0]
unreachable
bb12:
- %4 = getelementptr inbounds float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
+ %4 = getelementptr inbounds float, float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
%5 = bitcast float* %4 to i8* ; <i8*> [#uses=1]
%6 = call i8* @__inline_memcpy_chk(i8* %5, i8* undef, i32 512) nounwind inlinehint ; <i8*> [#uses=0]
; CHECK: @__inline_memcpy_chk
; CHECK-LABEL: @test7(
%alloc = call noalias i8* @malloc(i32 48) nounwind
store i8* %alloc, i8** %esc
- %gep = getelementptr inbounds i8* %alloc, i32 16
+ %gep = getelementptr inbounds i8, i8* %alloc, i32 16
%objsize = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 false) nounwind readonly
; CHECK: ret i32 32
ret i32 %objsize
; CHECK-LABEL: @test8(
%alloc = call noalias i8* @calloc(i32 5, i32 7) nounwind
store i8* %alloc, i8** %esc
- %gep = getelementptr inbounds i8* %alloc, i32 5
+ %gep = getelementptr inbounds i8, i8* %alloc, i32 5
%objsize = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 false) nounwind readonly
; CHECK: ret i32 30
ret i32 %objsize
; technically reachable, but this malformed IR may appear as a result of constant propagation
xpto:
- %gep2 = getelementptr i8* %gep, i32 1
- %gep = getelementptr i8* %gep2, i32 1
+ %gep2 = getelementptr i8, i8* %gep, i32 1
+ %gep = getelementptr i8, i8* %gep2, i32 1
%o = call i32 @llvm.objectsize.i32.p0i8(i8* %gep, i1 true)
; CHECK: ret i32 undef
ret i32 %o
define void @foo(float* %Ar, float* %Ai, i64 %As, float* %Cr, float* %Ci, i64 %Cs, i64 %n) nounwind {
entry:
- %0 = getelementptr inbounds float* %Ar, i64 0 ; <float*> [#uses=1]
- %1 = getelementptr inbounds float* %Ai, i64 0 ; <float*> [#uses=1]
+ %0 = getelementptr inbounds float, float* %Ar, i64 0 ; <float*> [#uses=1]
+ %1 = getelementptr inbounds float, float* %Ai, i64 0 ; <float*> [#uses=1]
%2 = mul i64 %n, %As ; <i64> [#uses=1]
- %3 = getelementptr inbounds float* %Ar, i64 %2 ; <float*> [#uses=1]
+ %3 = getelementptr inbounds float, float* %Ar, i64 %2 ; <float*> [#uses=1]
%4 = mul i64 %n, %As ; <i64> [#uses=1]
- %5 = getelementptr inbounds float* %Ai, i64 %4 ; <float*> [#uses=1]
+ %5 = getelementptr inbounds float, float* %Ai, i64 %4 ; <float*> [#uses=1]
%6 = mul i64 %n, 2 ; <i64> [#uses=1]
%7 = mul i64 %6, %As ; <i64> [#uses=1]
- %8 = getelementptr inbounds float* %Ar, i64 %7 ; <float*> [#uses=1]
+ %8 = getelementptr inbounds float, float* %Ar, i64 %7 ; <float*> [#uses=1]
%9 = mul i64 %n, 2 ; <i64> [#uses=1]
%10 = mul i64 %9, %As ; <i64> [#uses=1]
- %11 = getelementptr inbounds float* %Ai, i64 %10 ; <float*> [#uses=1]
- %12 = getelementptr inbounds float* %Cr, i64 0 ; <float*> [#uses=1]
- %13 = getelementptr inbounds float* %Ci, i64 0 ; <float*> [#uses=1]
+ %11 = getelementptr inbounds float, float* %Ai, i64 %10 ; <float*> [#uses=1]
+ %12 = getelementptr inbounds float, float* %Cr, i64 0 ; <float*> [#uses=1]
+ %13 = getelementptr inbounds float, float* %Ci, i64 0 ; <float*> [#uses=1]
%14 = mul i64 %n, %Cs ; <i64> [#uses=1]
- %15 = getelementptr inbounds float* %Cr, i64 %14 ; <float*> [#uses=1]
+ %15 = getelementptr inbounds float, float* %Cr, i64 %14 ; <float*> [#uses=1]
%16 = mul i64 %n, %Cs ; <i64> [#uses=1]
- %17 = getelementptr inbounds float* %Ci, i64 %16 ; <float*> [#uses=1]
+ %17 = getelementptr inbounds float, float* %Ci, i64 %16 ; <float*> [#uses=1]
%18 = mul i64 %n, 2 ; <i64> [#uses=1]
%19 = mul i64 %18, %Cs ; <i64> [#uses=1]
- %20 = getelementptr inbounds float* %Cr, i64 %19 ; <float*> [#uses=1]
+ %20 = getelementptr inbounds float, float* %Cr, i64 %19 ; <float*> [#uses=1]
%21 = mul i64 %n, 2 ; <i64> [#uses=1]
%22 = mul i64 %21, %Cs ; <i64> [#uses=1]
- %23 = getelementptr inbounds float* %Ci, i64 %22 ; <float*> [#uses=1]
+ %23 = getelementptr inbounds float, float* %Ci, i64 %22 ; <float*> [#uses=1]
br label %bb13
bb: ; preds = %bb13
store float %43, float* %C1i.0, align 4
store float %44, float* %C2r.0, align 4
store float %45, float* %C2i.0, align 4
- %46 = getelementptr inbounds float* %A0r.0, i64 %As ; <float*> [#uses=1]
- %47 = getelementptr inbounds float* %A0i.0, i64 %As ; <float*> [#uses=1]
- %48 = getelementptr inbounds float* %A1r.0, i64 %As ; <float*> [#uses=1]
- %49 = getelementptr inbounds float* %A1i.0, i64 %As ; <float*> [#uses=1]
- %50 = getelementptr inbounds float* %A2r.0, i64 %As ; <float*> [#uses=1]
- %51 = getelementptr inbounds float* %A2i.0, i64 %As ; <float*> [#uses=1]
- %52 = getelementptr inbounds float* %C0r.0, i64 %Cs ; <float*> [#uses=1]
- %53 = getelementptr inbounds float* %C0i.0, i64 %Cs ; <float*> [#uses=1]
- %54 = getelementptr inbounds float* %C1r.0, i64 %Cs ; <float*> [#uses=1]
- %55 = getelementptr inbounds float* %C1i.0, i64 %Cs ; <float*> [#uses=1]
- %56 = getelementptr inbounds float* %C2r.0, i64 %Cs ; <float*> [#uses=1]
- %57 = getelementptr inbounds float* %C2i.0, i64 %Cs ; <float*> [#uses=1]
+ %46 = getelementptr inbounds float, float* %A0r.0, i64 %As ; <float*> [#uses=1]
+ %47 = getelementptr inbounds float, float* %A0i.0, i64 %As ; <float*> [#uses=1]
+ %48 = getelementptr inbounds float, float* %A1r.0, i64 %As ; <float*> [#uses=1]
+ %49 = getelementptr inbounds float, float* %A1i.0, i64 %As ; <float*> [#uses=1]
+ %50 = getelementptr inbounds float, float* %A2r.0, i64 %As ; <float*> [#uses=1]
+ %51 = getelementptr inbounds float, float* %A2i.0, i64 %As ; <float*> [#uses=1]
+ %52 = getelementptr inbounds float, float* %C0r.0, i64 %Cs ; <float*> [#uses=1]
+ %53 = getelementptr inbounds float, float* %C0i.0, i64 %Cs ; <float*> [#uses=1]
+ %54 = getelementptr inbounds float, float* %C1r.0, i64 %Cs ; <float*> [#uses=1]
+ %55 = getelementptr inbounds float, float* %C1i.0, i64 %Cs ; <float*> [#uses=1]
+ %56 = getelementptr inbounds float, float* %C2r.0, i64 %Cs ; <float*> [#uses=1]
+ %57 = getelementptr inbounds float, float* %C2i.0, i64 %Cs ; <float*> [#uses=1]
%58 = add nsw i64 %i.0, 1 ; <i64> [#uses=1]
br label %bb13
define i32* @test8({ i32, i32 } *%A, i1 %b) {
BB0:
- %X = getelementptr inbounds { i32, i32 } *%A, i32 0, i32 1
+ %X = getelementptr inbounds { i32, i32 }, { i32, i32 } *%A, i32 0, i32 1
br i1 %b, label %BB1, label %BB2
BB1:
- %Y = getelementptr { i32, i32 } *%A, i32 0, i32 1
+ %Y = getelementptr { i32, i32 }, { i32, i32 } *%A, i32 0, i32 1
br label %BB2
BB2:
; CHECK-LABEL: @test8(
; CHECK-NOT: phi
; CHECK: BB2:
-; CHECK-NEXT: %B = getelementptr { i32, i32 }* %A
+; CHECK-NEXT: %B = getelementptr { i32, i32 }, { i32, i32 }* %A
; CHECK-NEXT: ret i32* %B
}
if.then: ; preds = %entry
%tmp1 = load i32 addrspace(1)** %pointer1.addr ; <i32 addrspace(1)*>
- %arrayidx = getelementptr i32 addrspace(1)* %tmp1, i32 0 ; <i32 addrspace(1)*> [#uses=1]
+ %arrayidx = getelementptr i32, i32 addrspace(1)* %tmp1, i32 0 ; <i32 addrspace(1)*> [#uses=1]
%tmp2 = load i32 addrspace(1)* %arrayidx ; <i32> [#uses=1]
store i32 %tmp2, i32* %res
br label %if.end
if.else: ; preds = %entry
%tmp3 = load i32** %pointer2.addr ; <i32*> [#uses=1]
- %arrayidx4 = getelementptr i32* %tmp3, i32 0 ; <i32*> [#uses=1]
+ %arrayidx4 = getelementptr i32, i32* %tmp3, i32 0 ; <i32*> [#uses=1]
%tmp5 = load i32* %arrayidx4 ; <i32> [#uses=1]
store i32 %tmp5, i32* %res
br label %if.end
; <label>:1
bitcast i8* %0 to i32* ; <i32*>:2 [#uses=1]
load i32* %2, align 1 ; <i32>:3 [#uses=1]
- getelementptr i8* %0, i32 4 ; <i8*>:4 [#uses=1]
+ getelementptr i8, i8* %0, i32 4 ; <i8*>:4 [#uses=1]
bitcast i8* %4 to i32* ; <i32*>:5 [#uses=1]
load i32* %5, align 1 ; <i32>:6 [#uses=1]
br label %7
sitofp i32 %.0 to float ; <float>:10 [#uses=1]
insertelement <4 x float> %.01, float %10, i32 0 ; <<4 x float>>:11 [#uses=1]
shufflevector <4 x float> %11, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>>:12 [#uses=2]
- getelementptr i8* %0, i32 48 ; <i8*>:13 [#uses=1]
+ getelementptr i8, i8* %0, i32 48 ; <i8*>:13 [#uses=1]
bitcast i8* %13 to <4 x float>* ; <<4 x float>*>:14 [#uses=1]
store <4 x float> %12, <4 x float>* %14, align 16
add i32 %.0, 2 ; <i32>:15 [#uses=1]
br i1 %5, label %6, label %20
; <label>:6 ; preds = %4
- %7 = getelementptr i8* %2, i32 %.0 ; <i8*> [#uses=1]
+ %7 = getelementptr i8, i8* %2, i32 %.0 ; <i8*> [#uses=1]
%8 = bitcast i8* %7 to <4 x i16>* ; <<4 x i16>*> [#uses=1]
%9 = load <4 x i16>* %8, align 1 ; <<4 x i16>> [#uses=1]
%10 = bitcast <4 x i16> %9 to <1 x i64> ; <<1 x i64>> [#uses=1]
%14 = shufflevector <8 x i16> %13, <8 x i16> %13, <8 x i32> < i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3 > ; <<8 x i16>> [#uses=1]
%15 = bitcast <8 x i16> %14 to <4 x i32> ; <<4 x i32>> [#uses=1]
%16 = sitofp <4 x i32> %15 to <4 x float> ; <<4 x float>> [#uses=1]
- %17 = getelementptr i8* %0, i32 %.0 ; <i8*> [#uses=1]
+ %17 = getelementptr i8, i8* %0, i32 %.0 ; <i8*> [#uses=1]
%18 = bitcast i8* %17 to <4 x float>* ; <<4 x float>*> [#uses=1]
store <4 x float> %16, <4 x float>* %18, align 1
%19 = add i32 %.0, 1 ; <i32> [#uses=1]
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %fmt = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %fmt = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt)
ret void
; CHECK-NEXT: ret void
define void @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %fmt = getelementptr [2 x i8]* @h, i32 0, i32 0
+ %fmt = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt)
; CHECK-NEXT: call i32 @putchar(i32 104)
ret void
define void @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %fmt = getelementptr [2 x i8]* @percent, i32 0, i32 0
+ %fmt = getelementptr [2 x i8], [2 x i8]* @percent, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt)
; CHECK-NEXT: call i32 @putchar(i32 37)
ret void
define void @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
- %fmt = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt)
; CHECK-NEXT: call i32 @puts(i8* getelementptr inbounds ([12 x i8]* [[STR]], i32 0, i32 0))
ret void
define void @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
- %fmt = getelementptr [3 x i8]* @percent_c, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_c, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt, i8 104)
; CHECK-NEXT: call i32 @putchar(i32 104)
ret void
define void @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
- %fmt = getelementptr [4 x i8]* @percent_s, i32 0, i32 0
- %str = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [4 x i8], [4 x i8]* @percent_s, i32 0, i32 0
+ %str = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt, i8* %str)
; CHECK-NEXT: call i32 @puts(i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0))
ret void
define void @test_simplify7() {
; CHECK-IPRINTF-LABEL: @test_simplify7(
- %fmt = getelementptr [3 x i8]* @percent_d, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_d, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt, i32 187)
; CHECK-IPRINTF-NEXT: call i32 (i8*, ...)* @iprintf(i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
ret void
define void @test_no_simplify1() {
; CHECK-IPRINTF-LABEL: @test_no_simplify1(
- %fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_f, i32 0, i32 0
call i32 (i8*, ...)* @printf(i8* %fmt, double 1.87)
; CHECK-IPRINTF-NEXT: call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
ret void
define i32 @test_no_simplify3() {
; CHECK-LABEL: @test_no_simplify3(
- %fmt = getelementptr [2 x i8]* @h, i32 0, i32 0
+ %fmt = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0
%ret = call i32 (i8*, ...)* @printf(i8* %fmt)
; CHECK-NEXT: call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([2 x i8]* @h, i32 0, i32 0))
ret i32 %ret
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %fmt = getelementptr [2 x i8]* @h, i32 0, i32 0
+ %fmt = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0
call void (i8*, ...)* @printf(i8* %fmt)
; CHECK-NEXT: call i32 @putchar(i32 104)
ret void
define void @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %fmt = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call void (i8*, ...)* @printf(i8* %fmt)
; CHECK-NEXT: call i32 @puts(i8* getelementptr inbounds ([12 x i8]* @str, i32 0, i32 0))
ret void
define void @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
- %fmt = getelementptr [4 x i8]* @percent_s, i32 0, i32 0
- %str = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [4 x i8], [4 x i8]* @percent_s, i32 0, i32 0
+ %str = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call void (i8*, ...)* @printf(i8* %fmt, i8* %str)
; CHECK-NEXT: call i32 @puts(i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0))
ret void
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %str = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
call i32 @puts(i8* %str)
; CHECK-NEXT: call i32 @putchar(i32 10)
ret void
define i32 @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
%ret = call i32 @puts(i8* %str)
; CHECK-NEXT: call i32 @puts(i8* getelementptr inbounds ([1 x i8]* @empty, i32 0, i32 0))
ret i32 %ret
define void @test1(%C*) {
entry:
- %1 = getelementptr inbounds %C* %0, i64 0, i32 0, i32 0
+ %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
%m = load i64** %1, align 8
- %2 = getelementptr inbounds %C* %0, i64 1, i32 0, i32 0
+ %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
%n = load i64** %2, align 8
- %3 = getelementptr inbounds i64* %m, i64 9
+ %3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
%5 = load i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
ret void
; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C* %7, i64 0, i32 0
+ %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
tail call void @bar(%struct.S* %11)
br label %9
define void @test2(%C*) {
entry:
- %1 = getelementptr inbounds %C* %0, i64 0, i32 0, i32 0
+ %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
%m = load i64** %1, align 8
- %2 = getelementptr inbounds %C* %0, i64 1, i32 0, i32 0
+ %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
%n = load i64** %2, align 8
- %3 = getelementptr inbounds i64* %m, i64 9
+ %3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
%5 = load i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
ret void
; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C* %7, i64 0, i32 0
+ %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
tail call void @bar(%struct.S* %11)
br label %9
define void @test3(%C*) {
entry:
- %1 = getelementptr inbounds %C* %0, i64 0, i32 0, i32 0
+ %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
%m = load i64** %1, align 8
- %2 = getelementptr inbounds %C* %0, i64 1, i32 0, i32 0
+ %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
%n = load i64** %2, align 8
- %3 = getelementptr inbounds i64* %m, i64 9
+ %3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
%5 = load i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
ret void
; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C* %7, i64 0, i32 0
+ %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
tail call void @bar(%struct.S* %11)
br label %9
define void @test4(%C*) {
entry:
- %1 = getelementptr inbounds %C* %0, i64 0, i32 0, i32 0
+ %1 = getelementptr inbounds %C, %C* %0, i64 0, i32 0, i32 0
%m = load i64** %1, align 8
- %2 = getelementptr inbounds %C* %0, i64 1, i32 0, i32 0
+ %2 = getelementptr inbounds %C, %C* %0, i64 1, i32 0, i32 0
%n = load i64** %2, align 8
- %3 = getelementptr inbounds i64* %m, i64 9
+ %3 = getelementptr inbounds i64, i64* %m, i64 9
%4 = bitcast i64* %3 to i64 (%C*)**
%5 = load i64 (%C*)** %4, align 8
%6 = icmp eq i64* %m, %n
ret void
; <label>:10 ; preds = %entry
- %11 = getelementptr inbounds %C* %7, i64 0, i32 0
+ %11 = getelementptr inbounds %C, %C* %7, i64 0, i32 0
tail call void @bar(%struct.S* %11)
br label %9
ret void
; <label>:5 ; preds = %entry
- %6 = getelementptr inbounds %C* %2, i64 0, i32 0
+ %6 = getelementptr inbounds %C, %C* %2, i64 0, i32 0
tail call void @bar(%struct.S* %6)
br label %4
define i32 @foo(%struct.State* %dst, <4 x float>* %prgrm, <4 x float>** %buffs, %struct._VMConstants* %cnstn, %struct.PPStreamToken* %pstrm, %struct.PluginBufferData* %gpctx, %struct.VMTextures* %txtrs, %struct.VMGPStack* %gpstk) nounwind {
bb266.i:
- getelementptr <4 x float>* null, i32 11 ; <<4 x float>*>:0 [#uses=1]
+ getelementptr <4 x float>, <4 x float>* null, i32 11 ; <<4 x float>*>:0 [#uses=1]
load <4 x float>* %0, align 16 ; <<4 x float>>:1 [#uses=1]
shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> < i32 0, i32 1, i32 1, i32 1 > ; <<4 x float>>:2 [#uses=1]
shufflevector <4 x float> %2, <4 x float> undef, <4 x i32> < i32 0, i32 4, i32 1, i32 5 > ; <<4 x float>>:3 [#uses=1]
bb:
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %bb ]
%t0 = and i64 %indvar, 65535
- %t1 = getelementptr double* %p, i64 %t0
+ %t1 = getelementptr double, double* %p, i64 %t0
%t2 = load double* %t1, align 8
%t3 = fmul double %t2, 2.2
store double %t3, double* %t1, align 8
@hello_u = constant [8 x i8] c"hello_u\00" ; <[8 x i8]*> [#uses=1]
define i32 @MemCpy() {
- %h_p = getelementptr [2 x i8]* @h, i32 0, i32 0
- %hel_p = getelementptr [4 x i8]* @hel, i32 0, i32 0
- %hello_u_p = getelementptr [8 x i8]* @hello_u, i32 0, i32 0
+ %h_p = getelementptr [2 x i8], [2 x i8]* @h, i32 0, i32 0
+ %hel_p = getelementptr [4 x i8], [4 x i8]* @hel, i32 0, i32 0
+ %hello_u_p = getelementptr [8 x i8], [8 x i8]* @hello_u, i32 0, i32 0
%target = alloca [1024 x i8]
- %target_p = getelementptr [1024 x i8]* %target, i32 0, i32 0
+ %target_p = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %target_p, i8* %h_p, i32 2, i32 2, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %target_p, i8* %hel_p, i32 4, i32 4, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %target_p, i8* %hello_u_p, i32 8, i32 8, i1 false)
define void @test_simplify1(i8* %dst) {
; CHECK-LABEL: @test_simplify1(
- %fmt = getelementptr [13 x i8]* @hello_world, i32 0, i32 0
+ %fmt = getelementptr [13 x i8], [13 x i8]* @hello_world, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt)
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* getelementptr inbounds ([13 x i8]* @hello_world, i32 0, i32 0), i32 13, i32 1, i1 false)
ret void
define void @test_simplify2(i8* %dst) {
; CHECK-LABEL: @test_simplify2(
- %fmt = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %fmt = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt)
; CHECK-NEXT: store i8 0, i8* %dst, align 1
ret void
define void @test_simplify3(i8* %dst) {
; CHECK-LABEL: @test_simplify3(
- %fmt = getelementptr [7 x i8]* @null_hello, i32 0, i32 0
+ %fmt = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt)
; CHECK-NEXT: store i8 0, i8* %dst, align 1
ret void
define void @test_simplify4(i8* %dst) {
; CHECK-LABEL: @test_simplify4(
- %fmt = getelementptr [3 x i8]* @percent_c, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_c, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt, i8 104)
; CHECK-NEXT: store i8 104, i8* %dst, align 1
-; CHECK-NEXT: [[NUL:%[a-z0-9]+]] = getelementptr i8* %dst, i32 1
+; CHECK-NEXT: [[NUL:%[a-z0-9]+]] = getelementptr i8, i8* %dst, i32 1
; CHECK-NEXT: store i8 0, i8* [[NUL]], align 1
ret void
; CHECK-NEXT: ret void
define void @test_simplify5(i8* %dst, i8* %str) {
; CHECK-LABEL: @test_simplify5(
- %fmt = getelementptr [3 x i8]* @percent_s, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_s, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt, i8* %str)
; CHECK-NEXT: [[STRLEN:%[a-z0-9]+]] = call i32 @strlen(i8* %str)
; CHECK-NEXT: [[LENINC:%[a-z0-9]+]] = add i32 [[STRLEN]], 1
define void @test_simplify6(i8* %dst) {
; CHECK-IPRINTF-LABEL: @test_simplify6(
- %fmt = getelementptr [3 x i8]* @percent_d, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_d, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt, i32 187)
; CHECK-IPRINTF-NEXT: call i32 (i8*, i8*, ...)* @siprintf(i8* %dst, i8* getelementptr inbounds ([3 x i8]* @percent_d, i32 0, i32 0), i32 187)
ret void
define void @test_no_simplify1(i8* %dst) {
; CHECK-IPRINTF-LABEL: @test_no_simplify1(
- %fmt = getelementptr [3 x i8]* @percent_f, i32 0, i32 0
+ %fmt = getelementptr [3 x i8], [3 x i8]* @percent_f, i32 0, i32 0
call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* %fmt, double 1.87)
; CHECK-IPRINTF-NEXT: call i32 (i8*, i8*, ...)* @sprintf(i8* %dst, i8* getelementptr inbounds ([3 x i8]* @percent_f, i32 0, i32 0), double 1.870000e+00)
ret void
; CHECK: sqrt(
; CHECK-NOT: sqrtf(
; CHECK: fptrunc
- %arrayidx13 = getelementptr inbounds float* %v, i64 2
+ %arrayidx13 = getelementptr inbounds float, float* %v, i64 2
%tmp14 = load float* %arrayidx13
%mul18 = fmul float %tmp14, %tmp14
%add19 = fadd float undef, %mul18
define void @foo() nounwind {
entry:
%src = alloca [1024 x i8], align 1
- %src1 = getelementptr [1024 x i8]* %src, i32 0, i32 0
+ %src1 = getelementptr [1024 x i8], [1024 x i8]* %src, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([1024 x i8]* @dst, i32 0, i32 0), i8* %src1, i32 1024, i32 1, i1 false)
call void @frob(i8* %src1) nounwind
ret void
%i.0.reg2mem.0 = phi i32 [ 0, %bb.preheader ], [ %indvar.next, %bb ] ; <i32> [#uses=2]
%tmp = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
%tmp23 = alloca i8, i32 %size ; <i8*> [#uses=2]
- %tmp27 = getelementptr i8* %tmp23, i32 %tmp25 ; <i8*> [#uses=1]
+ %tmp27 = getelementptr i8, i8* %tmp23, i32 %tmp25 ; <i8*> [#uses=1]
store i8 0, i8* %tmp27, align 1
%tmp28 = call i8* @llvm.stacksave( ) ; <i8*> [#uses=1]
%tmp52 = alloca i8, i32 %size ; <i8*> [#uses=1]
for.body: ; preds = %for.cond
%idxprom = sext i32 %0 to i64
- %arrayidx = getelementptr inbounds float* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
store float 0.000000e+00, float* %arrayidx, align 4, !tbaa !3
%1 = load i32* %gi, align 4, !tbaa !0
%inc = add nsw i32 %1, 1
define i8* @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%ret = call i8* @stpcpy(i8* %dst, i8* %src)
; CHECK: @llvm.memcpy.p0i8.p0i8.i32
define i8* @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
%ret = call i8* @stpcpy(i8* %dst, i8* %dst)
; CHECK: [[LEN:%[a-z]+]] = call i32 @strlen
-; CHECK-NEXT: getelementptr inbounds [32 x i8]* @a, i32 0, i32 [[LEN]]
+; CHECK-NEXT: getelementptr inbounds [32 x i8], [32 x i8]* @a, i32 0, i32 [[LEN]]
ret i8* %ret
}
define i8* @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [32 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [32 x i8], [32 x i8]* @b, i32 0, i32 0
%ret = call i8* @stpcpy(i8* %dst, i8* %src)
; CHECK: call i8* @stpcpy
define void @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i16* @stpcpy(i8* %dst, i8* %src)
; CHECK: call i16* @stpcpy
define i8* @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 11)
define i8* @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 11)
define i8* @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 11)
define i8* @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: %stpcpy = call i8* @stpcpy(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @b, i32 0, i32 0))
; CHECK-NEXT: ret i8* %stpcpy
define i8* @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
; CHECK-NEXT: %1 = call i8* @__memcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 %len)
define i8* @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
; CHECK-NEXT: %strlen = call i32 @strlen(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0))
-; CHECK-NEXT: %1 = getelementptr inbounds [60 x i8]* @a, i32 0, i32 %strlen
+; CHECK-NEXT: %1 = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 %strlen
; CHECK-NEXT: ret i8* %1
%len = call i32 @llvm.objectsize.i32.p0i8(i8* %dst, i1 false)
%ret = call i8* @__stpcpy_chk(i8* %dst, i8* %dst, i32 %len)
define i8* @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: %ret = call i8* @__stpcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @b, i32 0, i32 0), i32 8)
; CHECK-NEXT: ret i8* %ret
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = getelementptr inbounds [60 x i16]* @a, i32 0, i32 0
- %src = getelementptr inbounds [8 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i16], [60 x i16]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call i16* @__strcpy_chk
call i16* @__strcpy_chk(i16* %dst, i8* %src, i32 8)
; CHECK: call i32 @puts
%target = alloca [1024 x i8]
- %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
+ %arg1 = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
store i8 0, i8* %arg1
; rslt1 = strcat(target, "hello\00")
- %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %arg2 = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%rslt1 = call i8* @strcat(i8* %arg1, i8* %arg2)
; rslt2 = strcat(rslt1, "\00")
- %arg3 = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %arg3 = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%rslt2 = call i8* @strcat(i8* %rslt1, i8* %arg3)
; rslt3 = strcat(rslt2, "\00hello\00")
- %arg4 = getelementptr [7 x i8]* @null_hello, i32 0, i32 0
+ %arg4 = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
%rslt3 = call i8* @strcat(i8* %rslt2, i8* %arg4)
call i32 @puts( i8* %rslt3 )
; CHECK-NOT: call i8* @strcat
; CHECK: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strcat(i8* %dst, i8* %src)
ret void
}
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
call i8* @strcat(i8* %dst, i8* %src)
ret void
}
; CHECK: call i16* @strcat
; CHECK: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i16* @strcat(i8* %dst, i8* %src)
ret void
}
; CHECK-NOT: call i8* @strchr
; CHECK: ret void
- %str = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strchr(i8* %str, i32 119)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strchr
; CHECK: ret void
- %str = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%dst = call i8* @strchr(i8* %str, i32 119)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strchr
; CHECK: ret void
- %src = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strchr(i8* %src, i32 0)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strchr
; CHECK: ret void
- %src = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strchr(i8* %src, i32 %chr)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strchr
; CHECK: ret void
- %src = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strchr(i8* %src, i32 65280)
store i8* %dst, i8** @chp
ret void
define void @test_simplify6(i8* %str) {
; CHECK: %strlen = call i32 @strlen(i8* %str)
; CHECK-NOT: call i8* @strchr
-; CHECK: %strchr = getelementptr i8* %str, i32 %strlen
+; CHECK: %strchr = getelementptr i8, i8* %str, i32 %strlen
; CHECK: store i8* %strchr, i8** @chp, align 4
; CHECK: ret void
; CHECK: call i8 @strchr
; CHECK: ret void
- %str = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8 @strchr(i8* %str, i32 119)
store i8 %dst, i8* @chr
ret void
; CHECK: %2 = sub nsw i32 0, %1
; CHECK: ret i32 %2
- %str1 = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %str1 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
ret i32 %temp1
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: ret i32 %1
- %str2 = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
ret i32 %temp1
}
; CHECK-LABEL: @test3(
; CHECK: ret i32 -1
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8]* @hello, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
ret i32 %temp1
}
; CHECK-LABEL: @test4(
; CHECK: ret i32 1
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%temp1 = call i32 @strcmp(i8* %str1, i8* %str2)
ret i32 %temp1
}
; CHECK: %memcmp = call i32 @memcmp(i8* getelementptr inbounds ([6 x i8]* @hello, i32 0, i32 0), i8* %str2, i32 5)
; CHECK: ret i32 %memcmp
- %str1 = getelementptr inbounds [6 x i8]* @hello, i32 0, i32 0
- %temp1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %temp2 = getelementptr inbounds [5 x i8]* @bell, i32 0, i32 0
+ %str1 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
+ %temp1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %temp2 = getelementptr inbounds [5 x i8], [5 x i8]* @bell, i32 0, i32 0
%str2 = select i1 %b, i8* %temp1, i8* %temp2
%temp3 = call i32 @strcmp(i8* %str1, i8* %str2)
ret i32 %temp3
; CHECK: call i16 @strcmp
; CHECK: ret i16 %temp1
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8]* @hello, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%temp1 = call i16 @strcmp(i8* %str1, i8* %str2)
ret i16 %temp1
}
define void @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strcpy(i8* %dst, i8* %src)
; CHECK: @llvm.memcpy.p0i8.p0i8.i32
define i8* @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
%ret = call i8* @strcpy(i8* %dst, i8* %dst)
; CHECK: ret i8* getelementptr inbounds ([32 x i8]* @a, i32 0, i32 0)
define i8* @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [32 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [32 x i8], [32 x i8]* @b, i32 0, i32 0
%ret = call i8* @strcpy(i8* %dst, i8* %src)
; CHECK: call i8* @strcpy
define void @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i16* @strcpy(i8* %dst, i8* %src)
; CHECK: call i16* @strcpy
define i8* @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0)
define i8* @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0)
define i8* @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0)
define i8* @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: %strcpy = call i8* @strcpy(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @b, i32 0, i32 0))
; CHECK-NEXT: ret i8* %strcpy
define i8* @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
; CHECK-NEXT: %1 = call i8* @__memcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 %len)
define i8* @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
; CHECK-NEXT: %len = call i32 @llvm.objectsize.i32.p0i8(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
; CHECK-NEXT: %ret = call i8* @__strcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i32 %len)
define i8* @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: %ret = call i8* @__strcpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @b, i32 0, i32 0), i32 8)
; CHECK-NEXT: ret i8* %ret
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = getelementptr inbounds [60 x i16]* @a, i32 0, i32 0
- %src = getelementptr inbounds [8 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i16], [60 x i16]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [8 x i8], [8 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call i16* @__strcpy_chk
call i16* @__strcpy_chk(i16* %dst, i8* %src, i32 8)
; CHECK: @__strcpy_chk(i8* %arraydecay, i8* %i, i64 32)
entry:
%s = alloca [32 x i8], align 16
- %arraydecay = getelementptr inbounds [32 x i8]* %s, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %s, i32 0, i32 0
%call = call i8* @__strcpy_chk(i8* %arraydecay, i8* %i, i64 32)
call void @func2(i8* %arraydecay)
ret void
define i64 @test_simplify1(i8* %str) {
; CHECK-LABEL: @test_simplify1(
- %pat = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %pat = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i64 @strcspn(i8* %str, i8* %pat)
; CHECK-NEXT: [[VAR:%[a-z]+]] = call i64 @strlen(i8* %str)
define i64 @test_simplify2(i8* %pat) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i64 @strcspn(i8* %str, i8* %pat)
ret i64 %ret
define i64 @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr [6 x i8]* @abcba, i32 0, i32 0
- %pat = getelementptr [4 x i8]* @abc, i32 0, i32 0
+ %str = getelementptr [6 x i8], [6 x i8]* @abcba, i32 0, i32 0
+ %pat = getelementptr [4 x i8], [4 x i8]* @abc, i32 0, i32 0
%ret = call i64 @strcspn(i8* %str, i8* %pat)
ret i64 %ret
define double @test_no_simplify1(i8* %pat) {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call double @strcspn(i8* %str, i8* %pat)
; CHECK-NEXT: call double @strcspn
define i32 @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %hello_p = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%hello_l = call i32 @strlen(i8* %hello_p)
ret i32 %hello_l
; CHECK-NEXT: ret i32 5
define i32 @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%null_l = call i32 @strlen(i8* %null_p)
ret i32 %null_l
; CHECK-NEXT: ret i32 0
define i32 @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %null_hello_p = getelementptr [7 x i8]* @null_hello, i32 0, i32 0
+ %null_hello_p = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
%null_hello_l = call i32 @strlen(i8* %null_hello_p)
ret i32 %null_hello_l
; CHECK-NEXT: ret i32 0
define i1 @test_simplify5() {
; CHECK-LABEL: @test_simplify5(
- %hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %hello_p = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%hello_l = call i32 @strlen(i8* %hello_p)
%eq_hello = icmp eq i32 %hello_l, 0
ret i1 %eq_hello
define i1 @test_simplify6() {
; CHECK-LABEL: @test_simplify6(
- %null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%null_l = call i32 @strlen(i8* %null_p)
%eq_null = icmp eq i32 %null_l, 0
ret i1 %eq_null
define i1 @test_simplify7() {
; CHECK-LABEL: @test_simplify7(
- %hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %hello_p = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%hello_l = call i32 @strlen(i8* %hello_p)
%ne_hello = icmp ne i32 %hello_l, 0
ret i1 %ne_hello
define i1 @test_simplify8() {
; CHECK-LABEL: @test_simplify8(
- %null_p = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %null_p = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%null_l = call i32 @strlen(i8* %null_p)
%ne_null = icmp ne i32 %null_l, 0
ret i1 %ne_null
define i32 @test_simplify9(i1 %x) {
; CHECK-LABEL: @test_simplify9
- %hello = getelementptr [6 x i8]* @hello, i32 0, i32 0
- %longer = getelementptr [7 x i8]* @longer, i32 0, i32 0
+ %hello = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
+ %longer = getelementptr [7 x i8], [7 x i8]* @longer, i32 0, i32 0
%s = select i1 %x, i8* %hello, i8* %longer
%l = call i32 @strlen(i8* %s)
; CHECK-NEXT: select i1 %x, i32 5, i32 6
define i32 @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %a_p = getelementptr [32 x i8]* @a, i32 0, i32 0
+ %a_p = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
%a_l = call i32 @strlen(i8* %a_p)
; CHECK-NEXT: %a_l = call i32 @strlen
ret i32 %a_l
define i32 @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %hello_p = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %hello_p = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%hello_l = call i32 @strlen(i8* %hello_p, i32 187)
; CHECK-NEXT: %hello_l = call i32 @strlen
ret i32 %hello_l
; CHECK: call i32 @puts
%target = alloca [1024 x i8]
- %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
+ %arg1 = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
store i8 0, i8* %arg1
; rslt1 = strncat(target, "hello\00")
- %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %arg2 = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%rslt1 = call i8* @strncat(i8* %arg1, i8* %arg2, i32 6)
; rslt2 = strncat(rslt1, "\00")
- %arg3 = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %arg3 = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%rslt2 = call i8* @strncat(i8* %rslt1, i8* %arg3, i32 42)
; rslt3 = strncat(rslt2, "\00hello\00")
- %arg4 = getelementptr [7 x i8]* @null_hello, i32 0, i32 0
+ %arg4 = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
%rslt3 = call i8* @strncat(i8* %rslt2, i8* %arg4, i32 42)
call i32 @puts(i8* %rslt3)
; CHECK-NOT: call i8* @strncat
; CHECK: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strncat(i8* %dst, i8* %src, i32 13)
ret void
}
; CHECK-LABEL: @test_simplify2(
; CHECK-NEXT: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [1 x i8]* @empty, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [1 x i8], [1 x i8]* @empty, i32 0, i32 0
call i8* @strncat(i8* %dst, i8* %src, i32 13)
ret void
}
; CHECK-LABEL: @test_simplify3(
; CHECK-NEXT: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strncat(i8* %dst, i8* %src, i32 0)
ret void
}
; CHECK: call i8* @strncat
; CHECK: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strncat(i8* %dst, i8* %src, i32 1)
ret void
}
; CHECK: call i16* @strncat
; CHECK: ret void
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i16* @strncat(i8* %dst, i8* %src, i32 13)
ret void
}
; CHECK: %2 = sub nsw i32 0, %1
; CHECK: ret i32 %2
- %str1 = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %str1 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
ret i32 %temp1
}
; CHECK: %1 = zext i8 %strcmpload to i32
; CHECK: ret i32 %1
- %str2 = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
ret i32 %temp1
}
; CHECK-LABEL: @test3(
; CHECK: ret i32 -1
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8]* @hello, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
ret i32 %temp1
}
; CHECK-LABEL: @test4(
; CHECK: ret i32 1
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 10)
ret i32 %temp1
}
; CHECK-LABEL: @test5(
; CHECK: ret i32 0
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8]* @hello, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%temp1 = call i32 @strncmp(i8* %str1, i8* %str2, i32 4)
ret i32 %temp1
}
; CHECK: call i16 @strncmp
; CHECK: ret i16 %temp1
- %str1 = getelementptr inbounds [5 x i8]* @hell, i32 0, i32 0
- %str2 = getelementptr inbounds [6 x i8]* @hello, i32 0, i32 0
+ %str1 = getelementptr inbounds [5 x i8], [5 x i8]* @hell, i32 0, i32 0
+ %str2 = getelementptr inbounds [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%temp1 = call i16 @strncmp(i8* %str1, i8* %str2, i32 10)
ret i16 %temp1
}
; CHECK-NOT: call i8* @strncpy
; CHECK: call i32 @puts
%target = alloca [1024 x i8]
- %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
+ %arg1 = getelementptr [1024 x i8], [1024 x i8]* %target, i32 0, i32 0
store i8 0, i8* %arg1
- %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %arg2 = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%rslt1 = call i8* @strncpy(i8* %arg1, i8* %arg2, i32 6)
- %arg3 = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %arg3 = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%rslt2 = call i8* @strncpy(i8* %rslt1, i8* %arg3, i32 42)
- %arg4 = getelementptr [7 x i8]* @null_hello, i32 0, i32 0
+ %arg4 = getelementptr [7 x i8], [7 x i8]* @null_hello, i32 0, i32 0
%rslt3 = call i8* @strncpy(i8* %rslt2, i8* %arg4, i32 42)
call i32 @puts( i8* %rslt3 )
define void @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
call i8* @strncpy(i8* %dst, i8* %src, i32 32)
; CHECK: call void @llvm.memset.p0i8.i32
define i8* @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
%ret = call i8* @strncpy(i8* %dst, i8* %src, i32 0)
ret i8* %ret
define void @test_simplify4() {
; CHECK-LABEL: @test_simplify4(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strncpy(i8* %dst, i8* %src, i32 6)
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i32
define void @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [32 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [32 x i8], [32 x i8]* @b, i32 0, i32 0
call i8* @strncpy(i8* %dst, i8* %src, i32 32)
; CHECK: call i8* @strncpy
define void @test_no_simplify2() {
; CHECK-LABEL: @test_no_simplify2(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i8* @strncpy(i8* %dst, i8* %src, i32 8)
; CHECK: call i8* @strncpy
define void @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr [32 x i8]* @a, i32 0, i32 0
- %src = getelementptr [6 x i8]* @hello, i32 0, i32 0
+ %dst = getelementptr [32 x i8], [32 x i8]* @a, i32 0, i32 0
+ %src = getelementptr [6 x i8], [6 x i8]* @hello, i32 0, i32 0
call i16* @strncpy(i8* %dst, i8* %src, i32 6)
; CHECK: call i16* @strncpy
define i8* @test_simplify1() {
; CHECK-LABEL: @test_simplify1(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0)
define i8* @test_simplify2() {
; CHECK-LABEL: @test_simplify2(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 12, i32 1, i1 false)
; CHECK-NEXT: ret i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0)
define i8* @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: %strncpy = call i8* @strncpy(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @b, i32 0, i32 0), i32 12)
; CHECK-NEXT: ret i8* %strncpy
define i8* @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [12 x i8]* @.str, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [12 x i8], [12 x i8]* @.str, i32 0, i32 0
; CHECK-NEXT: %ret = call i8* @__strncpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8]* @.str, i32 0, i32 0), i32 8, i32 4)
; CHECK-NEXT: ret i8* %ret
define i8* @test_no_simplify2() {
; CHECK-LABEL: @test_no_simplify2(
- %dst = getelementptr inbounds [60 x i8]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i8]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i8], [60 x i8]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i8], [60 x i8]* @b, i32 0, i32 0
; CHECK-NEXT: %ret = call i8* @__strncpy_chk(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i8* getelementptr inbounds ([60 x i8]* @b, i32 0, i32 0), i32 8, i32 0)
; CHECK-NEXT: ret i8* %ret
define void @test_no_simplify() {
; CHECK-LABEL: @test_no_simplify(
- %dst = getelementptr inbounds [60 x i16]* @a, i32 0, i32 0
- %src = getelementptr inbounds [60 x i16]* @b, i32 0, i32 0
+ %dst = getelementptr inbounds [60 x i16], [60 x i16]* @a, i32 0, i32 0
+ %src = getelementptr inbounds [60 x i16], [60 x i16]* @b, i32 0, i32 0
; CHECK-NEXT: call i16* @__strncpy_chk
call i16* @__strncpy_chk(i16* %dst, i16* %src, i32 60, i32 60)
define i8* @test_simplify1(i8* %str) {
; CHECK-LABEL: @test_simplify1(
- %pat = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %pat = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i8* @strpbrk(i8* %str, i8* %pat)
ret i8* %ret
define i8* @test_simplify2(i8* %pat) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i8* @strpbrk(i8* %str, i8* %pat)
ret i8* %ret
define i8* @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr [12 x i8]* @hello, i32 0, i32 0
- %pat = getelementptr [2 x i8]* @w, i32 0, i32 0
+ %str = getelementptr [12 x i8], [12 x i8]* @hello, i32 0, i32 0
+ %pat = getelementptr [2 x i8], [2 x i8]* @w, i32 0, i32 0
%ret = call i8* @strpbrk(i8* %str, i8* %pat)
ret i8* %ret
define i8* @test_simplify4(i8* %str) {
; CHECK-LABEL: @test_simplify4(
- %pat = getelementptr [2 x i8]* @w, i32 0, i32 0
+ %pat = getelementptr [2 x i8], [2 x i8]* @w, i32 0, i32 0
%ret = call i8* @strpbrk(i8* %str, i8* %pat)
; CHECK-NEXT: [[VAR:%[a-z]+]] = call i8* @strchr(i8* %str, i32 119)
define i16* @test_no_simplify1() {
; CHECK-LABEL: @test_no_simplify1(
- %str = getelementptr [12 x i8]* @hello, i32 0, i32 0
- %pat = getelementptr [2 x i8]* @w, i32 0, i32 0
+ %str = getelementptr [12 x i8], [12 x i8]* @hello, i32 0, i32 0
+ %pat = getelementptr [2 x i8], [2 x i8]* @w, i32 0, i32 0
%ret = call i16* @strpbrk(i8* %str, i8* %pat)
; CHECK-NEXT: %ret = call i16* @strpbrk
; CHECK-NOT: call i8* @strrchr
; CHECK: ret void
- %str = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strrchr(i8* %str, i32 119)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strrchr
; CHECK: ret void
- %str = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%dst = call i8* @strrchr(i8* %str, i32 119)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strrchr
; CHECK: ret void
- %src = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strrchr(i8* %src, i32 0)
store i8* %dst, i8** @chp
ret void
; CHECK-NOT: call i8* @strrchr
; CHECK: ret void
- %src = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strrchr(i8* %src, i32 65280)
store i8* %dst, i8** @chp
ret void
; CHECK: call i8* @strrchr
; CHECK: ret void
- %src = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %src = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8* @strrchr(i8* %src, i32 %chr)
store i8* %dst, i8** @chp
ret void
; CHECK: call i8 @strrchr
; CHECK: ret void
- %str = getelementptr [14 x i8]* @hello, i32 0, i32 0
+ %str = getelementptr [14 x i8], [14 x i8]* @hello, i32 0, i32 0
%dst = call i8 @strrchr(i8* %str, i32 119)
store i8 %dst, i8* @chr
ret void
define i64 @test_simplify1(i8* %str) {
; CHECK-LABEL: @test_simplify1(
- %pat = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %pat = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i64 @strspn(i8* %str, i8* %pat)
ret i64 %ret
define i64 @test_simplify2(i8* %pat) {
; CHECK-LABEL: @test_simplify2(
- %str = getelementptr [1 x i8]* @null, i32 0, i32 0
+ %str = getelementptr [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i64 @strspn(i8* %str, i8* %pat)
ret i64 %ret
define i64 @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr [6 x i8]* @abcba, i32 0, i32 0
- %pat = getelementptr [4 x i8]* @abc, i32 0, i32 0
+ %str = getelementptr [6 x i8], [6 x i8]* @abcba, i32 0, i32 0
+ %pat = getelementptr [4 x i8], [4 x i8]* @abc, i32 0, i32 0
%ret = call i64 @strspn(i8* %str, i8* %pat)
ret i64 %ret
define i8* @test_simplify1(i8* %str) {
; CHECK-LABEL: @test_simplify1(
- %pat = getelementptr inbounds [1 x i8]* @.str, i32 0, i32 0
+ %pat = getelementptr inbounds [1 x i8], [1 x i8]* @.str, i32 0, i32 0
%ret = call i8* @strstr(i8* %str, i8* %pat)
ret i8* %ret
; CHECK-NEXT: ret i8* %str
define i8* @test_simplify2(i8* %str) {
; CHECK-LABEL: @test_simplify2(
- %pat = getelementptr inbounds [2 x i8]* @.str1, i32 0, i32 0
+ %pat = getelementptr inbounds [2 x i8], [2 x i8]* @.str1, i32 0, i32 0
%ret = call i8* @strstr(i8* %str, i8* %pat)
ret i8* %ret
; CHECK-NEXT: @strchr(i8* %str, i32 97)
define i8* @test_simplify3() {
; CHECK-LABEL: @test_simplify3(
- %str = getelementptr inbounds [6 x i8]* @.str2, i32 0, i32 0
- %pat = getelementptr inbounds [4 x i8]* @.str3, i32 0, i32 0
+ %str = getelementptr inbounds [6 x i8], [6 x i8]* @.str2, i32 0, i32 0
+ %pat = getelementptr inbounds [4 x i8], [4 x i8]* @.str3, i32 0, i32 0
%ret = call i8* @strstr(i8* %str, i8* %pat)
ret i8* %ret
; CHECK-NEXT: getelementptr inbounds ([6 x i8]* @.str2, i64 0, i64 1)
define i8 @test_no_simplify1(i8* %str) {
; CHECK-LABEL: @test_no_simplify1(
- %pat = getelementptr inbounds [1 x i8]* @null, i32 0, i32 0
+ %pat = getelementptr inbounds [1 x i8], [1 x i8]* @null, i32 0, i32 0
%ret = call i8 @strstr(i8* %str, i8* %pat)
; CHECK-NEXT: call i8 @strstr
ret i8 %ret
%tmp = alloca %struct.test2, align 8
%tmp1 = bitcast %struct.test2* %tmp to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* undef, i64 8, i32 8, i1 false), !tbaa.struct !4
- %tmp2 = getelementptr %struct.test2* %tmp, i32 0, i32 0
+ %tmp2 = getelementptr %struct.test2, %struct.test2* %tmp, i32 0, i32 0
%tmp3 = load i32 (i8*, i32*, double*)*** %tmp2
ret i32 (i8*, i32*, double*)*** %tmp2
}
; rdar://7362831
define i32 @test23(i8* %P, i64 %A){
- %B = getelementptr inbounds i8* %P, i64 %A
+ %B = getelementptr inbounds i8, i8* %P, i64 %A
%C = ptrtoint i8* %B to i64
%D = trunc i64 %C to i32
%E = ptrtoint i8* %P to i64
; CHECK: @test23_as1
; CHECK-NEXT: = trunc i16 %A to i8
; CHECK-NEXT: ret i8
- %B = getelementptr inbounds i8 addrspace(1)* %P, i16 %A
+ %B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
%C = ptrtoint i8 addrspace(1)* %B to i16
%D = trunc i16 %C to i8
%E = ptrtoint i8 addrspace(1)* %P to i16
}
define i64 @test24(i8* %P, i64 %A){
- %B = getelementptr inbounds i8* %P, i64 %A
+ %B = getelementptr inbounds i8, i8* %P, i64 %A
%C = ptrtoint i8* %B to i64
%E = ptrtoint i8* %P to i64
%G = sub i64 %C, %E
define i16 @test24_as1(i8 addrspace(1)* %P, i16 %A) {
; CHECK: @test24_as1
; CHECK-NEXT: ret i16 %A
- %B = getelementptr inbounds i8 addrspace(1)* %P, i16 %A
+ %B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
%C = ptrtoint i8 addrspace(1)* %B to i16
%E = ptrtoint i8 addrspace(1)* %P to i16
%G = sub i16 %C, %E
}
define i64 @test24a(i8* %P, i64 %A){
- %B = getelementptr inbounds i8* %P, i64 %A
+ %B = getelementptr inbounds i8, i8* %P, i64 %A
%C = ptrtoint i8* %B to i64
%E = ptrtoint i8* %P to i64
%G = sub i64 %E, %C
; CHECK: @test24a_as1
; CHECK-NEXT: sub i16 0, %A
; CHECK-NEXT: ret i16
- %B = getelementptr inbounds i8 addrspace(1)* %P, i16 %A
+ %B = getelementptr inbounds i8, i8 addrspace(1)* %P, i16 %A
%C = ptrtoint i8 addrspace(1)* %B to i16
%E = ptrtoint i8 addrspace(1)* %P to i16
%G = sub i16 %E, %C
@Arr = external global [42 x i16]
define i64 @test24b(i8* %P, i64 %A){
- %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
%C = ptrtoint i16* %B to i64
%G = sub i64 %C, ptrtoint ([42 x i16]* @Arr to i64)
ret i64 %G
define i64 @test25(i8* %P, i64 %A){
- %B = getelementptr inbounds [42 x i16]* @Arr, i64 0, i64 %A
+ %B = getelementptr inbounds [42 x i16], [42 x i16]* @Arr, i64 0, i64 %A
%C = ptrtoint i16* %B to i64
%G = sub i64 %C, ptrtoint (i16* getelementptr ([42 x i16]* @Arr, i64 1, i64 0) to i64)
ret i64 %G
; CHECK-NEXT: shl nuw i16 %1, 1
; CHECK-NEXT: add i16 {{.*}}, -84
; CHECK-NEXT: ret i16
- %B = getelementptr inbounds [42 x i16] addrspace(1)* @Arr_as1, i64 0, i64 %A
+ %B = getelementptr inbounds [42 x i16], [42 x i16] addrspace(1)* @Arr_as1, i64 0, i64 %A
%C = ptrtoint i16 addrspace(1)* %B to i16
%G = sub i16 %C, ptrtoint (i16 addrspace(1)* getelementptr ([42 x i16] addrspace(1)* @Arr_as1, i64 1, i64 0) to i16)
ret i16 %G
}
define i64 @test29(i8* %foo, i64 %i, i64 %j) {
- %gep1 = getelementptr inbounds i8* %foo, i64 %i
- %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %gep1 = getelementptr inbounds i8, i8* %foo, i64 %i
+ %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
%cast1 = ptrtoint i8* %gep1 to i64
%cast2 = ptrtoint i8* %gep2 to i64
%sub = sub i64 %cast1, %cast2
define i64 @test30(i8* %foo, i64 %i, i64 %j) {
%bit = bitcast i8* %foo to i32*
- %gep1 = getelementptr inbounds i32* %bit, i64 %i
- %gep2 = getelementptr inbounds i8* %foo, i64 %j
+ %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
+ %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
%cast1 = ptrtoint i32* %gep1 to i64
%cast2 = ptrtoint i8* %gep2 to i64
%sub = sub i64 %cast1, %cast2
; CHECK-NEXT: sub i16 %gep1.idx, %j
; CHECK-NEXT: ret i16
%bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
- %gep1 = getelementptr inbounds i32 addrspace(1)* %bit, i16 %i
- %gep2 = getelementptr inbounds i8 addrspace(1)* %foo, i16 %j
+ %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
+ %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
%cast1 = ptrtoint i32 addrspace(1)* %gep1 to i16
%cast2 = ptrtoint i8 addrspace(1)* %gep2 to i16
%sub = sub i16 %cast1, %cast2
%end = icmp ult i32 %elt, %limit
%3 = add i32 10, %elt
%4 = sext i32 %elt to i64
- %5 = getelementptr i32* %ptr, i64 %4
+ %5 = getelementptr i32, i32* %ptr, i64 %4
store i32 %3, i32* %5
%inc = add <16 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
br i1 %end, label %loop, label %ret
%dim31 = insertelement <4 x i32> %dim30, i32 %a, i32 2
%dim32 = insertelement <4 x i32> %dim31, i32 %a, i32 3
- %offset_ptr = getelementptr <4 x float>* null, i32 1
+ %offset_ptr = getelementptr <4 x float>, <4 x float>* null, i32 1
%offset_int = ptrtoint <4 x float>* %offset_ptr to i64
%sizeof32 = trunc i64 %offset_int to i32
}
define <2 x i1> @test3(<2 x i8*> %a) {
- %g = getelementptr <2 x i8*> %a, <2 x i32> <i32 1, i32 0>
+ %g = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 1, i32 0>
%B = icmp ult <2 x i8*> %g, zeroinitializer
ret <2 x i1> %B
}
define <1 x i1> @test4(<1 x i8*> %a) {
- %g = getelementptr <1 x i8*> %a, <1 x i32> <i32 1>
+ %g = getelementptr i8, <1 x i8*> %a, <1 x i32> <i32 1>
%B = icmp ult <1 x i8*> %g, zeroinitializer
ret <1 x i1> %B
}
define <2 x i1> @test5(<2 x i8*> %a) {
- %w = getelementptr <2 x i8*> %a, <2 x i32> zeroinitializer
- %e = getelementptr <2 x i8*> %w, <2 x i32> <i32 5, i32 9>
- %g = getelementptr <2 x i8*> %e, <2 x i32> <i32 1, i32 0>
+ %w = getelementptr i8, <2 x i8*> %a, <2 x i32> zeroinitializer
+ %e = getelementptr i8, <2 x i8*> %w, <2 x i32> <i32 5, i32 9>
+ %g = getelementptr i8, <2 x i8*> %e, <2 x i32> <i32 1, i32 0>
%B = icmp ult <2 x i8*> %g, zeroinitializer
ret <2 x i1> %B
}
define <2 x i32*> @test7(<2 x {i32, i32}*> %a) {
- %w = getelementptr <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
+ %w = getelementptr {i32, i32}, <2 x {i32, i32}*> %a, <2 x i32> <i32 5, i32 9>, <2 x i32> zeroinitializer
ret <2 x i32*> %w
}
define <2 x i8*> @testa(<2 x i8*> %a) {
; CHECK-LABEL: @testa(
- %g = getelementptr <2 x i8*> %a, <2 x i32> <i32 0, i32 1>
-; CHECK: getelementptr <2 x i8*> %a, <2 x i64> <i64 0, i64 1>
+ %g = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 0, i32 1>
+; CHECK: getelementptr i8, <2 x i8*> %a, <2 x i64> <i64 0, i64 1>
ret <2 x i8*> %g
}
; CHECK: ret i32 %temp1
entry:
- %str1 = getelementptr inbounds [2 x i8]* @fake_init, i64 0, i64 0
- %str2 = getelementptr inbounds [2 x i8]* @.str, i64 0, i64 0
+ %str1 = getelementptr inbounds [2 x i8], [2 x i8]* @fake_init, i64 0, i64 0
+ %str2 = getelementptr inbounds [2 x i8], [2 x i8]* @.str, i64 0, i64 0
%temp1 = call i32 @strcmp(i8* %str1, i8* %str2) nounwind readonly
ret i32 %temp1
}
; CHECK: ret i32 0
entry:
- %str1 = getelementptr inbounds [2 x i8]* @real_init, i64 0, i64 0
- %str2 = getelementptr inbounds [2 x i8]* @.str, i64 0, i64 0
+ %str1 = getelementptr inbounds [2 x i8], [2 x i8]* @real_init, i64 0, i64 0
+ %str2 = getelementptr inbounds [2 x i8], [2 x i8]* @.str, i64 0, i64 0
%temp1 = call i32 @strcmp(i8* %str1, i8* %str2) nounwind readonly
ret i32 %temp1
}
%tmp3 = and i32 %tmp2, 2 ; <i32> [#uses=1]
%tmp5 = and i32 %blk_i, 1 ; <i32> [#uses=1]
%tmp6 = or i32 %tmp3, %tmp5 ; <i32> [#uses=1]
- %tmp8 = getelementptr %struct.FooBar* %up, i32 0, i32 7 ; <i16*> [#uses=1]
+ %tmp8 = getelementptr %struct.FooBar, %struct.FooBar* %up, i32 0, i32 7 ; <i16*> [#uses=1]
%tmp9 = load i16* %tmp8, align 1 ; <i16> [#uses=1]
%tmp910 = zext i16 %tmp9 to i32 ; <i32> [#uses=1]
- %tmp12 = getelementptr [4 x i8]* @some_idx, i32 0, i32 %tmp6 ; <i8*> [#uses=1]
+ %tmp12 = getelementptr [4 x i8], [4 x i8]* @some_idx, i32 0, i32 %tmp6 ; <i8*> [#uses=1]
%tmp13 = load i8* %tmp12, align 1 ; <i8> [#uses=1]
%tmp1314 = zext i8 %tmp13 to i32 ; <i32> [#uses=1]
%tmp151 = lshr i32 %tmp910, %tmp1314 ; <i32> [#uses=1]
%tmp1516 = trunc i32 %tmp151 to i8 ; <i8> [#uses=1]
- %tmp18 = getelementptr %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp18 = getelementptr %struct.FooBar, %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp19 = load i8* %tmp18, align 1 ; <i8> [#uses=1]
%tmp22 = and i8 %tmp1516, %tmp19 ; <i8> [#uses=1]
- %tmp24 = getelementptr %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp24 = getelementptr %struct.FooBar, %struct.FooBar* %up, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp25 = load i8* %tmp24, align 1 ; <i8> [#uses=1]
%tmp26.mask = and i8 %tmp25, 1 ; <i8> [#uses=1]
%toBool = icmp eq i8 %tmp26.mask, 0 ; <i1> [#uses=1]
; CHECK-LABEL: for.body
; CHECK: load
-; CHECK: %2 = getelementptr inbounds i32* %in, i64 %indvars.iv
+; CHECK: %2 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
; CHECK: %3 = load i32* %2, align 4
for.body: ; preds = %for.body.lr.ph, %for.inc
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.inc ]
- %arrayidx = getelementptr inbounds i32* %trigger, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %trigger, i64 %indvars.iv
%1 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %1, 0
br i1 %cmp1, label %if.then, label %if.else
; CHECK-LABEL: if.then
if.then: ; preds = %for.body
; This load should be hoisted
- %arrayidx3 = getelementptr inbounds i32* %in, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%conv = sitofp i32 %2 to float
%add = fadd float %conv, 5.000000e-01
- %arrayidx5 = getelementptr inbounds float* %out, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds float, float* %out, i64 %indvars.iv
store float %add, float* %arrayidx5, align 4
br label %for.inc
if.else: ; preds = %for.body
- %arrayidx7 = getelementptr inbounds float* %out, i64 %indvars.iv
+ %arrayidx7 = getelementptr inbounds float, float* %out, i64 %indvars.iv
%3 = load float* %arrayidx7, align 4
%div = fdiv float %3, 3.000000e+00
store float %div, float* %arrayidx7, align 4
; This load should be hoisted in spite of store
- %arrayidx9 = getelementptr inbounds i32* %in, i64 %indvars.iv
+ %arrayidx9 = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
%4 = load i32* %arrayidx9, align 4
%conv10 = sitofp i32 %4 to float
%add13 = fadd float %div, %conv10
define i64 @foo(%struct.node* nocapture readonly %r) nounwind {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%tobool18 = icmp eq %struct.node* %node.017, null
br i1 %tobool18, label %while.end, label %while.body.preheader
while.body: ; preds = %while.body.preheader, %if.end
%node.020 = phi %struct.node* [ %node.0, %if.end ], [ %node.017, %while.body.preheader ]
%sum.019 = phi i64 [ %inc, %if.end ], [ 0, %while.body.preheader ]
- %orientation = getelementptr inbounds %struct.node* %node.020, i64 0, i32 4
+ %orientation = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 4
%0 = load i64* %orientation, align 8
%cmp = icmp eq i64 %0, 1
br i1 %cmp, label %if.then, label %if.else
; CHECK: if.then
if.then: ; preds = %while.body
- %a = getelementptr inbounds %struct.node* %node.020, i64 0, i32 5
+ %a = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 5
; CHECK-NOT: load %struct.arc
%1 = load %struct.arc** %a, align 8
- %cost = getelementptr inbounds %struct.arc* %1, i64 0, i32 0
+ %cost = getelementptr inbounds %struct.arc, %struct.arc* %1, i64 0, i32 0
; CHECK-NOT: load i64*
%2 = load i64* %cost, align 8
- %pred = getelementptr inbounds %struct.node* %node.020, i64 0, i32 1
+ %pred = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 1
; CHECK-NOT: load %struct.node**
%3 = load %struct.node** %pred, align 8
- %p = getelementptr inbounds %struct.node* %3, i64 0, i32 6
+ %p = getelementptr inbounds %struct.node, %struct.node* %3, i64 0, i32 6
; CHECK-NOT: load i64*
%4 = load i64* %p, align 8
%add = add nsw i64 %4, %2
- %p1 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
; CHECK-NOT: store i64
store i64 %add, i64* %p1, align 8
br label %if.end
; CHECK: if.else
if.else: ; preds = %while.body
- %pred2 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 1
+ %pred2 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 1
; CHECK-NOT: load %struct.node**
%5 = load %struct.node** %pred2, align 8
- %p3 = getelementptr inbounds %struct.node* %5, i64 0, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %5, i64 0, i32 6
; CHECK-NOT: load i64*
%6 = load i64* %p3, align 8
- %a4 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 5
+ %a4 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 5
; CHECK-NOT: load %struct.arc**
%7 = load %struct.arc** %a4, align 8
- %cost5 = getelementptr inbounds %struct.arc* %7, i64 0, i32 0
+ %cost5 = getelementptr inbounds %struct.arc, %struct.arc* %7, i64 0, i32 0
; CHECK-NOT: load i64*
%8 = load i64* %cost5, align 8
%sub = sub nsw i64 %6, %8
- %p6 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 6
+ %p6 = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 6
; CHECK-NOT: store i64
store i64 %sub, i64* %p6, align 8
br label %if.end
if.end: ; preds = %if.else, %if.then
; CHECK: store
%inc = add nsw i64 %sum.019, 1
- %node.0.in = getelementptr inbounds %struct.node* %node.020, i64 0, i32 2
+ %node.0.in = getelementptr inbounds %struct.node, %struct.node* %node.020, i64 0, i32 2
%node.0 = load %struct.node** %node.0.in, align 8
%tobool = icmp eq %struct.node* %node.0, null
br i1 %tobool, label %while.end.loopexit, label %while.body
; Function Attrs: nounwind uwtable
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
; CHECK: if.then
if.then: ; preds = %entry
%1 = load i32* %index.addr, align 4
- %p1 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
store i32 %1, i32* %p1, align 4
br label %if.end
if.else: ; preds = %entry
%2 = load i32* %index.addr, align 4
%add = add nsw i32 %2, 1
- %p3 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
store i32 %add, i32* %p3, align 4
call i32 @foo(i32 5) ;barrier
%idxprom = sext i32 %1 to i64
%2 = load i32* @b, align 4
%idxprom1 = sext i32 %2 to i64
- %arrayidx = getelementptr inbounds [1 x [3 x i8]]* @f, i32 0, i64 %idxprom1
- %arrayidx2 = getelementptr inbounds [3 x i8]* %arrayidx, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [1 x [3 x i8]], [1 x [3 x i8]]* @f, i32 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [3 x i8], [3 x i8]* %arrayidx, i32 0, i64 %idxprom
store i8 0, i8* %arrayidx2, align 1
store i32 0, i32* @e, align 4
br label %for.cond3
if.end: ; preds = %if.then, %for.body5
%6 = load i32* @e, align 4
%idxprom6 = sext i32 %6 to i64
- %arrayidx7 = getelementptr inbounds [3 x i8]* getelementptr inbounds ([1 x [3 x i8]]* @f, i32 0, i64 0), i32 0, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds [3 x i8], [3 x i8]* getelementptr inbounds ([1 x [3 x i8]]* @f, i32 0, i64 0), i32 0, i64 %idxprom6
store i8 1, i8* %arrayidx7, align 1
br label %for.inc
; Function Attrs: nounwind uwtable
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
; CHECK: if.then
if.then: ; preds = %entry
%1 = load i32* %index.addr, align 4
- %p1 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
br label %if.end
if.else: ; preds = %entry
%2 = load i32* %index.addr, align 4
%add = add nsw i32 %2, 1
- %p3 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %add, i32* %p3, align 4
call i32 @foo(i32 5) ;not a barrier
; Function Attrs: nounwind uwtable
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
; CHECK: if.then
if.then: ; preds = %entry
%1 = load i32* %index.addr, align 4
- %p1 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
- %p2 = getelementptr inbounds %struct.node* %node.017, i32 5, i32 6
+ %p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 5, i32 6
; CHECK: load i32*
%not_barrier = load i32 * %p2, align 4
br label %if.end
if.else: ; preds = %entry
%2 = load i32* %index.addr, align 4
%add = add nsw i32 %2, 1
- %p3 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %add, i32* %p3, align 4
br label %if.end
; Function Attrs: nounwind uwtable
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
; CHECK: if.then
if.then: ; preds = %entry
%1 = load i32* %index.addr, align 4
- %p1 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
br label %if.end
if.else: ; preds = %entry
%2 = load i32* %index.addr, align 4
%add = add nsw i32 %2, 1
- %p2 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
store i32 %add, i32* %p2, align 4
- %p3 = getelementptr inbounds %struct.node* %node.017, i32 5, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 5, i32 6
; CHECK: store i32
store i32 %add, i32* %p3, align 4 ; This is not a barrier
br label %if.end
; Function Attrs: nounwind uwtable
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
; CHECK: if.then
if.then: ; preds = %entry
%1 = load i32* %index.addr, align 4
- %p1 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p1, align 4
- %p2 = getelementptr inbounds %struct.node* %node.017, i32 4, i32 6
+ %p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 4, i32 6
; CHECK-NOT: store i32
store i32 %1, i32* %p2, align 4
br label %if.end
if.else: ; preds = %entry
%2 = load i32* %index.addr, align 4
%add = add nsw i32 %2, 1
- %p3 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK-NOT: store i32
store i32 %add, i32* %p3, align 4
- %p4 = getelementptr inbounds %struct.node* %node.017, i32 4, i32 6
+ %p4 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 4, i32 6
; CHECK-NOT: store i32
store i32 %2, i32* %p4, align 4
br label %if.end
; Function Attrs: nounwind uwtable
define void @sink_store(%struct.node* nocapture %r, i32 %index) {
entry:
- %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
+ %node.0.in16 = getelementptr inbounds %struct.node, %struct.node* %r, i64 0, i32 2
%node.017 = load %struct.node** %node.0.in16, align 8
%index.addr = alloca i32, align 4
store i32 %index, i32* %index.addr, align 4
; CHECK: if.then
if.then: ; preds = %entry
%1 = load i32* %index.addr, align 4
- %p1 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p1 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
store i32 %1, i32* %p1, align 4
- %p2 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p2 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: load i32*
%barrier = load i32 * %p2, align 4
br label %if.end
if.else: ; preds = %entry
%2 = load i32* %index.addr, align 4
%add = add nsw i32 %2, 1
- %p3 = getelementptr inbounds %struct.node* %node.017, i32 0, i32 6
+ %p3 = getelementptr inbounds %struct.node, %struct.node* %node.017, i32 0, i32 6
; CHECK: store i32
store i32 %add, i32* %p3, align 4
br label %if.end
br i1 %cmp, label %cast.end, label %cast.notnull
cast.notnull: ; preds = %entry
- %add.ptr = getelementptr inbounds i8* %call, i64 4
+ %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
br label %cast.end
cast.end: ; preds = %cast.notnull, %entry
br i1 %cmp, label %cast.end, label %cast.notnull
cast.notnull: ; preds = %entry
- %add.ptr = getelementptr inbounds i8* %call, i64 4
+ %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
br label %cast.end
cast.end: ; preds = %cast.notnull, %entry
br i1 %cmp, label %cast.end, label %cast.notnull
cast.notnull: ; preds = %entry
- %add.ptr = getelementptr inbounds i8* %call, i64 4
+ %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
br label %cast.end
cast.end: ; preds = %cast.notnull, %entry
define i1 @gep() {
; CHECK-LABEL: @gep(
%a = alloca [3 x i8], align 8
- %x = getelementptr inbounds [3 x i8]* %a, i32 0, i32 0
+ %x = getelementptr inbounds [3 x i8], [3 x i8]* %a, i32 0, i32 0
%cmp = icmp eq i8* %x, null
ret i1 %cmp
; CHECK-NEXT: ret i1 false
define i1 @gep2() {
; CHECK-LABEL: @gep2(
%a = alloca [3 x i8], align 8
- %x = getelementptr inbounds [3 x i8]* %a, i32 0, i32 0
- %y = getelementptr inbounds [3 x i8]* %a, i32 0, i32 0
+ %x = getelementptr inbounds [3 x i8], [3 x i8]* %a, i32 0, i32 0
+ %y = getelementptr inbounds [3 x i8], [3 x i8]* %a, i32 0, i32 0
%cmp = icmp eq i8* %x, %y
ret i1 %cmp
; CHECK-NEXT: ret i1 true
define i1 @gep3() {
; CHECK-LABEL: @gep3(
%x = alloca %gept, align 8
- %a = getelementptr %gept* %x, i64 0, i32 0
- %b = getelementptr %gept* %x, i64 0, i32 1
+ %a = getelementptr %gept, %gept* %x, i64 0, i32 0
+ %b = getelementptr %gept, %gept* %x, i64 0, i32 1
%equal = icmp eq i32* %a, %b
ret i1 %equal
; CHECK-NEXT: ret i1 false
define i1 @gep4() {
; CHECK-LABEL: @gep4(
%x = alloca %gept, align 8
- %a = getelementptr %gept* @gepy, i64 0, i32 0
- %b = getelementptr %gept* @gepy, i64 0, i32 1
+ %a = getelementptr %gept, %gept* @gepy, i64 0, i32 0
+ %b = getelementptr %gept, %gept* @gepy, i64 0, i32 1
%equal = icmp eq i32* %a, %b
ret i1 %equal
; CHECK-NEXT: ret i1 false
define i1 @gep5() {
; CHECK-LABEL: @gep5(
%x = alloca %gept, align 8
- %a = getelementptr inbounds %gept* %x, i64 0, i32 1
- %b = getelementptr %gept* @gepy, i64 0, i32 0
+ %a = getelementptr inbounds %gept, %gept* %x, i64 0, i32 1
+ %b = getelementptr %gept, %gept* @gepy, i64 0, i32 0
%equal = icmp eq i32* %a, %b
ret i1 %equal
; CHECK-NEXT: ret i1 false
define i1 @gep6(%gept* %x) {
; Same as @gep3 but potentially null.
; CHECK-LABEL: @gep6(
- %a = getelementptr %gept* %x, i64 0, i32 0
- %b = getelementptr %gept* %x, i64 0, i32 1
+ %a = getelementptr %gept, %gept* %x, i64 0, i32 0
+ %b = getelementptr %gept, %gept* %x, i64 0, i32 1
%equal = icmp eq i32* %a, %b
ret i1 %equal
; CHECK-NEXT: ret i1 false
define i1 @gep7(%gept* %x) {
; CHECK-LABEL: @gep7(
- %a = getelementptr %gept* %x, i64 0, i32 0
- %b = getelementptr %gept* @gepz, i64 0, i32 0
+ %a = getelementptr %gept, %gept* %x, i64 0, i32 0
+ %b = getelementptr %gept, %gept* @gepz, i64 0, i32 0
%equal = icmp eq i32* %a, %b
ret i1 %equal
; CHECK: ret i1 %equal
define i1 @gep8(%gept* %x) {
; CHECK-LABEL: @gep8(
- %a = getelementptr %gept* %x, i32 1
- %b = getelementptr %gept* %x, i32 -1
+ %a = getelementptr %gept, %gept* %x, i32 1
+ %b = getelementptr %gept, %gept* %x, i32 -1
%equal = icmp ugt %gept* %a, %b
ret i1 %equal
; CHECK: ret i1 %equal
; CHECK: ret i1 true
entry:
- %first1 = getelementptr inbounds i8* %ptr, i32 0
- %first2 = getelementptr inbounds i8* %first1, i32 1
- %first3 = getelementptr inbounds i8* %first2, i32 2
- %first4 = getelementptr inbounds i8* %first3, i32 4
- %last1 = getelementptr inbounds i8* %first2, i32 48
- %last2 = getelementptr inbounds i8* %last1, i32 8
- %last3 = getelementptr inbounds i8* %last2, i32 -4
- %last4 = getelementptr inbounds i8* %last3, i32 -4
+ %first1 = getelementptr inbounds i8, i8* %ptr, i32 0
+ %first2 = getelementptr inbounds i8, i8* %first1, i32 1
+ %first3 = getelementptr inbounds i8, i8* %first2, i32 2
+ %first4 = getelementptr inbounds i8, i8* %first3, i32 4
+ %last1 = getelementptr inbounds i8, i8* %first2, i32 48
+ %last2 = getelementptr inbounds i8, i8* %last1, i32 8
+ %last3 = getelementptr inbounds i8, i8* %last2, i32 -4
+ %last4 = getelementptr inbounds i8, i8* %last3, i32 -4
%first.int = ptrtoint i8* %first4 to i32
%last.int = ptrtoint i8* %last4 to i32
%cmp = icmp ne i32 %last.int, %first.int
; CHECK: ret i1 true
entry:
- %first1 = getelementptr inbounds i8* %ptr, i32 -2
- %first2 = getelementptr inbounds i8* %first1, i32 44
- %last1 = getelementptr inbounds i8* %ptr, i32 48
- %last2 = getelementptr inbounds i8* %last1, i32 -6
+ %first1 = getelementptr inbounds i8, i8* %ptr, i32 -2
+ %first2 = getelementptr inbounds i8, i8* %first1, i32 44
+ %last1 = getelementptr inbounds i8, i8* %ptr, i32 48
+ %last2 = getelementptr inbounds i8, i8* %last1, i32 -6
%first.int = ptrtoint i8* %first2 to i32
%last.int = ptrtoint i8* %last2 to i32
%cmp = icmp eq i32 %last.int, %first.int
; CHECK: ret i1 true
entry:
- %first1 = getelementptr inbounds i8* %ptr, i32 -2
- %last1 = getelementptr inbounds i8* %ptr, i32 48
- %last2 = getelementptr inbounds i8* %last1, i32 -6
+ %first1 = getelementptr inbounds i8, i8* %ptr, i32 -2
+ %last1 = getelementptr inbounds i8, i8* %ptr, i32 48
+ %last2 = getelementptr inbounds i8, i8* %last1, i32 -6
%cmp = icmp ult i8* %first1, %last2
ret i1 %cmp
}
; CHECK: ret i1 %cmp
entry:
- %first1 = getelementptr inbounds i8* %ptr, i32 -2
- %last1 = getelementptr inbounds i8* %ptr, i32 48
- %last2 = getelementptr inbounds i8* %last1, i32 -6
+ %first1 = getelementptr inbounds i8, i8* %ptr, i32 -2
+ %last1 = getelementptr inbounds i8, i8* %ptr, i32 48
+ %last2 = getelementptr inbounds i8, i8* %last1, i32 -6
%cmp = icmp slt i8* %first1, %last2
ret i1 %cmp
}
define i1 @gep13(i8* %ptr) {
; CHECK-LABEL: @gep13(
; We can prove this GEP is non-null because it is inbounds.
- %x = getelementptr inbounds i8* %ptr, i32 1
+ %x = getelementptr inbounds i8, i8* %ptr, i32 1
%cmp = icmp eq i8* %x, null
ret i1 %cmp
; CHECK-NEXT: ret i1 false
; CHECK-LABEL: @gep14(
; We can't simplify this because the offset of one in the GEP actually doesn't
; move the pointer.
- %x = getelementptr inbounds { {}, i8 }* %ptr, i32 0, i32 1
+ %x = getelementptr inbounds { {}, i8 }, { {}, i8 }* %ptr, i32 0, i32 1
%cmp = icmp eq i8* %x, null
ret i1 %cmp
; CHECK-NOT: ret i1 false
; CHECK-LABEL: @gep15(
; We can prove this GEP is non-null even though there is a user value, as we
; would necessarily violate inbounds on one side or the other.
- %x = getelementptr inbounds { {}, [4 x {i8, i8}]}* %ptr, i32 0, i32 1, i32 %y, i32 1
+ %x = getelementptr inbounds { {}, [4 x {i8, i8}]}, { {}, [4 x {i8, i8}]}* %ptr, i32 0, i32 1, i32 %y, i32 1
%cmp = icmp eq i8* %x, null
ret i1 %cmp
; CHECK-NEXT: ret i1 false
; We can prove this GEP is non-null because it is inbounds and because we know
; %b is non-zero even though we don't know its value.
%b = or i32 %a, 1
- %x = getelementptr inbounds i8* %ptr, i32 %b
+ %x = getelementptr inbounds i8, i8* %ptr, i32 %b
%cmp = icmp eq i8* %x, null
ret i1 %cmp
; CHECK-NEXT: ret i1 false
; PR12013
define i1 @alloca_compare(i64 %idx) {
%sv = alloca { i32, i32, [124 x i32] }
- %1 = getelementptr inbounds { i32, i32, [124 x i32] }* %sv, i32 0, i32 2, i64 %idx
+ %1 = getelementptr inbounds { i32, i32, [124 x i32] }, { i32, i32, [124 x i32] }* %sv, i32 0, i32 2, i64 %idx
%2 = icmp eq i32* %1, null
ret i1 %2
; CHECK: alloca_compare
ret i1 1
unreachableblock:
- %X = getelementptr i32 *%X, i32 1
+ %X = getelementptr i32, i32 *%X, i32 1
%Y = icmp eq i32* %X, null
ret i1 %Y
}
; We can prove this GEP is non-null because it is inbounds and the pointer
; is non-null.
%strs = alloca [1000 x [1001 x i8]], align 16
- %x = getelementptr inbounds [1000 x [1001 x i8]]* %strs, i64 0, i64 %a, i64 %b
+ %x = getelementptr inbounds [1000 x [1001 x i8]], [1000 x [1001 x i8]]* %strs, i64 0, i64 %a, i64 %b
%cmp = icmp eq i8* %x, null
ret i1 %cmp
; CHECK-NEXT: ret i1 false
define i1 @non_inbounds_gep_compare(i64* %a) {
; CHECK-LABEL: @non_inbounds_gep_compare(
; Equality compares with non-inbounds GEPs can be folded.
- %x = getelementptr i64* %a, i64 42
- %y = getelementptr inbounds i64* %x, i64 -42
- %z = getelementptr i64* %a, i64 -42
- %w = getelementptr inbounds i64* %z, i64 42
+ %x = getelementptr i64, i64* %a, i64 42
+ %y = getelementptr inbounds i64, i64* %x, i64 -42
+ %z = getelementptr i64, i64* %a, i64 -42
+ %w = getelementptr inbounds i64, i64* %z, i64 42
%cmp = icmp eq i64* %y, %w
ret i1 %cmp
; CHECK-NEXT: ret i1 true
define i1 @non_inbounds_gep_compare2(i64* %a) {
; CHECK-LABEL: @non_inbounds_gep_compare2(
; Equality compares with non-inbounds GEPs can be folded.
- %x = getelementptr i64* %a, i64 4294967297
- %y = getelementptr i64* %a, i64 1
+ %x = getelementptr i64, i64* %a, i64 4294967297
+ %y = getelementptr i64, i64* %a, i64 1
%cmp = icmp eq i64* %y, %y
ret i1 %cmp
; CHECK-NEXT: ret i1 true
%b_ptr = ptrtoint %struct.A* %b to i64
%sub = sub i64 %e_ptr, %b_ptr
%sdiv = sdiv exact i64 %sub, 7
- %gep = getelementptr inbounds %struct.A* %b, i64 %sdiv
+ %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv
ret %struct.A* %gep
; CHECK-LABEL: @test1
; CHECK-NEXT: ret %struct.A* %e
%e_ptr = ptrtoint i8* %e to i64
%b_ptr = ptrtoint i8* %b to i64
%sub = sub i64 %e_ptr, %b_ptr
- %gep = getelementptr inbounds i8* %b, i64 %sub
+ %gep = getelementptr inbounds i8, i8* %b, i64 %sub
ret i8* %gep
; CHECK-LABEL: @test2
; CHECK-NEXT: ret i8* %e
%b_ptr = ptrtoint i64* %b to i64
%sub = sub i64 %e_ptr, %b_ptr
%ashr = ashr exact i64 %sub, 3
- %gep = getelementptr inbounds i64* %b, i64 %ashr
+ %gep = getelementptr inbounds i64, i64* %b, i64 %ashr
ret i64* %gep
; CHECK-LABEL: @test3
; CHECK-NEXT: ret i64* %e
%b_ptr = ptrtoint %struct.A* %b to i64
%sub = sub i64 0, %b_ptr
%sdiv = sdiv exact i64 %sub, 7
- %gep = getelementptr inbounds %struct.A* %b, i64 %sdiv
+ %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv
ret %struct.A* %gep
; CHECK-LABEL: @test4
; CHECK-NEXT: ret %struct.A* null
define i8* @test5(i8* %b) {
%b_ptr = ptrtoint i8* %b to i64
%sub = sub i64 0, %b_ptr
- %gep = getelementptr inbounds i8* %b, i64 %sub
+ %gep = getelementptr inbounds i8, i8* %b, i64 %sub
ret i8* %gep
; CHECK-LABEL: @test5
; CHECK-NEXT: ret i8* null
%b_ptr = ptrtoint i64* %b to i64
%sub = sub i64 0, %b_ptr
%ashr = ashr exact i64 %sub, 3
- %gep = getelementptr inbounds i64* %b, i64 %ashr
+ %gep = getelementptr inbounds i64, i64* %b, i64 %ashr
ret i64* %gep
; CHECK-LABEL: @test6
; CHECK-NEXT: ret i64* null
%e_ptr = ptrtoint i8** %e to i64
%b_ptr = ptrtoint i8* %b to i64
%sub = sub i64 %e_ptr, %b_ptr
- %gep = getelementptr inbounds i8* %b, i64 %sub
+ %gep = getelementptr inbounds i8, i8* %b, i64 %sub
ret i8* %gep
; CHECK-LABEL: @test7
; CHECK-NEXT: ptrtoint
%1 = bitcast [10 x i32]* %mStackData to i8*
%2 = tail call noalias i8* @_Znam(i64 48) #4
%3 = bitcast i8* %2 to i32*
- %4 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %4 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%5 = icmp eq i32* %3, %4
br i1 %5, label %7, label %6
define void @_Z2p2bb(i1 zeroext %b1, i1 zeroext %b2) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* @g2
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
define void @_Z2p4bb(i1 zeroext %b1, i1 zeroext %b2) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* @g3
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
define void @_Z2p5bb(i1 zeroext %b1, i1 zeroext %b2) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* @g4
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
define void @_Z2p6bb(i1 zeroext %b1, i1 zeroext %b2) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* @g5
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
define void @_Z4nopebbPi(i1 zeroext %b1, i1 zeroext %b2, i32* readnone %q) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* %q
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
define void @_Z2p3bb(i1 zeroext %b1, i1 zeroext %b2) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* @g1
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
define void @_Z2p7bb(i1 zeroext %b1, i1 zeroext %b2) #0 {
%mStackData = alloca [10 x i32], align 16
%1 = bitcast [10 x i32]* %mStackData to i8*
- %2 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %2 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%3 = select i1 %b1, i32* %2, i32* @g6
%4 = tail call noalias i8* @_Znam(i64 48) #4
%5 = tail call noalias i8* @_Znam(i64 48) #4
%1 = bitcast [10 x i32]* %mStackData to i8*
%2 = tail call noalias i8* @_Znam(i64 48) #4
%3 = bitcast i8* %2 to i32*
- %4 = getelementptr inbounds [10 x i32]* %mStackData, i64 0, i64 0
+ %4 = getelementptr inbounds [10 x i32], [10 x i32]* %mStackData, i64 0, i64 0
%5 = icmp eq i32* %3, %4
br i1 %5, label %7, label %6
; Comparing past-the-end addresses of two distinct globals. Never equal.
define zeroext i1 @both_past_the_end() {
- %x = getelementptr i32* @opte_a, i32 1
- %y = getelementptr i32* @opte_b, i32 1
+ %x = getelementptr i32, i32* @opte_a, i32 1
+ %y = getelementptr i32, i32* @opte_b, i32 1
%t = icmp eq i32* %x, %y
ret i1 %t
; CHECK: both_past_the_end(
; of another. Can't fold this.
define zeroext i1 @just_one_past_the_end() {
- %x = getelementptr i32* @opte_a, i32 1
+ %x = getelementptr i32, i32* @opte_a, i32 1
%t = icmp eq i32* %x, @opte_b
ret i1 %t
; CHECK: just_one_past_the_end(
define zeroext i1 @both_past_the_end_alloca() {
%m = alloca i32
%n = alloca i32
- %x = getelementptr i32* %m, i32 1
- %y = getelementptr i32* %n, i32 1
+ %x = getelementptr i32, i32* %m, i32 1
+ %y = getelementptr i32, i32* %n, i32 1
%t = icmp eq i32* %x, %y
ret i1 %t
; CHECK: both_past_the_end_alloca(
define zeroext i1 @just_one_past_the_end_alloca() {
%m = alloca i32
%n = alloca i32
- %x = getelementptr i32* %m, i32 1
+ %x = getelementptr i32, i32* %m, i32 1
%t = icmp eq i32* %x, %n
ret i1 %t
; CHECK: just_one_past_the_end_alloca(
; CHECK-LABEL: @ptrdiff1(
; CHECK-NEXT: ret i64 42
- %first = getelementptr inbounds i8* %ptr, i32 0
- %last = getelementptr inbounds i8* %ptr, i32 42
+ %first = getelementptr inbounds i8, i8* %ptr, i32 0
+ %last = getelementptr inbounds i8, i8* %ptr, i32 42
%first.int = ptrtoint i8* %first to i64
%last.int = ptrtoint i8* %last to i64
%diff = sub i64 %last.int, %first.int
; CHECK-LABEL: @ptrdiff2(
; CHECK-NEXT: ret i64 42
- %first1 = getelementptr inbounds i8* %ptr, i32 0
- %first2 = getelementptr inbounds i8* %first1, i32 1
- %first3 = getelementptr inbounds i8* %first2, i32 2
- %first4 = getelementptr inbounds i8* %first3, i32 4
- %last1 = getelementptr inbounds i8* %first2, i32 48
- %last2 = getelementptr inbounds i8* %last1, i32 8
- %last3 = getelementptr inbounds i8* %last2, i32 -4
- %last4 = getelementptr inbounds i8* %last3, i32 -4
+ %first1 = getelementptr inbounds i8, i8* %ptr, i32 0
+ %first2 = getelementptr inbounds i8, i8* %first1, i32 1
+ %first3 = getelementptr inbounds i8, i8* %first2, i32 2
+ %first4 = getelementptr inbounds i8, i8* %first3, i32 4
+ %last1 = getelementptr inbounds i8, i8* %first2, i32 48
+ %last2 = getelementptr inbounds i8, i8* %last1, i32 8
+ %last3 = getelementptr inbounds i8, i8* %last2, i32 -4
+ %last4 = getelementptr inbounds i8, i8* %last3, i32 -4
%first.int = ptrtoint i8* %first4 to i64
%last.int = ptrtoint i8* %last4 to i64
%diff = sub i64 %last.int, %first.int
; CHECK: sub
; CHECK: ret
- %first = getelementptr i8* %ptr, i32 0
- %last = getelementptr i8* %ptr, i32 42
+ %first = getelementptr i8, i8* %ptr, i32 0
+ %last = getelementptr i8, i8* %ptr, i32 42
%first.int = ptrtoint i8* %first to i64
%last.int = ptrtoint i8* %last to i64
%diff = sub i64 %last.int, %first.int
define i32 @ptrdiff5() nounwind {
bb:
- %tmp = getelementptr inbounds %struct.ham* @global, i32 0, i32 1
- %tmp1 = getelementptr inbounds [2 x [2 x i32]]* %tmp, i32 0, i32 0
+ %tmp = getelementptr inbounds %struct.ham, %struct.ham* @global, i32 0, i32 1
+ %tmp1 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %tmp, i32 0, i32 0
%tmp2 = bitcast [2 x i32]* %tmp1 to i32*
%tmp3 = ptrtoint i32* %tmp2 to i32
- %tmp4 = getelementptr inbounds %struct.ham* @global, i32 0, i32 1
- %tmp5 = getelementptr inbounds [2 x [2 x i32]]* %tmp4, i32 0, i32 0
+ %tmp4 = getelementptr inbounds %struct.ham, %struct.ham* @global, i32 0, i32 1
+ %tmp5 = getelementptr inbounds [2 x [2 x i32]], [2 x [2 x i32]]* %tmp4, i32 0, i32 0
%tmp6 = ptrtoint [2 x i32]* %tmp5 to i32
%tmp7 = sub i32 %tmp3, %tmp6
ret i32 %tmp7
declare void @helper(<2 x i8*>)
define void @test(<2 x i8*> %a) {
- %A = getelementptr <2 x i8*> %a, <2 x i32> <i32 0, i32 0>
+ %A = getelementptr i8, <2 x i8*> %a, <2 x i32> <i32 0, i32 0>
call void @helper(<2 x i8*> %A)
ret void
}
define <4 x i8*> @test1(<4 x i8*> %a) {
- %gep = getelementptr <4 x i8*> %a, <4 x i32> zeroinitializer
+ %gep = getelementptr i8, <4 x i8*> %a, <4 x i32> zeroinitializer
ret <4 x i8*> %gep
; CHECK-LABEL: @test1
}
define <4 x i8*> @test2(<4 x i8*> %a) {
- %gep = getelementptr <4 x i8*> %a
+ %gep = getelementptr i8, <4 x i8*> %a
ret <4 x i8*> %gep
; CHECK-LABEL: @test2
%struct = type { double, float }
define <4 x float*> @test3() {
- %gep = getelementptr <4 x %struct*> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %gep = getelementptr %struct, <4 x %struct*> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x float*> %gep
; CHECK-LABEL: @test3
%struct.empty = type { }
define <4 x %struct.empty*> @test4(<4 x %struct.empty*> %a) {
- %gep = getelementptr <4 x %struct.empty*> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %gep = getelementptr %struct.empty, <4 x %struct.empty*> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x %struct.empty*> %gep
; CHECK-LABEL: @test4
define <4 x i8*> @test5() {
%c = inttoptr <4 x i64> <i64 1, i64 2, i64 3, i64 4> to <4 x i8*>
- %gep = getelementptr <4 x i8*> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %gep = getelementptr i8, <4 x i8*> %c, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x i8*> %gep
; CHECK-LABEL: @test5
%retval.0.i.pre161 = phi i32 [ undef, %bb.nph ], [ %retval.0.i.pre, %_ZN12StringSwitchI5ColorE4CaseILj7EEERS1_RAT__KcRKS0_.exit134 ] ; <i32> [#uses=3]
%indvar = phi i64 [ 0, %bb.nph ], [ %tmp146, %_ZN12StringSwitchI5ColorE4CaseILj7EEERS1_RAT__KcRKS0_.exit134 ] ; <i64> [#uses=1]
%tmp146 = add i64 %indvar, 1 ; <i64> [#uses=3]
- %arrayidx = getelementptr i8** %argv, i64 %tmp146 ; <i8**> [#uses=1]
+ %arrayidx = getelementptr i8*, i8** %argv, i64 %tmp146 ; <i8**> [#uses=1]
%tmp6 = load i8** %arrayidx, align 8 ; <i8*> [#uses=8]
%call.i.i = call i64 @strlen(i8* %tmp6) nounwind ; <i64> [#uses=1]
%conv.i.i = trunc i64 %call.i.i to i32 ; <i32> [#uses=6]\
define void @_ZN1DC1Ev(%class.D* nocapture %this) unnamed_addr uwtable align 2 {
entry:
call void @_ZN24CompositeEditCommandImplC2Ev()
- %0 = getelementptr inbounds %class.D* %this, i64 0, i32 0, i32 0, i32 0
+ %0 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 0, i32 0, i32 0
store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*]* @_ZTV1D, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
ret void
}
define void @_ZN1DC2Ev(%class.D* nocapture %this) unnamed_addr uwtable align 2 {
entry:
call void @_ZN24CompositeEditCommandImplC2Ev()
- %0 = getelementptr inbounds %class.D* %this, i64 0, i32 0, i32 0, i32 0
+ %0 = getelementptr inbounds %class.D, %class.D* %this, i64 0, i32 0, i32 0, i32 0
store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*]* @_ZTV1D, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
ret void
}
_ZN1DC1Ev.exit: ; preds = %entry
%0 = bitcast i8* %call to i32 (...)***
store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*]* @_ZTV1D, i64 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8
- %_ref.i.i.i = getelementptr inbounds i8* %call, i64 8
+ %_ref.i.i.i = getelementptr inbounds i8, i8* %call, i64 8
%1 = bitcast i8* %_ref.i.i.i to i32*
%2 = load i32* %1, align 4
%inc.i.i.i = add nsw i32 %2, 1
define void @_ZN1BI1DEC1EPS0_(%class.B* nocapture %this, %class.D* %p1) unnamed_addr uwtable align 2 {
entry:
- %m_ptr.i = getelementptr inbounds %class.B* %this, i64 0, i32 0
+ %m_ptr.i = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
store %class.D* %p1, %class.D** %m_ptr.i, align 8
- %_ref.i.i = getelementptr inbounds %class.D* %p1, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
+ %_ref.i.i = getelementptr inbounds %class.D, %class.D* %p1, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
%0 = load i32* %_ref.i.i, align 4
%inc.i.i = add nsw i32 %0, 1
store i32 %inc.i.i, i32* %_ref.i.i, align 4
define %class.D* @_ZN1BI1DEptEv(%class.B* nocapture readonly %this) nounwind readonly uwtable align 2 {
entry:
- %m_ptr = getelementptr inbounds %class.B* %this, i64 0, i32 0
+ %m_ptr = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
%0 = load %class.D** %m_ptr, align 8
ret %class.D* %0
}
define void @_ZN1BI1DED1Ev(%class.B* nocapture readonly %this) unnamed_addr uwtable align 2 {
entry:
- %m_ptr.i = getelementptr inbounds %class.B* %this, i64 0, i32 0
+ %m_ptr.i = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
%0 = load %class.D** %m_ptr.i, align 8
- %_ref.i.i = getelementptr inbounds %class.D* %0, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
+ %_ref.i.i = getelementptr inbounds %class.D, %class.D* %0, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
%1 = load i32* %_ref.i.i, align 4
%tobool.i.i = icmp eq i32 %1, 0
br i1 %tobool.i.i, label %_ZN1BI1DED2Ev.exit, label %if.then.i.i
define void @_ZN1BI1DED2Ev(%class.B* nocapture readonly %this) unnamed_addr uwtable align 2 {
entry:
- %m_ptr = getelementptr inbounds %class.B* %this, i64 0, i32 0
+ %m_ptr = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
%0 = load %class.D** %m_ptr, align 8
- %_ref.i = getelementptr inbounds %class.D* %0, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
+ %_ref.i = getelementptr inbounds %class.D, %class.D* %0, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
%1 = load i32* %_ref.i, align 4
%tobool.i = icmp eq i32 %1, 0
br i1 %tobool.i, label %_ZN1AI1CE5derefEv.exit, label %if.then.i
define void @_ZN1AI1CE5derefEv(%class.A* nocapture readonly %this) nounwind uwtable align 2 {
entry:
- %_ref = getelementptr inbounds %class.A* %this, i64 0, i32 0
+ %_ref = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 0
%0 = load i32* %_ref, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
define void @_ZN1BI1DEC2EPS0_(%class.B* nocapture %this, %class.D* %p1) unnamed_addr uwtable align 2 {
entry:
- %m_ptr = getelementptr inbounds %class.B* %this, i64 0, i32 0
+ %m_ptr = getelementptr inbounds %class.B, %class.B* %this, i64 0, i32 0
store %class.D* %p1, %class.D** %m_ptr, align 8
- %_ref.i = getelementptr inbounds %class.D* %p1, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
+ %_ref.i = getelementptr inbounds %class.D, %class.D* %p1, i64 0, i32 0, i32 0, i32 1, i32 0, i32 0
%0 = load i32* %_ref.i, align 4
%inc.i = add nsw i32 %0, 1
store i32 %inc.i, i32* %_ref.i, align 4
define void @_ZN1AI1CE3refEv(%class.A* nocapture %this) nounwind uwtable align 2 {
entry:
- %_ref = getelementptr inbounds %class.A* %this, i64 0, i32 0
+ %_ref = getelementptr inbounds %class.A, %class.A* %this, i64 0, i32 0
%0 = load i32* %_ref, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %_ref, align 4
; CHECK: Z3fooPN4llvm5ValueE
define zeroext i8 @_Z3fooPN4llvm5ValueE(%"struct.llvm::Value"* %V) ssp {
entry:
- %0 = getelementptr inbounds %"struct.llvm::Value"* %V, i64 0, i32 1 ; <i8*> [#uses=1]
+ %0 = getelementptr inbounds %"struct.llvm::Value", %"struct.llvm::Value"* %V, i64 0, i32 1 ; <i8*> [#uses=1]
%1 = load i8* %0, align 8 ; <i8> [#uses=2]
%2 = icmp ugt i8 %1, 20 ; <i1> [#uses=1]
br i1 %2, label %bb.i, label %bb2
%4 = load %struct._GList** %0, align 8
%5 = load %struct._GList** %0, align 8
%call2 = call %struct._GList* @g_list_first(%struct._GList* %5)
- %data.i = getelementptr inbounds %struct._GList* %call2, i32 0, i32 0
+ %data.i = getelementptr inbounds %struct._GList, %struct._GList* %call2, i32 0, i32 0
%6 = load i8** %data.i, align 8
%7 = bitcast i8* %6 to %struct.filter_def*
- %name.i = getelementptr inbounds %struct.filter_def* %7, i32 0, i32 0
+ %name.i = getelementptr inbounds %struct.filter_def, %struct.filter_def* %7, i32 0, i32 0
%8 = load i8** %name.i, align 8
call void @g_free(i8* %8) nounwind
- %strval.i = getelementptr inbounds %struct.filter_def* %7, i32 0, i32 1
+ %strval.i = getelementptr inbounds %struct.filter_def, %struct.filter_def* %7, i32 0, i32 1
%9 = load i8** %strval.i, align 8
call void @g_free(i8* %9) nounwind
%10 = bitcast %struct.filter_def* %7 to i8*
br i1 %tobool12, label %while.body13, label %while.end16
while.body13: ; preds = %while.cond11
- %data = getelementptr inbounds %struct._GList* %cond10, i32 0, i32 0
+ %data = getelementptr inbounds %struct._GList, %struct._GList* %cond10, i32 0, i32 0
%12 = load i8** %data, align 8
%13 = bitcast i8* %12 to %struct.filter_def*
%14 = load %struct._GList** %0, align 8
- %name = getelementptr inbounds %struct.filter_def* %13, i32 0, i32 0
+ %name = getelementptr inbounds %struct.filter_def, %struct.filter_def* %13, i32 0, i32 0
%15 = load i8** %name, align 8
- %strval = getelementptr inbounds %struct.filter_def* %13, i32 0, i32 1
+ %strval = getelementptr inbounds %struct.filter_def, %struct.filter_def* %13, i32 0, i32 1
%16 = load i8** %strval, align 8
%call.i7 = call noalias i8* @g_malloc(i64 16) nounwind
%17 = bitcast i8* %call.i7 to %struct.filter_def*
%call1.i = call noalias i8* @g_strdup(i8* %15) nounwind
- %name.i8 = getelementptr inbounds %struct.filter_def* %17, i32 0, i32 0
+ %name.i8 = getelementptr inbounds %struct.filter_def, %struct.filter_def* %17, i32 0, i32 0
store i8* %call1.i, i8** %name.i8, align 8
%call2.i = call noalias i8* @g_strdup(i8* %16) nounwind
- %strval.i9 = getelementptr inbounds %struct.filter_def* %17, i32 0, i32 1
+ %strval.i9 = getelementptr inbounds %struct.filter_def, %struct.filter_def* %17, i32 0, i32 1
store i8* %call2.i, i8** %strval.i9, align 8
%18 = bitcast %struct.filter_def* %17 to i8*
%call3.i = call %struct._GList* @g_list_append(%struct._GList* %14, i8* %18) nounwind
br i1 %tobool15, label %cond.true, label %cond.false
cond.true: ; preds = %while.body13
- %next = getelementptr inbounds %struct._GList* %cond10, i32 0, i32 1
+ %next = getelementptr inbounds %struct._GList, %struct._GList* %cond10, i32 0, i32 1
%19 = load %struct._GList** %next, align 8
br label %cond.end
%SJE.0.0 = phi %struct.SetJmpMapEntry* [ %tmp.24, %endif ], [ null, %entry ] ; <%struct.SetJmpMapEntry*> [#uses=1]
br i1 false, label %then, label %endif
then: ; preds = %no_exit
- %tmp.20 = getelementptr %struct.SetJmpMapEntry* %SJE.0.0, i32 0, i32 1 ; <i32*> [#uses=0]
+ %tmp.20 = getelementptr %struct.SetJmpMapEntry, %struct.SetJmpMapEntry* %SJE.0.0, i32 0, i32 1 ; <i32*> [#uses=0]
ret void
endif: ; preds = %no_exit
%tmp.24 = load %struct.SetJmpMapEntry** null ; <%struct.SetJmpMapEntry*> [#uses=1]
no_exit: ; preds = %loopentry
br i1 false, label %then, label %endif
then: ; preds = %no_exit
- %tmp.21 = getelementptr %struct.SetJmpMapEntry* %SJE.0, i32 0, i32 1 ; <i32*> [#uses=0]
+ %tmp.21 = getelementptr %struct.SetJmpMapEntry, %struct.SetJmpMapEntry* %SJE.0, i32 0, i32 1 ; <i32*> [#uses=0]
br label %return
endif: ; preds = %no_exit
%tmp.25 = load %struct.SetJmpMapEntry** null ; <%struct.SetJmpMapEntry*> [#uses=1]
%X = alloca [2 x i64] ; <[2 x i64]*> [#uses=1]
br i1 false, label %bb13, label %bb4
bb4: ; preds = %bb3
- %reg3011 = getelementptr [2 x i64]* %X, i64 0, i64 0 ; <i64*> [#uses=1]
+ %reg3011 = getelementptr [2 x i64], [2 x i64]* %X, i64 0, i64 0 ; <i64*> [#uses=1]
br label %bb8
bb8: ; preds = %bb8, %bb4
store i64 0, i64* %reg3011
__main.entry:
br label %invoke_cont.3
invoke_cont.3: ; preds = %invoke_cont.3, %__main.entry
- %tmp.34.i.i502.7 = getelementptr %struct.roadlet* null, i32 0, i32 3, i32 7 ; <%struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)**> [#uses=1]
+ %tmp.34.i.i502.7 = getelementptr %struct.roadlet, %struct.roadlet* null, i32 0, i32 3, i32 7 ; <%struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)**> [#uses=1]
store %struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)* @_Z11return_nullP7roadletP7vehicle9direction, %struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)** %tmp.34.i.i502.7
store %struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)* @_Z14lane_switch_okP7roadletP7vehicle9direction, %struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)** null
- %tmp.4.i.i339 = getelementptr %struct.roadlet* null, i32 0, i32 3, i32 undef ; <%struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)**> [#uses=1]
+ %tmp.4.i.i339 = getelementptr %struct.roadlet, %struct.roadlet* null, i32 0, i32 3, i32 undef ; <%struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)**> [#uses=1]
store %struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)* @_Z11return_nullP7roadletP7vehicle9direction, %struct.roadlet* (%struct.roadlet*, %struct.vehicle*, i32)** %tmp.4.i.i339
br label %invoke_cont.3
}
bb: ; preds = %bb6
%tmp2 = load volatile i32* %DataIn ; <i32> [#uses=1]
- %tmp3 = getelementptr [64 x i32]* %buffer, i32 0, i32 %i.0 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr [64 x i32], [64 x i32]* %buffer, i32 0, i32 %i.0 ; <i32*> [#uses=1]
store i32 %tmp2, i32* %tmp3
%tmp5 = add i32 %i.0, 1 ; <i32> [#uses=1]
br label %bb6
bb12: ; preds = %bb22
%tmp14 = mul i32 %j.1, 8 ; <i32> [#uses=1]
%tmp16 = add i32 %tmp14, %i.1 ; <i32> [#uses=1]
- %tmp17 = getelementptr [64 x i32]* %buffer, i32 0, i32 %tmp16 ; <i32*> [#uses=1]
+ %tmp17 = getelementptr [64 x i32], [64 x i32]* %buffer, i32 0, i32 %tmp16 ; <i32*> [#uses=1]
%tmp18 = load i32* %tmp17 ; <i32> [#uses=1]
store volatile i32 %tmp18, i32* %DataOut
%tmp21 = add i32 %j.1, 1 ; <i32> [#uses=1]
bb35: ; preds = %cond_next60, %bb63.outer
%window.34 = phi i32 [ %tmp62, %cond_next60 ], [ 0, %bb63.outer ] ; <i32> [#uses=1]
- %tmp44 = getelementptr [4 x i32]* null, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp44 = getelementptr [4 x i32], [4 x i32]* null, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp46 = load i32* %tmp44, align 4 ; <i32> [#uses=0]
br i1 false, label %cond_true50, label %cond_next60
cond_true50: ; preds = %bb35
- %tmp59 = getelementptr [4 x i32]* null, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp59 = getelementptr [4 x i32], [4 x i32]* null, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %tmp59, align 4
br label %cond_next60
br i1 %tmp3.i, label %clear_modes.exit, label %cond_true.i
cond_true.i: ; preds = %blah.i
- %tmp1.i = getelementptr %struct.decision* null, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp1.i = getelementptr %struct.decision, %struct.decision* null, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 0, i8* %tmp1.i
br label %blah.i
br i1 %tmp3.i, label %cond.true, label %exit
cond.true: ; preds = %loop.head
- %ptr.i = getelementptr i8* %ptr, i32 0 ; <i8*> [#uses=2]
+ %ptr.i = getelementptr i8, i8* %ptr, i32 0 ; <i8*> [#uses=2]
store i8 0, i8* %ptr.i
br label %loop.head
br i1 %tmp3.i, label %exit, label %cond.true
cond.true: ; preds = %loop.head
- %ptr.i = getelementptr i8* %p, i32 0 ; <i8*> [#uses=2]
+ %ptr.i = getelementptr i8, i8* %p, i32 0 ; <i8*> [#uses=2]
store i8 0, i8* %ptr.i
br label %loop.head
forbody:
%tmp3 = load float** @a
- %arrayidx = getelementptr float* %tmp3, i32 %i.0
+ %arrayidx = getelementptr float, float* %tmp3, i32 %i.0
%tmp7 = uitofp i32 %i.0 to float
store float %tmp7, float* %arrayidx
%inc = add i32 %i.0, 1
br label %forcond
-; CHECK: %arrayidx = getelementptr float* %tmp3, i32 %i.0
+; CHECK: %arrayidx = getelementptr float, float* %tmp3, i32 %i.0
; CHECK: %tmp7 = uitofp i32 %i.0 to float
; CHECK: store float %tmp7, float* %arrayidx
; CHECK: %inc = add i32 %i.0, 1
for.cond:
%indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr [1024 x float]* @A, i64 0, i64 3
+ %arrayidx = getelementptr [1024 x float], [1024 x float]* @A, i64 0, i64 3
%vecidx = bitcast float* %arrayidx to <4 x float>*
store <4 x float> zeroinitializer, <4 x float>* %vecidx, align 4
%indvar.next = add i64 %indvar, 1
for.body: ; preds = %for.body, %for.body.preheader
%g.15 = phi i32 [ undef, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx2 = getelementptr inbounds i32* @fn3.i, i64 0
+ %arrayidx2 = getelementptr inbounds i32, i32* @fn3.i, i64 0
%0 = load i32* %arrayidx2, align 4
%call = call i32 @g()
br i1 false, label %for.body, label %for.end.loopexit
br i1 undef, label %for.cond, label %for.end
for.cond: ; preds = %for.cond, %entry
- %tmp1 = getelementptr { i32*}* %__first, i32 0, i32 0
+ %tmp1 = getelementptr { i32*}, { i32*}* %__first, i32 0, i32 0
%tmp2 = load i32** %tmp1, align 4
%call = tail call i32* @test3helper(i32* %tmp2)
- %tmp3 = getelementptr { i32*}* %__first, i32 0, i32 0
+ %tmp3 = getelementptr { i32*}, { i32*}* %__first, i32 0, i32 0
store i32* %call, i32** %tmp3, align 4
br i1 false, label %for.cond, label %for.end
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %indvars.iv
%0 = load i32 addrspace(1)* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32 addrspace(1)* %c, align 4
- %arrayidx3 = getelementptr inbounds i32 addrspace(1)* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %indvars.iv
%2 = load i32 addrspace(1)* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32 addrspace(1)* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%c = bitcast i64* %ca to i32*
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
%1 = load i32* %c, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %c2 = getelementptr inbounds i32* %c, i64 2
+ %c2 = getelementptr inbounds i32, i32* %c, i64 2
%1 = load i32* %c2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 0
br i1 %cmp1, label %if.then, label %for.inc
if.then: ; preds = %for.body
- %c2 = getelementptr inbounds i32* %c, i64 2
+ %c2 = getelementptr inbounds i32, i32* %c, i64 2
%1 = load i32* %c2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx3, align 4
%mul = mul nsw i32 %2, %1
store i32 %mul, i32* %arrayidx, align 4
; CHECK-NEXT: br label %Loop
Loop: ; preds = %Loop, %0
- %X1 = getelementptr i32* @X, i64 1 ; <i32*> [#uses=1]
+ %X1 = getelementptr i32, i32* @X, i64 1 ; <i32*> [#uses=1]
%A = load i32* %X1 ; <i32> [#uses=1]
%V = add i32 %A, 1 ; <i32> [#uses=1]
- %X2 = getelementptr i32* @X, i64 1 ; <i32*> [#uses=1]
+ %X2 = getelementptr i32, i32* @X, i64 1 ; <i32*> [#uses=1]
store i32 %V, i32* %X2
br i1 false, label %Loop, label %Exit
br label %loop
loop:
- %tmp = getelementptr i8* %x, i64 8
+ %tmp = getelementptr i8, i8* %x, i64 8
store i8* %tmp, i8** %handle2
br label %subloop
%count = phi i8 [ 0, %loop ], [ %nextcount, %subloop ]
%offsetx2 = load i8** %handle2
store i8 %n, i8* %offsetx2
- %newoffsetx2 = getelementptr i8* %offsetx2, i64 -1
+ %newoffsetx2 = getelementptr i8, i8* %offsetx2, i64 -1
store i8* %newoffsetx2, i8** %handle2
%nextcount = add i8 %count, 1
%innerexitcond = icmp sge i8 %nextcount, 8
for.body: ; preds = %for.body.lr.ph, %for.body
%storemerge2 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%idxprom = sext i32 %storemerge2 to i64
- %arrayidx = getelementptr inbounds float* %a, i64 %idxprom
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %idxprom
store float 0.000000e+00, float* %arrayidx, align 4, !tbaa !3
%0 = load i32* %gi, align 4, !tbaa !0
%inc = add nsw i32 %0, 1
define i32 @test6() {
br label %Loop
Loop:
- %dead = getelementptr %Ty* @X2, i64 0, i32 0
+ %dead = getelementptr %Ty, %Ty* @X2, i64 0, i32 0
%sunk2 = load i32* %dead
br i1 false, label %Loop, label %Out
Out: ; preds = %Loop
ret i32 %sunk2
; CHECK-LABEL: @test6(
; CHECK: Out:
-; CHECK-NEXT: %dead.le = getelementptr %Ty* @X2, i64 0, i32 0
+; CHECK-NEXT: %dead.le = getelementptr %Ty, %Ty* @X2, i64 0, i32 0
; CHECK-NEXT: %sunk2.le = load i32* %dead.le
; CHECK-NEXT: ret i32 %sunk2.le
}
define void @test11() {
br label %Loop
Loop:
- %dead = getelementptr %Ty* @X2, i64 0, i32 0
+ %dead = getelementptr %Ty, %Ty* @X2, i64 0, i32 0
br i1 false, label %Loop, label %Out
Out:
ret void
l1.header:
%iv = phi i64 [ %iv.next, %l1.latch ], [ 0, %entry ]
- %arrayidx.i = getelementptr inbounds [1 x i32]* @c, i64 0, i64 %iv
+ %arrayidx.i = getelementptr inbounds [1 x i32], [1 x i32]* @c, i64 0, i64 %iv
br label %l2.header
l2.header:
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %p, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
%0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
%div = udiv i64 %x, 2
- %arrayidx1 = getelementptr inbounds i64* %q, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02
store i64 %div, i64* %arrayidx1, align 8
br label %for.inc
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %p, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
%0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
%div = udiv i64 %x, %m
- %arrayidx1 = getelementptr inbounds i64* %q, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02
store i64 %div, i64* %arrayidx1, align 8
br label %for.inc
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %p, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
%0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
%div = sdiv i64 %x, 2
- %arrayidx1 = getelementptr inbounds i64* %q, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02
store i64 %div, i64* %arrayidx1, align 8
br label %for.inc
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %p, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
%0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
%div = sdiv i64 %x, %or
- %arrayidx1 = getelementptr inbounds i64* %q, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02
store i64 %div, i64* %arrayidx1, align 8
br label %for.inc
for.body: ; preds = %entry, %for.inc
%i.02 = phi i64 [ %inc, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %p, i64 %i.02
+ %arrayidx = getelementptr inbounds i32, i32* %p, i64 %i.02
%0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
%div = sdiv i64 %x, %and
- %arrayidx1 = getelementptr inbounds i64* %q, i64 %i.02
+ %arrayidx1 = getelementptr inbounds i64, i64* %q, i64 %i.02
store i64 %div, i64* %arrayidx1, align 8
br label %for.inc
%load1 = load i32* %a, align 4
%conv = zext i32 %load1 to i64
- %arrayidx1 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %load1, i32* %b, align 4
%load2 = load i32* %arrayidx1, align 4
%conv2 = zext i32 %load2 to i64
%load1 = load i32* %a, align 4
%conv = zext i32 %load1 to i64
- %arrayidx1 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %load1, i32* %b, align 4
%load2 = load i32* %arrayidx1, align 4
%conv2 = zext i32 %load2 to i64
%load1 = load i32* %a, align 4
%conv = zext i32 %load1 to i64
- %arrayidx1 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
%load2 = load i32* %arrayidx1, align 4
tail call void @llvm.assume(i1 %b)
%conv2 = zext i32 %load2 to i64
%load1 = load i32* %a, align 4
%conv = zext i32 %load1 to i64
- %arrayidx1 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %a, i64 1
tail call void @llvm.assume(i1 %b)
%load2 = load i32* %arrayidx1, align 4
%conv2 = zext i32 %load2 to i64
%2 = load i8* %1, align 1
%3 = zext i8 %2 to i64
%4 = shl nuw i64 %3, 56
- %5 = getelementptr inbounds i8* %1, i64 1
+ %5 = getelementptr inbounds i8, i8* %1, i64 1
%6 = load i8* %5, align 1
%7 = zext i8 %6 to i64
%8 = shl nuw nsw i64 %7, 48
%9 = or i64 %8, %4
- %10 = getelementptr inbounds i8* %1, i64 2
+ %10 = getelementptr inbounds i8, i8* %1, i64 2
%11 = load i8* %10, align 1
%12 = zext i8 %11 to i64
%13 = shl nuw nsw i64 %12, 40
%14 = or i64 %9, %13
- %15 = getelementptr inbounds i8* %1, i64 3
+ %15 = getelementptr inbounds i8, i8* %1, i64 3
%16 = load i8* %15, align 1
%17 = zext i8 %16 to i64
%18 = shl nuw nsw i64 %17, 32
%19 = or i64 %14, %18
- %20 = getelementptr inbounds i8* %1, i64 4
+ %20 = getelementptr inbounds i8, i8* %1, i64 4
%21 = load i8* %20, align 1
%22 = zext i8 %21 to i64
%23 = shl nuw nsw i64 %22, 24
%24 = or i64 %19, %23
- %25 = getelementptr inbounds i8* %1, i64 5
+ %25 = getelementptr inbounds i8, i8* %1, i64 5
%26 = load i8* %25, align 1
%27 = zext i8 %26 to i64
%28 = shl nuw nsw i64 %27, 16
%29 = or i64 %24, %28
- %30 = getelementptr inbounds i8* %1, i64 6
+ %30 = getelementptr inbounds i8, i8* %1, i64 6
%31 = load i8* %30, align 1
%32 = zext i8 %31 to i64
%33 = shl nuw nsw i64 %32, 8
%34 = or i64 %29, %33
- %35 = getelementptr inbounds i8* %1, i64 7
+ %35 = getelementptr inbounds i8, i8* %1, i64 7
%36 = load i8* %35, align 1
%37 = zext i8 %36 to i64
%38 = or i64 %34, %37
; Combine simple adjacent loads.
define i32 @"2xi16_i32"(i16* %x) {
%1 = load i16* %x, align 2
- %2 = getelementptr inbounds i16* %x, i64 1
+ %2 = getelementptr inbounds i16, i16* %x, i64 1
%3 = load i16* %2, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw i32 %4, 16
define i32 @"2xi16_i32_store"(i16* %x, i16* %y) {
%1 = load i16* %x, align 2
store i16 0, i16* %y, align 2
- %2 = getelementptr inbounds i16* %x, i64 1
+ %2 = getelementptr inbounds i16, i16* %x, i64 1
%3 = load i16* %2, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw i32 %4, 16
; Don't combine loads with a gap.
define i32 @"2xi16_i32_gap"(i16* %x) {
%1 = load i16* %x, align 2
- %2 = getelementptr inbounds i16* %x, i64 2
+ %2 = getelementptr inbounds i16, i16* %x, i64 2
%3 = load i16* %2, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw i32 %4, 16
; Combine out of order loads.
define i32 @"2xi16_i32_order"(i16* %x) {
- %1 = getelementptr inbounds i16* %x, i64 1
+ %1 = getelementptr inbounds i16, i16* %x, i64 1
%2 = load i16* %1, align 2
%3 = zext i16 %2 to i32
%4 = load i16* %x, align 2
define i32 @"2xi16_i32_overlap"(i8* %x) {
%1 = bitcast i8* %x to i16*
%2 = load i16* %1, align 2
- %3 = getelementptr inbounds i8* %x, i64 1
+ %3 = getelementptr inbounds i8, i8* %x, i64 1
%4 = bitcast i8* %3 to i16*
%5 = load i16* %4, align 2
%6 = zext i16 %5 to i32
define i64 @"2xi16_i64_align"(i8* %x) {
%1 = bitcast i8* %x to i32*
%2 = load i32* %1, align 4
- %3 = getelementptr inbounds i8* %x, i64 4
+ %3 = getelementptr inbounds i8, i8* %x, i64 4
%4 = bitcast i8* %3 to i16*
%5 = load i16* %4, align 2
- %6 = getelementptr inbounds i8* %x, i64 6
+ %6 = getelementptr inbounds i8, i8* %x, i64 6
%7 = bitcast i8* %6 to i16*
%8 = load i16* %7, align 2
%9 = zext i16 %8 to i64
define i64 @"2xi16_i64_npo2"(i8* %x) {
%1 = load i8* %x, align 1
%2 = zext i8 %1 to i64
- %3 = getelementptr inbounds i8* %x, i64 1
+ %3 = getelementptr inbounds i8, i8* %x, i64 1
%4 = load i8* %3, align 1
%5 = zext i8 %4 to i64
%6 = shl nuw nsw i64 %5, 8
%7 = or i64 %6, %2
- %8 = getelementptr inbounds i8* %x, i64 2
+ %8 = getelementptr inbounds i8, i8* %x, i64 2
%9 = load i8* %8, align 1
%10 = zext i8 %9 to i64
%11 = shl nuw nsw i64 %10, 16
%12 = or i64 %11, %7
- %13 = getelementptr inbounds i8* %x, i64 3
+ %13 = getelementptr inbounds i8, i8* %x, i64 3
%14 = load i8* %13, align 1
%15 = zext i8 %14 to i64
%16 = shl nuw nsw i64 %15, 24
%17 = or i64 %16, %12
- %18 = getelementptr inbounds i8* %x, i64 4
+ %18 = getelementptr inbounds i8, i8* %x, i64 4
%19 = load i8* %18, align 1
%20 = zext i8 %19 to i64
%21 = shl nuw nsw i64 %20, 32
%22 = or i64 %21, %17
- %23 = getelementptr inbounds i8* %x, i64 5
+ %23 = getelementptr inbounds i8, i8* %x, i64 5
%24 = load i8* %23, align 1
%25 = zext i8 %24 to i64
%26 = shl nuw nsw i64 %25, 40
%27 = or i64 %26, %22
- %28 = getelementptr inbounds i8* %x, i64 6
+ %28 = getelementptr inbounds i8, i8* %x, i64 6
%29 = load i8* %28, align 1
%30 = zext i8 %29 to i64
%31 = shl nuw nsw i64 %30, 48
define %struct.BF_PartHolder* @BF_addEntry(%struct.BF_PartHolder* %thePH, i32 %value, i32 %length) nounwind {
entry:
%myElement = alloca %struct.BF_BitstreamElement ; <%struct.BF_BitstreamElement*> [#uses=2]
- %tmp1 = getelementptr %struct.BF_BitstreamElement* %myElement, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp1 = getelementptr %struct.BF_BitstreamElement, %struct.BF_BitstreamElement* %myElement, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 %value, i32* %tmp1, align 8
%tmp7 = icmp eq i32 %length, 0 ; <i1> [#uses=1]
br i1 %tmp7, label %bb13, label %bb
define internal fastcc void @encodeMainData(%struct.lame_global_flags* %gfp, [2 x [576 x i32]]* %l3_enc, %struct.III_side_info_t* %si, [2 x %struct.III_scalefac_t]* %scalefac) nounwind {
entry:
- %tmp69 = getelementptr %struct.lame_global_flags* %gfp, i32 0, i32 43 ; <i32*> [#uses=1]
+ %tmp69 = getelementptr %struct.lame_global_flags, %struct.lame_global_flags* %gfp, i32 0, i32 43 ; <i32*> [#uses=1]
%tmp70 = load i32* %tmp69, align 4 ; <i32> [#uses=1]
%tmp71 = icmp eq i32 %tmp70, 1 ; <i1> [#uses=1]
br i1 %tmp71, label %bb352, label %bb498
bb113: ; preds = %bb132
- %tmp123 = getelementptr [2 x %struct.III_scalefac_t]* %scalefac, i32 0, i32 0, i32 1, i32 %sfb.0, i32 %window.0 ; <i32*> [#uses=1]
+ %tmp123 = getelementptr [2 x %struct.III_scalefac_t], [2 x %struct.III_scalefac_t]* %scalefac, i32 0, i32 0, i32 1, i32 %sfb.0, i32 %window.0 ; <i32*> [#uses=1]
%tmp124 = load i32* %tmp123, align 4 ; <i32> [#uses=1]
%tmp126 = load %struct.BF_PartHolder** %tmp80, align 4 ; <%struct.BF_PartHolder*> [#uses=1]
%tmp128 = call %struct.BF_PartHolder* @BF_addEntry( %struct.BF_PartHolder* %tmp126, i32 %tmp124, i32 %tmp93 ) nounwind ; <%struct.BF_PartHolder*> [#uses=1]
br i1 %tmp176, label %bb166, label %bb341
bb341: ; preds = %bb352, %bb174
- %tmp80 = getelementptr [2 x [2 x %struct.BF_PartHolder*]]* @scaleFactorsPH, i32 0, i32 0, i32 0 ; <%struct.BF_PartHolder**> [#uses=3]
- %tmp92 = getelementptr [16 x i32]* @slen1_tab, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp80 = getelementptr [2 x [2 x %struct.BF_PartHolder*]], [2 x [2 x %struct.BF_PartHolder*]]* @scaleFactorsPH, i32 0, i32 0, i32 0 ; <%struct.BF_PartHolder**> [#uses=3]
+ %tmp92 = getelementptr [16 x i32], [16 x i32]* @slen1_tab, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp93 = load i32* %tmp92, align 4 ; <i32> [#uses=1]
br label %bb140
%j.02 = phi i16 [ 0, %bb.nph ], [ %inc, %for.body5 ]
%mul = mul nsw i16 %i.04, 100
%add = add nsw i16 %j.02, %mul
- %arrayidx = getelementptr inbounds i8 addrspace(2)* %X, i16 %add
+ %arrayidx = getelementptr inbounds i8, i8 addrspace(2)* %X, i16 %add
store i8 0, i8 addrspace(2)* %arrayidx, align 1
%inc = add nsw i16 %j.02, 1
%cmp4 = icmp eq i16 %inc, 100
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i32 addrspace(2)* %P, i64 %indvar
+ %arrayidx = getelementptr i32, i32 addrspace(2)* %P, i64 %indvar
store i32 1, i32 addrspace(2)* %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
%tmp5 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%add = add nsw i32 %tmp5, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom
%tmp2 = load i32 addrspace(2)* %arrayidx, align 4
%add4 = add nsw i32 %tmp5, 5
%idxprom5 = sext i32 %add4 to i64
- %arrayidx6 = getelementptr inbounds [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32] addrspace(2)* @g_50, i32 0, i64 %idxprom5
store i32 %tmp2, i32 addrspace(2)* %arrayidx6, align 4
%inc = add nsw i32 %tmp5, 1
%cmp = icmp slt i32 %inc, 2
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
store i8 0, i8* %I.0.014, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
store i8 0, i8* %I.0.014, align 1
%indvar.next = add i64 %indvar, 1
br label %for.body.cont
for.body: ; preds = %entry, %for.body
%i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %add.ptr.i = getelementptr i32* %Base, i64 %i.011
+ %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
store i32 16843009, i32* %add.ptr.i, align 4
%inc = add nsw i64 %i.011, 1
%exitcond = icmp eq i64 %inc, %Size
for.body: ; preds = %entry, %for.body
%i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %add.ptr.i = getelementptr i32* %Base, i64 %i.011
+ %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
store i32 16843009, i32* %add.ptr.i, align 4
store i8 42, i8* %MayAlias
;; TODO: We should be able to promote this memset. Not yet though.
define void @test4(i8* %Base) nounwind ssp {
bb.nph: ; preds = %entry
- %Base100 = getelementptr i8* %Base, i64 1000
+ %Base100 = getelementptr i8, i8* %Base, i64 1000
br label %for.body
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
store i8 0, i8* %I.0.014, align 1
;; Store beyond the range memset, should be safe to promote.
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
%V = trunc i64 %indvar to i8
store i8 %V, i8* %I.0.014, align 1
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
- %DestI = getelementptr i8* %Dest, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+ %DestI = getelementptr i8, i8* %Dest, i64 %indvar
%V = load i8* %I.0.014, align 1
store i8 %V, i8* %DestI, align 1
%indvar.next = add i64 %indvar, 1
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
br label %for.body.cont
for.body.cont:
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
store i8 0, i8* %I.0.014, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, %Size
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %PI = getelementptr i64* %Ptr, i64 %indvar
+ %PI = getelementptr i64, i64* %Ptr, i64 %indvar
store i64 0, i64 *%PI
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 1
for.body: ; preds = %bb.nph, %for.body
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
- %I.0.014 = getelementptr i8* %Base, i64 %indvar
- %DestI = getelementptr i8* %Dest, i64 %indvar
+ %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
+ %DestI = getelementptr i8, i8* %Dest, i64 %indvar
%V = load i8* %I.0.014, align 1
store i8 %V, i8* %DestI, align 1
%mul = mul nsw i32 %i.04, 100
%add = add nsw i32 %j.02, %mul
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds i8* %X, i64 %idxprom
+ %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
store i8 0, i8* %arrayidx, align 1
%inc = add nsw i32 %j.02, 1
%cmp4 = icmp eq i32 %inc, 100
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i32* %P, i64 %indvar
+ %arrayidx = getelementptr i32, i32* %P, i64 %indvar
store i32 1, i32* %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i32** %P, i64 %indvar
+ %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
store i32* null, i32** %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr i32** %P, i64 %indvar
+ %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
store i32* @G, i32** %arrayidx, align 4
%indvar.next = add i64 %indvar, 1
%exitcond = icmp eq i64 %indvar.next, 10000
%tmp5 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%add = add nsw i32 %tmp5, 4
%idxprom = sext i32 %add to i64
- %arrayidx = getelementptr inbounds [7 x i32]* @g_50, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom
%tmp2 = load i32* %arrayidx, align 4
%add4 = add nsw i32 %tmp5, 5
%idxprom5 = sext i32 %add4 to i64
- %arrayidx6 = getelementptr inbounds [7 x i32]* @g_50, i32 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom5
store i32 %tmp2, i32* %arrayidx6, align 4
%inc = add nsw i32 %tmp5, 1
%cmp = icmp slt i32 %inc, 2
entry:
%end.idx = add i64 %size, -1
- %end.ptr = getelementptr inbounds i32* %s, i64 %end.idx
+ %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
br label %while.body
; CHECK-NOT: memcpy
;
while.body:
%phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
- %src.ptr = getelementptr inbounds i32* %phi.ptr, i64 1
+ %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
%val = load i32* %src.ptr, align 4
; CHECK: load
- %dst.ptr = getelementptr inbounds i32* %phi.ptr, i64 0
+ %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
store i32 %val, i32* %dst.ptr, align 4
; CHECK: store
- %next.ptr = getelementptr inbounds i32* %phi.ptr, i64 1
+ %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
%cmp = icmp eq i32* %next.ptr, %end.ptr
br i1 %cmp, label %exit, label %while.body
bb0:
%mul116 = mul nsw i64 %x, %x
%incdec.ptr6.sum175 = add i64 42, %x
- %arrayidx135 = getelementptr inbounds i64* %iwork, i64 %incdec.ptr6.sum175
+ %arrayidx135 = getelementptr inbounds i64, i64* %iwork, i64 %incdec.ptr6.sum175
br label %bb1
bb1:
%storemerge4226 = phi i64 [ 0, %bb0 ], [ %inc139, %bb1 ]
store i64 1, i64* %arrayidx135, align 8
%incdec.ptr6.sum176 = add i64 %mul116, %storemerge4226
- %arrayidx137 = getelementptr inbounds i64* %iwork, i64 %incdec.ptr6.sum176
+ %arrayidx137 = getelementptr inbounds i64, i64* %iwork, i64 %incdec.ptr6.sum176
store i64 1, i64* %arrayidx137, align 8
%inc139 = add nsw i64 %storemerge4226, 1
%cmp131 = icmp sgt i64 %storemerge4226, 42
for.body: ; preds = %entry, %for.body
%indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
- %arrayidx = getelementptr double* %a, i64 %indvar
+ %arrayidx = getelementptr double, double* %a, i64 %indvar
; CHECK: call void @llvm.memset{{.+}} !dbg
store double 0.000000e+00, double* %arrayidx, align 8, !dbg !15
%indvar.next = add i64 %indvar, 1
for.body: ; preds = %for.body.lr.ph, %for.body
%indvar = phi i64 [ 0, %for.body.lr.ph ], [ %indvar.next, %for.body ]
- %p.02 = getelementptr i8* %b, i64 %indvar
+ %p.02 = getelementptr i8, i8* %b, i64 %indvar
store i8 %conv6, i8* %p.02, align 1
%indvar.next = add i64 %indvar, 1
%exitcond = icmp ne i64 %indvar.next, %len
for.bodyprime: ; preds = %for.bodyprime, %start.exit
%i.057375 = phi i32 [ 0, %start.exit ], [ %1, %for.bodyprime ]
- %arrayidx8prime = getelementptr inbounds i32* %currMB, i32 %i.057375
+ %arrayidx8prime = getelementptr inbounds i32, i32* %currMB, i32 %i.057375
store i32 0, i32* %arrayidx8prime, align 4
%1 = add i32 %i.057375, 1
%cmp5prime = icmp slt i32 %1, 4
while.body: ; preds = %while.body.lr.ph, %while.body
%q.112 = phi i8* [ %q.0, %while.body.lr.ph ], [ %incdec.ptr, %while.body ]
%backslashes.111 = phi i32 [ %backslashes.0, %while.body.lr.ph ], [ %dec, %while.body ]
- %incdec.ptr = getelementptr inbounds i8* %q.112, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %q.112, i64 1
store i8 92, i8* %incdec.ptr, align 1
%dec = add nsw i32 %backslashes.111, -1
%tobool2 = icmp eq i32 %dec, 0
while.cond.for.inc.loopexit_crit_edge: ; preds = %while.body
%scevgep.sum = add i64 %2, 1
- %scevgep13 = getelementptr i8* %q.0, i64 %scevgep.sum
+ %scevgep13 = getelementptr i8, i8* %q.0, i64 %scevgep.sum
br label %for.inc.loopexit
for.inc.loopexit: ; preds = %while.cond.for.inc.loopexit_crit_edge, %while.cond.preheader
for.inc: ; preds = %for.inc.loopexit, %for.cond
%backslashes.2 = phi i32 [ %backslashes.0, %for.cond ], [ 0, %for.inc.loopexit ]
%q.2 = phi i8* [ %q.0, %for.cond ], [ %q.1.lcssa, %for.inc.loopexit ]
- %incdec.ptr3 = getelementptr inbounds i8* %p.0, i64 1
+ %incdec.ptr3 = getelementptr inbounds i8, i8* %p.0, i64 1
br label %for.cond
for.body6: ; preds = %for.body6.lr.ph, %for.body6
%q.39 = phi i8* [ %q.0, %for.body6.lr.ph ], [ %incdec.ptr7, %for.body6 ]
%backslashes.38 = phi i32 [ %backslashes.0, %for.body6.lr.ph ], [ %dec9, %for.body6 ]
- %incdec.ptr7 = getelementptr inbounds i8* %q.39, i64 1
+ %incdec.ptr7 = getelementptr inbounds i8, i8* %q.39, i64 1
store i8 92, i8* %incdec.ptr7, align 1
%dec9 = add nsw i32 %backslashes.38, -1
%tobool5 = icmp eq i32 %dec9, 0
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%call = tail call i32 @foo(i32 0) #1
- %arrayidx = getelementptr inbounds i32* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
store i32 %call, i32* %arrayidx, align 4
%call1 = tail call i32 @foo(i32 0) #1
%0 = add nsw i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32* %x, i64 %0
+ %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %0
store i32 %call1, i32* %arrayidx3, align 4
%call4 = tail call i32 @foo(i32 0) #1
%1 = add nsw i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %1
store i32 %call4, i32* %arrayidx7, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
%2 = trunc i64 %indvars.iv.next to i32
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %call = tail call i32 @foo(i32 0) #1
-; CHECK: %arrayidx = getelementptr inbounds i32* %x, i64 %indvar
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvar
; CHECK: store i32 %call, i32* %arrayidx, align 4
; CHECK: %indvar.next = add i64 %indvar, 1
; CHECK: %exitcond = icmp eq i64 %indvar, 1499
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%call = tail call i32 @foo(i32 0) #1
%0 = mul nsw i64 %indvars.iv, 3
- %arrayidx = getelementptr inbounds i32* %x, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %0
store i32 %call, i32* %arrayidx, align 4
%call1 = tail call i32 @foo(i32 0) #1
%1 = add nsw i64 %0, 1
- %arrayidx4 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx4 = getelementptr inbounds i32, i32* %x, i64 %1
store i32 %call1, i32* %arrayidx4, align 4
%call5 = tail call i32 @foo(i32 0) #1
%2 = add nsw i64 %0, 2
- %arrayidx9 = getelementptr inbounds i32* %x, i64 %2
+ %arrayidx9 = getelementptr inbounds i32, i32* %x, i64 %2
store i32 %call5, i32* %arrayidx9, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 500
; CHECK: for.body:
; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
; CHECK: %call = tail call i32 @foo(i32 0) #1
-; CHECK: %arrayidx = getelementptr inbounds i32* %x, i64 %indvars.iv
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
; CHECK: store i32 %call, i32* %arrayidx, align 4
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; CHECK: %exitcond1 = icmp eq i64 %indvars.iv, 1499
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%mul = fmul float %0, %alpha
- %arrayidx2 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
%add = fadd float %1, %mul
store float %add, float* %arrayidx2, align 4
%2 = add nsw i64 %indvars.iv, 1
- %arrayidx5 = getelementptr inbounds float* %b, i64 %2
+ %arrayidx5 = getelementptr inbounds float, float* %b, i64 %2
%3 = load float* %arrayidx5, align 4
%mul6 = fmul float %3, %alpha
- %arrayidx9 = getelementptr inbounds float* %a, i64 %2
+ %arrayidx9 = getelementptr inbounds float, float* %a, i64 %2
%4 = load float* %arrayidx9, align 4
%add10 = fadd float %4, %mul6
store float %add10, float* %arrayidx9, align 4
%5 = add nsw i64 %indvars.iv, 2
- %arrayidx13 = getelementptr inbounds float* %b, i64 %5
+ %arrayidx13 = getelementptr inbounds float, float* %b, i64 %5
%6 = load float* %arrayidx13, align 4
%mul14 = fmul float %6, %alpha
- %arrayidx17 = getelementptr inbounds float* %a, i64 %5
+ %arrayidx17 = getelementptr inbounds float, float* %a, i64 %5
%7 = load float* %arrayidx17, align 4
%add18 = fadd float %7, %mul14
store float %add18, float* %arrayidx17, align 4
%8 = add nsw i64 %indvars.iv, 3
- %arrayidx21 = getelementptr inbounds float* %b, i64 %8
+ %arrayidx21 = getelementptr inbounds float, float* %b, i64 %8
%9 = load float* %arrayidx21, align 4
%mul22 = fmul float %9, %alpha
- %arrayidx25 = getelementptr inbounds float* %a, i64 %8
+ %arrayidx25 = getelementptr inbounds float, float* %a, i64 %8
%10 = load float* %arrayidx25, align 4
%add26 = fadd float %10, %mul22
store float %add26, float* %arrayidx25, align 4
%11 = add nsw i64 %indvars.iv, 4
- %arrayidx29 = getelementptr inbounds float* %b, i64 %11
+ %arrayidx29 = getelementptr inbounds float, float* %b, i64 %11
%12 = load float* %arrayidx29, align 4
%mul30 = fmul float %12, %alpha
- %arrayidx33 = getelementptr inbounds float* %a, i64 %11
+ %arrayidx33 = getelementptr inbounds float, float* %a, i64 %11
%13 = load float* %arrayidx33, align 4
%add34 = fadd float %13, %mul30
store float %add34, float* %arrayidx33, align 4
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
-; CHECK: %arrayidx = getelementptr inbounds float* %b, i64 %indvar
+; CHECK: %arrayidx = getelementptr inbounds float, float* %b, i64 %indvar
; CHECK: %0 = load float* %arrayidx, align 4
; CHECK: %mul = fmul float %0, %alpha
-; CHECK: %arrayidx2 = getelementptr inbounds float* %a, i64 %indvar
+; CHECK: %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvar
; CHECK: %1 = load float* %arrayidx2, align 4
; CHECK: %add = fadd float %1, %mul
; CHECK: store float %add, float* %arrayidx2, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %ip, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %ip, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%idxprom1 = sext i32 %0 to i64
- %arrayidx2 = getelementptr inbounds float* %b, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds float, float* %b, i64 %idxprom1
%1 = load float* %arrayidx2, align 4
%mul = fmul float %1, %alpha
- %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv
%2 = load float* %arrayidx4, align 4
%add = fadd float %2, %mul
store float %add, float* %arrayidx4, align 4
%3 = add nsw i64 %indvars.iv, 1
- %arrayidx7 = getelementptr inbounds i32* %ip, i64 %3
+ %arrayidx7 = getelementptr inbounds i32, i32* %ip, i64 %3
%4 = load i32* %arrayidx7, align 4
%idxprom8 = sext i32 %4 to i64
- %arrayidx9 = getelementptr inbounds float* %b, i64 %idxprom8
+ %arrayidx9 = getelementptr inbounds float, float* %b, i64 %idxprom8
%5 = load float* %arrayidx9, align 4
%mul10 = fmul float %5, %alpha
- %arrayidx13 = getelementptr inbounds float* %a, i64 %3
+ %arrayidx13 = getelementptr inbounds float, float* %a, i64 %3
%6 = load float* %arrayidx13, align 4
%add14 = fadd float %6, %mul10
store float %add14, float* %arrayidx13, align 4
%7 = add nsw i64 %indvars.iv, 2
- %arrayidx17 = getelementptr inbounds i32* %ip, i64 %7
+ %arrayidx17 = getelementptr inbounds i32, i32* %ip, i64 %7
%8 = load i32* %arrayidx17, align 4
%idxprom18 = sext i32 %8 to i64
- %arrayidx19 = getelementptr inbounds float* %b, i64 %idxprom18
+ %arrayidx19 = getelementptr inbounds float, float* %b, i64 %idxprom18
%9 = load float* %arrayidx19, align 4
%mul20 = fmul float %9, %alpha
- %arrayidx23 = getelementptr inbounds float* %a, i64 %7
+ %arrayidx23 = getelementptr inbounds float, float* %a, i64 %7
%10 = load float* %arrayidx23, align 4
%add24 = fadd float %10, %mul20
store float %add24, float* %arrayidx23, align 4
%11 = add nsw i64 %indvars.iv, 3
- %arrayidx27 = getelementptr inbounds i32* %ip, i64 %11
+ %arrayidx27 = getelementptr inbounds i32, i32* %ip, i64 %11
%12 = load i32* %arrayidx27, align 4
%idxprom28 = sext i32 %12 to i64
- %arrayidx29 = getelementptr inbounds float* %b, i64 %idxprom28
+ %arrayidx29 = getelementptr inbounds float, float* %b, i64 %idxprom28
%13 = load float* %arrayidx29, align 4
%mul30 = fmul float %13, %alpha
- %arrayidx33 = getelementptr inbounds float* %a, i64 %11
+ %arrayidx33 = getelementptr inbounds float, float* %a, i64 %11
%14 = load float* %arrayidx33, align 4
%add34 = fadd float %14, %mul30
store float %add34, float* %arrayidx33, align 4
%15 = add nsw i64 %indvars.iv, 4
- %arrayidx37 = getelementptr inbounds i32* %ip, i64 %15
+ %arrayidx37 = getelementptr inbounds i32, i32* %ip, i64 %15
%16 = load i32* %arrayidx37, align 4
%idxprom38 = sext i32 %16 to i64
- %arrayidx39 = getelementptr inbounds float* %b, i64 %idxprom38
+ %arrayidx39 = getelementptr inbounds float, float* %b, i64 %idxprom38
%17 = load float* %arrayidx39, align 4
%mul40 = fmul float %17, %alpha
- %arrayidx43 = getelementptr inbounds float* %a, i64 %15
+ %arrayidx43 = getelementptr inbounds float, float* %a, i64 %15
%18 = load float* %arrayidx43, align 4
%add44 = fadd float %18, %mul40
store float %add44, float* %arrayidx43, align 4
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
-; CHECK: %arrayidx = getelementptr inbounds i32* %ip, i64 %indvar
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %ip, i64 %indvar
; CHECK: %0 = load i32* %arrayidx, align 4
; CHECK: %idxprom1 = sext i32 %0 to i64
-; CHECK: %arrayidx2 = getelementptr inbounds float* %b, i64 %idxprom1
+; CHECK: %arrayidx2 = getelementptr inbounds float, float* %b, i64 %idxprom1
; CHECK: %1 = load float* %arrayidx2, align 4
; CHECK: %mul = fmul float %1, %alpha
-; CHECK: %arrayidx4 = getelementptr inbounds float* %a, i64 %indvar
+; CHECK: %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvar
; CHECK: %2 = load float* %arrayidx4, align 4
; CHECK: %add = fadd float %2, %mul
; CHECK: store float %add, float* %arrayidx4, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = mul nsw i64 %indvars.iv, 3
- %arrayidx = getelementptr inbounds i32* %x, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %0
store i32 %call, i32* %arrayidx, align 4
%1 = add nsw i64 %0, 1
- %arrayidx4 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx4 = getelementptr inbounds i32, i32* %x, i64 %1
store i32 %call, i32* %arrayidx4, align 4
%2 = add nsw i64 %0, 2
- %arrayidx9 = getelementptr inbounds i32* %x, i64 %2
+ %arrayidx9 = getelementptr inbounds i32, i32* %x, i64 %2
store i32 %call, i32* %arrayidx9, align 4
%3 = add nsw i64 %0, 6
- %arrayidx6 = getelementptr inbounds i32* %x, i64 %3
+ %arrayidx6 = getelementptr inbounds i32, i32* %x, i64 %3
store i32 %call, i32* %arrayidx6, align 4
%4 = add nsw i64 %0, 7
- %arrayidx7 = getelementptr inbounds i32* %x, i64 %4
+ %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %4
store i32 %call, i32* %arrayidx7, align 4
%5 = add nsw i64 %0, 8
- %arrayidx8 = getelementptr inbounds i32* %x, i64 %5
+ %arrayidx8 = getelementptr inbounds i32, i32* %x, i64 %5
store i32 %call, i32* %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 500
; CHECK:for.body:
; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
; CHECK: %0 = add i64 %indvars.iv, 6
-; CHECK: %arrayidx = getelementptr inbounds i32* %x, i64 %indvars.iv
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
; CHECK: store i32 %call, i32* %arrayidx, align 4
-; CHECK: %arrayidx6 = getelementptr inbounds i32* %x, i64 %0
+; CHECK: %arrayidx6 = getelementptr inbounds i32, i32* %x, i64 %0
; CHECK: store i32 %call, i32* %arrayidx6, align 4
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; CHECK: %exitcond2 = icmp eq i64 %0, 1505
%0 = mul nsw i64 %indvars.iv, 3
%add = add nsw i64 %indvars.iv, 1
%newmul = mul nsw i64 %add, 3
- %arrayidx = getelementptr inbounds i32* %x, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %0
store i32 %call, i32* %arrayidx, align 4
%1 = add nsw i64 %0, 1
- %arrayidx4 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx4 = getelementptr inbounds i32, i32* %x, i64 %1
store i32 %call, i32* %arrayidx4, align 4
%2 = add nsw i64 %0, 2
- %arrayidx9 = getelementptr inbounds i32* %x, i64 %2
+ %arrayidx9 = getelementptr inbounds i32, i32* %x, i64 %2
store i32 %call, i32* %arrayidx9, align 4
- %arrayidx6 = getelementptr inbounds i32* %x, i64 %newmul
+ %arrayidx6 = getelementptr inbounds i32, i32* %x, i64 %newmul
store i32 %call, i32* %arrayidx6, align 4
%3 = add nsw i64 %newmul, 1
- %arrayidx7 = getelementptr inbounds i32* %x, i64 %3
+ %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
store i32 %call, i32* %arrayidx7, align 4
%4 = add nsw i64 %newmul, 2
- %arrayidx8 = getelementptr inbounds i32* %x, i64 %4
+ %arrayidx8 = getelementptr inbounds i32, i32* %x, i64 %4
store i32 %call, i32* %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 500
; CHECK:for.body:
; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
; CHECK: %0 = add i64 %indvars.iv, 3
-; CHECK: %arrayidx = getelementptr inbounds i32* %x, i64 %indvars.iv
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
; CHECK: store i32 %call, i32* %arrayidx, align 4
-; CHECK: %arrayidx6 = getelementptr inbounds i32* %x, i64 %0
+; CHECK: %arrayidx6 = getelementptr inbounds i32, i32* %x, i64 %0
; CHECK: store i32 %call, i32* %arrayidx6, align 4
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; CHECK: %exitcond2 = icmp eq i64 %indvars.iv, 1499
%0 = mul nsw i64 %indvars.iv, 3
%x0 = add nsw i64 %0, 3
%add = add nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds i32* %x, i64 %x0
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %x0
store i32 %call, i32* %arrayidx, align 4
%1 = add nsw i64 %0, 4
- %arrayidx4 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx4 = getelementptr inbounds i32, i32* %x, i64 %1
store i32 %call, i32* %arrayidx4, align 4
%2 = add nsw i64 %0, 5
- %arrayidx9 = getelementptr inbounds i32* %x, i64 %2
+ %arrayidx9 = getelementptr inbounds i32, i32* %x, i64 %2
store i32 %call, i32* %arrayidx9, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 500
; CHECK: for.body:
; CHECK: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
; CHECK: %0 = add i64 %indvars.iv, 3
-; CHECK: %arrayidx = getelementptr inbounds i32* %x, i64 %0
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %0
; CHECK: store i32 %call, i32* %arrayidx, align 4
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
; CHECK: %exitcond1 = icmp eq i64 %0, 1502
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = mul nsw i64 %indvars.iv, 3
- %arrayidx = getelementptr inbounds %struct.s* %x, i64 %0, i32 0
+ %arrayidx = getelementptr inbounds %struct.s, %struct.s* %x, i64 %0, i32 0
store i32 %call, i32* %arrayidx, align 4
%1 = add nsw i64 %0, 1
- %arrayidx4 = getelementptr inbounds %struct.s* %x, i64 %1, i32 0
+ %arrayidx4 = getelementptr inbounds %struct.s, %struct.s* %x, i64 %1, i32 0
store i32 %call, i32* %arrayidx4, align 4
%2 = add nsw i64 %0, 2
- %arrayidx9 = getelementptr inbounds %struct.s* %x, i64 %2, i32 0
+ %arrayidx9 = getelementptr inbounds %struct.s, %struct.s* %x, i64 %2, i32 0
store i32 %call, i32* %arrayidx9, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 500
for.body: ; preds = %entry, %for.body
%i.035 = phi i32 [ %add18, %for.body ], [ %m, %entry ]
- %arrayidx = getelementptr inbounds i32* %B, i32 %i.035
+ %arrayidx = getelementptr inbounds i32, i32* %B, i32 %i.035
%0 = load i32* %arrayidx, align 4
%mul = shl nsw i32 %0, 2
- %arrayidx2 = getelementptr inbounds i32* %A, i32 %i.035
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %i.035
store i32 %mul, i32* %arrayidx2, align 4
%add3 = add nsw i32 %i.035, 1
- %arrayidx4 = getelementptr inbounds i32* %B, i32 %add3
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i32 %add3
%1 = load i32* %arrayidx4, align 4
%mul5 = shl nsw i32 %1, 2
- %arrayidx7 = getelementptr inbounds i32* %A, i32 %add3
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i32 %add3
store i32 %mul5, i32* %arrayidx7, align 4
%add8 = add nsw i32 %i.035, 2
- %arrayidx9 = getelementptr inbounds i32* %B, i32 %add8
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i32 %add8
%2 = load i32* %arrayidx9, align 4
%mul10 = shl nsw i32 %2, 2
- %arrayidx12 = getelementptr inbounds i32* %A, i32 %add8
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i32 %add8
store i32 %mul10, i32* %arrayidx12, align 4
%add13 = add nsw i32 %i.035, 3
- %arrayidx14 = getelementptr inbounds i32* %B, i32 %add13
+ %arrayidx14 = getelementptr inbounds i32, i32* %B, i32 %add13
%3 = load i32* %arrayidx14, align 4
%mul15 = shl nsw i32 %3, 2
- %arrayidx17 = getelementptr inbounds i32* %A, i32 %add13
+ %arrayidx17 = getelementptr inbounds i32, i32* %A, i32 %add13
store i32 %mul15, i32* %arrayidx17, align 4
%add18 = add nsw i32 %i.035, 4
%cmp = icmp slt i32 %add18, %n
; CHECK: for.body: ; preds = %for.body, %for.body.preheader
; CHECK: %indvar = phi i32 [ %indvar.next, %for.body ], [ 0, %for.body.preheader ]
; CHECK: %6 = add i32 %m, %indvar
-; CHECK: %arrayidx = getelementptr inbounds i32* %B, i32 %6
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %B, i32 %6
; CHECK: %7 = load i32* %arrayidx, align 4
; CHECK: %mul = shl nsw i32 %7, 2
-; CHECK: %arrayidx2 = getelementptr inbounds i32* %A, i32 %6
+; CHECK: %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %6
; CHECK: store i32 %mul, i32* %arrayidx2, align 4
; CHECK: %indvar.next = add i32 %indvar, 1
; CHECK: %exitcond = icmp eq i32 %6, %5
for.body: ; preds = %entry, %for.body
%i.056 = phi i32 [ %add27, %for.body ], [ %rem, %entry ]
- %arrayidx = getelementptr inbounds float* %dy, i32 %i.056
+ %arrayidx = getelementptr inbounds float, float* %dy, i32 %i.056
%0 = load float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds float* %dx, i32 %i.056
+ %arrayidx1 = getelementptr inbounds float, float* %dx, i32 %i.056
%1 = load float* %arrayidx1, align 4
%mul = fmul float %1, %da
%add = fadd float %0, %mul
store float %add, float* %arrayidx, align 4
%add3 = add nsw i32 %i.056, 1
- %arrayidx4 = getelementptr inbounds float* %dy, i32 %add3
+ %arrayidx4 = getelementptr inbounds float, float* %dy, i32 %add3
%2 = load float* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds float* %dx, i32 %add3
+ %arrayidx6 = getelementptr inbounds float, float* %dx, i32 %add3
%3 = load float* %arrayidx6, align 4
%mul7 = fmul float %3, %da
%add8 = fadd float %2, %mul7
store float %add8, float* %arrayidx4, align 4
%add11 = add nsw i32 %i.056, 2
- %arrayidx12 = getelementptr inbounds float* %dy, i32 %add11
+ %arrayidx12 = getelementptr inbounds float, float* %dy, i32 %add11
%4 = load float* %arrayidx12, align 4
- %arrayidx14 = getelementptr inbounds float* %dx, i32 %add11
+ %arrayidx14 = getelementptr inbounds float, float* %dx, i32 %add11
%5 = load float* %arrayidx14, align 4
%mul15 = fmul float %5, %da
%add16 = fadd float %4, %mul15
store float %add16, float* %arrayidx12, align 4
%add19 = add nsw i32 %i.056, 3
- %arrayidx20 = getelementptr inbounds float* %dy, i32 %add19
+ %arrayidx20 = getelementptr inbounds float, float* %dy, i32 %add19
%6 = load float* %arrayidx20, align 4
- %arrayidx22 = getelementptr inbounds float* %dx, i32 %add19
+ %arrayidx22 = getelementptr inbounds float, float* %dx, i32 %add19
%7 = load float* %arrayidx22, align 4
%mul23 = fmul float %7, %da
%add24 = fadd float %6, %mul23
; CHECK: for.body:
; CHECK: %indvar = phi i32 [ %indvar.next, %for.body ], [ 0, %for.body.preheader ]
; CHECK: %6 = add i32 %rem, %indvar
-; CHECK: %arrayidx = getelementptr inbounds float* %dy, i32 %6
+; CHECK: %arrayidx = getelementptr inbounds float, float* %dy, i32 %6
; CHECK: %7 = load float* %arrayidx, align 4
-; CHECK: %arrayidx1 = getelementptr inbounds float* %dx, i32 %6
+; CHECK: %arrayidx1 = getelementptr inbounds float, float* %dx, i32 %6
; CHECK: %8 = load float* %arrayidx1, align 4
; CHECK: %mul = fmul float %8, %da
; CHECK: %add = fadd float %7, %mul
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi i32 [ 0, %entry ], [ %add12, %for.body ]
- %arrayidx = getelementptr inbounds i32* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %r.029
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %1
%2 = load i32* %arrayidx3, align 4
%add4 = add nsw i32 %add, %2
%3 = or i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds i32* %x, i64 %3
+ %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
%4 = load i32* %arrayidx7, align 4
%add8 = add nsw i32 %add4, %4
%5 = or i64 %indvars.iv, 3
- %arrayidx11 = getelementptr inbounds i32* %x, i64 %5
+ %arrayidx11 = getelementptr inbounds i32, i32* %x, i64 %5
%6 = load i32* %arrayidx11, align 4
%add12 = add nsw i32 %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %r.029 = phi i32 [ 0, %entry ], [ %add, %for.body ]
-; CHECK: %arrayidx = getelementptr inbounds i32* %x, i64 %indvar
+; CHECK: %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvar
; CHECK: %0 = load i32* %arrayidx, align 4
; CHECK: %add = add nsw i32 %0, %r.029
; CHECK: %indvar.next = add i64 %indvar, 1
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi float [ 0.0, %entry ], [ %add12, %for.body ]
- %arrayidx = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%add = fadd float %0, %r.029
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds float* %x, i64 %1
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 %1
%2 = load float* %arrayidx3, align 4
%add4 = fadd float %add, %2
%3 = or i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds float* %x, i64 %3
+ %arrayidx7 = getelementptr inbounds float, float* %x, i64 %3
%4 = load float* %arrayidx7, align 4
%add8 = fadd float %add4, %4
%5 = or i64 %indvars.iv, 3
- %arrayidx11 = getelementptr inbounds float* %x, i64 %5
+ %arrayidx11 = getelementptr inbounds float, float* %x, i64 %5
%6 = load float* %arrayidx11, align 4
%add12 = fadd float %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
; CHECK: for.body:
; CHECK: %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
; CHECK: %r.029 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
-; CHECK: %arrayidx = getelementptr inbounds float* %x, i64 %indvar
+; CHECK: %arrayidx = getelementptr inbounds float, float* %x, i64 %indvar
; CHECK: %0 = load float* %arrayidx, align 4
; CHECK: %add = fadd float %0, %r.029
; CHECK: %indvar.next = add i64 %indvar, 1
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.029 = phi i32 [ 0, %entry ], [ %add12, %for.body ]
- %arrayidx = getelementptr inbounds i32* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %0
%1 = or i64 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32* %x, i64 %1
+ %arrayidx3 = getelementptr inbounds i32, i32* %x, i64 %1
%2 = load i32* %arrayidx3, align 4
%add4 = add nsw i32 %add, %2
%3 = or i64 %indvars.iv, 2
- %arrayidx7 = getelementptr inbounds i32* %x, i64 %3
+ %arrayidx7 = getelementptr inbounds i32, i32* %x, i64 %3
%4 = load i32* %arrayidx7, align 4
%add8 = add nsw i32 %add4, %4
%5 = or i64 %indvars.iv, 3
- %arrayidx11 = getelementptr inbounds i32* %x, i64 %5
+ %arrayidx11 = getelementptr inbounds i32, i32* %x, i64 %5
%6 = load i32* %arrayidx11, align 4
%add12 = add nsw i32 %add8, %6
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 4
br label %bb21
bb: ; preds = %bb21
- %tmp3 = getelementptr %struct.list* %tmp22, i32 0, i32 0 ; <i8**> [#uses=1]
+ %tmp3 = getelementptr %struct.list, %struct.list* %tmp22, i32 0, i32 0 ; <i8**> [#uses=1]
%tmp4 = load i8** %tmp3 ; <i8*> [#uses=1]
%tmp45 = bitcast i8* %tmp4 to %struct.operator* ; <%struct.operator*> [#uses=1]
store %struct.operator* %tmp45, %struct.operator** %op
%tmp6 = load %struct.operator** %op ; <%struct.operator*> [#uses=1]
- %tmp7 = getelementptr %struct.operator* %tmp6, i32 0, i32 5 ; <i32*> [#uses=1]
+ %tmp7 = getelementptr %struct.operator, %struct.operator* %tmp6, i32 0, i32 5 ; <i32*> [#uses=1]
%tmp8 = load i32* %tmp7 ; <i32> [#uses=1]
%tmp9 = load i32* %arity_addr ; <i32> [#uses=1]
icmp eq i32 %tmp8, %tmp9 ; <i1>:0 [#uses=1]
cond_true: ; preds = %bb
%tmp10 = load %struct.operator** %op ; <%struct.operator*> [#uses=1]
- %tmp11 = getelementptr %struct.operator* %tmp10, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp11 = getelementptr %struct.operator, %struct.operator* %tmp10, i32 0, i32 2 ; <i32*> [#uses=1]
%tmp12 = load i32* %tmp11 ; <i32> [#uses=1]
%tmp13 = load %struct.FILE** @outfile ; <%struct.FILE*> [#uses=1]
- %tmp14 = getelementptr [11 x i8]* @str1, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp14 = getelementptr [11 x i8], [11 x i8]* @str1, i32 0, i32 0 ; <i8*> [#uses=1]
%tmp15 = call i32 (%struct.FILE*, i8*, ...)* @fprintf( %struct.FILE* %tmp13, i8* %tmp14, i32 %tmp12 ) ; <i32> [#uses=0]
%tmp16 = load i32* %c ; <i32> [#uses=1]
%tmp17 = add i32 %tmp16, 1 ; <i32> [#uses=1]
br label %cond_next
cond_next: ; preds = %cond_true, %bb
- %tmp19 = getelementptr %struct.list* %tmp22, i32 0, i32 1 ; <%struct.list**> [#uses=1]
+ %tmp19 = getelementptr %struct.list, %struct.list* %tmp22, i32 0, i32 1 ; <%struct.list**> [#uses=1]
%tmp20 = load %struct.list** %tmp19 ; <%struct.list*> [#uses=1]
store %struct.list* %tmp20, %struct.list** %l
br label %bb21
bb307: ; preds = %bb243, %bb52
%sx_addr.2.pn = phi float* [ %sx_addr.5, %bb243 ], [ null, %bb52 ] ; <float*> [#uses=1]
- %sx_addr.5 = getelementptr float* %sx_addr.2.pn, i32 %incx ; <float*> [#uses=1]
+ %sx_addr.5 = getelementptr float, float* %sx_addr.2.pn, i32 %incx ; <float*> [#uses=1]
br i1 false, label %bb243, label %bb310
bb310: ; preds = %bb307
for.cond: ; preds = %for.body, %entry
%i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%cmp = icmp slt i32 %i.0, 100
- %arrayidx = getelementptr inbounds [20 x i32]* %array, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [20 x i32], [20 x i32]* %array, i64 0, i64 0
br i1 %cmp, label %for.body, label %for.end
; CHECK: for.body:
br label %bb15
bb6: ; preds = %bb15
%gep.upgrd.1 = zext i32 %offset.1 to i64 ; <i64> [#uses=1]
- %tmp11 = getelementptr i8* %msg, i64 %gep.upgrd.1 ; <i8*> [#uses=0]
+ %tmp11 = getelementptr i8, i8* %msg, i64 %gep.upgrd.1 ; <i8*> [#uses=0]
br label %bb15
bb15: ; preds = %bb6, %entry
%offset.1 = add i32 0, 1 ; <i32> [#uses=2]
%mul = mul i64 %0, %row
%add = add i64 %mul, %i.0
%1 = load i8** @horzPlane, align 8
- %arrayidx = getelementptr inbounds i8* %1, i64 %add
+ %arrayidx = getelementptr inbounds i8, i8* %1, i64 %add
%2 = load i8* %arrayidx, align 1
%tobool = icmp eq i8 %2, 0
br i1 %tobool, label %for.inc, label %for.end
land.rhs: ; preds = %for.cond1
%conv = zext i32 %i.1 to i64
- %arrayidx = getelementptr inbounds [100 x i32]* %a, i64 0, i64 %conv
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* %a, i64 0, i64 %conv
%0 = load i32* %arrayidx, align 4
%add = add i32 %0, %sum.1
%cmp4 = icmp ugt i32 %add, 1000
br i1 %cmp, label %for.end, label %for.inc
for.inc: ; preds = %for.body
- %incdec.ptr.i = getelementptr inbounds i32* %coerce.val.ip9, i64 1
+ %incdec.ptr.i = getelementptr inbounds i32, i32* %coerce.val.ip9, i64 1
br label %for.cond
for.end: ; preds = %for.cond, %for.body
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %arrayidx = getelementptr inbounds double* %G, i64 %j.0 ; <double*> [#uses=1]
+ %arrayidx = getelementptr inbounds double, double* %G, i64 %j.0 ; <double*> [#uses=1]
%tmp3 = load double* %arrayidx ; <double> [#uses=1]
%sub = sub i64 %j.0, 1 ; <i64> [#uses=1]
- %arrayidx6 = getelementptr inbounds double* %G, i64 %sub ; <double*> [#uses=1]
+ %arrayidx6 = getelementptr inbounds double, double* %G, i64 %sub ; <double*> [#uses=1]
%tmp7 = load double* %arrayidx6 ; <double> [#uses=1]
%add = fadd double %tmp3, %tmp7 ; <double> [#uses=1]
- %arrayidx10 = getelementptr inbounds double* %G, i64 %j.0 ; <double*> [#uses=1]
+ %arrayidx10 = getelementptr inbounds double, double* %G, i64 %j.0 ; <double*> [#uses=1]
store double %add, double* %arrayidx10
%inc = add nsw i64 %j.0, 1 ; <i64> [#uses=1]
br label %for.cond
define void @f() {
; CHECK-LABEL: define void @f(
-; CHECK: getelementptr i8* @a, i32 0
+; CHECK: getelementptr i8, i8* @a, i32 0
entry:
br label %for.preheader
br i1 undef, label %if.end, label %if.then8
if.end:
- %arrayidx = getelementptr i8* @a, i32 0
+ %arrayidx = getelementptr i8, i8* @a, i32 0
br label %for.preheader
if.then8:
bb: ; preds = %bb4, %entry
%mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; <i8> [#uses=4]
zext i8 %mode.0 to i32 ; <i32>:1 [#uses=1]
- getelementptr [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
+ getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
load i32* %2, align 4 ; <i32>:3 [#uses=1]
icmp eq i32 %3, %0 ; <i1>:4 [#uses=1]
br i1 %4, label %bb1, label %bb2
declare void @raise_exception() noreturn
;CHECK: for.body.lr.ph:
-;CHECK-NEXT: %arrayidx1 = getelementptr inbounds i8* %CurPtr, i64 0
+;CHECK-NEXT: %arrayidx1 = getelementptr inbounds i8, i8* %CurPtr, i64 0
;CHECK-NEXT: %0 = load i8* %arrayidx1, align 1
;CHECK-NEXT: %conv2 = sext i8 %0 to i32
;CHECK-NEXT: br label %for.body
for.body: ; preds = %for.cond
%idxprom = zext i32 %i.0 to i64
- %arrayidx = getelementptr inbounds i8* %CurPtr, i64 %idxprom
+ %arrayidx = getelementptr inbounds i8, i8* %CurPtr, i64 %idxprom
%0 = load i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
- %arrayidx1 = getelementptr inbounds i8* %CurPtr, i64 0
+ %arrayidx1 = getelementptr inbounds i8, i8* %CurPtr, i64 0
%1 = load i8* %arrayidx1, align 1
%conv2 = sext i8 %1 to i32
%cmp3 = icmp ne i32 %conv, %conv2
loopentry.0: ; preds = %else.26, %0
store i32* getelementptr ([16386 x i32]* @yy_state_buf, i64 0, i64 0), i32** @yy_state_ptr
%tmp.35 = load i32** @yy_state_ptr ; <i32*> [#uses=2]
- %inc.0 = getelementptr i32* %tmp.35, i64 1 ; <i32*> [#uses=1]
+ %inc.0 = getelementptr i32, i32* %tmp.35, i64 1 ; <i32*> [#uses=1]
store i32* %inc.0, i32** @yy_state_ptr
%tmp.36 = load i32* null ; <i32> [#uses=1]
store i32 %tmp.36, i32* %tmp.35
store i8* null, i8** null
%tmp.91 = load i32* null ; <i32> [#uses=1]
%tmp.92 = sext i32 %tmp.91 to i64 ; <i64> [#uses=1]
- %tmp.93 = getelementptr [787 x i16]* @yy_base, i64 0, i64 %tmp.92 ; <i16*> [#uses=1]
+ %tmp.93 = getelementptr [787 x i16], [787 x i16]* @yy_base, i64 0, i64 %tmp.92 ; <i16*> [#uses=1]
%tmp.94 = load i16* %tmp.93 ; <i16> [#uses=1]
%tmp.95 = icmp ne i16 %tmp.94, 4394 ; <i1> [#uses=1]
br i1 %tmp.95, label %loopexit.2, label %yy_find_action
bb: ; preds = %bb2
%t1 = sext i32 %hiPart.0 to i64
- %t2 = getelementptr float* %pTmp1, i64 %t1
+ %t2 = getelementptr float, float* %pTmp1, i64 %t1
%t3 = load float* %t2, align 4
%t4 = fadd float %t3, %distERBhi.0
%t5 = add i32 %hiPart.0, 1
%t6 = sext i32 %t5 to i64
- %t7 = getelementptr float* %peakWeight, i64 %t6
+ %t7 = getelementptr float, float* %peakWeight, i64 %t6
%t8 = load float* %t7, align 4
%t9 = fadd float %t8, %peakCount.0
br label %bb1
br i1 undef, label %for.cond142.preheader.us, label %for.end174.us
for.body145.us:
- %arrayidx163.us = getelementptr inbounds %struct.Params* undef, i64 0, i32 0, i64 %idxprom130, i64 %idxprom146.us
+ %arrayidx163.us = getelementptr inbounds %struct.Params, %struct.Params* undef, i64 0, i32 0, i64 %idxprom130, i64 %idxprom146.us
br i1 undef, label %for.body145.us, label %for.inc172.us
for.cond142.preheader.us:
%indvar = phi i32 [ %indvar.next, %no_exit.2 ], [ 0, %loopexit.2 ], [ 0, %loopentry.1 ] ; <i32> [#uses=5]
%b.1.4.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %c.2.4 = getelementptr [100 x i32]* @C, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %c.2.4 = getelementptr [100 x i32], [100 x i32]* @C, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
%gep.upgrd.2 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %a.0.4 = getelementptr i32* %a.0.4.ph, i64 %gep.upgrd.2 ; <i32*> [#uses=1]
+ %a.0.4 = getelementptr i32, i32* %a.0.4.ph, i64 %gep.upgrd.2 ; <i32*> [#uses=1]
%gep.upgrd.3 = zext i32 %indvar to i64 ; <i64> [#uses=1]
- %b.1.4 = getelementptr i32* %b.1.4.ph, i64 %gep.upgrd.3 ; <i32*> [#uses=1]
+ %b.1.4 = getelementptr i32, i32* %b.1.4.ph, i64 %gep.upgrd.3 ; <i32*> [#uses=1]
%inc.0.rec = add i32 %b.1.4.rec, 1 ; <i32> [#uses=2]
- %inc.0 = getelementptr i32* %a.0.4.ph, i32 %inc.0.rec ; <i32*> [#uses=2]
+ %inc.0 = getelementptr i32, i32* %a.0.4.ph, i32 %inc.0.rec ; <i32*> [#uses=2]
%tmp.13 = load i32* %a.0.4 ; <i32> [#uses=1]
- %inc.1 = getelementptr i32* %b.1.4.ph, i32 %inc.0.rec ; <i32*> [#uses=1]
+ %inc.1 = getelementptr i32, i32* %b.1.4.ph, i32 %inc.0.rec ; <i32*> [#uses=1]
%tmp.15 = load i32* %b.1.4 ; <i32> [#uses=1]
%tmp.18 = load i32* %c.2.4 ; <i32> [#uses=1]
%tmp.16 = mul i32 %tmp.15, %tmp.13 ; <i32> [#uses=1]
%tmp. = add i32 %indvar340, %indvar342 ; <i32> [#uses=1]
%tmp.526 = load i32** null ; <i32*> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp. to i64 ; <i64> [#uses=1]
- %tmp.528 = getelementptr i32* %tmp.526, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp.528 = getelementptr i32, i32* %tmp.526, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.528
%indvar.next341 = add i32 %indvar340, 1 ; <i32> [#uses=1]
br label %loopentry.4
cond_next102: ; preds = %bb16
%tmp138145.rec = add i32 %ABC.2146.0.rec, 3 ; <i32> [#uses=1]
- %tmp138145 = getelementptr i8* %ABC, i32 %tmp138145.rec ; <i8*> [#uses=4]
+ %tmp138145 = getelementptr i8, i8* %ABC, i32 %tmp138145.rec ; <i8*> [#uses=4]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
switch i8 0, label %bb129.loopexit [
i8 0, label %UnifiedReturnBlock.loopexit
%0 = trunc i32 %j.0.reg2mem.0 to i8 ; <i8> [#uses=1]
%1 = sext i8 %0 to i32 ; <i32> [#uses=1]
%2 = mul i32 %1, %i.0.reg2mem.0.ph ; <i32> [#uses=1]
- %3 = getelementptr [32 x [256 x i32]]* @table, i32 0, i32 %i.0.reg2mem.0.ph, i32 %j.0.reg2mem.0 ; <i32*> [#uses=1]
+ %3 = getelementptr [32 x [256 x i32]], [32 x [256 x i32]]* @table, i32 0, i32 %i.0.reg2mem.0.ph, i32 %j.0.reg2mem.0 ; <i32*> [#uses=1]
store i32 %2, i32* %3, align 4
%indvar.next = add i32 %j.0.reg2mem.0, 1 ; <i32> [#uses=2]
%exitcond = icmp eq i32 %indvar.next, 256 ; <i1> [#uses=1]
while.cond.i: ; preds = %while.body.i15795, %if.then.i15791
%phi = phi i64 [ %tmp20916, %while.body.i15795 ], [ 0, %H_MPZ_LBL ]
%tmp20916 = add i64 %phi, 1
- %incdec.ptr.i15793 = getelementptr i64* %pc.0.lcssa.i1610719352, i64 %tmp20916
+ %incdec.ptr.i15793 = getelementptr i64, i64* %pc.0.lcssa.i1610719352, i64 %tmp20916
%boo2 = call i1 @foo()
br i1 %boo2, label %indirectgoto, label %while.body.i15795
loop:
%rec = phi i32 [ %next, %loop ], [ 0, %entry ]
%next = add i32 %rec, 1
- %tmp75 = getelementptr i8* null, i32 %next
+ %tmp75 = getelementptr i8, i8* null, i32 %next
br i1 false, label %loop, label %loopexit
loopexit:
loop:
%rec = phi i32 [ %next, %loop ], [ 0, %entry ]
%next = add i32 %rec, 1
- %tmp75 = getelementptr i8* null, i32 %next
+ %tmp75 = getelementptr i8, i8* null, i32 %next
br i1 false, label %loop, label %loopexit
loopexit:
; CHECK-NOT: phi
; CHECK: bitcast float* {{.*}} to i8*
; CHECK: bitcast float* {{.*}} to i8*
-; CHECK: getelementptr i8*
-; CHECK: getelementptr i8*
+; CHECK: getelementptr i8, i8*
+; CHECK: getelementptr i8, i8*
define float @test(float* nocapture %A, float* nocapture %B, i32 %N, i32 %IA, i32 %IB) nounwind uwtable readonly ssp {
entry:
%1 = load float* %B.addr.04, align 4
%mul = fmul float %0, %1
%add = fadd float %Sum0.02, %mul
- %add.ptr = getelementptr inbounds float* %A.addr.05, i64 %idx.ext
- %add.ptr3 = getelementptr inbounds float* %B.addr.04, i64 %idx.ext2
+ %add.ptr = getelementptr inbounds float, float* %A.addr.05, i64 %idx.ext
+ %add.ptr3 = getelementptr inbounds float, float* %B.addr.04, i64 %idx.ext2
%sub = add nsw i32 %N.addr.03, -1
%cmp = icmp sgt i32 %sub, 0
br i1 %cmp, label %while.body, label %while.end
%t15 = icmp ugt i32 %n15, -4
%m15 = select i1 %t15, i32 %n15, i32 -4
%a16 = add i32 %m15, %a15
- %gep = getelementptr i8* %base, i32 %a16
+ %gep = getelementptr i8, i8* %base, i32 %a16
%ofs = add i32 %a16, 4
- %limit = getelementptr i8* %base, i32 %ofs
+ %limit = getelementptr i8, i8* %base, i32 %ofs
br label %loop
loop:
%iv = phi i8* [ %gep, %bb1 ], [ %inc, %loop ]
- %inc = getelementptr inbounds i8* %iv, i64 1
+ %inc = getelementptr inbounds i8, i8* %iv, i64 1
%exitcond = icmp eq i8* %inc, %limit
br i1 %exitcond, label %loop, label %exit
for.body83: ; preds = %for.body83, %for.end
%ptr.0157 = phi i8* [ %add.ptr96, %for.body83 ], [ null, %for.end ]
store i8 undef, i8* %ptr.0157, align 1
- %add.ptr96 = getelementptr inbounds i8* %ptr.0157, i32 %cond
+ %add.ptr96 = getelementptr inbounds i8, i8* %ptr.0157, i32 %cond
br label %for.body83
}
; Inner recurrence:
; CHECK: %lsr.iv = phi i32
; Outer step (relative to inner recurrence):
-; CHECK: %scevgep = getelementptr i1* %{{.*}}, i32 %lsr.iv
+; CHECK: %scevgep = getelementptr i1, i1* %{{.*}}, i32 %lsr.iv
; Outer use:
; CHECK: %lsr.iv3 = phi [121 x i32]* [ %lsr.iv1, %for.body43.preheader ]
define void @vb() nounwind {
for.body43:
%bf.459 = phi i32 [ %inc44, %for.body43 ], [ %t1, %for.body7 ]
%inc44 = add nsw i32 %bf.459, 1
- %arrayidx45 = getelementptr inbounds [121 x i32]* @b, i32 0, i32 %bf.459
+ %arrayidx45 = getelementptr inbounds [121 x i32], [121 x i32]* @b, i32 0, i32 %bf.459
%t2 = load i32* %arrayidx45, align 4
br label %for.body43
}
bb7.us: ; preds = %bb7.lr.ph.us, %bb7.us
%j.01.us = phi i32 [ 0, %bb7.lr.ph.us ], [ %1, %bb7.us ]
%tmp31 = add i32 %tmp30, %j.01.us
- %scevgep9 = getelementptr float* %a, i32 %tmp31
+ %scevgep9 = getelementptr float, float* %a, i32 %tmp31
store float undef, float* %scevgep9, align 1
%1 = add nsw i32 %j.01.us, 1
indirectbr i8* undef, [label %bb9.us, label %bb7.us]
bb7: ; preds = %bb8.preheader, %bb7
%indvar = phi i32 [ 0, %bb8.preheader ], [ %indvar.next, %bb7 ]
- %scevgep = getelementptr [200 x i32]* %Array2, i32 %tmp26, i32 %indvar
+ %scevgep = getelementptr [200 x i32], [200 x i32]* %Array2, i32 %tmp26, i32 %indvar
store i32 undef, i32* %scevgep, align 4
%indvar.next = add i32 %indvar, 1
indirectbr i8* undef, [label %bb9, label %bb7]
while.cond238: ; preds = %land.rhs243, %while.cond238.preheader
%1 = phi i64 [ %indvar.next15, %land.rhs243 ], [ 0, %while.cond238.preheader ]
%tmp36 = add i64 %tmp16, %1
- %s.3 = getelementptr i8* %s.1, i64 %tmp36
+ %s.3 = getelementptr i8, i8* %s.1, i64 %tmp36
%cmp241 = icmp ult i8* %s.3, %end
indirectbr i8* undef, [label %land.rhs243, label %while.end256]
indirectbr i8* undef, [label %PREMATURE, label %if.end348]
if.end348: ; preds = %if.end340
- %incdec.ptr356 = getelementptr inbounds i8* undef, i64 2
+ %incdec.ptr356 = getelementptr inbounds i8, i8* undef, i64 2
indirectbr i8* undef, [label %while.cond179]
if.else386: ; preds = %if.else
for.cond: ; preds = %for.inc, %lor.lhs.false184, %if.end152
%indvar65 = phi i64 [ %indvar.next66, %for.inc ], [ 0, %lor.lhs.false184 ], [ 0, %if.end152 ]
%tmp128 = add i64 %0, %indvar65
- %s.4 = getelementptr i8* %cmd, i64 %tmp128
+ %s.4 = getelementptr i8, i8* %cmd, i64 %tmp128
%tmp195 = load i8* %s.4, align 1
indirectbr i8* undef, [label %return, label %land.rhs198]
define internal fastcc void @someFunction(%struct.this_structure_s.0.5* nocapture %scratch, i32 %stage, i32 %cbSize) nounwind {
entry:
- %0 = getelementptr inbounds %struct.this_structure_s.0.5* %scratch, i32 0, i32 4, i32 %stage
+ %0 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 4, i32 %stage
%1 = load i8** %0, align 4
- %2 = getelementptr inbounds %struct.this_structure_s.0.5* %scratch, i32 0, i32 5, i32 %stage
+ %2 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 5, i32 %stage
%3 = load i8** %2, align 4
- %4 = getelementptr inbounds %struct.this_structure_s.0.5* %scratch, i32 0, i32 2, i32 0, i32 0
+ %4 = getelementptr inbounds %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 2, i32 0, i32 0
%tmp11 = shl i32 %stage, 1
%tmp1325 = or i32 %tmp11, 1
br label %__label_D_1608
__label_D_1608: ; preds = %__label_D_1608, %entry
%i.12 = phi i32 [ 0, %entry ], [ %10, %__label_D_1608 ]
%tmp = shl i32 %i.12, 2
- %lvar_g.13 = getelementptr i32* %4, i32 %tmp
+ %lvar_g.13 = getelementptr i32, i32* %4, i32 %tmp
%tmp626 = or i32 %tmp, 1
- %scevgep = getelementptr i32* %4, i32 %tmp626
+ %scevgep = getelementptr i32, i32* %4, i32 %tmp626
%tmp727 = or i32 %tmp, 2
- %scevgep8 = getelementptr i32* %4, i32 %tmp727
+ %scevgep8 = getelementptr i32, i32* %4, i32 %tmp727
%tmp928 = or i32 %tmp, 3
- %scevgep10 = getelementptr i32* %4, i32 %tmp928
- %scevgep12 = getelementptr %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp11, i32 %i.12
- %scevgep14 = getelementptr %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp1325, i32 %i.12
+ %scevgep10 = getelementptr i32, i32* %4, i32 %tmp928
+ %scevgep12 = getelementptr %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp11, i32 %i.12
+ %scevgep14 = getelementptr %struct.this_structure_s.0.5, %struct.this_structure_s.0.5* %scratch, i32 0, i32 9, i32 %tmp1325, i32 %i.12
%5 = load i8* %scevgep12, align 1
%6 = sext i8 %5 to i32
%7 = load i8* %scevgep14, align 1
define i32 @main() nounwind uwtable ssp {
entry:
%l_2 = alloca [1 x i32], align 4
- %arrayidx = getelementptr inbounds [1 x i32]* %l_2, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [1 x i32], [1 x i32]* %l_2, i64 0, i64 0
store i32 0, i32* %arrayidx, align 4
%tmp = load i32* @g_3, align 4
%idxprom = sext i32 %tmp to i64
- %arrayidx1 = getelementptr inbounds [1 x i32]* %l_2, i64 0, i64 %idxprom
+ %arrayidx1 = getelementptr inbounds [1 x i32], [1 x i32]* %l_2, i64 0, i64 %idxprom
%tmp1 = load i32* %arrayidx1, align 4
%conv.i.i = and i32 %tmp1, 65535
%tobool.i.i.i = icmp ne i32 %tmp, 0
bb6: ; preds = %bb
tail call void @zot(i8* getelementptr inbounds (%struct.jim* @global3, i64 0, i32 5, i64 0), i8* getelementptr inbounds (%struct.jim* @global3, i64 0, i32 3, i64 1), i64 undef, i32 1, i1 false) nounwind
- %tmp7 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 5, i64 undef
+ %tmp7 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 undef
store i8 0, i8* %tmp7, align 1
%tmp8 = add nsw i32 0, 1
%tmp9 = sext i32 %tmp8 to i64
%tmp10 = add i64 %tmp9, 1
- %tmp11 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 3, i64 %tmp10
+ %tmp11 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp10
%tmp12 = sub i64 2047, %tmp9
%tmp13 = icmp eq i32 undef, 1
br i1 %tmp13, label %bb14, label %bb15
br i1 %tmp21, label %bb22, label %bb32
bb22: ; preds = %bb17
- %tmp23 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 3, i64 0
+ %tmp23 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 0
%tmp24 = load i8* %tmp23, align 1
%tmp25 = icmp eq i8 %tmp24, 58
br i1 %tmp25, label %bb30, label %bb26
bb33: ; preds = %bb30
tail call void @zot(i8* getelementptr inbounds (%struct.jim* @global3, i64 0, i32 5, i64 0), i8* %tmp11, i64 undef, i32 1, i1 false) nounwind
- %tmp34 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 5, i64 undef
+ %tmp34 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 undef
store i8 0, i8* %tmp34, align 1
%tmp35 = add nsw i32 %tmp19, 1
%tmp36 = sext i32 %tmp35 to i64
%tmp37 = add i64 %tmp36, %tmp10
- %tmp38 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 3, i64 %tmp37
+ %tmp38 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp37
%tmp39 = sub i64 %tmp12, %tmp36
br i1 false, label %bb40, label %bb41
bb59: ; preds = %bb55
%tmp60 = sext i32 %tmp45 to i64
tail call void @zot(i8* getelementptr inbounds (%struct.jim* @global3, i64 0, i32 5, i64 0), i8* %tmp38, i64 %tmp60, i32 1, i1 false) nounwind
- %tmp61 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 5, i64 %tmp60
+ %tmp61 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 5, i64 %tmp60
store i8 0, i8* %tmp61, align 1
%tmp62 = add nsw i32 %tmp45, 1
%tmp63 = sext i32 %tmp62 to i64
bb226: ; preds = %bb221
%tmp227 = add i64 %tmp222, %tmp216
- %tmp228 = getelementptr inbounds %struct.jim* @global3, i64 0, i32 3, i64 %tmp227
+ %tmp228 = getelementptr inbounds %struct.jim, %struct.jim* @global3, i64 0, i32 3, i64 %tmp227
%tmp229 = load i8* %tmp228, align 1
br i1 false, label %bb233, label %bb230
br i1 %tmp62, label %_ZNK4llvm9StringRef4findEcm.exit._crit_edge, label %bb63
bb63: ; preds = %bb61
- %tmp64 = getelementptr inbounds i8* %tmp3, i64 %i.0.i
+ %tmp64 = getelementptr inbounds i8, i8* %tmp3, i64 %i.0.i
%tmp65 = load i8* %tmp64, align 1
%tmp67 = add i64 %i.0.i, 1
br i1 undef, label %_ZNK4llvm9StringRef4findEcm.exit.loopexit, label %bb61
%len.06 = phi i64 [ 1288, %entry ], [ %sub, %while.body ]
%pDst.05 = phi i64* [ inttoptr (i64 6442450944 to i64*), %entry ], [ %incdec.ptr1, %while.body ]
%pSrc.04 = phi i64* [ inttoptr (i64 4294967296 to i64*), %entry ], [ %incdec.ptr, %while.body ]
- %incdec.ptr = getelementptr inbounds i64* %pSrc.04, i64 1
+ %incdec.ptr = getelementptr inbounds i64, i64* %pSrc.04, i64 1
%tmp = load volatile i64* %pSrc.04, align 8
- %incdec.ptr1 = getelementptr inbounds i64* %pDst.05, i64 1
+ %incdec.ptr1 = getelementptr inbounds i64, i64* %pDst.05, i64 1
store volatile i64 %tmp, i64* %pDst.05, align 8
%sub = add i64 %len.06, -8
%cmp = icmp sgt i64 %sub, -1
br i1 %cmp5, label %if.end9, label %while.body
while.body: ; preds = %land.rhs
- %incdec.ptr = getelementptr inbounds i8* %ptr.0, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %ptr.0, i64 1
store i8 %conv, i8* %ptr.0, align 1, !tbaa !0
%dec = add i64 %len.addr.0, -1
br label %while.cond
while.body18: ; preds = %if.end9, %while.body18
%wideptr.038 = phi i64* [ %incdec.ptr19, %while.body18 ], [ %9, %if.end9 ]
%len.addr.137 = phi i64 [ %sub, %while.body18 ], [ %len.addr.0, %if.end9 ]
- %incdec.ptr19 = getelementptr inbounds i64* %wideptr.038, i64 1
+ %incdec.ptr19 = getelementptr inbounds i64, i64* %wideptr.038, i64 1
store i64 %ins, i64* %wideptr.038, align 8, !tbaa !2
%sub = add i64 %len.addr.137, -8
%cmp16 = icmp ugt i64 %sub, 7
%len.addr.235 = phi i64 [ %len.addr.1.lcssa49, %while.body29.lr.ph ], [ %dec26, %while.body29 ]
%ptr.134 = phi i8* [ %10, %while.body29.lr.ph ], [ %incdec.ptr31, %while.body29 ]
%dec26 = add i64 %len.addr.235, -1
- %incdec.ptr31 = getelementptr inbounds i8* %ptr.134, i64 1
+ %incdec.ptr31 = getelementptr inbounds i8, i8* %ptr.134, i64 1
store i8 %conv, i8* %ptr.134, align 1, !tbaa !0
%cmp27 = icmp eq i64 %dec26, 0
br i1 %cmp27, label %done, label %while.body29
br label %end_of_chain
end_of_chain:
- %state.i = getelementptr inbounds %s* %call18, i32 0, i32 0
+ %state.i = getelementptr inbounds %s, %s* %call18, i32 0, i32 0
%v4 = load i32** %state.i, align 4
br label %while.cond.i.i
br i1 %tobool.i.i, label %where.exit, label %land.rhs.i.i
land.rhs.i.i:
- %arrayidx.i.i = getelementptr inbounds i32* %v4, i32 %dec.i.i
+ %arrayidx.i.i = getelementptr inbounds i32, i32* %v4, i32 %dec.i.i
%v5 = load i32* %arrayidx.i.i, align 4
- %arrayidx1.i.i = getelementptr inbounds i32* %v1, i32 %dec.i.i
+ %arrayidx1.i.i = getelementptr inbounds i32, i32* %v1, i32 %dec.i.i
%v6 = load i32* %arrayidx1.i.i, align 4
%cmp.i.i = icmp eq i32 %v5, %v6
br i1 %cmp.i.i, label %while.cond.i.i, label %equal_data.exit.i
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
%v = load i32* %iv
- %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
%v1 = load i32* %iv1
- %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
%v2 = load i32* %iv2
- %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
%v3 = load i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
%s4 = add i32 %s3, %v3
- %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ %iv4 = getelementptr inbounds i32, i32* %iv3, i32 %x
%cmp = icmp eq i32* %iv4, %b
br i1 %cmp, label %exit, label %loop
exit:
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
%v = load i32* %iv
- %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
%v1 = load i32* %iv1
- %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
%v2 = load i32* %iv2
- %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
%v3 = load i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
%s4 = add i32 %s3, %v3
- %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ %iv4 = getelementptr inbounds i32, i32* %iv3, i32 %x
store i32 %s4, i32* %iv
%cmp = icmp eq i32* %iv4, %b
br i1 %cmp, label %exit, label %loop
%res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
%0 = bitcast i8* %main.addr.011 to i32*
%1 = load i32* %0, align 4
- %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
+ %add.ptr = getelementptr inbounds i8, i8* %main.addr.011, i32 %main_stride
%2 = bitcast i8* %add.ptr to i32*
%3 = load i32* %2, align 4
- %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8* %add.ptr1 to i32*
%5 = load i32* %4, align 4
- %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
+ %add.ptr2 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8* %add.ptr2 to i32*
%7 = load i32* %6, align 4
- %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
+ %add.ptr3 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8* %add.ptr3 to i32*
%9 = load i32* %8, align 4
%add = add i32 %3, %1
%add5 = add i32 %add4, %7
%add6 = add i32 %add5, %9
store i32 %add6, i32* %res.addr.09, align 4
- %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
- %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
+ %add.ptr6 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr3.sum
+ %add.ptr7 = getelementptr inbounds i32, i32* %res.addr.09, i32 %y
%inc = add i32 %i.010, 1
%cmp = icmp eq i32 %inc, %z
br i1 %cmp, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
- %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
+ %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.07
%0 = load i8* %arrayidx, align 1
%conv5 = zext i8 %0 to i32
- %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
+ %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.07
%1 = load i8* %arrayidx1, align 1
%conv26 = zext i8 %1 to i32
%add = add nsw i32 %conv26, %conv5
%conv3 = trunc i32 %add to i8
- %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
+ %arrayidx4 = getelementptr inbounds i8, i8* %c, i32 %i.07
store i8 %conv3, i8* %arrayidx4, align 1
%inc1 = or i32 %i.07, 1
- %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
+ %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc1
%2 = load i8* %arrayidx.1, align 1
%conv5.1 = zext i8 %2 to i32
- %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
+ %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc1
%3 = load i8* %arrayidx1.1, align 1
%conv26.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv26.1, %conv5.1
%conv3.1 = trunc i32 %add.1 to i8
- %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
+ %arrayidx4.1 = getelementptr inbounds i8, i8* %c, i32 %inc1
store i8 %conv3.1, i8* %arrayidx4.1, align 1
%inc.12 = or i32 %i.07, 2
- %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
+ %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.12
%4 = load i8* %arrayidx.2, align 1
%conv5.2 = zext i8 %4 to i32
- %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
+ %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.12
%5 = load i8* %arrayidx1.2, align 1
%conv26.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv26.2, %conv5.2
%conv3.2 = trunc i32 %add.2 to i8
- %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
+ %arrayidx4.2 = getelementptr inbounds i8, i8* %c, i32 %inc.12
store i8 %conv3.2, i8* %arrayidx4.2, align 1
%inc.23 = or i32 %i.07, 3
- %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
+ %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.23
%6 = load i8* %arrayidx.3, align 1
%conv5.3 = zext i8 %6 to i32
- %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
+ %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.23
%7 = load i8* %arrayidx1.3, align 1
%conv26.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv26.3, %conv5.3
%conv3.3 = trunc i32 %add.3 to i8
- %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
+ %arrayidx4.3 = getelementptr inbounds i8, i8* %c, i32 %inc.23
store i8 %conv3.3, i8* %arrayidx4.3, align 1
%inc.3 = add nsw i32 %i.07, 4
%exitcond.3 = icmp eq i32 %inc.3, 400
%result.03 = phi <16 x i8> [ zeroinitializer, %.lr.ph ], [ %41, %11 ]
%.012 = phi <16 x i8>* [ %data, %.lr.ph ], [ %43, %11 ]
%12 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %.05, i32 1) nounwind
- %13 = getelementptr inbounds i8* %.05, i32 %ref_stride
+ %13 = getelementptr inbounds i8, i8* %.05, i32 %ref_stride
%14 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %13, i32 1) nounwind
%15 = shufflevector <1 x i64> %12, <1 x i64> %14, <2 x i32> <i32 0, i32 1>
%16 = bitcast <2 x i64> %15 to <16 x i8>
- %17 = getelementptr inbounds <16 x i8>* %.012, i32 1
+ %17 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 1
store <16 x i8> %16, <16 x i8>* %.012, align 4
- %18 = getelementptr inbounds i8* %.05, i32 %2
+ %18 = getelementptr inbounds i8, i8* %.05, i32 %2
%19 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %18, i32 1) nounwind
- %20 = getelementptr inbounds i8* %.05, i32 %3
+ %20 = getelementptr inbounds i8, i8* %.05, i32 %3
%21 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %20, i32 1) nounwind
%22 = shufflevector <1 x i64> %19, <1 x i64> %21, <2 x i32> <i32 0, i32 1>
%23 = bitcast <2 x i64> %22 to <16 x i8>
- %24 = getelementptr inbounds <16 x i8>* %.012, i32 2
+ %24 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 2
store <16 x i8> %23, <16 x i8>* %17, align 4
- %25 = getelementptr inbounds i8* %.05, i32 %4
+ %25 = getelementptr inbounds i8, i8* %.05, i32 %4
%26 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %25, i32 1) nounwind
- %27 = getelementptr inbounds i8* %.05, i32 %5
+ %27 = getelementptr inbounds i8, i8* %.05, i32 %5
%28 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %27, i32 1) nounwind
%29 = shufflevector <1 x i64> %26, <1 x i64> %28, <2 x i32> <i32 0, i32 1>
%30 = bitcast <2 x i64> %29 to <16 x i8>
- %31 = getelementptr inbounds <16 x i8>* %.012, i32 3
+ %31 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 3
store <16 x i8> %30, <16 x i8>* %24, align 4
- %32 = getelementptr inbounds i8* %.05, i32 %6
+ %32 = getelementptr inbounds i8, i8* %.05, i32 %6
%33 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %32, i32 1) nounwind
- %34 = getelementptr inbounds i8* %.05, i32 %7
+ %34 = getelementptr inbounds i8, i8* %.05, i32 %7
%35 = tail call <1 x i64> @llvm.arm.neon.vld1.v1i64(i8* %34, i32 1) nounwind
%36 = shufflevector <1 x i64> %33, <1 x i64> %35, <2 x i32> <i32 0, i32 1>
%37 = bitcast <2 x i64> %36 to <16 x i8>
%39 = add <16 x i8> %38, %30
%40 = add <16 x i8> %39, %37
%41 = add <16 x i8> %result.03, %40
- %42 = getelementptr i8* %.05, i32 %9
- %43 = getelementptr inbounds <16 x i8>* %.012, i32 -64
+ %42 = getelementptr i8, i8* %.05, i32 %9
+ %43 = getelementptr inbounds <16 x i8>, <16 x i8>* %.012, i32 -64
%44 = add nsw i32 %counter.04, 1
%exitcond = icmp eq i32 %44, %limit
br i1 %exitcond, label %._crit_edge, label %11
._crit_edge: ; preds = %11
- %scevgep = getelementptr <16 x i8>* %data, i32 %10
+ %scevgep = getelementptr <16 x i8>, <16 x i8>* %data, i32 %10
br label %45
; <label>:45 ; preds = %._crit_edge, %0
for.body: ; preds = %for.body, %entry
%i.0110 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%src.addr = phi i8* [ %src, %entry ], [ %add.ptr45, %for.body ]
- %add.ptr = getelementptr inbounds i8* %src.addr, i32 %idx.neg
+ %add.ptr = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg
%vld1 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr, i32 1)
- %add.ptr3 = getelementptr inbounds i8* %src.addr, i32 %idx.neg2
+ %add.ptr3 = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg2
%vld2 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr3, i32 1)
- %add.ptr7 = getelementptr inbounds i8* %src.addr, i32 %idx.neg6
+ %add.ptr7 = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg6
%vld3 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr7, i32 1)
- %add.ptr11 = getelementptr inbounds i8* %src.addr, i32 %idx.neg10
+ %add.ptr11 = getelementptr inbounds i8, i8* %src.addr, i32 %idx.neg10
%vld4 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr11, i32 1)
%vld5 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %src.addr, i32 1)
- %add.ptr17 = getelementptr inbounds i8* %src.addr, i32 %stride
+ %add.ptr17 = getelementptr inbounds i8, i8* %src.addr, i32 %stride
%vld6 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr17, i32 1)
- %add.ptr20 = getelementptr inbounds i8* %src.addr, i32 %mul5
+ %add.ptr20 = getelementptr inbounds i8, i8* %src.addr, i32 %mul5
%vld7 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr20, i32 1)
- %add.ptr23 = getelementptr inbounds i8* %src.addr, i32 %mul1
+ %add.ptr23 = getelementptr inbounds i8, i8* %src.addr, i32 %mul1
%vld8 = tail call <8 x i8> @llvm.arm.neon.vld1.v8i8(i8* %add.ptr23, i32 1)
%vadd1 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld1, <8 x i8> %vld2) nounwind
%vadd2 = tail call <8 x i8> @llvm.arm.neon.vhaddu.v8i8(<8 x i8> %vld2, <8 x i8> %vld3) nounwind
tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr17, <8 x i8> %vadd5, i32 1)
tail call void @llvm.arm.neon.vst1.v8i8(i8* %add.ptr20, <8 x i8> %vadd6, i32 1)
%inc = add nsw i32 %i.0110, 1
- %add.ptr45 = getelementptr inbounds i8* %src.addr, i32 8
+ %add.ptr45 = getelementptr inbounds i8, i8* %src.addr, i32 8
%exitcond = icmp eq i32 %inc, 4
br i1 %exitcond, label %for.end, label %for.body
%indvars.iv39 = phi i64 [ %indvars.iv.next40, %for.body ], [ 0, %entry ]
%dp.036 = phi i32* [ %add.ptr, %for.body ], [ %destrow, %entry ]
%p.035 = phi float* [ %incdec.ptr4, %for.body ], [ %srcrow, %entry ]
- %incdec.ptr = getelementptr inbounds float* %p.035, i64 1
+ %incdec.ptr = getelementptr inbounds float, float* %p.035, i64 1
%0 = load float* %incdec.ptr, align 4
- %incdec.ptr2 = getelementptr inbounds float* %p.035, i64 2
+ %incdec.ptr2 = getelementptr inbounds float, float* %p.035, i64 2
%1 = load float* %incdec.ptr2, align 4
- %incdec.ptr3 = getelementptr inbounds float* %p.035, i64 3
+ %incdec.ptr3 = getelementptr inbounds float, float* %p.035, i64 3
%2 = load float* %incdec.ptr3, align 4
- %incdec.ptr4 = getelementptr inbounds float* %p.035, i64 4
+ %incdec.ptr4 = getelementptr inbounds float, float* %p.035, i64 4
%3 = load float* %incdec.ptr4, align 4
%4 = load i32* %dp.036, align 4
%conv5 = fptoui float %0 to i32
%or = or i32 %4, %conv5
- %arrayidx6 = getelementptr inbounds i32* %dp.036, i64 1
+ %arrayidx6 = getelementptr inbounds i32, i32* %dp.036, i64 1
%5 = load i32* %arrayidx6, align 4
%conv7 = fptoui float %1 to i32
%or8 = or i32 %5, %conv7
- %arrayidx9 = getelementptr inbounds i32* %dp.036, i64 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %dp.036, i64 2
%6 = load i32* %arrayidx9, align 4
%conv10 = fptoui float %2 to i32
%or11 = or i32 %6, %conv10
- %arrayidx12 = getelementptr inbounds i32* %dp.036, i64 3
+ %arrayidx12 = getelementptr inbounds i32, i32* %dp.036, i64 3
%7 = load i32* %arrayidx12, align 4
%conv13 = fptoui float %3 to i32
%or14 = or i32 %7, %conv13
store i32 %or8, i32* %arrayidx6, align 4
store i32 %or11, i32* %arrayidx9, align 4
store i32 %or14, i32* %arrayidx12, align 4
- %add.ptr = getelementptr inbounds i32* %dp.036, i64 4
+ %add.ptr = getelementptr inbounds i32, i32* %dp.036, i64 4
%indvars.iv.next40 = add i64 %indvars.iv39, 4
%dummycnt = add i64 %dummyiv, 1
%cmp = icmp ult i64 %indvars.iv.next40, %count
%indvars.iv = phi i64 [ 0, %for.body23.lr.ph ], [ %indvars.iv.next, %for.body23 ]
%dp.132 = phi i32* [ %add.ptr, %for.body23.lr.ph ], [ %incdec.ptr28, %for.body23 ]
%p.131 = phi float* [ %incdec.ptr4, %for.body23.lr.ph ], [ %incdec.ptr24, %for.body23 ]
- %incdec.ptr24 = getelementptr inbounds float* %p.131, i64 1
+ %incdec.ptr24 = getelementptr inbounds float, float* %p.131, i64 1
%9 = load float* %incdec.ptr24, align 4
%10 = load i32* %dp.132, align 4
%conv25 = fptoui float %9 to i32
%or26 = or i32 %10, %conv25
store i32 %or26, i32* %dp.132, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
- %incdec.ptr28 = getelementptr inbounds i32* %dp.132, i64 1
+ %incdec.ptr28 = getelementptr inbounds i32, i32* %dp.132, i64 1
%exitcond = icmp eq i64 %indvars.iv.next, %8
br i1 %exitcond, label %for.end29, label %for.body23
for.body.i: ; preds = %for.body.i, %while.body.i
%indvars.iv.i = phi i64 [ 0, %while.body.i ], [ %indvars.iv.next.i, %for.body.i ]
%add.ptr.sum = add i64 %add.ptr.sum.i, %indvars.iv.i
- %arrayidx22.i = getelementptr inbounds i8* %base, i64 %add.ptr.sum
+ %arrayidx22.i = getelementptr inbounds i8, i8* %base, i64 %add.ptr.sum
%0 = load i8* %arrayidx22.i, align 1
%indvars.iv.next.i = add i64 %indvars.iv.i, 1
%cmp = call i1 @check() nounwind
br i1 %cmp, label %for.end.i, label %for.body.i
for.end.i: ; preds = %for.body.i
- %add.ptr.i144 = getelementptr inbounds i8* %base, i64 %add.ptr.sum.i
+ %add.ptr.i144 = getelementptr inbounds i8, i8* %base, i64 %add.ptr.sum.i
%cmp2 = tail call i1 @foo(i8* %add.ptr.i144, i8* %add.ptr.i144, i8* undef) nounwind
br i1 %cmp2, label %cond.true29.i, label %cond.false35.i
br i1 %cmp469, label %for.body471, label %for.inc498
for.body471: ; preds = %for.cond468
- %first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 1
+ %first = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 1
%1 = load i32* %first, align 4
br i1 undef, label %if.then477, label %for.inc498
if.then477: ; preds = %for.body471
- %last = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 2
+ %last = getelementptr inbounds [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771], [5000 x %struct.anon.7.91.199.307.415.475.559.643.751.835.943.1003.1111.1219.1351.1375.1399.1435.1471.1483.1519.1531.1651.1771]* @tags, i64 0, i64 %indvars.iv1163, i32 2
%indvars.iv.next1164 = add i64 %indvars.iv1163, 1
br label %for.cond468
%0 = add nsw i64 %indvars.iv.i.SV.phi, %indvars.iv8.i.SV.phi26
%1 = trunc i64 %0 to i32
%mul.i.us.i = mul nsw i32 0, %1
- %arrayidx5.us.i = getelementptr inbounds double* %u, i64 %indvars.iv.i.SV.phi
+ %arrayidx5.us.i = getelementptr inbounds double, double* %u, i64 %indvars.iv.i.SV.phi
%2 = load double* %arrayidx5.us.i, align 8
%indvars.iv.next.i = add i64 %indvars.iv.i.SV.phi, 1
br i1 undef, label %for.inc8.us.i, label %meshBB
for.body3.lr.ph.us.i: ; preds = %meshBB1, %meshBB
%indvars.iv8.i.SV.phi26 = phi i64 [ undef, %meshBB1 ], [ %indvars.iv8.i.SV.phi24, %meshBB ]
- %arrayidx.us.i = getelementptr inbounds double* undef, i64 %indvars.iv8.i.SV.phi26
+ %arrayidx.us.i = getelementptr inbounds double, double* undef, i64 %indvars.iv8.i.SV.phi26
%3 = add i64 %indvars.iv8.i.SV.phi26, 1
br label %for.body3.us.i
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
%v = load i32* %iv
- %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
%v1 = load i32* %iv1
- %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
%v2 = load i32* %iv2
- %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
%v3 = load i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
%s4 = add i32 %s3, %v3
- %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ %iv4 = getelementptr inbounds i32, i32* %iv3, i32 %x
%cmp = icmp eq i32* %iv4, %b
br i1 %cmp, label %exit, label %loop
exit:
%iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
%s = phi i32 [ 0, %entry ], [ %s4, %loop ]
%v = load i32* %iv
- %iv1 = getelementptr inbounds i32* %iv, i32 %x
+ %iv1 = getelementptr inbounds i32, i32* %iv, i32 %x
%v1 = load i32* %iv1
- %iv2 = getelementptr inbounds i32* %iv1, i32 %x
+ %iv2 = getelementptr inbounds i32, i32* %iv1, i32 %x
%v2 = load i32* %iv2
- %iv3 = getelementptr inbounds i32* %iv2, i32 %x
+ %iv3 = getelementptr inbounds i32, i32* %iv2, i32 %x
%v3 = load i32* %iv3
%s1 = add i32 %s, %v
%s2 = add i32 %s1, %v1
%s3 = add i32 %s2, %v2
%s4 = add i32 %s3, %v3
- %iv4 = getelementptr inbounds i32* %iv3, i32 %x
+ %iv4 = getelementptr inbounds i32, i32* %iv3, i32 %x
store i32 %s4, i32* %iv
%cmp = icmp eq i32* %iv4, %b
br i1 %cmp, label %exit, label %loop
%res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
%0 = bitcast i8* %main.addr.011 to i32*
%1 = load i32* %0, align 4
- %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
+ %add.ptr = getelementptr inbounds i8, i8* %main.addr.011, i32 %main_stride
%2 = bitcast i8* %add.ptr to i32*
%3 = load i32* %2, align 4
- %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
+ %add.ptr1 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr.sum
%4 = bitcast i8* %add.ptr1 to i32*
%5 = load i32* %4, align 4
- %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
+ %add.ptr2 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr1.sum
%6 = bitcast i8* %add.ptr2 to i32*
%7 = load i32* %6, align 4
- %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
+ %add.ptr3 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr4.sum
%8 = bitcast i8* %add.ptr3 to i32*
%9 = load i32* %8, align 4
%add = add i32 %3, %1
%add5 = add i32 %add4, %7
%add6 = add i32 %add5, %9
store i32 %add6, i32* %res.addr.09, align 4
- %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
- %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
+ %add.ptr6 = getelementptr inbounds i8, i8* %main.addr.011, i32 %add.ptr3.sum
+ %add.ptr7 = getelementptr inbounds i32, i32* %res.addr.09, i32 %y
%inc = add i32 %i.010, 1
%cmp = icmp eq i32 %inc, %z
br i1 %cmp, label %for.end, label %for.body
for.body: ; preds = %for.body, %entry
%i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
- %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
+ %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.07
%0 = load i8* %arrayidx, align 1
%conv5 = zext i8 %0 to i32
- %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
+ %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.07
%1 = load i8* %arrayidx1, align 1
%conv26 = zext i8 %1 to i32
%add = add nsw i32 %conv26, %conv5
%conv3 = trunc i32 %add to i8
- %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
+ %arrayidx4 = getelementptr inbounds i8, i8* %c, i32 %i.07
store i8 %conv3, i8* %arrayidx4, align 1
%inc1 = or i32 %i.07, 1
- %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
+ %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %inc1
%2 = load i8* %arrayidx.1, align 1
%conv5.1 = zext i8 %2 to i32
- %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
+ %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %inc1
%3 = load i8* %arrayidx1.1, align 1
%conv26.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv26.1, %conv5.1
%conv3.1 = trunc i32 %add.1 to i8
- %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
+ %arrayidx4.1 = getelementptr inbounds i8, i8* %c, i32 %inc1
store i8 %conv3.1, i8* %arrayidx4.1, align 1
%inc.12 = or i32 %i.07, 2
- %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
+ %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %inc.12
%4 = load i8* %arrayidx.2, align 1
%conv5.2 = zext i8 %4 to i32
- %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
+ %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %inc.12
%5 = load i8* %arrayidx1.2, align 1
%conv26.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv26.2, %conv5.2
%conv3.2 = trunc i32 %add.2 to i8
- %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
+ %arrayidx4.2 = getelementptr inbounds i8, i8* %c, i32 %inc.12
store i8 %conv3.2, i8* %arrayidx4.2, align 1
%inc.23 = or i32 %i.07, 3
- %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
+ %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %inc.23
%6 = load i8* %arrayidx.3, align 1
%conv5.3 = zext i8 %6 to i32
- %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
+ %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %inc.23
%7 = load i8* %arrayidx1.3, align 1
%conv26.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv26.3, %conv5.3
%conv3.3 = trunc i32 %add.3 to i8
- %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
+ %arrayidx4.3 = getelementptr inbounds i8, i8* %c, i32 %inc.23
store i8 %conv3.3, i8* %arrayidx4.3, align 1
%inc.3 = add nsw i32 %i.07, 4
%exitcond.3 = icmp eq i32 %inc.3, 400
%i = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
store i32 %i, i32* %p, align 4
%inc1 = or i32 %i, 1
- %add.ptr.i1 = getelementptr inbounds i32* %p, i32 1
+ %add.ptr.i1 = getelementptr inbounds i32, i32* %p, i32 1
store i32 %inc1, i32* %add.ptr.i1, align 4
%inc2 = add nsw i32 %i, 2
- %add.ptr.i2 = getelementptr inbounds i32* %p, i32 2
+ %add.ptr.i2 = getelementptr inbounds i32, i32* %p, i32 2
store i32 %inc2, i32* %add.ptr.i2, align 4
%inc3 = add nsw i32 %i, 3
- %add.ptr.i3 = getelementptr inbounds i32* %p, i32 3
+ %add.ptr.i3 = getelementptr inbounds i32, i32* %p, i32 3
store i32 %inc3, i32* %add.ptr.i3, align 4
- %p.next = getelementptr inbounds i32* %p, i32 4
+ %p.next = getelementptr inbounds i32, i32* %p, i32 4
%inc4 = add nsw i32 %i, 4
%cmp = icmp slt i32 %inc4, %n
br i1 %cmp, label %for.body, label %exit
; X32: jne
define void @testCmpZero(i8* %src, i8* %dst, i32 %srcidx, i32 %dstidx, i32 %len) nounwind ssp {
entry:
- %dest0 = getelementptr inbounds i8* %src, i32 %srcidx
- %source0 = getelementptr inbounds i8* %dst, i32 %dstidx
+ %dest0 = getelementptr inbounds i8, i8* %src, i32 %srcidx
+ %source0 = getelementptr inbounds i8, i8* %dst, i32 %dstidx
%add.ptr79.us.sum = add i32 %srcidx, %len
- %lftr.limit = getelementptr i8* %src, i32 %add.ptr79.us.sum
+ %lftr.limit = getelementptr i8, i8* %src, i32 %add.ptr79.us.sum
br label %for.body82.us
for.body82.us:
%0 = bitcast i8* %source to i32*
%1 = load i32* %0, align 4
%trunc = trunc i32 %1 to i8
- %add.ptr83.us = getelementptr inbounds i8* %source, i32 4
- %incdec.ptr91.us = getelementptr inbounds i8* %dest, i32 1
+ %add.ptr83.us = getelementptr inbounds i8, i8* %source, i32 4
+ %incdec.ptr91.us = getelementptr inbounds i8, i8* %dest, i32 1
store i8 %trunc, i8* %dest, align 1
%exitcond = icmp eq i8* %incdec.ptr91.us, %lftr.limit
br i1 %exitcond, label %return, label %for.body82.us
for.body: ; preds = %entry, %for.body.3
%i.09 = phi i32 [ %add5.3, %for.body.3 ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i8* %a, i32 %i.09
+ %arrayidx = getelementptr inbounds i8, i8* %a, i32 %i.09
%0 = load i8* %arrayidx, align 1
%conv6 = zext i8 %0 to i32
- %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.09
+ %arrayidx1 = getelementptr inbounds i8, i8* %b, i32 %i.09
%1 = load i8* %arrayidx1, align 1
%conv27 = zext i8 %1 to i32
%add = add nsw i32 %conv27, %conv6
%conv3 = trunc i32 %add to i8
- %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.09
+ %arrayidx4 = getelementptr inbounds i8, i8* %c, i32 %i.09
store i8 %conv3, i8* %arrayidx4, align 1
%add5 = add i32 %i.09, %s
%cmp = icmp ult i32 %add5, %len
ret void
for.body.1: ; preds = %for.body
- %arrayidx.1 = getelementptr inbounds i8* %a, i32 %add5
+ %arrayidx.1 = getelementptr inbounds i8, i8* %a, i32 %add5
%2 = load i8* %arrayidx.1, align 1
%conv6.1 = zext i8 %2 to i32
- %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %add5
+ %arrayidx1.1 = getelementptr inbounds i8, i8* %b, i32 %add5
%3 = load i8* %arrayidx1.1, align 1
%conv27.1 = zext i8 %3 to i32
%add.1 = add nsw i32 %conv27.1, %conv6.1
%conv3.1 = trunc i32 %add.1 to i8
- %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %add5
+ %arrayidx4.1 = getelementptr inbounds i8, i8* %c, i32 %add5
store i8 %conv3.1, i8* %arrayidx4.1, align 1
%add5.1 = add i32 %add5, %s
%cmp.1 = icmp ult i32 %add5.1, %len
br i1 %cmp.1, label %for.body.2, label %for.end
for.body.2: ; preds = %for.body.1
- %arrayidx.2 = getelementptr inbounds i8* %a, i32 %add5.1
+ %arrayidx.2 = getelementptr inbounds i8, i8* %a, i32 %add5.1
%4 = load i8* %arrayidx.2, align 1
%conv6.2 = zext i8 %4 to i32
- %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %add5.1
+ %arrayidx1.2 = getelementptr inbounds i8, i8* %b, i32 %add5.1
%5 = load i8* %arrayidx1.2, align 1
%conv27.2 = zext i8 %5 to i32
%add.2 = add nsw i32 %conv27.2, %conv6.2
%conv3.2 = trunc i32 %add.2 to i8
- %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %add5.1
+ %arrayidx4.2 = getelementptr inbounds i8, i8* %c, i32 %add5.1
store i8 %conv3.2, i8* %arrayidx4.2, align 1
%add5.2 = add i32 %add5.1, %s
%cmp.2 = icmp ult i32 %add5.2, %len
br i1 %cmp.2, label %for.body.3, label %for.end
for.body.3: ; preds = %for.body.2
- %arrayidx.3 = getelementptr inbounds i8* %a, i32 %add5.2
+ %arrayidx.3 = getelementptr inbounds i8, i8* %a, i32 %add5.2
%6 = load i8* %arrayidx.3, align 1
%conv6.3 = zext i8 %6 to i32
- %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %add5.2
+ %arrayidx1.3 = getelementptr inbounds i8, i8* %b, i32 %add5.2
%7 = load i8* %arrayidx1.3, align 1
%conv27.3 = zext i8 %7 to i32
%add.3 = add nsw i32 %conv27.3, %conv6.3
%conv3.3 = trunc i32 %add.3 to i8
- %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %add5.2
+ %arrayidx4.3 = getelementptr inbounds i8, i8* %c, i32 %add5.2
store i8 %conv3.3, i8* %arrayidx4.3, align 1
%add5.3 = add i32 %add5.2, %s
%cmp.3 = icmp ult i32 %add5.3, %len
vector.body:
%index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body14.lr.ph ]
- %4 = getelementptr inbounds i8* %rowsptr, i64 %index
+ %4 = getelementptr inbounds i8, i8* %rowsptr, i64 %index
%5 = bitcast i8* %4 to <4 x i8>*
%wide.load = load <4 x i8>* %5, align 1
%index.next = add i64 %index, 8
; CHECK-NOT: cast
; Make sure the GEP has the right index type
-; CHECK: getelementptr double addrspace(1)* [[IV]], i16 1
+; CHECK: getelementptr double, double addrspace(1)* [[IV]], i16 1
; CHECK: br {{.*}} label %bb1
; Make sure the GEP has the right index type
-; CHECK: getelementptr double addrspace(1)* {{.*}}, i16
+; CHECK: getelementptr double, double addrspace(1)* {{.*}}, i16
; This test tests several things. The load and store should use the
%tmp3 = add i64 %j.01, %tmp1 ; <i64> [#uses=1]
%tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
%z0 = add i64 %tmp3, 5203
- %tmp5 = getelementptr double addrspace(1)* %p, i64 %z0 ; <double addrspace(1)*> [#uses=1]
+ %tmp5 = getelementptr double, double addrspace(1)* %p, i64 %z0 ; <double addrspace(1)*> [#uses=1]
%tmp6 = load double addrspace(1)* %tmp5, align 8 ; <double> [#uses=1]
%tmp7 = fdiv double %tmp6, 2.100000e+00 ; <double> [#uses=1]
%z1 = add i64 %tmp4, 5203
- %tmp8 = getelementptr double addrspace(1)* %p, i64 %z1 ; <double addrspace(1)*> [#uses=1]
+ %tmp8 = getelementptr double, double addrspace(1)* %p, i64 %z1 ; <double addrspace(1)*> [#uses=1]
store double %tmp7, double addrspace(1)* %tmp8, align 8
%tmp9 = add i64 %j.01, 1 ; <i64> [#uses=2]
br label %bb2
; CHECK: bb1:
; CHECK: load double* [[IV:%[^,]+]]
; CHECK: store double {{.*}}, double* [[IV]]
-; CHECK: getelementptr double*
+; CHECK: getelementptr double, double*
; CHECK-NOT: cast
; CHECK: br {{.*}} label %bb1
%tmp3 = add i64 %j.01, %tmp1 ; <i64> [#uses=1]
%tmp4 = add i64 %j.01, %tmp2 ; <i64> [#uses=1]
%z0 = add i64 %tmp3, 5203
- %tmp5 = getelementptr double* %p, i64 %z0 ; <double*> [#uses=1]
+ %tmp5 = getelementptr double, double* %p, i64 %z0 ; <double*> [#uses=1]
%tmp6 = load double* %tmp5, align 8 ; <double> [#uses=1]
%tmp7 = fdiv double %tmp6, 2.100000e+00 ; <double> [#uses=1]
%z1 = add i64 %tmp4, 5203
- %tmp8 = getelementptr double* %p, i64 %z1 ; <double*> [#uses=1]
+ %tmp8 = getelementptr double, double* %p, i64 %z1 ; <double*> [#uses=1]
store double %tmp7, double* %tmp8, align 8
%tmp9 = add i64 %j.01, 1 ; <i64> [#uses=2]
br label %bb2
; CHECK: bb10:
; CHECK-NEXT: %t7 = icmp eq i16 %t4, 0
; Hoist %t2 computation outside the loop.
-; CHECK-NEXT: [[SCEVGEP:%[^ ]+]] = getelementptr i8 addrspace(1)* undef, i16 %t4
+; CHECK-NEXT: [[SCEVGEP:%[^ ]+]] = getelementptr i8, i8 addrspace(1)* undef, i16 %t4
; CHECK-NEXT: br label %bb14
bb10: ; preds = %bb9
%t7 = icmp eq i16 %t4, 0 ; <i1> [#uses=1]
; CHECK-NEXT: store i8 undef, i8 addrspace(1)* [[SCEVGEP]]
; CHECK-NEXT: %t6 = load float addrspace(1)* addrspace(1)* undef
; Fold %t3's add within the address.
-; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float addrspace(1)* %t6, i16 4
+; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float, float addrspace(1)* %t6, i16 4
; CHECK-NEXT: [[SCEVGEP2:%[^ ]+]] = bitcast float addrspace(1)* [[SCEVGEP1]] to i8 addrspace(1)*
; Use the induction variable (%t4) to access the right element
-; CHECK-NEXT: [[ADDRESS:%[^ ]+]] = getelementptr i8 addrspace(1)* [[SCEVGEP2]], i16 %t4
+; CHECK-NEXT: [[ADDRESS:%[^ ]+]] = getelementptr i8, i8 addrspace(1)* [[SCEVGEP2]], i16 %t4
; CHECK-NEXT: store i8 undef, i8 addrspace(1)* [[ADDRESS]]
; CHECK-NEXT: br label %bb14
bb14: ; preds = %bb14, %bb10
- %t2 = getelementptr inbounds i8 addrspace(1)* undef, i16 %t4 ; <i8*> [#uses=1]
+ %t2 = getelementptr inbounds i8, i8 addrspace(1)* undef, i16 %t4 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t2
%t6 = load float addrspace(1)* addrspace(1)* undef
%t8 = bitcast float addrspace(1)* %t6 to i8 addrspace(1)* ; <i8*> [#uses=1]
- %t9 = getelementptr inbounds i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
+ %t9 = getelementptr inbounds i8, i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t9
br label %bb14
}
br label %bb9
bb5:
%v5 = bitcast i8* %v3 to i32**
- %add.ptr.i = getelementptr inbounds i32** %v5, i64 %v0
+ %add.ptr.i = getelementptr inbounds i32*, i32** %v5, i64 %v0
br label %bb6
bb6:
%v6 = phi i32** [ null, %bb2 ], [ %add.ptr.i, %bb5 ]
invoke void @g() optsize
to label %bb3 unwind label %bb6
bb3:
- %arrayctor.next = getelementptr inbounds i8* %arrayctor.cur, i64 1
+ %arrayctor.next = getelementptr inbounds i8, i8* %arrayctor.cur, i64 1
br label %bb2
bb4:
ret void
while.cond: ; preds = %while.cond, %entry
%c.0 = phi i16* [ undef, %entry ], [ %incdec.ptr, %while.cond ]
- %incdec.ptr = getelementptr inbounds i16* %c.0, i64 1
+ %incdec.ptr = getelementptr inbounds i16, i16* %c.0, i64 1
br i1 undef, label %while.cond1, label %while.cond
while.cond1: ; preds = %while.cond1, %while.cond
%c.1 = phi i16* [ %incdec.ptr5, %while.cond1 ], [ %c.0, %while.cond ]
- %incdec.ptr5 = getelementptr inbounds i16* %c.1, i64 1
+ %incdec.ptr5 = getelementptr inbounds i16, i16* %c.1, i64 1
br i1 undef, label %while.cond7, label %while.cond1
while.cond7: ; preds = %while.cond7, %while.cond1
%0 = phi i16* [ %incdec.ptr10, %while.cond7 ], [ %c.1, %while.cond1 ]
- %incdec.ptr10 = getelementptr inbounds i16* %0, i64 1
+ %incdec.ptr10 = getelementptr inbounds i16, i16* %0, i64 1
br i1 undef, label %while.cond12.preheader, label %while.cond7
while.cond12.preheader: ; preds = %while.cond7
br label %while.body13
if.else: ; preds = %while.body13
- %incdec.ptr15 = getelementptr inbounds i16* %1, i64 1
+ %incdec.ptr15 = getelementptr inbounds i16, i16* %1, i64 1
%cmp = icmp eq i16* %incdec.ptr15, %0
br i1 %cmp, label %while.end16, label %while.body13
no_exit.2: ; preds = %no_exit.2, %then.0
%indvar630 = phi i32 [ 0, %then.0 ], [ %indvar.next631, %no_exit.2 ] ; <i32> [#uses=4]
%gep.upgrd.1 = zext i32 %indvar630 to i64 ; <i64> [#uses=1]
- %tmp.38 = getelementptr [700 x i32]* %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp.38 = getelementptr [700 x i32], [700 x i32]* %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.38
%inc.2 = add i32 %indvar630, 2 ; <i32> [#uses=2]
%tmp.34 = icmp slt i32 %inc.2, 701 ; <i1> [#uses=1]
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=3]
- %gep1 = getelementptr { i32, i32 }* %P, i32 %INDVAR, i32 0 ; <i32*> [#uses=1]
+ %gep1 = getelementptr { i32, i32 }, { i32, i32 }* %P, i32 %INDVAR, i32 0 ; <i32*> [#uses=1]
store i32 0, i32* %gep1
- %gep2 = getelementptr { i32, i32 }* %P, i32 %INDVAR, i32 1 ; <i32*> [#uses=1]
+ %gep2 = getelementptr { i32, i32 }, { i32, i32 }* %P, i32 %INDVAR, i32 1 ; <i32*> [#uses=1]
store i32 0, i32* %gep2
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=3]
- %gep1 = getelementptr [2 x i32]* %P, i32 %INDVAR, i64 0 ; <i32*> [#uses=1]
+ %gep1 = getelementptr [2 x i32], [2 x i32]* %P, i32 %INDVAR, i64 0 ; <i32*> [#uses=1]
store i32 0, i32* %gep1
- %gep2 = getelementptr [2 x i32]* %P, i32 %INDVAR, i64 1 ; <i32*> [#uses=1]
+ %gep2 = getelementptr [2 x i32], [2 x i32]* %P, i32 %INDVAR, i64 1 ; <i32*> [#uses=1]
store i32 0, i32* %gep2
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr i8* %PTR, i32 %INDVAR ; <i8*> [#uses=1]
+ %STRRED = getelementptr i8, i8* %PTR, i32 %INDVAR ; <i8*> [#uses=1]
store i8 0, i8* %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=2]
;; cannot eliminate indvar
%indvar414 = phi i64 [ %indvar.next415, %loop2.backedge ], [ 0, %loop1 ]
%tmp473 = mul i64 %indvar414, -4
%tmp485 = add i64 %tmp484, %tmp473
- %storemerge4 = getelementptr i8* %a, i64 %tmp485
+ %storemerge4 = getelementptr i8, i8* %a, i64 %tmp485
%0 = icmp ugt i8* %storemerge4, %a
br i1 false, label %loop2.exit, label %loop2.backedge
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr [10000 x i32]* %P, i32 %outer, i32 %INDVAR ; <i32*> [#uses=1]
+ %STRRED = getelementptr [10000 x i32], [10000 x i32]* %P, i32 %outer, i32 %INDVAR ; <i32*> [#uses=1]
store i32 0, i32* %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br label %Loop
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr [10000 x i32]* %P, i32 %outer, i32 %INDVAR ; <i32*> [#uses=1]
+ %STRRED = getelementptr [10000 x i32], [10000 x i32]* %P, i32 %outer, i32 %INDVAR ; <i32*> [#uses=1]
store i32 0, i32* %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
br label %if.end388
if.end388: ; preds = %if.then380, %if.else356
- %ColLength = getelementptr inbounds %struct* %fbh.0, i64 0, i32 7
+ %ColLength = getelementptr inbounds %struct, %struct* %fbh.0, i64 0, i32 7
%call405 = call signext i16 @SQLColAttribute(i8* undef, i16 zeroext %conv258, i16 zeroext 1003, i8* null, i16 signext 0, i16* null, i64* %ColLength) nounwind
br label %sw.epilog
sw.epilog: ; preds = %sw.bb542, %sw.bb523, %if.end475
%inc601 = add i16 %column_n.0, 1
- %incdec.ptr = getelementptr inbounds %struct* %fbh.0, i64 1
+ %incdec.ptr = getelementptr inbounds %struct, %struct* %fbh.0, i64 1
br label %for.body
return: ; preds = %entry
Loop: ; preds = %Loop, %0
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
%idx = call i32 @getidx( ) ; <i32> [#uses=1]
- %STRRED = getelementptr [10000 x i32]* %P, i32 %INDVAR, i32 %idx ; <i32*> [#uses=1]
+ %STRRED = getelementptr [10000 x i32], [10000 x i32]* %P, i32 %INDVAR, i32 %idx ; <i32*> [#uses=1]
store i32 0, i32* %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%cond = call i1 @pred( ) ; <i1> [#uses=1]
shortcirc_next: ; preds = %no_exit.2, %entry
%indvar37 = phi i32 [ 0, %entry ], [ %indvar.next38, %no_exit.2 ] ; <i32> [#uses=3]
%gep.upgrd.1 = zext i32 %indvar37 to i64 ; <i64> [#uses=1]
- %wp.2.4 = getelementptr i8* null, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
+ %wp.2.4 = getelementptr i8, i8* null, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
br i1 false, label %loopexit.2, label %no_exit.2
no_exit.2: ; preds = %shortcirc_next
%wp.2.4.rec = bitcast i32 %indvar37 to i32 ; <i32> [#uses=1]
%inc.1.rec = add i32 %wp.2.4.rec, 1 ; <i32> [#uses=1]
- %inc.1 = getelementptr i8* null, i32 %inc.1.rec ; <i8*> [#uses=2]
+ %inc.1 = getelementptr i8, i8* null, i32 %inc.1.rec ; <i8*> [#uses=2]
%indvar.next38 = add i32 %indvar37, 1 ; <i32> [#uses=1]
switch i8 0, label %shortcirc_next [
i8 32, label %loopexit.2
define void @_Z15IntegerToStringjjR7Vector2(i32 %i, i32 %radix, %struct.Vector2* nocapture %result) nounwind noinline {
entry:
%buffer = alloca [33 x i16], align 16
- %add.ptr = getelementptr inbounds [33 x i16]* %buffer, i64 0, i64 33
+ %add.ptr = getelementptr inbounds [33 x i16], [33 x i16]* %buffer, i64 0, i64 33
br label %do.body
do.body: ; preds = %do.body, %entry
%0 = phi i64 [ %indvar.next44, %do.body ], [ 0, %entry ]
%i.addr.0 = phi i32 [ %div, %do.body ], [ %i, %entry ]
%tmp51 = sub i64 32, %0
- %incdec.ptr = getelementptr [33 x i16]* %buffer, i64 0, i64 %tmp51
+ %incdec.ptr = getelementptr [33 x i16], [33 x i16]* %buffer, i64 0, i64 %tmp51
%rem = urem i32 %i.addr.0, 10
%div = udiv i32 %i.addr.0, 10
%idxprom = zext i32 %rem to i64
- %arrayidx = getelementptr inbounds [37 x i8]* @.str, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [37 x i8], [37 x i8]* @.str, i64 0, i64 %idxprom
%tmp5 = load i8* %arrayidx, align 1
%conv = sext i8 %tmp5 to i16
store i16 %conv, i16* %incdec.ptr, align 2
%sub.ptr.sub = sub i64 %sub.ptr.lhs.cast, %sub.ptr.rhs.cast
%sub.ptr.div39 = lshr exact i64 %sub.ptr.sub, 1
%conv11 = trunc i64 %sub.ptr.div39 to i32
- %mLength = getelementptr inbounds %struct.Vector2* %result, i64 0, i32 2
+ %mLength = getelementptr inbounds %struct.Vector2, %struct.Vector2* %result, i64 0, i32 2
%idx.ext21 = bitcast i64 %sub.ptr.div39 to i64
%incdec.ptr.sum = add i64 %idx.ext21, -1
%cp.0.sum = sub i64 %incdec.ptr.sum, %0
- %add.ptr22 = getelementptr [33 x i16]* %buffer, i64 1, i64 %cp.0.sum
+ %add.ptr22 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 1, i64 %cp.0.sum
%cmp2740 = icmp eq i64 %idx.ext21, 0
br i1 %cmp2740, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %do.end
%tmp16 = load i32* %mLength, align 4
- %mBegin = getelementptr inbounds %struct.Vector2* %result, i64 0, i32 0
+ %mBegin = getelementptr inbounds %struct.Vector2, %struct.Vector2* %result, i64 0, i32 0
%tmp14 = load i16** %mBegin, align 8
%tmp48 = zext i32 %tmp16 to i64
br label %for.body
for.body: ; preds = %for.body, %for.body.lr.ph
%indvar = phi i64 [ 0, %for.body.lr.ph ], [ %indvar.next, %for.body ]
%tmp46 = add i64 %tmp51, %indvar
- %p.042 = getelementptr [33 x i16]* %buffer, i64 0, i64 %tmp46
+ %p.042 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 0, i64 %tmp46
%tmp47 = sub i64 %indvar, %0
- %incdec.ptr32 = getelementptr [33 x i16]* %buffer, i64 1, i64 %tmp47
+ %incdec.ptr32 = getelementptr [33 x i16], [33 x i16]* %buffer, i64 1, i64 %tmp47
%tmp49 = add i64 %tmp48, %indvar
- %dst.041 = getelementptr i16* %tmp14, i64 %tmp49
+ %dst.041 = getelementptr i16, i16* %tmp14, i64 %tmp49
%tmp29 = load i16* %p.042, align 2
store i16 %tmp29, i16* %dst.041, align 2
%cmp27 = icmp eq i16* %incdec.ptr32, %add.ptr22
for.body: ; preds = %_ZN8nsTArray9ElementAtEi.exit, %entry
%i.06 = phi i32 [ %add, %_ZN8nsTArray9ElementAtEi.exit ], [ 0, %entry ]
%call.i = call %struct.nsTArrayHeader* @_ZN8nsTArray4Hdr2Ev() nounwind
- %add.ptr.i = getelementptr inbounds %struct.nsTArrayHeader* %call.i, i32 1
+ %add.ptr.i = getelementptr inbounds %struct.nsTArrayHeader, %struct.nsTArrayHeader* %call.i, i32 1
%tmp = bitcast %struct.nsTArrayHeader* %add.ptr.i to %struct.nsTArray*
- %arrayidx = getelementptr inbounds %struct.nsTArray* %tmp, i32 %i.06
+ %arrayidx = getelementptr inbounds %struct.nsTArray, %struct.nsTArray* %tmp, i32 %i.06
%add = add nsw i32 %i.06, 1
call void @llvm.dbg.value(metadata %struct.nsTArray* %aValues, i64 0, metadata !0, metadata !{}) nounwind
br label %_ZN8nsTArray9ElementAtEi.exit
_ZN8nsTArray9ElementAtEi.exit: ; preds = %for.body
- %arrayidx.i = getelementptr inbounds %struct.nsTArray* %tmp, i32 %add
+ %arrayidx.i = getelementptr inbounds %struct.nsTArray, %struct.nsTArray* %tmp, i32 %add
call void @_ZN11nsTArray15ComputeDistanceERKS_Rd(%struct.nsTArray* %arrayidx, %struct.nsTArray* %arrayidx.i) nounwind
%cmp = icmp slt i32 %add, %foo
br i1 %cmp, label %for.body, label %for.end
i8 0, label %if.then59
]
while.body51: ; preds = %land.end50
- %incdec.ptr = getelementptr inbounds i8* %tmp.1, i64 1
- %scevgep = getelementptr i8* %indvars.iv194, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %tmp.1, i64 1
+ %scevgep = getelementptr i8, i8* %indvars.iv194, i64 1
br label %while.cond40
if.then59: ; preds = %while.end
br i1 undef, label %if.then64, label %if.end113
%cmp133173 = icmp eq i8* %tmp.1, %tmp.4
br i1 %cmp133173, label %while.cond40.preheader, label %while.body139.lr.ph
while.body139.lr.ph: ; preds = %while.cond132.preheader
- %scevgep198 = getelementptr i8* %indvars.iv194, i64 0
+ %scevgep198 = getelementptr i8, i8* %indvars.iv194, i64 0
%scevgep198199 = ptrtoint i8* %scevgep198 to i64
br label %while.body139
while.body139: ; preds = %while.body139, %while.body139.lr.ph
bb9: ; preds = %bb22, %bb5
%storemerge.rec = phi i64 [ %indvar.next8, %bb5 ], [ 0, %bb22 ] ; <i64> [#uses=2]
- %storemerge = getelementptr %struct.Lit** null, i64 %storemerge.rec ; <%struct.Lit**> [#uses=2]
+ %storemerge = getelementptr %struct.Lit*, %struct.Lit** null, i64 %storemerge.rec ; <%struct.Lit**> [#uses=2]
%1 = icmp ugt %struct.Lit** null, %storemerge ; <i1> [#uses=1]
br i1 %1, label %bb5, label %bb22
%conv3 = sext i8 %conv to i64
%add = add nsw i64 %call, %storemerge1
%add4 = add nsw i64 %add, %conv3
- %arrayidx = getelementptr inbounds i8* %p, i64 %add4
+ %arrayidx = getelementptr inbounds i8, i8* %p, i64 %add4
store i8 0, i8* %arrayidx
%inc = add nsw i64 %storemerge1, 1
br label %for.cond
no_exit: ; preds = %no_exit, %entry
%indvar = phi i32 [ 0, %entry ], [ %indvar.next, %no_exit ] ; <i32> [#uses=2]
%D_addr.0.0.rec = bitcast i32 %indvar to i32 ; <i32> [#uses=2]
- %D_addr.0.0 = getelementptr double* %D, i32 %D_addr.0.0.rec ; <double*> [#uses=1]
+ %D_addr.0.0 = getelementptr double, double* %D, i32 %D_addr.0.0.rec ; <double*> [#uses=1]
%inc.rec = add i32 %D_addr.0.0.rec, 1 ; <i32> [#uses=1]
- %inc = getelementptr double* %D, i32 %inc.rec ; <double*> [#uses=1]
+ %inc = getelementptr double, double* %D, i32 %inc.rec ; <double*> [#uses=1]
store double %F, double* %D_addr.0.0
%tmp.2 = icmp eq double* %inc, %E ; <i1> [#uses=1]
%indvar.next = add i32 %indvar, 1 ; <i32> [#uses=1]
Loop: ; preds = %Loop, %0
%i = phi i32 [ 0, %0 ], [ %i.next, %Loop ]
%INDVAR = phi i32 [ 0, %0 ], [ %INDVAR2, %Loop ] ; <i32> [#uses=2]
- %STRRED = getelementptr i32* %P, i32 %INDVAR ; <i32*> [#uses=1]
+ %STRRED = getelementptr i32, i32* %P, i32 %INDVAR ; <i32*> [#uses=1]
store i32 0, i32* %STRRED
%INDVAR2 = add i32 %INDVAR, 1 ; <i32> [#uses=1]
%i.next = add i32 %i, 1
; <label>:16 ; preds = %16, %15
%17 = phi i32 [ %21, %16 ], [ undef, %15 ]
%18 = sub i32 %17, 1623127498
- %19 = getelementptr inbounds i32* undef, i32 %18
+ %19 = getelementptr inbounds i32, i32* undef, i32 %18
store i32 undef, i32* %19, align 4
%20 = add i32 %17, 1623127499
%21 = add i32 %20, -1623127498
%indvar = bitcast i32 %indvar.ui to i32 ; <i32> [#uses=1]
%N_addr.0.0 = sub i32 %N.s, %indvar ; <i32> [#uses=1]
%tmp.8 = add i32 %N_addr.0.0, %tmp.6 ; <i32> [#uses=2]
- %tmp.9 = getelementptr i8* %A, i32 %tmp.8 ; <i8*> [#uses=1]
+ %tmp.9 = getelementptr i8, i8* %A, i32 %tmp.8 ; <i8*> [#uses=1]
%tmp.10 = load i8* %tmp.9 ; <i8> [#uses=1]
- %tmp.17 = getelementptr i8* %B, i32 %tmp.8 ; <i8*> [#uses=1]
+ %tmp.17 = getelementptr i8, i8* %B, i32 %tmp.8 ; <i8*> [#uses=1]
%tmp.18 = load i8* %tmp.17 ; <i8> [#uses=1]
%tmp.19 = sub i8 %tmp.10, %tmp.18 ; <i8> [#uses=1]
%tmp.21 = add i8 %tmp.19, %Sum.0.0 ; <i8> [#uses=2]
; CHECK: bb10:
; CHECK-NEXT: %t7 = icmp eq i16 %t4, 0
; Hoist %t2 computation outside the loop.
-; CHECK-NEXT: [[SCEVGEP:%[^ ]+]] = getelementptr i8 addrspace(1)* undef, i16 %t4
+; CHECK-NEXT: [[SCEVGEP:%[^ ]+]] = getelementptr i8, i8 addrspace(1)* undef, i16 %t4
; CHECK-NEXT: br label %bb14
bb10: ; preds = %bb9
%t7 = icmp eq i16 %t4, 0 ; <i1> [#uses=1]
; CHECK-NEXT: store i8 undef, i8 addrspace(1)* [[SCEVGEP]]
; CHECK-NEXT: %t6 = load float addrspace(1)* addrspace(1)* undef
; Fold %t3's add within the address.
-; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float addrspace(1)* %t6, i16 4
+; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float, float addrspace(1)* %t6, i16 4
; CHECK-NEXT: [[SCEVGEP2:%[^ ]+]] = bitcast float addrspace(1)* [[SCEVGEP1]] to i8 addrspace(1)*
; Use the induction variable (%t4) to access the right element
-; CHECK-NEXT: [[ADDRESS:%[^ ]+]] = getelementptr i8 addrspace(1)* [[SCEVGEP2]], i16 %t4
+; CHECK-NEXT: [[ADDRESS:%[^ ]+]] = getelementptr i8, i8 addrspace(1)* [[SCEVGEP2]], i16 %t4
; CHECK-NEXT: store i8 undef, i8 addrspace(1)* [[ADDRESS]]
; CHECK-NEXT: br label %bb14
bb14: ; preds = %bb14, %bb10
- %t2 = getelementptr inbounds i8 addrspace(1)* undef, i16 %t4 ; <i8*> [#uses=1]
+ %t2 = getelementptr inbounds i8, i8 addrspace(1)* undef, i16 %t4 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t2
%t6 = load float addrspace(1)* addrspace(1)* undef
%t8 = bitcast float addrspace(1)* %t6 to i8 addrspace(1)* ; <i8*> [#uses=1]
- %t9 = getelementptr inbounds i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
+ %t9 = getelementptr inbounds i8, i8 addrspace(1)* %t8, i16 %t3 ; <i8*> [#uses=1]
store i8 undef, i8 addrspace(1)* %t9
br label %bb14
}
; CHECK: bb10:
; CHECK-NEXT: %t7 = icmp eq i64 %t4, 0
; Hoist %t2 computation outside the loop.
-; CHECK-NEXT: [[SCEVGEP:%[^ ]+]] = getelementptr i8* undef, i64 %t4
+; CHECK-NEXT: [[SCEVGEP:%[^ ]+]] = getelementptr i8, i8* undef, i64 %t4
; CHECK-NEXT: br label %bb14
bb10: ; preds = %bb9
%t7 = icmp eq i64 %t4, 0 ; <i1> [#uses=1]
; CHECK-NEXT: store i8 undef, i8* [[SCEVGEP]]
; CHECK-NEXT: %t6 = load float** undef
; Fold %t3's add within the address.
-; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float* %t6, i64 4
+; CHECK-NEXT: [[SCEVGEP1:%[^ ]+]] = getelementptr float, float* %t6, i64 4
; CHECK-NEXT: [[SCEVGEP2:%[^ ]+]] = bitcast float* [[SCEVGEP1]] to i8*
; Use the induction variable (%t4) to access the right element
-; CHECK-NEXT: [[ADDRESS:%[^ ]+]] = getelementptr i8* [[SCEVGEP2]], i64 %t4
+; CHECK-NEXT: [[ADDRESS:%[^ ]+]] = getelementptr i8, i8* [[SCEVGEP2]], i64 %t4
; CHECK-NEXT: store i8 undef, i8* [[ADDRESS]]
; CHECK-NEXT: br label %bb14
bb14: ; preds = %bb14, %bb10
- %t2 = getelementptr inbounds i8* undef, i64 %t4 ; <i8*> [#uses=1]
+ %t2 = getelementptr inbounds i8, i8* undef, i64 %t4 ; <i8*> [#uses=1]
store i8 undef, i8* %t2
%t6 = load float** undef
%t8 = bitcast float* %t6 to i8* ; <i8*> [#uses=1]
- %t9 = getelementptr inbounds i8* %t8, i64 %t3 ; <i8*> [#uses=1]
+ %t9 = getelementptr inbounds i8, i8* %t8, i64 %t3 ; <i8*> [#uses=1]
store i8 undef, i8* %t9
br label %bb14
}
%indvar630.ui = phi i32 [ 0, %then.0 ], [ %indvar.next631, %no_exit.2 ] ; <i32> [#uses=3]
%indvar630 = bitcast i32 %indvar630.ui to i32 ; <i32> [#uses=2]
%gep.upgrd.1 = zext i32 %indvar630.ui to i64 ; <i64> [#uses=1]
- %tmp.38 = getelementptr [700 x i32]* %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
+ %tmp.38 = getelementptr [700 x i32], [700 x i32]* %nbeaux_.0__558, i32 0, i64 %gep.upgrd.1 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.38
%inc.2 = add i32 %indvar630, 2 ; <i32> [#uses=1]
%tmp.34 = call i1 @pred( i32 %indvar630 ) ; <i1> [#uses=1]
%tmp.16 = add i32 %tmp.15, %tmp. ; <i32> [#uses=2]
%k_addr.0.0 = bitcast i32 %tmp.16 to i32 ; <i32> [#uses=1]
%gep.upgrd.1 = zext i32 %tmp.16 to i64 ; <i64> [#uses=1]
- %tmp = getelementptr [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
+ %tmp = getelementptr [8193 x i8], [8193 x i8]* @flags2, i32 0, i64 %gep.upgrd.1 ; <i8*> [#uses=1]
store i8 0, i8* %tmp
%k_addr.0 = add i32 %k_addr.0.0, %i.s ; <i32> [#uses=1]
%tmp.upgrd.2 = icmp sgt i32 %k_addr.0, 8192 ; <i1> [#uses=1]
define void @Foo(%struct.__mpz_struct* %base) {
entry:
%want = alloca [1 x %struct.__mpz_struct], align 16 ; <[1 x %struct.__mpz_struct]*> [#uses=4]
- %want1 = getelementptr [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=1]
+ %want1 = getelementptr [1 x %struct.__mpz_struct], [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=1]
call void @__gmpz_init( %struct.__mpz_struct* %want1 )
- %want27 = getelementptr [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=1]
- %want3 = getelementptr [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=1]
- %want2 = getelementptr [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=2]
+ %want27 = getelementptr [1 x %struct.__mpz_struct], [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=1]
+ %want3 = getelementptr [1 x %struct.__mpz_struct], [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=1]
+ %want2 = getelementptr [1 x %struct.__mpz_struct], [1 x %struct.__mpz_struct]* %want, i32 0, i32 0 ; <%struct.__mpz_struct*> [#uses=2]
br label %bb
bb: ; preds = %bb, %entry
bb: ; preds = %bb.nph, %bb1
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb1 ] ; <i64> [#uses=2]
%s.01 = phi i32 [ 0, %bb.nph ], [ %2, %bb1 ] ; <i32> [#uses=1]
- %scevgep = getelementptr i32* %p, i64 %indvar ; <i32*> [#uses=1]
+ %scevgep = getelementptr i32, i32* %p, i64 %indvar ; <i32*> [#uses=1]
%1 = load i32* %scevgep, align 1 ; <i32> [#uses=1]
%2 = add nsw i32 %1, %s.01 ; <i32> [#uses=2]
br label %bb1
; CHECK: while.body:
; CHECK-NOT: while.body.1:
; CHECK: %shr.1 = lshr i32 %bit_addr.addr.01, 5
-; CHECK: %arrayidx.1 = getelementptr inbounds i32* %bitmap, i32 %shr.1
+; CHECK: %arrayidx.1 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.1
; CHECK: %shr.2 = lshr i32 %bit_addr.addr.01, 5
-; CHECK: %arrayidx.2 = getelementptr inbounds i32* %bitmap, i32 %shr.2
+; CHECK: %arrayidx.2 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.2
; CHECK: %shr.3 = lshr i32 %bit_addr.addr.01, 5
-; CHECK: %arrayidx.3 = getelementptr inbounds i32* %bitmap, i32 %shr.3
+; CHECK: %arrayidx.3 = getelementptr inbounds i32, i32* %bitmap, i32 %shr.3
define void @FlipBit(i32* nocapture %bitmap, i32 %bit_addr, i32 %nbits) nounwind {
entry:
br label %while.body
%shr = lshr i32 %bit_addr.addr.01, 5
%rem = and i32 %bit_addr.addr.01, 31
%shl = shl i32 1, %rem
- %arrayidx = getelementptr inbounds i32* %bitmap, i32 %shr
+ %arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
%tmp6 = load i32* %arrayidx, align 4
%xor = xor i32 %tmp6, %shl
store i32 %xor, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i8* %arr, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i8, i8* %arr, i64 %indvars.iv
%0 = load i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
%add = add nsw i32 %conv, %sum.02
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
- %0 = getelementptr inbounds double* %b, i64 %index
+ %0 = getelementptr inbounds double, double* %b, i64 %index
%1 = bitcast double* %0 to <2 x double>*
%wide.load = load <2 x double>* %1, align 8
%.sum9 = or i64 %index, 2
- %2 = getelementptr double* %b, i64 %.sum9
+ %2 = getelementptr double, double* %b, i64 %.sum9
%3 = bitcast double* %2 to <2 x double>*
%wide.load8 = load <2 x double>* %3, align 8
%4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
- %6 = getelementptr inbounds double* %a, i64 %index
+ %6 = getelementptr inbounds double, double* %a, i64 %index
%7 = bitcast double* %6 to <2 x double>*
store <2 x double> %4, <2 x double>* %7, align 8
%.sum10 = or i64 %index, 2
- %8 = getelementptr double* %a, i64 %.sum10
+ %8 = getelementptr double, double* %a, i64 %.sum10
%9 = bitcast double* %8 to <2 x double>*
store <2 x double> %5, <2 x double>* %9, align 8
%index.next = add i64 %index, 4
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
- %v0 = getelementptr inbounds double* %b, i64 %index
+ %v0 = getelementptr inbounds double, double* %b, i64 %index
%v1 = bitcast double* %v0 to <2 x double>*
%wide.load = load <2 x double>* %v1, align 8
%v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
- %v6 = getelementptr inbounds double* %a, i64 %index
+ %v6 = getelementptr inbounds double, double* %a, i64 %index
%v7 = bitcast double* %v6 to <2 x double>*
store <2 x double> %v5, <2 x double>* %v7, align 8
%index.next = add i64 %index, 2
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i16* %arr, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i16, i16* %arr, i64 %indvars.iv
%0 = load i16* %arrayidx, align 2
%add = add i16 %0, %reduction.026
%sext = mul i64 %indvars.iv, 12884901888
%idxprom3 = ashr exact i64 %sext, 32
- %arrayidx4 = getelementptr inbounds i16* %arr, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds i16, i16* %arr, i64 %idxprom3
%1 = load i16* %arrayidx4, align 2
%add7 = add i16 %add, %1
%sext28 = mul i64 %indvars.iv, 21474836480
%idxprom10 = ashr exact i64 %sext28, 32
- %arrayidx11 = getelementptr inbounds i16* %arr, i64 %idxprom10
+ %arrayidx11 = getelementptr inbounds i16, i16* %arr, i64 %idxprom10
%2 = load i16* %arrayidx11, align 2
%add14 = add i16 %add7, %2
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
; This loop will be completely unrolled, even with these extra instructions,
; If the absolute threshold is too low, or if we can't optimize away requested
; percent of instructions, we shouldn't unroll:
-; TEST1: %array_const_idx = getelementptr inbounds [9 x i32]* @known_constant, i64 0, i64 %iv
-; TEST3: %array_const_idx = getelementptr inbounds [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST1: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST3: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
; Otherwise, we should:
-; TEST2-NOT: %array_const_idx = getelementptr inbounds [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST2-NOT: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
; Also, we should unroll if the 'unroll-threshold' is big enough:
-; TEST4-NOT: %array_const_idx = getelementptr inbounds [9 x i32]* @known_constant, i64 0, i64 %iv
+; TEST4-NOT: %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
; And check that we don't crash when we're not allowed to do any analysis.
; RUN: opt < %s -loop-unroll -unroll-max-iteration-count-to-analyze=0 -disable-output
loop: ; preds = %loop, %entry
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
- %arrayidx = getelementptr inbounds i32* %src, i64 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
%src_element = load i32* %arrayidx, align 4
- %array_const_idx = getelementptr inbounds [9 x i32]* @known_constant, i64 0, i64 %iv
+ %array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
%const_array_element = load i32* %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
; The real loop.
%mul = mul nsw i32 %B, %C
- %arrayidx = getelementptr inbounds i32* %A, i32 %i.01
+ %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.01
store i32 %mul, i32* %arrayidx, align 4
%inc = add nsw i32 %i.01, 1
%exitcond = icmp ne i32 %inc, 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.01
%indvars.iv.next = add i64 %indvars.iv, 1
%p.addr.05 = phi i16* [ %incdec.ptr, %for.body ], [ %p, %entry ]
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %entry ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %incdec.ptr = getelementptr inbounds i16* %p.addr.05, i64 1
+ %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i64 1
%0 = load i16* %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body3 ], [ 0, %for.cond1.preheader ]
%sum.19 = phi i32 [ %add4, %for.body3 ], [ %sum.012, %for.cond1.preheader ]
%0 = add nsw i64 %indvars.iv, %indvars.iv16
- %arrayidx = getelementptr inbounds i32* %a, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %0
%1 = load i32* %arrayidx, align 4
%add4 = add nsw i32 %1, %sum.19
%indvars.iv.next = add i64 %indvars.iv, 1
%iv = phi i64 [ 10, %entry ], [ %iv.next, %while.body ]
%sum = phi i32 [ 0, %entry ], [ %sum.next, %while.body ]
%iv.next = add i64 %iv, -1
- %adr = getelementptr inbounds i32* %base, i64 %iv.next
+ %adr = getelementptr inbounds i32, i32* %base, i64 %iv.next
%tmp = load i32* %adr, align 8
%sum.next = add i32 %sum, %tmp
%iv.narrow = trunc i64 %iv.next to i32
loop:
%iv = phi i64 [ 0, %entry ], [ %inc, %tail ]
%s = phi i64 [ 0, %entry ], [ %s.next, %tail ]
- %adr = getelementptr i64* %base, i64 %iv
+ %adr = getelementptr i64, i64* %base, i64 %iv
%val = load i64* %adr
%s.next = add i64 %s, %val
%inc = add i64 %iv, 1
; SCEV properly unrolls multi-exit loops.
;
; CHECK-LABEL: @multiExit(
-; CHECK: getelementptr i32* %base, i32 10
+; CHECK: getelementptr i32, i32* %base, i32 10
; CHECK-NEXT: load i32*
; CHECK: br i1 false, label %l2.10, label %exit1
; CHECK: l2.10:
%iv2 = phi i32 [ 0, %entry ], [ %inc2, %l2 ]
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
- %adr = getelementptr i32* %base, i32 %iv1
+ %adr = getelementptr i32, i32* %base, i32 %iv1
%val = load i32* %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
%iv2 = phi i32 [ 0, %entry ], [ %inc2, %l3 ]
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
- %adr = getelementptr i32* %base, i32 %iv1
+ %adr = getelementptr i32, i32* %base, i32 %iv1
%val = load i32* %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
for.body: ; preds = %entry, %for.body
%i.013 = phi i64 [ %tmp16, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
- %arrayidx7 = getelementptr double* %p, i64 %i.013 ; <double*> [#uses=2]
+ %arrayidx7 = getelementptr double, double* %p, i64 %i.013 ; <double*> [#uses=2]
%tmp16 = add i64 %i.013, 1 ; <i64> [#uses=3]
- %arrayidx = getelementptr double* %p, i64 %tmp16 ; <double*> [#uses=1]
+ %arrayidx = getelementptr double, double* %p, i64 %tmp16 ; <double*> [#uses=1]
%tmp4 = load double* %arrayidx ; <double> [#uses=1]
%tmp8 = load double* %arrayidx7 ; <double> [#uses=1]
%mul9 = fmul double %tmp8, %tmp4 ; <double> [#uses=1]
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body3: ; preds = %for.body3, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds i32* %List, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %List, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add4 = add nsw i32 %0, 10
store i32 %add4, i32* %arrayidx, align 4
for.body3.1: ; preds = %for.body3.1.preheader, %for.body3.1
%indvars.iv.1 = phi i64 [ %1, %for.body3.1 ], [ 0, %for.body3.1.preheader ]
%1 = add nsw i64 %indvars.iv.1, 1
- %arrayidx.1 = getelementptr inbounds i32* %List, i64 %1
+ %arrayidx.1 = getelementptr inbounds i32, i32* %List, i64 %1
%2 = load i32* %arrayidx.1, align 4
%add4.1 = add nsw i32 %2, 10
store i32 %add4.1, i32* %arrayidx.1, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
br i1 false, label %cond_next1961, label %cond_next2009
cond_next1961: ; preds = %cond_next1929
- %tmp1992 = getelementptr i8* %b.10.ph, i32 0 ; <i8*> [#uses=0]
+ %tmp1992 = getelementptr i8, i8* %b.10.ph, i32 0 ; <i8*> [#uses=0]
br label %cond_next1915
cond_next2009: ; preds = %cond_next1929
if.then: ; preds = %for.body
%idxprom = sext i32 %inc1 to i64
- %array_ = getelementptr inbounds %class.MyContainer.1.3.19.29* %this, i32 0, i32 0
- %arrayidx = getelementptr inbounds [6 x %class.MyMemVarClass.0.2.18.28*]* %array_, i32 0, i64 %idxprom
+ %array_ = getelementptr inbounds %class.MyContainer.1.3.19.29, %class.MyContainer.1.3.19.29* %this, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [6 x %class.MyMemVarClass.0.2.18.28*], [6 x %class.MyMemVarClass.0.2.18.28*]* %array_, i32 0, i64 %idxprom
%tmp4 = load %class.MyMemVarClass.0.2.18.28** %arrayidx, align 8
%isnull = icmp eq %class.MyMemVarClass.0.2.18.28* %tmp4, null
br i1 %isnull, label %for.inc, label %delete.notnull
%this.addr = alloca %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379*, align 8
store %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr, align 8
%this1 = load %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379** %this.addr
- %px = getelementptr inbounds %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this1, i32 0, i32 0
+ %px = getelementptr inbounds %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379, %class.D.22.42.66.102.138.158.178.198.238.242.246.250.262.294.302.338.346.379* %this1, i32 0, i32 0
%0 = load %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376** %px, align 8
%tobool = icmp ne %class.C.23.43.67.103.139.159.179.199.239.243.247.251.263.295.303.339.347.376* %0, null
br i1 %tobool, label %cond.end, label %cond.false
no_exit: ; preds = %no_exit.backedge, %entry
%i.0.0 = phi i32 [ 0, %entry ], [ %i.0.0.be, %no_exit.backedge ] ; <i32> [#uses=3]
%gep.upgrd.1 = zext i32 %i.0.0 to i64 ; <i64> [#uses=1]
- %tmp.7 = getelementptr i32* %A, i64 %gep.upgrd.1 ; <i32*> [#uses=4]
+ %tmp.7 = getelementptr i32, i32* %A, i64 %gep.upgrd.1 ; <i32*> [#uses=4]
%tmp.13 = load i32* %tmp.7 ; <i32> [#uses=2]
%tmp.14 = add i32 %tmp.13, 1 ; <i32> [#uses=1]
store i32 %tmp.14, i32* %tmp.7
br i1 %2, label %bb10, label %bb2
bb2: ; preds = %bb
- %3 = getelementptr inbounds i8* %p_addr.0, i32 1 ; <i8*> [#uses=3]
+ %3 = getelementptr inbounds i8, i8* %p_addr.0, i32 1 ; <i8*> [#uses=3]
switch i32 %ineq.0.ph, label %bb8.backedge [
i32 0, label %bb3
i32 1, label %bb6
for.body: ; preds = %entry, %if.end
%indvars.iv = phi i64 [ %indvars.iv.next, %if.end ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %if.end, label %if.then
"3": ; preds = %"3.lr.ph", %"3"
%indvars.iv = phi i64 [ 0, %"3.lr.ph" ], [ %indvars.iv.next, %"3" ]
%3 = shl nsw i64 %indvars.iv, 2
- %4 = getelementptr inbounds i8* %1, i64 %3
+ %4 = getelementptr inbounds i8, i8* %1, i64 %3
%5 = bitcast i8* %4 to float*
store float %value, float* %5, align 4
%indvars.iv.next = add i64 %indvars.iv, %2
"3": ; preds = %"3", %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %"3" ]
%0 = shl nsw i64 %indvars.iv, 2
- %1 = getelementptr inbounds i8* bitcast (float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 16000) to i8*), i64 %0
+ %1 = getelementptr inbounds i8, i8* bitcast (float* getelementptr inbounds ([32000 x float]* @b, i64 0, i64 16000) to i8*), i64 %0
%2 = bitcast i8* %1 to float*
store float -1.000000e+00, float* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
- %arrayidx4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
store i32 %add, i32* %arrayidx4, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%A.addr = phi i32* [ %A, %entry ], [ %inc.ptr, %for.body ]
%i = phi i32 [ 0, %entry ], [ %add1, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %inc.ptr = getelementptr inbounds i32* %A.addr, i64 1
+ %inc.ptr = getelementptr inbounds i32, i32* %A.addr, i64 1
%0 = load i32* %A.addr, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%A.addr = phi i32* [ %A, %entry ], [ %inc.ptr, %for.body ]
%i = phi i32 [ 1024, %entry ], [ %sub, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %inc.ptr = getelementptr inbounds i32* %A.addr, i64 1
+ %inc.ptr = getelementptr inbounds i32, i32* %A.addr, i64 1
%0 = load i32* %A.addr, align 4
%mul = mul nsw i32 %0, %i
%add = add nsw i32 %mul, %sum
%A.addr = phi i32* [ %A, %entry ], [ %inc.ptr1, %for.body ]
%sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
%i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
- %inc.ptr = getelementptr inbounds i32* %A.addr, i64 1
+ %inc.ptr = getelementptr inbounds i32, i32* %A.addr, i64 1
%0 = load i32* %A.addr, align 4
- %inc.ptr1 = getelementptr inbounds i32* %A.addr, i64 2
+ %inc.ptr1 = getelementptr inbounds i32, i32* %A.addr, i64 2
%1 = load i32* %inc.ptr, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, %sum
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
- %arrayidx4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
store i32 %add, i32* %arrayidx4, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
%add = add i64 %v.055, %offset
%mul = mul i64 %add, 3
- %arrayidx = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %mul
+ %arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %mul
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds [512 x float]* @kernel, i64 0, i64 %v.055
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i64 0, i64 %v.055
%1 = load float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %1
- %arrayidx4 = getelementptr inbounds [512 x float]* @kernel2, i64 0, i64 %v.055
+ %arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i64 0, i64 %v.055
%2 = load float* %arrayidx4, align 4
%mul5 = fmul fast float %mul3, %2
- %arrayidx6 = getelementptr inbounds [512 x float]* @kernel3, i64 0, i64 %v.055
+ %arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i64 0, i64 %v.055
%3 = load float* %arrayidx6, align 4
%mul7 = fmul fast float %mul5, %3
- %arrayidx8 = getelementptr inbounds [512 x float]* @kernel4, i64 0, i64 %v.055
+ %arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i64 0, i64 %v.055
%4 = load float* %arrayidx8, align 4
%mul9 = fmul fast float %mul7, %4
%add10 = fadd fast float %r.057, %mul9
%arrayidx.sum = add i64 %mul, 1
- %arrayidx11 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
+ %arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
%5 = load float* %arrayidx11, align 4
%mul13 = fmul fast float %1, %5
%mul15 = fmul fast float %2, %mul13
%mul19 = fmul fast float %4, %mul17
%add20 = fadd fast float %g.056, %mul19
%arrayidx.sum52 = add i64 %mul, 2
- %arrayidx21 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
+ %arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
%6 = load float* %arrayidx21, align 4
%mul23 = fmul fast float %1, %6
%mul25 = fmul fast float %2, %mul23
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%div = sdiv i32 %0, 2
- %arrayidx2 = getelementptr inbounds %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv
store i32 %div, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 100
.lr.ph: ; preds = %0, %.lr.ph
%i.02 = phi i32 [ %5, %.lr.ph ], [ 0, %0 ]
%sum.01 = phi i32 [ %4, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i32 %i.02
+ %2 = getelementptr inbounds i32, i32* %A, i32 %i.02
%3 = load i32* %2, align 4
%4 = add nsw i32 %3, %sum.01
%5 = add nsw i32 %i.02, 1
%sum.04 = phi i32 [ %8, %.lr.ph ], [ 0, %0 ]
%sum.05 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
%sum.06 = phi i32 [ %10, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i32 %i.02
+ %2 = getelementptr inbounds i32, i32* %A, i32 %i.02
%3 = load i32* %2, align 4
%4 = add nsw i32 %3, %sum.01
%5 = add nsw i32 %i.02, 1
%b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
%add = add i32 %v.055, %offset
%mul = mul i32 %add, 3
- %arrayidx = getelementptr inbounds [1536 x float]* @src_data, i32 0, i32 %mul
+ %arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %mul
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds [512 x float]* @kernel, i32 0, i32 %v.055
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i32 0, i32 %v.055
%1 = load float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %1
- %arrayidx4 = getelementptr inbounds [512 x float]* @kernel2, i32 0, i32 %v.055
+ %arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i32 0, i32 %v.055
%2 = load float* %arrayidx4, align 4
%mul5 = fmul fast float %mul3, %2
- %arrayidx6 = getelementptr inbounds [512 x float]* @kernel3, i32 0, i32 %v.055
+ %arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i32 0, i32 %v.055
%3 = load float* %arrayidx6, align 4
%mul7 = fmul fast float %mul5, %3
- %arrayidx8 = getelementptr inbounds [512 x float]* @kernel4, i32 0, i32 %v.055
+ %arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i32 0, i32 %v.055
%4 = load float* %arrayidx8, align 4
%mul9 = fmul fast float %mul7, %4
%add10 = fadd fast float %r.057, %mul9
%arrayidx.sum = add i32 %mul, 1
- %arrayidx11 = getelementptr inbounds [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum
+ %arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum
%5 = load float* %arrayidx11, align 4
%mul13 = fmul fast float %1, %5
%mul15 = fmul fast float %2, %mul13
%mul19 = fmul fast float %4, %mul17
%add20 = fadd fast float %g.056, %mul19
%arrayidx.sum52 = add i32 %mul, 2
- %arrayidx21 = getelementptr inbounds [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum52
+ %arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i32 0, i32 %arrayidx.sum52
%6 = load float* %arrayidx21, align 4
%mul23 = fmul fast float %1, %6
%mul25 = fmul fast float %2, %mul23
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds i16* %sb, i64 %indvars.iv
+ %2 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
%3 = load i16* %2, align 2
%4 = sext i16 %3 to i32
- %5 = getelementptr inbounds i32* %ia, i64 %indvars.iv
+ %5 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %4, i32* %5, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%prod.01 = phi float [ %4, %.lr.ph ], [ 0.000000e+00, %0 ]
- %2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
%3 = load float* %2, align 8
%4 = fmul fast float %prod.01, %3
%indvars.iv.next = add i64 %indvars.iv, 1
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%red.01 = phi i8 [ %4, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i8* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv
%3 = load i8* %2, align 1
%4 = xor i8 %3, %red.01
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body, %for.body.lr.ph
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%redx.05 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds double* %arr, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %arr, i64 %indvars.iv
%1 = load double* %arrayidx, align 8
%add = fadd fast double %1, %redx.05
%indvars.iv.next = add i64 %indvars.iv, 1
for.body3: ; preds = %for.body3, %for.cond1.preheader
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
- %arrayidx = getelementptr inbounds %struct.GlobalData* @global_data, i64 0, i32 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds %struct.GlobalData, %struct.GlobalData* @global_data, i64 0, i32 0, i64 %indvars.iv
%1 = load float* %arrayidx, align 4
- %arrayidx5 = getelementptr inbounds %struct.GlobalData* @global_data, i64 0, i32 3, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds %struct.GlobalData, %struct.GlobalData* @global_data, i64 0, i32 3, i64 %indvars.iv
%2 = load float* %arrayidx5, align 4
%add = fadd float %1, %2
%3 = add nsw i64 %indvars.iv, 16000
- %arrayidx8 = getelementptr inbounds %struct.GlobalData* @global_data, i64 0, i32 0, i64 %3
+ %arrayidx8 = getelementptr inbounds %struct.GlobalData, %struct.GlobalData* @global_data, i64 0, i32 0, i64 %3
store float %add, float* %arrayidx8, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 16000
; CHECK: LV: We can vectorize this loop!
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%red.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds [255 x i32]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [255 x i32], [255 x i32]* @a, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %red.05
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp1 = fcmp ogt float %0, 1.000000e+02
tail call void @llvm.assume(i1 %cmp1)
%add = fadd float %0, 1.000000e+00
- %arrayidx5 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds float, float* %a, i64 %indvars.iv
store float %add, float* %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 1599
; Function Attrs: nounwind uwtable
define void @test2(%struct.data* nocapture readonly %d) #0 {
entry:
- %b = getelementptr inbounds %struct.data* %d, i64 0, i32 1
+ %b = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 1
%0 = load float** %b, align 8
%ptrint = ptrtoint float* %0 to i64
%maskedptr = and i64 %ptrint, 31
%maskcond = icmp eq i64 %maskedptr, 0
- %a = getelementptr inbounds %struct.data* %d, i64 0, i32 0
+ %a = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 0
%1 = load float** %a, align 8
%ptrint2 = ptrtoint float* %1 to i64
%maskedptr3 = and i64 %ptrint2, 31
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
tail call void @llvm.assume(i1 %maskcond)
- %arrayidx = getelementptr inbounds float* %0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %0, i64 %indvars.iv
%2 = load float* %arrayidx, align 4
%add = fadd float %2, 1.000000e+00
tail call void @llvm.assume(i1 %maskcond4)
- %arrayidx5 = getelementptr inbounds float* %1, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds float, float* %1, i64 %indvars.iv
store float %add, float* %arrayidx5, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv, 1599
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
%3 = load float* %2, align 4
%4 = fmul float %3, 3.000000e+00
store float %4, float* %2, align 4
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i64* %a, i64 %indvars.iv
+ %2 = getelementptr inbounds i64, i64* %a, i64 %indvars.iv
%3 = load i64* %2, align 4
%4 = add i64 %3, 3
store i64 %4, i64* %2, align 4
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %n, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @B, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%shl = ashr i32 %0, 3
- %arrayidx2 = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
store i32 %shl, i32* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 3, %0 ]
%2 = trunc i64 %indvars.iv to i8
- %3 = getelementptr inbounds i8* %A, i64 %indvars.iv
+ %3 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv
store i8 %2, i8* %3, align 1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
%add = add nsw i64 %indvars.iv, 3
%tofp = sitofp i64 %add to float
- %gep = getelementptr inbounds float* %B, i64 %indvars.iv
+ %gep = getelementptr inbounds float, float* %B, i64 %indvars.iv
store float %tofp, float* %gep, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = shl nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %0
+ %arrayidx = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %0
%1 = load i32* %arrayidx, align 8
%idxprom1 = sext i32 %1 to i64
- %arrayidx2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %idxprom1
%2 = load i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds [2048 x i32]* @d, i64 0, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @d, i64 0, i64 %indvars.iv
%3 = load i32* %arrayidx4, align 4
%idxprom5 = sext i32 %3 to i64
- %arrayidx6 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %idxprom5
store i32 %2, i32* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds [10000 x float]* @float_array, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [10000 x float], [10000 x float]* @float_array, i64 0, i64 %indvars.iv
%1 = load float* %arrayidx, align 4
%conv = fptoui float %1 to i32
- %arrayidx2 = getelementptr inbounds [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds [10000 x i32], [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds [10000 x double]* @double_array, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [10000 x double], [10000 x double]* @double_array, i64 0, i64 %indvars.iv
%1 = load double* %arrayidx, align 8
%conv = fptoui double %1 to i32
- %arrayidx2 = getelementptr inbounds [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds [10000 x i32], [10000 x i32]* @unsigned_array, i64 0, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%2 = trunc i64 %indvars.iv.next to i32
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%tmp = load float* %arrayidx, align 4
%conv = fptosi float %tmp to i8
- %arrayidx2 = getelementptr inbounds i8* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 %indvars.iv
store i8 %conv, i8* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 256
%b.054 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add30, %for.body ]
%add = add i64 %v.055, %offset
%mul = mul i64 %add, 3
- %arrayidx = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %mul
+ %arrayidx = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %mul
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds [512 x float]* @kernel, i64 0, i64 %v.055
+ %arrayidx2 = getelementptr inbounds [512 x float], [512 x float]* @kernel, i64 0, i64 %v.055
%1 = load float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %1
- %arrayidx4 = getelementptr inbounds [512 x float]* @kernel2, i64 0, i64 %v.055
+ %arrayidx4 = getelementptr inbounds [512 x float], [512 x float]* @kernel2, i64 0, i64 %v.055
%2 = load float* %arrayidx4, align 4
%mul5 = fmul fast float %mul3, %2
- %arrayidx6 = getelementptr inbounds [512 x float]* @kernel3, i64 0, i64 %v.055
+ %arrayidx6 = getelementptr inbounds [512 x float], [512 x float]* @kernel3, i64 0, i64 %v.055
%3 = load float* %arrayidx6, align 4
%mul7 = fmul fast float %mul5, %3
- %arrayidx8 = getelementptr inbounds [512 x float]* @kernel4, i64 0, i64 %v.055
+ %arrayidx8 = getelementptr inbounds [512 x float], [512 x float]* @kernel4, i64 0, i64 %v.055
%4 = load float* %arrayidx8, align 4
%mul9 = fmul fast float %mul7, %4
%add10 = fadd fast float %r.057, %mul9
%arrayidx.sum = add i64 %mul, 1
- %arrayidx11 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
+ %arrayidx11 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum
%5 = load float* %arrayidx11, align 4
%mul13 = fmul fast float %1, %5
%mul15 = fmul fast float %2, %mul13
%mul19 = fmul fast float %4, %mul17
%add20 = fadd fast float %g.056, %mul19
%arrayidx.sum52 = add i64 %mul, 2
- %arrayidx21 = getelementptr inbounds [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
+ %arrayidx21 = getelementptr inbounds [1536 x float], [1536 x float]* @src_data, i64 0, i64 %arrayidx.sum52
%6 = load float* %arrayidx21, align 4
%mul23 = fmul fast float %1, %6
%mul25 = fmul fast float %2, %mul23
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds i16* %sb, i64 %indvars.iv
+ %2 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
%3 = load i16* %2, align 2
%4 = sext i16 %3 to i32
- %5 = getelementptr inbounds i32* %ia, i64 %indvars.iv
+ %5 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %4, i32* %5, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
br i1 %cmp27, label %for.body3.lr.ph.us, label %for.end15
for.end.us: ; preds = %for.body3.us
- %arrayidx9.us = getelementptr inbounds i32* %b, i64 %indvars.iv33
+ %arrayidx9.us = getelementptr inbounds i32, i32* %b, i64 %indvars.iv33
%0 = load i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
%add10.us = add nsw i32 %0, 3
store i32 %add10.us, i32* %arrayidx9.us, align 4, !llvm.mem.parallel_loop_access !3
%1 = trunc i64 %indvars.iv29 to i32
%add4.us = add i32 %add.us, %1
%idxprom.us = sext i32 %add4.us to i64
- %arrayidx.us = getelementptr inbounds i32* %a, i64 %idxprom.us
+ %arrayidx.us = getelementptr inbounds i32, i32* %a, i64 %idxprom.us
%2 = load i32* %arrayidx.us, align 4, !llvm.mem.parallel_loop_access !3
%add5.us = add nsw i32 %2, 1
store i32 %add5.us, i32* %arrayidx7.us, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv33 = phi i64 [ %indvars.iv.next34, %for.end.us ], [ 0, %entry ]
%3 = trunc i64 %indvars.iv33 to i32
%add.us = add i32 %3, %k
- %arrayidx7.us = getelementptr inbounds i32* %a, i64 %indvars.iv33
+ %arrayidx7.us = getelementptr inbounds i32, i32* %a, i64 %indvars.iv33
br label %for.body3.us
for.end15: ; preds = %for.end.us, %entry
%1 = load i32* %i, align 4
%idxprom = sext i32 %1 to i64
%2 = load i32** %trigger.addr, align 8
- %arrayidx = getelementptr inbounds i32* %2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
%4 = load i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
%5 = load i32** %B.addr, align 8
- %arrayidx3 = getelementptr inbounds i32* %5, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds i32, i32* %5, i64 %idxprom2
%6 = load i32* %arrayidx3, align 4
%7 = load i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
%8 = load i32** %trigger.addr, align 8
- %arrayidx5 = getelementptr inbounds i32* %8, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
%9 = load i32* %arrayidx5, align 4
%add = add nsw i32 %6, %9
%10 = load i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
%11 = load i32** %A.addr, align 8
- %arrayidx7 = getelementptr inbounds i32* %11, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds i32, i32* %11, i64 %idxprom6
store i32 %add, i32* %arrayidx7, align 4
br label %if.end
%1 = load i32* %i, align 4
%idxprom = sext i32 %1 to i64
%2 = load i32** %trigger.addr, align 8
- %arrayidx = getelementptr inbounds i32* %2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
%4 = load i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
%5 = load float** %B.addr, align 8
- %arrayidx3 = getelementptr inbounds float* %5, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds float, float* %5, i64 %idxprom2
%6 = load float* %arrayidx3, align 4
%7 = load i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
%8 = load i32** %trigger.addr, align 8
- %arrayidx5 = getelementptr inbounds i32* %8, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
%9 = load i32* %arrayidx5, align 4
%conv = sitofp i32 %9 to float
%add = fadd float %6, %conv
%10 = load i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
%11 = load float** %A.addr, align 8
- %arrayidx7 = getelementptr inbounds float* %11, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds float, float* %11, i64 %idxprom6
store float %add, float* %arrayidx7, align 4
br label %if.end
%1 = load i32* %i, align 4
%idxprom = sext i32 %1 to i64
%2 = load i32** %trigger.addr, align 8
- %arrayidx = getelementptr inbounds i32* %2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
%4 = load i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
%5 = load double** %B.addr, align 8
- %arrayidx3 = getelementptr inbounds double* %5, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds double, double* %5, i64 %idxprom2
%6 = load double* %arrayidx3, align 8
%7 = load i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
%8 = load i32** %trigger.addr, align 8
- %arrayidx5 = getelementptr inbounds i32* %8, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
%9 = load i32* %arrayidx5, align 4
%conv = sitofp i32 %9 to double
%add = fadd double %6, %conv
%10 = load i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
%11 = load double** %A.addr, align 8
- %arrayidx7 = getelementptr inbounds double* %11, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds double, double* %11, i64 %idxprom6
store double %add, double* %arrayidx7, align 8
br label %if.end
%1 = load i32* %i, align 4
%idxprom = sext i32 %1 to i64
%2 = load i32** %trigger.addr, align 8
- %arrayidx = getelementptr inbounds i32* %2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
%mul = mul nsw i32 %4, 2
%idxprom2 = sext i32 %mul to i64
%5 = load double** %B.addr, align 8
- %arrayidx3 = getelementptr inbounds double* %5, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds double, double* %5, i64 %idxprom2
%6 = load double* %arrayidx3, align 8
%7 = load i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
%8 = load i32** %trigger.addr, align 8
- %arrayidx5 = getelementptr inbounds i32* %8, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
%9 = load i32* %arrayidx5, align 4
%conv = sitofp i32 %9 to double
%add = fadd double %6, %conv
%10 = load i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
%11 = load double** %A.addr, align 8
- %arrayidx7 = getelementptr inbounds double* %11, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds double, double* %11, i64 %idxprom6
store double %add, double* %arrayidx7, align 8
br label %if.end
%1 = load i32* %i, align 4
%idxprom = sext i32 %1 to i64
%2 = load i32** %trigger.addr, align 8
- %arrayidx = getelementptr inbounds i32* %2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%cmp1 = icmp slt i32 %3, 100
br i1 %cmp1, label %if.then, label %if.end
%4 = load i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
%5 = load i32** %B.addr, align 8
- %arrayidx3 = getelementptr inbounds i32* %5, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds i32, i32* %5, i64 %idxprom2
%6 = load i32* %arrayidx3, align 4
%7 = load i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
%8 = load i32** %trigger.addr, align 8
- %arrayidx5 = getelementptr inbounds i32* %8, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds i32, i32* %8, i64 %idxprom4
%9 = load i32* %arrayidx5, align 4
%add = add nsw i32 %6, %9
%10 = load i32* %i, align 4
%idxprom6 = sext i32 %10 to i64
%11 = load i32** %A.addr, align 8
- %arrayidx7 = getelementptr inbounds i32* %11, i64 %idxprom6
+ %arrayidx7 = getelementptr inbounds i32, i32* %11, i64 %idxprom6
store i32 sdiv (i32 1, i32 zext (i1 icmp eq (i32** getelementptr inbounds ([1 x i32*]* @a, i64 0, i64 1), i32** @c) to i32)), i32* %arrayidx7, align 4
br label %if.end
%1 = load i32* %i, align 4
%idxprom = sext i32 %1 to i64
%2 = load i32** %trigger.addr, align 8
- %arrayidx = getelementptr inbounds i32* %2, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %2, i64 %idxprom
%3 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %3, 0
br i1 %cmp1, label %if.then, label %if.end
%4 = load i32* %i, align 4
%idxprom2 = sext i32 %4 to i64
%5 = load double** %in.addr, align 8
- %arrayidx3 = getelementptr inbounds double* %5, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds double, double* %5, i64 %idxprom2
%6 = load double* %arrayidx3, align 8
%add = fadd double %6, 5.000000e-01
%7 = load i32* %i, align 4
%idxprom4 = sext i32 %7 to i64
%8 = load double** %out.addr, align 8
- %arrayidx5 = getelementptr inbounds double* %8, i64 %idxprom4
+ %arrayidx5 = getelementptr inbounds double, double* %8, i64 %idxprom4
store double %add, double* %arrayidx5, align 8
br label %if.end
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %N
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %N
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %N
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %add, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%add = fadd float %0, 1.000000e+00
store float %add, float* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%i.06 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%r.05 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i8* %s, i32 %i.06
+ %arrayidx = getelementptr inbounds i8, i8* %s, i32 %i.06
%0 = load i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
%xor = xor i32 %conv, %r.05
for.body: ; preds = %for.body.for.body_crit_edge, %entry
%indvars.iv.reload = load i64* %indvars.iv.reg2mem
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv.reload
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.reload
%0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv.reload
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.reload
%1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%idxprom3 = sext i32 %1 to i64
- %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next = add i64 %indvars.iv.reload, 1
; A new store without the parallel metadata here:
store i64 %indvars.iv.next, i64* %indvars.iv.next.reg2mem
%indvars.iv.next.reload1 = load i64* %indvars.iv.next.reg2mem
- %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next.reload1
+ %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next.reload1
%2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next.reload = load i64* %indvars.iv.next.reg2mem
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%idxprom3 = sext i32 %1 to i64
- %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
store i32 %0, i32* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
- %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%2 = load i32* %arrayidx6, align 4
store i32 %2, i32* %arrayidx2, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%idxprom3 = sext i32 %1 to i64
- %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
; This store might have originated from inlining a function with a parallel
; loop. Refers to a list with the "original loop reference" (!4) also included.
store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !5
%indvars.iv.next = add i64 %indvars.iv, 1
- %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !3
store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4, !llvm.mem.parallel_loop_access !6
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
%idxprom3 = sext i32 %1 to i64
- %arrayidx4 = getelementptr inbounds i32* %a, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
; This refers to the loop marked with !7 which we are not in at the moment.
; It should prevent detecting as a parallel loop.
store i32 %0, i32* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !7
%indvars.iv.next = add i64 %indvars.iv, 1
- %arrayidx6 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%2 = load i32* %arrayidx6, align 4, !llvm.mem.parallel_loop_access !6
store i32 %2, i32* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !6
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 2, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%div = sdiv i32 %0, 2
- %arrayidx2 = getelementptr inbounds %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds %struct.anon, %struct.anon* @Foo, i64 0, i32 0, i64 %indvars.iv
store i32 %div, i32* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 100
bb3: ; preds = %bb3, %bb2
%tmp4 = phi double [ %tmp9, %bb3 ], [ %tmp, %bb2 ]
%tmp5 = phi i32 [ %tmp8, %bb3 ], [ 0, %bb2 ]
- %tmp6 = getelementptr inbounds [16 x double]* undef, i32 0, i32 %tmp5
+ %tmp6 = getelementptr inbounds [16 x double], [16 x double]* undef, i32 0, i32 %tmp5
%tmp7 = load double* %tmp6, align 4
%tmp8 = add nsw i32 %tmp5, 1
%tmp9 = fadd fast double %tmp4, undef
- %tmp10 = getelementptr inbounds float* %arg, i32 %tmp5
+ %tmp10 = getelementptr inbounds float, float* %arg, i32 %tmp5
store float undef, float* %tmp10, align 4
%tmp11 = icmp eq i32 %tmp8, %arg1
br i1 %tmp11, label %bb12, label %bb3
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
.lr.ph5: ; preds = %0, %.lr.ph5
%indvars.iv6 = phi i64 [ %indvars.iv.next7, %.lr.ph5 ], [ 0, %0 ]
- %3 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv6
+ %3 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv6
store i32 %x, i32* %3, align 4
%indvars.iv.next7 = add i64 %indvars.iv6, 1
%lftr.wideiv = trunc i64 %indvars.iv.next7 to i32
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ %i.0.lcssa, %.preheader ]
%.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
%4 = add nsw i32 %.02, -1
- %5 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %5 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%6 = load i32* %5, align 4
- %7 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%8 = load i32* %7, align 4
%9 = and i32 %8, %6
- %10 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %10 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %9, i32* %10, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%11 = icmp eq i32 %4, 0
%.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
%.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
%2 = add nsw i32 %.05, -1
- %3 = getelementptr inbounds i32* %.023, i64 1
+ %3 = getelementptr inbounds i32, i32* %.023, i64 1
%4 = load i32* %.023, align 16
- %5 = getelementptr inbounds i32* %.014, i64 1
+ %5 = getelementptr inbounds i32, i32* %.014, i64 1
store i32 %4, i32* %.014, align 16
%6 = icmp eq i32 %2, 0
br i1 %6, label %._crit_edge, label %.lr.ph
%.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
%.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
%2 = add nsw i32 %.05, -1
- %3 = getelementptr inbounds i32* %.023, i64 1
+ %3 = getelementptr inbounds i32, i32* %.023, i64 1
%4 = load i32* %.023, align 16
- %5 = getelementptr inbounds i32* %.014, i64 1
+ %5 = getelementptr inbounds i32, i32* %.014, i64 1
store i32 %4, i32* %.014, align 16
%6 = icmp eq i32 %2, 0
br i1 %6, label %._crit_edge, label %.lr.ph
%.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
%.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
%i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
- %2 = getelementptr inbounds i16* %.04, i64 1
+ %2 = getelementptr inbounds i16, i16* %.04, i64 1
%3 = load i16* %.04, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw nsw i32 %4, 7
- %6 = getelementptr inbounds i32* %.013, i64 1
+ %6 = getelementptr inbounds i32, i32* %.013, i64 1
store i32 %5, i32* %.013, align 4
%7 = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %7, 256
%.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
%.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
%i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
- %2 = getelementptr inbounds i16* %.04, i64 1
+ %2 = getelementptr inbounds i16, i16* %.04, i64 1
%3 = load i16* %.04, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw nsw i32 %4, 7
- %6 = getelementptr inbounds i32* %.013, i64 1
+ %6 = getelementptr inbounds i32, i32* %.013, i64 1
store i32 %5, i32* %.013, align 4
%7 = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %7, 256
loop:
%indvars.iv = phi i64 [ %indvars.iv.next, %loop ], [ 0, %entry ]
- %tmp = getelementptr inbounds [16 x { i64, i64 }]* @glbl, i64 0, i64 %indvars.iv
+ %tmp = getelementptr inbounds [16 x { i64, i64 }], [16 x { i64, i64 }]* @glbl, i64 0, i64 %indvars.iv
store { i64, i64 } { i64 ptrtoint (void ()* @fn to i64), i64 0 }, { i64, i64 }* %tmp, align 16
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%i.07 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds [0 x i32]* @big, i32 0, i32 %i.07
+ %arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @big, i32 0, i32 %i.07
%0 = load i32* %arrayidx, align 4
%neg = xor i32 %0, -1
store i32 %neg, i32* %arrayidx, align 4
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i64* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i64, i64* %a, i64 %indvars.iv
%tmp = load i64* %arrayidx, align 4
%conv = uitofp i64 %tmp to double
- %arrayidx2 = getelementptr inbounds double* %b, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %b, i64 %indvars.iv
store double %conv, double* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 256
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = add nsw i32 %3, 6
store i32 %4, i32* %2, align 4
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = add nsw i32 %3, 6
store i32 %4, i32* %2, align 4
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = add nsw i32 %3, 6
store i32 %4, i32* %2, align 4
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%mul = fmul float %0, %N
- %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
store float %mul, float* %arrayidx2, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 256
; <label>:2 ; preds = %2, %0
%indvars.iv = phi i64 [ %indvars.iv.next, %2 ], [ %1, %0 ]
- %3 = getelementptr inbounds double* %A, i64 %indvars.iv
+ %3 = getelementptr inbounds double, double* %A, i64 %indvars.iv
%4 = load double* %3, align 8
%5 = fadd double %4, 3.000000e+00
%6 = fmul double %4, 2.000000e+00
.lr.ph: ; preds = %0, %.lr.ph
%i.01 = phi i64 [ %5, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i16* %A, i64 %i.01
+ %2 = getelementptr inbounds i16, i16* %A, i64 %i.01
%3 = load i16* %2, align 2
%4 = xor i16 %3, 3
store i16 %4, i16* %2, align 2
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
%call = tail call float @llvm.sin.f32(float %0)
- %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
%call = tail call float @llvm.sin.f32(float %0)
- %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
- %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
%add = fadd fast float %0, %1
store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
- %arrayidx2 = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
%add = fadd fast float %0, %1
store float %add, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
; <label>:1
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
; A scalar select has a cost of 1 on core2
; CHECK: cost of 1 for VF 2 {{.*}} select i1 %cond, i32 %6, i32 0
; <label>:1
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%8 = icmp ult i64 %indvars.iv, 8
; A vector select has a cost of 1 on core2
; <label>:7 ; preds = %7, %6
%8 = phi %0** [ %0, %6 ], [ %9, %7 ]
store %0* %4, %0** %8, align 8
- %9 = getelementptr inbounds %0** %8, i64 1
+ %9 = getelementptr inbounds %0*, %0** %8, i64 1
%10 = icmp eq %0** %9, %1
br i1 %10, label %11, label %7
; <label>:3 ; preds = %3, %1
%4 = phi i64 [ 0, %1 ], [ %11, %3 ]
- %5 = getelementptr inbounds [2048 x i16]* @q, i64 0, i64 %4
+ %5 = getelementptr inbounds [2048 x i16], [2048 x i16]* @q, i64 0, i64 %4
%6 = load i16* %5, align 2
%7 = sext i16 %6 to i64
%8 = add i64 %7, 1
%9 = inttoptr i64 %8 to i32*
- %10 = getelementptr inbounds [2048 x [8 x i32*]]* @p, i64 0, i64 %4, i64 %2
+ %10 = getelementptr inbounds [2048 x [8 x i32*]], [2048 x [8 x i32*]]* @p, i64 0, i64 %4, i64 %2
store i32* %9, i32** %10, align 8
%11 = add i64 %4, 1
%12 = trunc i64 %11 to i32
; <label>:1 ; preds = %1, %0
%2 = phi i64 [ 0, %0 ], [ %10, %1 ]
%3 = phi i8 [ 0, %0 ], [ %9, %1 ]
- %4 = getelementptr inbounds [1024 x i32*]* @ia, i32 0, i64 %2
+ %4 = getelementptr inbounds [1024 x i32*], [1024 x i32*]* @ia, i32 0, i64 %2
%5 = load i32** %4, align 4
%6 = ptrtoint i32* %5 to i64
%7 = trunc i64 %6 to i8
; <label>:3 ; preds = %3, %1
%4 = phi i64 [ 0, %1 ], [ %10, %3 ]
- %5 = getelementptr inbounds [2048 x [8 x i32*]]* @p2, i64 0, i64 %4, i64 %2
- %6 = getelementptr inbounds [2048 x i16]* @q2, i64 0, i64 %4
+ %5 = getelementptr inbounds [2048 x [8 x i32*]], [2048 x [8 x i32*]]* @p2, i64 0, i64 %4, i64 %2
+ %6 = getelementptr inbounds [2048 x i16], [2048 x i16]* @q2, i64 0, i64 %4
%7 = load i32** %5, align 2
%8 = ptrtoint i32* %7 to i64
%9 = trunc i64 %8 to i16
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !16
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !16
%0 = trunc i64 %indvars.iv to i32, !dbg !16
store i32 %0, i32* %arrayidx, align 4, !dbg !16, !tbaa !18
%cmp3 = icmp sle i32 %0, %Length, !dbg !22
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !30
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !30
%0 = trunc i64 %indvars.iv to i32, !dbg !30
store i32 %0, i32* %arrayidx, align 4, !dbg !30, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !25
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv, !dbg !35
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv, !dbg !35
%0 = load i32* %arrayidx, align 4, !dbg !35, !tbaa !18
%idxprom1 = sext i32 %0 to i64, !dbg !35
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1, !dbg !35
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !35
%1 = load i32* %arrayidx2, align 4, !dbg !35, !tbaa !18
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !35
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !35
store i32 %1, i32* %arrayidx4, align 4, !dbg !35, !tbaa !18
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !32
%lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !32
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%add8 = phi i32 [ 0, %entry ], [ %add, %for.body ], !dbg !19
- %arrayidx = getelementptr inbounds [16 x i8]* %cb, i64 0, i64 %indvars.iv, !dbg !19
+ %arrayidx = getelementptr inbounds [16 x i8], [16 x i8]* %cb, i64 0, i64 %indvars.iv, !dbg !19
%0 = load i8* %arrayidx, align 1, !dbg !19, !tbaa !21
%conv = sext i8 %0 to i32, !dbg !19
- %arrayidx2 = getelementptr inbounds [16 x i8]* %cc, i64 0, i64 %indvars.iv, !dbg !19
+ %arrayidx2 = getelementptr inbounds [16 x i8], [16 x i8]* %cc, i64 0, i64 %indvars.iv, !dbg !19
%1 = load i8* %arrayidx2, align 1, !dbg !19, !tbaa !21
%conv3 = sext i8 %1 to i32, !dbg !19
%sub = sub i32 %conv, %conv3, !dbg !19
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%conv = sitofp i32 1 to x86_fp80
- %arrayidx = getelementptr inbounds [1024 x x86_fp80]* @x, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x x86_fp80], [1024 x x86_fp80]* @x, i64 0, i64 %indvars.iv
store x86_fp80 %conv, x86_fp80* %arrayidx, align 16
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
do.body:
%ptr.addr.0 = phi i8* [ %ptr, %entry ], [ %incdec.ptr, %do.body ]
%len.addr.0 = phi i32 [ %len, %entry ], [ %dec, %do.body ]
- %incdec.ptr = getelementptr inbounds i8* %ptr.addr.0, i32 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %ptr.addr.0, i32 1
store i8 0, i8* %ptr.addr.0, align 1
%dec = add nsw i32 %len.addr.0, -1
%tobool = icmp eq i32 %len.addr.0, 0
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%3 = load i32* %2
- %4 = getelementptr inbounds i32* %c, i64 %indvars.iv
+ %4 = getelementptr inbounds i32, i32* %c, i64 %indvars.iv
%5 = load i32* %4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %7 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %6, i32* %7
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%0 = shl nsw i64 %indvars.iv, 2
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
store i32 4, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
do.body: ; preds = %cond.end, %entry
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %cond.end ]
%p.addr.0 = phi i16* [ %p, %entry ], [ %incdec.ptr, %cond.end ]
- %incdec.ptr = getelementptr inbounds i16* %p.addr.0, i64 -1
+ %incdec.ptr = getelementptr inbounds i16, i16* %p.addr.0, i64 -1
%0 = load i16* %incdec.ptr, align 2
%conv = zext i16 %0 to i32
%cmp = icmp ult i32 %conv, %size
do.body: ; preds = %do.body, %entry
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.body ]
%p.0 = phi i32* [ %a, %entry ], [ %incdec.ptr, %do.body ]
- %incdec.ptr = getelementptr inbounds i32* %p.0, i64 -1
+ %incdec.ptr = getelementptr inbounds i32, i32* %p.0, i64 -1
%0 = load i32* %incdec.ptr, align 4
%cmp = icmp slt i32 %0, %wsize
%sub = sub nsw i32 %0, %wsize
for.body: ; preds = %for.body, %for.body.lr.ph
%i.030 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%shr = lshr i64 %i.030, 1
- %arrayidx = getelementptr inbounds i8* %bytes, i64 %shr
+ %arrayidx = getelementptr inbounds i8, i8* %bytes, i64 %shr
%1 = load i8* %arrayidx, align 1
%conv = zext i8 %1 to i32
%and = shl i64 %i.030, 2
%cond = select i1 %cmp15, i32 87, i32 48
%add17 = add nsw i32 %cond, %shr11
%conv18 = trunc i32 %add17 to i8
- %arrayidx19 = getelementptr inbounds i8* %call, i64 %i.030
+ %arrayidx19 = getelementptr inbounds i8, i8* %call, i64 %i.030
store i8 %conv18, i8* %arrayidx19, align 1
%inc = add i64 %i.030, 1
%exitcond = icmp eq i64 %inc, %0
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = trunc i64 %indvars.iv to i32
store i32 %3, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
- %arrayidx = getelementptr inbounds i32* %indices, i64 %indvars.iv, !dbg !12
+ %arrayidx = getelementptr inbounds i32, i32* %indices, i64 %indvars.iv, !dbg !12
%0 = load i32* %arrayidx, align 4, !dbg !12, !tbaa !14
%cmp1 = icmp eq i32 %0, 1024, !dbg !12
br i1 %cmp1, label %if.then, label %for.inc, !dbg !12
for.body: ; preds = %for.body.preheader, %if.else
%indvars.iv = phi i64 [ %indvars.iv.next, %if.else ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !12
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !12
%0 = load i32* %arrayidx, align 4, !dbg !12, !tbaa !15
%cmp1 = icmp sgt i32 %0, 10, !dbg !12
br i1 %cmp1, label %end.loopexit, label %if.else, !dbg !12
for.body: ; preds = %entry, %for.body
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.01 to i64
- %arrayidx = getelementptr inbounds float* %0, i64 %idxprom
+ %arrayidx = getelementptr inbounds float, float* %0, i64 %idxprom
%3 = load float* %arrayidx, align 4
%idxprom5 = sext i32 %i.01 to i64
- %arrayidx6 = getelementptr inbounds float* %1, i64 %idxprom5
+ %arrayidx6 = getelementptr inbounds float, float* %1, i64 %idxprom5
%4 = load float* %arrayidx6, align 4
%add = fadd float %3, %4
%idxprom7 = sext i32 %i.01 to i64
- %arrayidx8 = getelementptr inbounds float* %2, i64 %idxprom7
+ %arrayidx8 = getelementptr inbounds float, float* %2, i64 %idxprom7
store float %add, float* %arrayidx8, align 4
%inc = add nsw i32 %i.01, 1
%cmp = icmp slt i32 %inc, 1000
for.body:
;CHECK: load <4 x i32>
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @B, i64 0, i64 %indvars.iv, !dbg !19
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @B, i64 0, i64 %indvars.iv, !dbg !19
%0 = load i32* %arrayidx, align 4, !dbg !19
- %arrayidx2 = getelementptr inbounds [1024 x i32]* @C, i64 0, i64 %indvars.iv, !dbg !19
+ %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @C, i64 0, i64 %indvars.iv, !dbg !19
%1 = load i32* %arrayidx2, align 4, !dbg !19
%add = add nsw i32 %1, %0, !dbg !19
- %arrayidx4 = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv, !dbg !19
+ %arrayidx4 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv, !dbg !19
store i32 %add, i32* %arrayidx4, align 4, !dbg !19
%indvars.iv.next = add i64 %indvars.iv, 1, !dbg !18
tail call void @llvm.dbg.value(metadata !{null}, i64 0, metadata !9, metadata !{}), !dbg !18
; CHECK: cmp.zero = icmp eq i64 {{.*}}, 0, !dbg ![[LOC:[0-9]+]]
; CHECK: vector.body
; CHECK: index {{.*}}, !dbg ![[LOC]]
-; CHECK: getelementptr inbounds i32* %a, {{.*}}, !dbg ![[LOC2:[0-9]+]]
+; CHECK: getelementptr inbounds i32, i32* %a, {{.*}}, !dbg ![[LOC2:[0-9]+]]
; CHECK: load <2 x i32>* {{.*}}, !dbg ![[LOC2]]
; CHECK: add <2 x i32> {{.*}}, !dbg ![[LOC2]]
; CHECK: add i64 %index, 2, !dbg ![[LOC]]
for.body: ; preds = %for.body.lr.ph, %for.body
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%sum.05 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv, !dbg !22
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv, !dbg !22
%0 = load i32* %arrayidx, align 4, !dbg !22
%add = add i32 %0, %sum.05, !dbg !22
tail call void @llvm.dbg.value(metadata i32 %add.lcssa, i64 0, metadata !15, metadata !{}), !dbg !22
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
%p = load float* %arrayidx, align 4
%mul = fmul float %p, 2.000000e+00
store float %mul, float* %arrayidx, align 4
define i32 @_Z4foo1Pii(i32* %A, i32 %n, <2 x i32> %q) #0 {
entry:
%idx.ext = sext i32 %n to i64
- %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 %idx.ext
%cmp3.i = icmp eq i32 %n, 0
br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
%q1 = extractelement <2 x i32> %q, i32 %n
%q2 = add nsw i32 %0, %q1
%add.i = add nsw i32 %q2, %__init.addr.05.i
- %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+ %incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %x, i64 %iv
+ %arrayidx = getelementptr inbounds i32, i32* %x, i64 %iv
%0 = load i32* %arrayidx, align 4
%conv1 = lshr exact i32 %0, 1
store i32 %conv1, i32* %arrayidx, align 4
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = mul nsw i32 %3, 3
store i32 %4, i32* %2, align 4
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = mul i32 %3, 3
store i32 %4, i32* %2, align 4
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%q.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds float* %s, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %s, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%add = fadd fast float %q.04, %0
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.04 = phi float [ 0.000000e+00, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%add = fadd fast float %sum.04, %0
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.04 = phi float [ 0.000000e+00, %entry ], [ %sub, %for.body ]
- %arrayidx = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%sub = fsub fast float %sum.04, %0
%indvars.iv.next = add i64 %indvars.iv, 1
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double* %d, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %d, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%1 = tail call double @llvm.pow.f64(double %0, double %t)
store double %1, double* %arrayidx, align 8
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
.lr.ph5: ; preds = %0, %.lr.ph5
%indvars.iv6 = phi i64 [ %indvars.iv.next7, %.lr.ph5 ], [ 0, %0 ]
- %3 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv6
+ %3 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv6
store i32 %x, i32* %3, align 4
%indvars.iv.next7 = add i64 %indvars.iv6, 1
%lftr.wideiv = trunc i64 %indvars.iv.next7 to i32
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ %i.0.lcssa, %.preheader ]
%.02 = phi i32 [ %4, %.lr.ph ], [ %n, %.preheader ]
%4 = add nsw i32 %.02, -1
- %5 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %5 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%6 = load i32* %5, align 4
- %7 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%8 = load i32* %7, align 4
%9 = and i32 %8, %6
- %10 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %10 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %9, i32* %10, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%11 = icmp eq i32 %4, 0
%.014 = phi i32* [ %5, %.lr.ph ], [ %p, %0 ]
%.023 = phi i32* [ %3, %.lr.ph ], [ %q, %0 ]
%2 = add nsw i32 %.05, -1
- %3 = getelementptr inbounds i32* %.023, i64 1
+ %3 = getelementptr inbounds i32, i32* %.023, i64 1
%4 = load i32* %.023, align 16
- %5 = getelementptr inbounds i32* %.014, i64 1
+ %5 = getelementptr inbounds i32, i32* %.014, i64 1
store i32 %4, i32* %.014, align 16
%6 = icmp eq i32 %2, 0
br i1 %6, label %._crit_edge, label %.lr.ph
%4 = phi i32 [ %9, %.lr.ph10 ], [ %1, %0 ]
%.018 = phi i32* [ %8, %.lr.ph10 ], [ %p, %0 ]
%.027 = phi i32* [ %5, %.lr.ph10 ], [ %q, %0 ]
- %5 = getelementptr inbounds i32* %.027, i64 1
+ %5 = getelementptr inbounds i32, i32* %.027, i64 1
%6 = load i32* %.027, align 16
%7 = add nsw i32 %6, 5
- %8 = getelementptr inbounds i32* %.018, i64 1
+ %8 = getelementptr inbounds i32, i32* %.018, i64 1
store i32 %7, i32* %.018, align 16
%9 = add nsw i32 %4, -1
%10 = icmp eq i32 %4, 0
.lr.ph6: ; preds = %.preheader4, %.lr.ph6
%indvars.iv11 = phi i64 [ %indvars.iv.next12, %.lr.ph6 ], [ 0, %.preheader4 ]
%indvars.iv.next12 = add i64 %indvars.iv11, 1
- %11 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv.next12
+ %11 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv.next12
%12 = load i32* %11, align 4
%13 = add nsw i64 %indvars.iv11, 3
- %14 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %13
+ %14 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %13
%15 = load i32* %14, align 4
%16 = add nsw i32 %15, %12
- %17 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv11
+ %17 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv11
store i32 %16, i32* %17, align 4
%lftr.wideiv13 = trunc i64 %indvars.iv.next12 to i32
%exitcond14 = icmp eq i32 %lftr.wideiv13, %1
.lr.ph: ; preds = %.preheader, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %.preheader ]
- %18 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %18 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%19 = load i32* %18, align 4
%20 = icmp sgt i32 %19, 4
%21 = select i1 %20, i32 4, i32 0
- %22 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %22 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
store i32 %21, i32* %22, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %.preheader
%indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [32 x [1024 x i32]]* @G, i64 0, i64 %indvars.iv3, i64 %indvars.iv
+ %2 = getelementptr inbounds [32 x [1024 x i32]], [32 x [1024 x i32]]* @G, i64 0, i64 %indvars.iv3, i64 %indvars.iv
store i32 %x, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%diff.01 = phi i32 [ 0, %0 ], [ %7, %1 ]
- %2 = getelementptr inbounds [1024 x i32]* @ub, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @ub, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [1024 x i32]* @uc, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [1024 x i32], [1024 x i32]* @uc, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add i32 %3, %diff.01
%7 = sub i32 %6, %5
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds i32* %ib, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %ib, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds i32* %ic, i64 %indvars.iv
+ %4 = getelementptr inbounds i32, i32* %ic, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds i32* %ia, i64 %indvars.iv
+ %7 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %6, i32* %7, align 4
- %8 = getelementptr inbounds i16* %sb, i64 %indvars.iv
+ %8 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
%9 = load i16* %8, align 2
- %10 = getelementptr inbounds i16* %sc, i64 %indvars.iv
+ %10 = getelementptr inbounds i16, i16* %sc, i64 %indvars.iv
%11 = load i16* %10, align 2
%12 = add i16 %11, %9
- %13 = getelementptr inbounds i16* %sa, i64 %indvars.iv
+ %13 = getelementptr inbounds i16, i16* %sa, i64 %indvars.iv
store i16 %12, i16* %13, align 2
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds i16* %sb, i64 %indvars.iv
+ %2 = getelementptr inbounds i16, i16* %sb, i64 %indvars.iv
%3 = load i16* %2, align 2
%4 = sext i16 %3 to i32
- %5 = getelementptr inbounds i32* %ia, i64 %indvars.iv
+ %5 = getelementptr inbounds i32, i32* %ia, i64 %indvars.iv
store i32 %4, i32* %5, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
%2 = shl nsw i64 %indvars.iv, 1
%3 = or i64 %2, 1
- %4 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %3
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %3
%5 = load i32* %4, align 4
- %6 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %3
+ %6 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %3
%7 = load i32* %6, align 4
%8 = mul nsw i32 %7, %5
- %9 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %2
+ %9 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %2
%10 = load i32* %9, align 8
- %11 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %2
+ %11 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %2
%12 = load i32* %11, align 8
%13 = mul nsw i32 %12, %10
%14 = sub nsw i32 %8, %13
- %15 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %15 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %14, i32* %15, align 4
%16 = mul nsw i32 %7, %10
%17 = mul nsw i32 %12, %5
%18 = add nsw i32 %17, %16
- %19 = getelementptr inbounds [2048 x i32]* @d, i64 0, i64 %indvars.iv
+ %19 = getelementptr inbounds [2048 x i32], [2048 x i32]* @d, i64 0, i64 %indvars.iv
store i32 %18, i32* %19, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = trunc i64 %indvars.iv to i32
store i32 %3, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
.preheader: ; preds = %14, %0
%indvars.iv4 = phi i64 [ 0, %0 ], [ %indvars.iv.next5, %14 ]
- %1 = getelementptr inbounds i32** %A, i64 %indvars.iv4
+ %1 = getelementptr inbounds i32*, i32** %A, i64 %indvars.iv4
%2 = load i32** %1, align 8
- %3 = getelementptr inbounds i32** %B, i64 %indvars.iv4
+ %3 = getelementptr inbounds i32*, i32** %B, i64 %indvars.iv4
%4 = load i32** %3, align 8
br label %5
; <label>:5 ; preds = %.preheader, %5
%indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %5 ]
%diff.02 = phi i32 [ 0, %.preheader ], [ %11, %5 ]
- %6 = getelementptr inbounds i32* %2, i64 %indvars.iv
+ %6 = getelementptr inbounds i32, i32* %2, i64 %indvars.iv
%7 = load i32* %6, align 4
- %8 = getelementptr inbounds i32* %4, i64 %indvars.iv
+ %8 = getelementptr inbounds i32, i32* %4, i64 %indvars.iv
%9 = load i32* %8, align 4
%10 = add i32 %7, %diff.02
%11 = sub i32 %10, %9
br i1 %13, label %5, label %14
; <label>:14 ; preds = %5
- %15 = getelementptr inbounds i32* %out, i64 %indvars.iv4
+ %15 = getelementptr inbounds i32, i32* %out, i64 %indvars.iv4
store i32 %11, i32* %15, align 4
%indvars.iv.next5 = add i64 %indvars.iv4, 1
%lftr.wideiv = trunc i64 %indvars.iv.next5 to i32
; <label>:0 ; preds = %0, %.preheader
%indvars.iv = phi i64 [ 0, %.preheader ], [ %indvars.iv.next, %0 ]
%sum.12 = phi i32 [ %sum.05, %.preheader ], [ %10, %0 ]
- %1 = getelementptr inbounds i32** %in, i64 %indvars.iv
+ %1 = getelementptr inbounds i32*, i32** %in, i64 %indvars.iv
%2 = load i32** %1, align 8
- %3 = getelementptr inbounds i32* %2, i64 %indvars.iv7
+ %3 = getelementptr inbounds i32, i32* %2, i64 %indvars.iv7
%4 = load i32* %3, align 4
- %5 = getelementptr inbounds i32** %coeff, i64 %indvars.iv
+ %5 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv
%6 = load i32** %5, align 8
- %7 = getelementptr inbounds i32* %6, i64 %indvars.iv7
+ %7 = getelementptr inbounds i32, i32* %6, i64 %indvars.iv7
%8 = load i32* %7, align 4
%9 = mul nsw i32 %8, %4
%10 = add nsw i32 %9, %sum.12
%indvars.iv.1 = phi i64 [ 0, %.preheader.1 ], [ %13, %12 ]
%sum.12.1 = phi i32 [ %sum.05.1, %.preheader.1 ], [ %23, %12 ]
%13 = add nsw i64 %indvars.iv.1, 1
- %14 = getelementptr inbounds i32** %in, i64 %13
+ %14 = getelementptr inbounds i32*, i32** %in, i64 %13
%15 = load i32** %14, align 8
- %16 = getelementptr inbounds i32* %15, i64 %indvars.iv7.1
+ %16 = getelementptr inbounds i32, i32* %15, i64 %indvars.iv7.1
%17 = load i32* %16, align 4
- %18 = getelementptr inbounds i32** %coeff, i64 %indvars.iv.1
+ %18 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv.1
%19 = load i32** %18, align 8
- %20 = getelementptr inbounds i32* %19, i64 %indvars.iv7.1
+ %20 = getelementptr inbounds i32, i32* %19, i64 %indvars.iv7.1
%21 = load i32* %20, align 4
%22 = mul nsw i32 %21, %17
%23 = add nsw i32 %22, %sum.12.1
br i1 %exitcond10.1, label %.preheader3.2, label %.preheader.1
.preheader3.2: ; preds = %24
- %25 = getelementptr inbounds i32* %out, i64 1
+ %25 = getelementptr inbounds i32, i32* %out, i64 1
store i32 %23, i32* %25, align 4
br label %.preheader.2
%indvars.iv.2 = phi i64 [ 0, %.preheader.2 ], [ %indvars.iv.next.2, %26 ]
%sum.12.2 = phi i32 [ %sum.05.2, %.preheader.2 ], [ %37, %26 ]
%27 = add nsw i64 %indvars.iv.2, 2
- %28 = getelementptr inbounds i32** %in, i64 %27
+ %28 = getelementptr inbounds i32*, i32** %in, i64 %27
%29 = load i32** %28, align 8
- %30 = getelementptr inbounds i32* %29, i64 %indvars.iv7.2
+ %30 = getelementptr inbounds i32, i32* %29, i64 %indvars.iv7.2
%31 = load i32* %30, align 4
- %32 = getelementptr inbounds i32** %coeff, i64 %indvars.iv.2
+ %32 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv.2
%33 = load i32** %32, align 8
- %34 = getelementptr inbounds i32* %33, i64 %indvars.iv7.2
+ %34 = getelementptr inbounds i32, i32* %33, i64 %indvars.iv7.2
%35 = load i32* %34, align 4
%36 = mul nsw i32 %35, %31
%37 = add nsw i32 %36, %sum.12.2
br i1 %exitcond10.2, label %.preheader3.3, label %.preheader.2
.preheader3.3: ; preds = %38
- %39 = getelementptr inbounds i32* %out, i64 2
+ %39 = getelementptr inbounds i32, i32* %out, i64 2
store i32 %37, i32* %39, align 4
br label %.preheader.3
%indvars.iv.3 = phi i64 [ 0, %.preheader.3 ], [ %indvars.iv.next.3, %40 ]
%sum.12.3 = phi i32 [ %sum.05.3, %.preheader.3 ], [ %51, %40 ]
%41 = add nsw i64 %indvars.iv.3, 3
- %42 = getelementptr inbounds i32** %in, i64 %41
+ %42 = getelementptr inbounds i32*, i32** %in, i64 %41
%43 = load i32** %42, align 8
- %44 = getelementptr inbounds i32* %43, i64 %indvars.iv7.3
+ %44 = getelementptr inbounds i32, i32* %43, i64 %indvars.iv7.3
%45 = load i32* %44, align 4
- %46 = getelementptr inbounds i32** %coeff, i64 %indvars.iv.3
+ %46 = getelementptr inbounds i32*, i32** %coeff, i64 %indvars.iv.3
%47 = load i32** %46, align 8
- %48 = getelementptr inbounds i32* %47, i64 %indvars.iv7.3
+ %48 = getelementptr inbounds i32, i32* %47, i64 %indvars.iv7.3
%49 = load i32* %48, align 4
%50 = mul nsw i32 %49, %45
%51 = add nsw i32 %50, %sum.12.3
br i1 %exitcond10.3, label %53, label %.preheader.3
; <label>:53 ; preds = %52
- %54 = getelementptr inbounds i32* %out, i64 3
+ %54 = getelementptr inbounds i32, i32* %out, i64 3
store i32 %51, i32* %54, align 4
ret void
}
%indvars.iv = phi i64 [ %2, %.lr.ph ], [ %indvars.iv.next, %3 ]
%a.02 = phi i32 [ 0, %.lr.ph ], [ %6, %3 ]
%indvars.iv.next = add i64 %indvars.iv, -1
- %4 = getelementptr inbounds i32* %b, i64 %indvars.iv.next
+ %4 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %a.02
%7 = trunc i64 %indvars.iv.next to i32
%.04 = phi i16* [ %src, %0 ], [ %2, %1 ]
%.013 = phi i32* [ %dst, %0 ], [ %6, %1 ]
%i.02 = phi i32 [ 0, %0 ], [ %7, %1 ]
- %2 = getelementptr inbounds i16* %.04, i64 1
+ %2 = getelementptr inbounds i16, i16* %.04, i64 1
%3 = load i16* %.04, align 2
%4 = zext i16 %3 to i32
%5 = shl nuw nsw i32 %4, 7
- %6 = getelementptr inbounds i32* %.013, i64 1
+ %6 = getelementptr inbounds i32, i32* %.013, i64 1
store i32 %5, i32* %.013, align 4
%7 = add nsw i32 %i.02, 1
%exitcond = icmp eq i32 %7, 256
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [1024 x float]* @fa, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [1024 x float], [1024 x float]* @fa, i64 0, i64 %indvars.iv
%3 = load float* %2, align 4
- %4 = getelementptr inbounds [1024 x float]* @fb, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [1024 x float], [1024 x float]* @fb, i64 0, i64 %indvars.iv
%5 = load float* %4, align 4
%6 = fcmp olt float %3, %5
%x.y = select i1 %6, i16 %x, i16 %y
%7 = sext i16 %x.y to i32
- %8 = getelementptr inbounds [1024 x i32]* @ic, i64 0, i64 %indvars.iv
+ %8 = getelementptr inbounds [1024 x i32], [1024 x i32]* @ic, i64 0, i64 %indvars.iv
store i32 %7, i32* %8, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [1024 x float]* @da, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [1024 x float], [1024 x float]* @da, i64 0, i64 %indvars.iv
%3 = load float* %2, align 4
- %4 = getelementptr inbounds [1024 x float]* @db, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [1024 x float], [1024 x float]* @db, i64 0, i64 %indvars.iv
%5 = load float* %4, align 4
%6 = fcmp olt float %3, %5
- %7 = getelementptr inbounds [1024 x float]* @dc, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [1024 x float], [1024 x float]* @dc, i64 0, i64 %indvars.iv
%8 = load float* %7, align 4
- %9 = getelementptr inbounds [1024 x float]* @dd, i64 0, i64 %indvars.iv
+ %9 = getelementptr inbounds [1024 x float], [1024 x float]* @dd, i64 0, i64 %indvars.iv
%10 = load float* %9, align 4
%11 = fcmp olt float %8, %10
%12 = and i1 %6, %11
%13 = zext i1 %12 to i32
- %14 = getelementptr inbounds [1024 x i32]* @dj, i64 0, i64 %indvars.iv
+ %14 = getelementptr inbounds [1024 x i32], [1024 x i32]* @dj, i64 0, i64 %indvars.iv
store i32 %13, i32* %14, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.cond
%1 = load i32* %i, align 4
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
- %arrayidx1 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx1, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx2, align 4
ret i32 %7
}
for.body: ; preds = %for.cond
%1 = load i32* %i, align 4
%add = add nsw i32 %1, 10
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %add
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %add
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add1 = add nsw i32 %2, %3
%4 = load i32* %i, align 4
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add1, i32* %arrayidx2, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx3, align 4
ret i32 %7
}
for.body: ; preds = %for.cond
%1 = load i32* %i, align 4
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%add1 = add nsw i32 %4, 10
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add1
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add1
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx3, align 4
ret i32 %7
}
for.body: ; preds = %for.cond
%1 = load i32** @PB, align 4
%2 = load i32* %i, align 4
- %add.ptr = getelementptr inbounds i32* %1, i32 %2
+ %add.ptr = getelementptr inbounds i32, i32* %1, i32 %2
%3 = load i32* %add.ptr, align 4
%4 = load i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32** @PA, align 4
%6 = load i32* %i, align 4
- %add.ptr1 = getelementptr inbounds i32* %5, i32 %6
+ %add.ptr1 = getelementptr inbounds i32, i32* %5, i32 %6
store i32 %add, i32* %add.ptr1, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32** @PA, align 4
%9 = load i32* %a.addr, align 4
- %add.ptr2 = getelementptr inbounds i32* %8, i32 %9
+ %add.ptr2 = getelementptr inbounds i32, i32* %8, i32 %9
%10 = load i32* %add.ptr2, align 4
ret i32 %10
}
for.body: ; preds = %for.cond
%1 = load i32* %i, align 4
%2 = load i32* %N, align 4
- %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
- %arrayidx1 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %1
%3 = load i32* %arrayidx1, align 4
%4 = load i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32* %i, align 4
%6 = load i32* %N, align 4
- %arrayidx2 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
- %arrayidx3 = getelementptr inbounds [100 x i32]* %arrayidx2, i32 0, i32 %5
+ %arrayidx2 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx2, i32 0, i32 %5
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32* %a.addr, align 4
%9 = load i32* %N, align 4
- %arrayidx4 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
- %arrayidx5 = getelementptr inbounds [100 x i32]* %arrayidx4, i32 0, i32 %8
+ %arrayidx4 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx4, i32 0, i32 %8
%10 = load i32* %arrayidx5, align 4
ret i32 %10
}
%1 = load i32* %i, align 4
%2 = load i32* %N, align 4
%add = add nsw i32 %2, 1
- %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
- %arrayidx1 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %1
%3 = load i32* %arrayidx1, align 4
%4 = load i32* %a.addr, align 4
%add2 = add nsw i32 %3, %4
%5 = load i32* %i, align 4
%6 = load i32* %N, align 4
- %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
- %arrayidx4 = getelementptr inbounds [100 x i32]* %arrayidx3, i32 0, i32 %5
+ %arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx3, i32 0, i32 %5
store i32 %add2, i32* %arrayidx4, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32* %a.addr, align 4
%9 = load i32* %N, align 4
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
- %arrayidx6 = getelementptr inbounds [100 x i32]* %arrayidx5, i32 0, i32 %8
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx5, i32 0, i32 %8
%10 = load i32* %arrayidx6, align 4
ret i32 %10
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
- %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx5, align 4
ret i32 %7
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
- %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx5, align 4
ret i32 %7
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 10
- %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx5, align 4
ret i32 %7
}
for.body: ; preds = %for.cond
%1 = load i32** @PB, align 4
- %add.ptr = getelementptr inbounds i32* %1, i32 100
+ %add.ptr = getelementptr inbounds i32, i32* %1, i32 100
%2 = load i32* %i, align 4
%idx.neg = sub i32 0, %2
- %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 %idx.neg
- %add.ptr2 = getelementptr inbounds i32* %add.ptr1, i32 -1
+ %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %idx.neg
+ %add.ptr2 = getelementptr inbounds i32, i32* %add.ptr1, i32 -1
%3 = load i32* %add.ptr2, align 4
%4 = load i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32** @PA, align 4
- %add.ptr3 = getelementptr inbounds i32* %5, i32 100
+ %add.ptr3 = getelementptr inbounds i32, i32* %5, i32 100
%6 = load i32* %i, align 4
%idx.neg4 = sub i32 0, %6
- %add.ptr5 = getelementptr inbounds i32* %add.ptr3, i32 %idx.neg4
- %add.ptr6 = getelementptr inbounds i32* %add.ptr5, i32 -1
+ %add.ptr5 = getelementptr inbounds i32, i32* %add.ptr3, i32 %idx.neg4
+ %add.ptr6 = getelementptr inbounds i32, i32* %add.ptr5, i32 -1
store i32 %add, i32* %add.ptr6, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32** @PA, align 4
%9 = load i32* %a.addr, align 4
- %add.ptr7 = getelementptr inbounds i32* %8, i32 %9
+ %add.ptr7 = getelementptr inbounds i32, i32* %8, i32 %9
%10 = load i32* %add.ptr7, align 4
ret i32 %10
}
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
%2 = load i32* %N, align 4
- %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
- %arrayidx2 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 2), i32 0, i32 %2
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %sub1
%3 = load i32* %arrayidx2, align 4
%4 = load i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%sub3 = sub nsw i32 100, %5
%sub4 = sub nsw i32 %sub3, 1
%6 = load i32* %N, align 4
- %arrayidx5 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
- %arrayidx6 = getelementptr inbounds [100 x i32]* %arrayidx5, i32 0, i32 %sub4
+ %arrayidx5 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx5, i32 0, i32 %sub4
store i32 %add, i32* %arrayidx6, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32* %a.addr, align 4
%9 = load i32* %N, align 4
- %arrayidx7 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
- %arrayidx8 = getelementptr inbounds [100 x i32]* %arrayidx7, i32 0, i32 %8
+ %arrayidx7 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx7, i32 0, i32 %8
%10 = load i32* %arrayidx8, align 4
ret i32 %10
}
%sub1 = sub nsw i32 %sub, 1
%2 = load i32* %N, align 4
%add = add nsw i32 %2, 1
- %arrayidx = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
- %arrayidx2 = getelementptr inbounds [100 x i32]* %arrayidx, i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx, i32 0, i32 %sub1
%3 = load i32* %arrayidx2, align 4
%4 = load i32* %a.addr, align 4
%add3 = add nsw i32 %3, %4
%sub4 = sub nsw i32 100, %5
%sub5 = sub nsw i32 %sub4, 1
%6 = load i32* %N, align 4
- %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
- %arrayidx7 = getelementptr inbounds [100 x i32]* %arrayidx6, i32 0, i32 %sub5
+ %arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx6, i32 0, i32 %sub5
store i32 %add3, i32* %arrayidx7, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32* %a.addr, align 4
%9 = load i32* %N, align 4
- %arrayidx8 = getelementptr inbounds [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
- %arrayidx9 = getelementptr inbounds [100 x i32]* %arrayidx8, i32 0, i32 %8
+ %arrayidx8 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* getelementptr inbounds (%struct.anon.0* @Bar, i32 0, i32 0), i32 0, i32 %9
+ %arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %arrayidx8, i32 0, i32 %8
%10 = load i32* %arrayidx9, align 4
ret i32 %10
}
for.body: ; preds = %for.cond
%1 = load i32* %i, align 4
%add = add nsw i32 %1, 4
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add1 = add nsw i32 %2, %3
%4 = load i32* %i, align 4
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add1, i32* %arrayidx2, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx3, align 4
ret i32 %7
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 5
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%sub2 = sub nsw i32 100, %4
%sub3 = sub nsw i32 %sub2, 1
- %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub3
store i32 %add, i32* %arrayidx4, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx5 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx5, align 4
ret i32 %7
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx3, align 4
ret i32 %7
}
for.body: ; preds = %for.cond
%1 = load i32* %i, align 4
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%sub = sub nsw i32 100, %4
%sub1 = sub nsw i32 %sub, 1
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %sub1
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx3, align 4
ret i32 %7
}
for.body: ; preds = %for.cond
%1 = load i32** @PB, align 4
- %add.ptr = getelementptr inbounds i32* %1, i32 100
+ %add.ptr = getelementptr inbounds i32, i32* %1, i32 100
%2 = load i32* %i, align 4
%idx.neg = sub i32 0, %2
- %add.ptr1 = getelementptr inbounds i32* %add.ptr, i32 %idx.neg
- %add.ptr2 = getelementptr inbounds i32* %add.ptr1, i32 -1
+ %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i32 %idx.neg
+ %add.ptr2 = getelementptr inbounds i32, i32* %add.ptr1, i32 -1
%3 = load i32* %add.ptr2, align 4
%4 = load i32* %a.addr, align 4
%add = add nsw i32 %3, %4
%5 = load i32** @PA, align 4
%6 = load i32* %i, align 4
- %add.ptr3 = getelementptr inbounds i32* %5, i32 %6
+ %add.ptr3 = getelementptr inbounds i32, i32* %5, i32 %6
store i32 %add, i32* %add.ptr3, align 4
br label %for.inc
for.end: ; preds = %for.cond
%8 = load i32** @PA, align 4
%9 = load i32* %a.addr, align 4
- %add.ptr4 = getelementptr inbounds i32* %8, i32 %9
+ %add.ptr4 = getelementptr inbounds i32, i32* %8, i32 %9
%10 = load i32* %add.ptr4, align 4
ret i32 %10
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 1
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%add2 = add nsw i32 %4, 10
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx4, align 4
ret i32 %7
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
- %arrayidx2 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
+ %arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %4
store i32 %add, i32* %arrayidx2, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx3, align 4
ret i32 %7
}
%1 = load i32* %i, align 4
%sub = sub nsw i32 100, %1
%sub1 = sub nsw i32 %sub, 10
- %arrayidx = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 2), i32 0, i32 %sub1
%2 = load i32* %arrayidx, align 4
%3 = load i32* %a.addr, align 4
%add = add nsw i32 %2, %3
%4 = load i32* %i, align 4
%add2 = add nsw i32 %4, 10
- %arrayidx3 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %add2
store i32 %add, i32* %arrayidx3, align 4
br label %for.inc
for.end: ; preds = %for.cond
%6 = load i32* %a.addr, align 4
- %arrayidx4 = getelementptr inbounds [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
+ %arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* getelementptr inbounds (%struct.anon* @Foo, i32 0, i32 0), i32 0, i32 %6
%7 = load i32* %arrayidx4, align 4
ret i32 %7
}
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %if.end9 ]
- %arrayidx = getelementptr inbounds [1024 x float]* @A, i64 0, i64 %indvars.iv
- %arrayidx2 = getelementptr inbounds [1024 x float]* @B, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds [1024 x float], [1024 x float]* @B, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx2, align 4
%cmp3 = fcmp oeq float %0, 0.000000e+00
br i1 %cmp3, label %if.end9, label %if.else
br label %for.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %if.end9 ]
- %arrayidx = getelementptr inbounds [1024 x float]* @A, i64 0, i64 %indvars.iv
- %arrayidx2 = getelementptr inbounds [1024 x float]* @B, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @A, i64 0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds [1024 x float], [1024 x float]* @B, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx2, align 4
%cmp3 = fcmp oeq float %0, 0.000000e+00
br i1 %cmp3, label %if.end9, label %if.else
for.body:
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %_ZL3fn3ii.exit58 ]
- %arrayidx = getelementptr inbounds i32* %0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
%3 = load i32* %arrayidx, align 4 %4 = trunc i64 %indvars.iv to i32
%and.i = and i32 %4, 1
%tobool.i.i = icmp eq i32 %and.i, 0
_ZL3fn3ii.exit:
%p1.addr.0.i16.i = phi i32 [ %or.i14.i, %if.then.i15.i ], [ %p1.addr.3.i.i, %_Z3fn2iii.exit.i ]
- %arrayidx2 = getelementptr inbounds i32* %1, i64 %indvars.iv
- store i32 %p1.addr.0.i16.i, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds i32* %0, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %1, i64 %indvars.iv
+ store i32 %p1.addr.0.i16.i, i32* %arrayidx2, align 4 %arrayidx4 = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
%10 = load i32* %arrayidx4, align 4 br i1 %tobool.i.i, label %_Z3fn1ii.exit.i26, label %if.then.i.i21
if.then.i.i21:
_ZL3fn3ii.exit58:
%p1.addr.0.i16.i57 = phi i32 [ %or.i14.i55, %if.then.i15.i56 ], [ %p1.addr.3.i.i50, %_Z3fn2iii.exit.i52 ]
- %arrayidx7 = getelementptr inbounds i32* %2, i64 %indvars.iv
+ %arrayidx7 = getelementptr inbounds i32, i32* %2, i64 %indvars.iv
store i32 %p1.addr.0.i16.i57, i32* %arrayidx7, align 4 %indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, %p1
for.body:
%indvars.iv = phi i64 [ %indvars.iv.next, %if.end14 ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%cmp3 = icmp sgt i32 %0, %1
br i1 %cmp3, label %if.then, label %if.end14
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%sum.011 = phi i32 [ %sum.1, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 30
br i1 %cmp1, label %if.then, label %for.inc
for.body:
%indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %if.end ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %b, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%2 = load i32* %arrayidx4, align 4
%cmp5 = icmp sgt i32 %1, %2
br i1 %cmp5, label %if.then, label %if.end
for.body: ; preds = %entry, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
%sum.011 = phi i32 [ %sum.1, %for.inc ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 30
br i1 %cmp1, label %if.then, label %for.inc
; UNROLL: vector.body:
; UNROLL: %[[IND:[a-zA-Z0-9]+]] = add i64 %{{.*}}, 0
; UNROLL: %[[IND1:[a-zA-Z0-9]+]] = add i64 %{{.*}}, 1
-; UNROLL: %[[v0:[a-zA-Z0-9]+]] = getelementptr inbounds i32* %f, i64 %[[IND]]
-; UNROLL: %[[v1:[a-zA-Z0-9]+]] = getelementptr inbounds i32* %f, i64 %[[IND1]]
+; UNROLL: %[[v0:[a-zA-Z0-9]+]] = getelementptr inbounds i32, i32* %f, i64 %[[IND]]
+; UNROLL: %[[v1:[a-zA-Z0-9]+]] = getelementptr inbounds i32, i32* %f, i64 %[[IND1]]
; UNROLL: %[[v2:[a-zA-Z0-9]+]] = load i32* %[[v0]], align 4
; UNROLL: %[[v3:[a-zA-Z0-9]+]] = load i32* %[[v1]], align 4
; UNROLL: %[[v4:[a-zA-Z0-9]+]] = icmp sgt i32 %[[v2]], 100
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
- %arrayidx = getelementptr inbounds i32* %f, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp1 = icmp sgt i32 %0, 100
br i1 %cmp1, label %if.then, label %for.inc
for.body14:
%indvars.iv3 = phi i64 [ %indvars.iv.next4, %for.inc23 ], [ undef, %for.body9 ]
%iNewChunks.120 = phi i32 [ %iNewChunks.2, %for.inc23 ], [ undef, %for.body9 ]
- %arrayidx16 = getelementptr inbounds [768 x i32]* undef, i64 0, i64 %indvars.iv3
+ %arrayidx16 = getelementptr inbounds [768 x i32], [768 x i32]* undef, i64 0, i64 %indvars.iv3
%tmp = load i32* %arrayidx16, align 4
br i1 undef, label %if.then18, label %for.inc23
.thread-pre-split.loopexit_crit_edge: ; preds = %19
%scevgep.sum = xor i64 %umax, -1
- %scevgep45 = getelementptr i8* %d.020, i64 %scevgep.sum
+ %scevgep45 = getelementptr i8, i8* %d.020, i64 %scevgep.sum
br label %thread-pre-split.loopexit
thread-pre-split.loopexit: ; preds = %11, %.thread-pre-split.loopexit_crit_edge
br i1 undef, label %11, label %22
; <label>:11 ; preds = %.lr.ph21
- %12 = getelementptr inbounds [0 x i8]* @PL_utf8skip, i64 0, i64 undef
+ %12 = getelementptr inbounds [0 x i8], [0 x i8]* @PL_utf8skip, i64 0, i64 undef
%13 = load i8* %12, align 1
%14 = zext i8 %13 to i64
%15 = icmp ugt i64 %14, %10
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%idxprom1 = sext i32 %0 to i64
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
%1 = load i32* %arrayidx2, align 4
%inc = add nsw i32 %1, 1
store i32 %inc, i32* %arrayidx2, align 4
for.body:
%indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
%count.09 = phi i32 [ 190, %for.body.lr.ph ], [ %inc, %for.body ]
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %count.09, i32* %arrayidx2, align 4
%inc = add nsw i32 %count.09, 1
%indvars.iv.next = add i64 %indvars.iv, 1
for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%ind.sum = add i64 %iv, %offset
- %arr.idx = getelementptr inbounds float* %a, i64 %ind.sum
+ %arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
%l1 = load float* %arr.idx, align 4
%ind.sum2 = add i64 %iv, %offset2
- %arr.idx2 = getelementptr inbounds float* %a, i64 %ind.sum2
+ %arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
%l2 = load float* %arr.idx2, align 4
%m = fmul fast float %b, %l2
%ad = fadd fast float %l1, %m
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%2 = add nsw i64 %indvars.iv, 12
- %3 = getelementptr inbounds [1024 x i32]* @array, i64 0, i64 %2
+ %3 = getelementptr inbounds [1024 x i32], [1024 x i32]* @array, i64 0, i64 %2
%4 = trunc i64 %indvars.iv to i32
store i32 %4, i32* %3, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.sqrt.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.sqrt.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.sin.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.sin.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.cos.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.cos.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.exp.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.exp.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.exp2.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.exp2.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.log.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.log.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.log10.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.log10.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.log2.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.log2.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.fabs.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.fabs(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds float, float* %z, i64 %indvars.iv
%1 = load float* %arrayidx1, align 4
%call = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds double, double* %z, i64 %indvars.iv
%1 = load double* %arrayidx, align 8
%call = tail call double @llvm.copysign(double %0, double %1) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.floor.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.floor.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.ceil.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.ceil.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.trunc.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.trunc.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.rint.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.rint.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.nearbyint.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.nearbyint.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @llvm.round.f32(float %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.round.f64(double %0) nounwind readnone
- %arrayidx2 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx2, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %w, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %w, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %z, i64 %indvars.iv
%2 = load float* %arrayidx4, align 4
%3 = tail call float @llvm.fma.f32(float %0, float %2, float %1)
- %arrayidx6 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %3, float* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double* %w, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %w, i64 %indvars.iv
%1 = load double* %arrayidx2, align 8
- %arrayidx4 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %z, i64 %indvars.iv
%2 = load double* %arrayidx4, align 8
%3 = tail call double @llvm.fma.f64(double %0, double %2, double %1)
- %arrayidx6 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %3, double* %arrayidx6, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %w, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %w, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %z, i64 %indvars.iv
%2 = load float* %arrayidx4, align 4
%3 = tail call float @llvm.fmuladd.f32(float %0, float %2, float %1)
- %arrayidx6 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %3, float* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double* %w, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %w, i64 %indvars.iv
%1 = load double* %arrayidx2, align 8
- %arrayidx4 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %z, i64 %indvars.iv
%2 = load double* %arrayidx4, align 8
%3 = tail call double @llvm.fmuladd.f64(double %0, double %2, double %1)
- %arrayidx6 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %3, double* %arrayidx6, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %z, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
%call = tail call float @llvm.pow.f32(float %0, float %1) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
- %arrayidx2 = getelementptr inbounds double* %z, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds double, double* %z, i64 %indvars.iv
%1 = load double* %arrayidx2, align 8
%call = tail call double @llvm.pow.f64(double %0, double %1) nounwind readnone
- %arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx4, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @fabsf(float %0) nounwind readnone
store float %call, float* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %x, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%call = tail call float @roundf(float %0) nounwind readnone
store float %call, float* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %x, i64 %indvars.iv
%0 = load double* %arrayidx, align 4
store double %0, double* %arrayidx, align 4
tail call void @round(double %0) nounwind readnone
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%call = tail call double @llvm.powi.f64(double %0, i32 %P) nounwind readnone
- %arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx4, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds double* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds double, double* %y, i64 %indvars.iv
%0 = load double* %arrayidx, align 8
%1 = trunc i64 %indvars.iv to i32
%call = tail call double @llvm.powi.f64(double %0, i32 %1) nounwind readnone
- %arrayidx4 = getelementptr inbounds double* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds double, double* %x, i64 %indvars.iv
store double %call, double* %arrayidx4, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i64* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i64, i64* %y, i64 %indvars.iv
%0 = load i64* %arrayidx, align 8
%call = tail call i64 @llvm.cttz.i64(i64 %0, i1 true) nounwind readnone
- %arrayidx4 = getelementptr inbounds i64* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds i64, i64* %x, i64 %indvars.iv
store i64 %call, i64* %arrayidx4, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i64* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i64, i64* %y, i64 %indvars.iv
%0 = load i64* %arrayidx, align 8
%call = tail call i64 @llvm.ctlz.i64(i64 %0, i1 true) nounwind readnone
- %arrayidx4 = getelementptr inbounds i64* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds i64, i64* %x, i64 %indvars.iv
store i64 %call, i64* %arrayidx4, align 8
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %z, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
%call = tail call float @llvm.minnum.f32(float %0, float %1) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %y, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %y, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %z, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %z, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
%call = tail call float @llvm.maxnum.f32(float %0, float %1) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %x, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %x, i64 %indvars.iv
store float %call, float* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
call void @llvm.lifetime.end(i64 4096, i8* %0) #1
- %arrayidx = getelementptr inbounds i32* %d, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%1 = load i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
call void @llvm.lifetime.start(i64 4096, i8* %0) #1
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%1 = bitcast [1024 x i32]* %arr to i8*
call void @llvm.lifetime.end(i64 4096, i8* %1) #1
- %arrayidx = getelementptr inbounds i32* %d, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%2 = load i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
call void @llvm.lifetime.start(i64 4096, i8* %1) #1
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %0 = getelementptr [1024 x i32]* %arr, i32 0, i64 %indvars.iv
+ %0 = getelementptr [1024 x i32], [1024 x i32]* %arr, i32 0, i64 %indvars.iv
%1 = bitcast [1024 x i32]* %arr to i8*
call void @llvm.lifetime.end(i64 4096, i8* %1) #1
- %arrayidx = getelementptr inbounds i32* %d, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %d, i64 %indvars.iv
%2 = load i32* %arrayidx, align 8
store i32 100, i32* %arrayidx, align 8
call void @llvm.lifetime.start(i64 4096, i8* %1) #1
for.body:
%iprom = sext i32 %i to i64
- %b = getelementptr inbounds %struct.X* undef, i64 %iprom, i32 1
+ %b = getelementptr inbounds %struct.X, %struct.X* undef, i64 %iprom, i32 1
store i16 0, i16* %b, align 4
%inc = add nsw i32 %i, 1
br label %for.cond
for.body: ; preds = %for.body, %for.body.lr.ph
%i = phi i64 [ 0, %for.body.lr.ph ], [ %i.next, %for.body ]
- %a = getelementptr inbounds double* %t, i64 %i
+ %a = getelementptr inbounds double, double* %t, i64 %i
%i.next = add nuw nsw i64 %i, 1
- %a.next = getelementptr inbounds double* %t, i64 %i.next
+ %a.next = getelementptr inbounds double, double* %t, i64 %i.next
%t1 = load double* %a, align 8
%t2 = load double* %a.next, align 8
store double %t1, double* %a.next, align 8
for.body:
%indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%indvars.iv.next = add i32 %indvars.iv, 1
- %arrayidx = getelementptr inbounds i32* %A, i32 %indvars.iv.next
+ %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv.next
%0 = load i32* %arrayidx, align 4
%add1 = add nsw i32 %0, 1
- %arrayidx3 = getelementptr inbounds i32* %A, i32 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
store i32 %add1, i32* %arrayidx3, align 4
%exitcond = icmp ne i32 %indvars.iv.next, 1024
br i1 %exitcond, label %for.body, label %for.end
for.body:
%indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %A, i32 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i32 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, 1
%indvars.iv.next = add i32 %indvars.iv, 1
- %arrayidx3 = getelementptr inbounds i32* %A, i32 %indvars.iv.next
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i32 %indvars.iv.next
store i32 %add, i32* %arrayidx3, align 4
%exitcond = icmp ne i32 %indvars.iv.next, 1024
br i1 %exitcond, label %for.body, label %for.end
for.body:
%i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%idxprom = sext i32 %i.01 to i64
- %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, 1
%add1 = add nsw i32 %i.01, 2
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds i32* %A, i64 %idxprom2
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %idxprom2
store i32 %add, i32* %arrayidx3, align 4
%inc = add nsw i32 %i.01, 1
%cmp = icmp slt i32 %inc, 1024
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
store i32 %0, i32* %arrayidx2, align 4
%indvars.iv.next = add nsw i64 %indvars.iv, 1
- %arrayidx4 = getelementptr inbounds i32* %B, i64 %indvars.iv.next
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv.next
%1 = load i32* %arrayidx4, align 4
store i32 %1, i32* %arrayidx, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%tmp.addr.08 = phi i32 [ %tmp, %entry ], [ %0, %for.body ]
%indvars.iv.next = add nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv.next
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv.next
store i32 %tmp.addr.08, i32* %arrayidx, align 4
- %arrayidx3 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx3, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp ne i32 %lftr.wideiv, 1024
for.body:
%indvars.iv = phi i64 [ 16, %entry ], [ %indvars.iv.next, %for.body ]
%0 = add nsw i64 %indvars.iv, -3
- %arrayidx = getelementptr inbounds i32* %A, i64 %0
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %0
%1 = load i32* %arrayidx, align 4
%2 = add nsw i64 %indvars.iv, 4
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %2
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %2
%3 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %3, %1
- %arrayidx5 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %add3, i32* %arrayidx5, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%indvars.iv = phi i64 [ 16, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %0, i32* %arrayidx2, align 4
%1 = add nsw i64 %indvars.iv, -3
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %1
%2 = load i32* %arrayidx4, align 4
- %arrayidx6 = getelementptr inbounds i32* %C, i64 %indvars.iv
+ %arrayidx6 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
store i32 %2, i32* %arrayidx6, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = trunc i64 %indvars.iv to i32
store i32 %0, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !tbaa !0
%conv = fptosi float %0 to i32
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp sgt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp slt i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp slt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp sgt i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp ugt i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp ult i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp ult i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp ugt i32 %max.red.08, %0
%max.red.0 = select i1 %cmp3, i32 %0, i32 %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp sge i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp sle i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp uge i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%cmp3 = icmp ule i32 %0, %max.red.08
%max.red.0 = select i1 %cmp3, i32 %max.red.08, i32 %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %arrayidx1 = getelementptr inbounds [1024 x i32]* @A, i64 1, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 1, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%1 = load i32* %arrayidx1, align 4
%cmp3 = icmp sgt i32 %0, %1
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi i32 [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x i32]* @A, i64 0, i64 %indvars.iv
- %arrayidx1 = getelementptr inbounds [1024 x i32]* @A, i64 1, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 0, i64 %indvars.iv
+ %arrayidx1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @A, i64 1, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%1 = load i32* %arrayidx1, align 4
%cmp3 = icmp sgt i32 %0, %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ogt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp oge float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp olt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ole float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ugt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp uge float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ult float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ule float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %max.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp olt float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ole float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ogt float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp oge float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ult float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ule float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %0, float %min.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ugt float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi float [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp uge float %0, %min.red.08
%min.red.0 = select i1 %cmp3, float %min.red.08, float %0
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%min.red.08 = phi double [ %min, %entry ], [ %min.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x double]* @dA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x double], [1024 x double]* @dA, i64 0, i64 %indvars.iv
%0 = load double* %arrayidx, align 4
%cmp3 = fcmp olt double %0, %min.red.08
%min.red.0 = select i1 %cmp3, double %0, double %min.red.08
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%max.red.08 = phi float [ %max, %entry ], [ %max.red.0, %for.body ]
- %arrayidx = getelementptr inbounds [1024 x float]* @fA, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x float], [1024 x float]* @fA, i64 0, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%cmp3 = fcmp ogt float %0, %max.red.08
%max.red.0 = select i1 %cmp3, float %0, float %max.red.08
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds [40000 x i8] addrspace(1)* @Y, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [40000 x i8], [40000 x i8] addrspace(1)* @Y, i64 0, i64 %indvars.iv
%0 = load i8 addrspace(1)* %arrayidx, align 1
%add = add i8 %0, 1
- %arrayidx3 = getelementptr inbounds [40000 x i8]* @X, i64 0, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds [40000 x i8], [40000 x i8]* @X, i64 0, i64 %indvars.iv
store i8 %add, i8* %arrayidx3, align 1
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body.preheader, %for.body
%indvars.iv27 = phi i64 [ %indvars.iv.next28, %for.body ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv27, !dbg !14
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv27, !dbg !14
%0 = load i32* %arrayidx, align 4, !dbg !14, !tbaa !22
%idxprom1 = sext i32 %0 to i64, !dbg !14
- %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1, !dbg !14
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1, !dbg !14
%1 = load i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
%inc = add nsw i32 %1, 1, !dbg !14
store i32 %inc, i32* %arrayidx2, align 4, !dbg !14, !tbaa !22
for.body7: ; preds = %for.body7.preheader, %for.body7
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body7 ], [ 0, %for.body7.preheader ]
- %arrayidx9 = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !20
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !20
%2 = load i32* %arrayidx9, align 4, !dbg !20, !tbaa !22
%idxprom10 = sext i32 %2 to i64, !dbg !20
- %arrayidx11 = getelementptr inbounds i32* %B, i64 %idxprom10, !dbg !20
+ %arrayidx11 = getelementptr inbounds i32, i32* %B, i64 %idxprom10, !dbg !20
%3 = load i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
%inc12 = add nsw i32 %3, 1, !dbg !20
store i32 %inc12, i32* %arrayidx11, align 4, !dbg !20, !tbaa !22
; CHECK-NOT: sdiv <2 x i32>
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%r.05 = phi i32 [ 80, %entry ], [ %div, %for.body ]
- %arrayidx = getelementptr inbounds [128 x i32]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* @a, i64 0, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%div = sdiv i32 %r.05, %0
%indvars.iv.next = add i64 %indvars.iv, 1
;CHECK: ret i32
define i32 @sum_array(i32* %A, i32 %n) nounwind uwtable readonly noinline ssp {
%1 = sext i32 %n to i64
- %2 = getelementptr inbounds i32* %A, i64 %1
+ %2 = getelementptr inbounds i32, i32* %A, i64 %1
%3 = icmp eq i32 %n, 0
br i1 %3, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %.lr.ph.i
%.012.i = phi i32 [ %5, %.lr.ph.i ], [ 0, %0 ]
%4 = load i32* %.03.i, align 4
%5 = add nsw i32 %4, %.012.i
- %6 = getelementptr inbounds i32* %.03.i, i64 1
+ %6 = getelementptr inbounds i32, i32* %.03.i, i64 1
%7 = icmp eq i32* %6, %2
br i1 %7, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %.lr.ph.i
;CHECK: ret i32
define i32 @sum_array_as1(i32 addrspace(1)* %A, i32 %n) nounwind uwtable readonly noinline ssp {
%1 = sext i32 %n to i64
- %2 = getelementptr inbounds i32 addrspace(1)* %A, i64 %1
+ %2 = getelementptr inbounds i32, i32 addrspace(1)* %A, i64 %1
%3 = icmp eq i32 %n, 0
br i1 %3, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %.lr.ph.i
%.012.i = phi i32 [ %5, %.lr.ph.i ], [ 0, %0 ]
%4 = load i32 addrspace(1)* %.03.i, align 4
%5 = add nsw i32 %4, %.012.i
- %6 = getelementptr inbounds i32 addrspace(1)* %.03.i, i64 1
+ %6 = getelementptr inbounds i32, i32 addrspace(1)* %.03.i, i64 1
%7 = icmp eq i32 addrspace(1)* %6, %2
br i1 %7, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %.lr.ph.i
for.body: ; preds = %for.body.preheader, %for.inc
%indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %for.body.preheader ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv, !dbg !14
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !14
%0 = load i32* %arrayidx, align 4, !dbg !14, !tbaa !16
switch i32 %0, label %for.inc [
i32 0, label %sw.bb
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = trunc i64 %indvars.iv to i32
store i32 %3, i32* %2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
while.body:
%it.sroa.0.091 = phi i32* [ undef, %while.body.lr.ph ], [ %incdec.ptr.i, %while.body ]
- %incdec.ptr.i = getelementptr inbounds i32* %it.sroa.0.091, i64 1
+ %incdec.ptr.i = getelementptr inbounds i32, i32* %it.sroa.0.091, i64 1
%inc32 = add i32 undef, 1 ; <------------- Make sure we don't set NSW flags to the undef.
%cmp.i11 = icmp eq i32* %incdec.ptr.i, undef
br i1 %cmp.i11, label %while.end, label %while.body
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%red.05 = phi i32 [ 0, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %red.05
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%2 = load i32* %b.01, align 4
%3 = shl nsw i32 %2, 1
store i32 %3, i32* %p.02, align 4
- %4 = getelementptr inbounds i32* %p.02, i64 -1
- %5 = getelementptr inbounds i32* %b.01, i64 1
+ %4 = getelementptr inbounds i32, i32* %p.02, i64 -1
+ %5 = getelementptr inbounds i32, i32* %b.01, i64 1
%6 = icmp eq i32* %4, getelementptr ([36 x i32]* @A, i64 128102389400760775, i64 3)
br i1 %6, label %7, label %1
%b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 2), %0 ], [ %4, %1 ]
%2 = load i32* %b.01, align 4
store i32 %2, i32* %p.02, align 4
- %3 = getelementptr inbounds i32* %p.02, i64 -1
- %4 = getelementptr inbounds i32* %b.01, i64 1
+ %3 = getelementptr inbounds i32, i32* %p.02, i64 -1
+ %4 = getelementptr inbounds i32, i32* %b.01, i64 1
%5 = icmp eq i32* %4, getelementptr inbounds ([36 x i32]* @A, i64 0, i64 18)
br i1 %5, label %6, label %1
%b.01 = phi i32* [ getelementptr inbounds ([36 x i32]* @B, i64 0, i64 5), %0 ], [ %4, %1 ]
%2 = load i32* %b.01, align 4
store i32 %2, i32* %p.02, align 4
- %3 = getelementptr inbounds i32* %p.02, i64 -1
- %4 = getelementptr inbounds i32* %b.01, i64 1
+ %3 = getelementptr inbounds i32, i32* %p.02, i64 -1
+ %4 = getelementptr inbounds i32, i32* %b.01, i64 1
%5 = icmp eq i32* %3, getelementptr ([36 x i32]* @A, i64 128102389400760775, i64 3)
br i1 %5, label %6, label %1
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = add nsw i64 %indvars.iv, 13
- %5 = getelementptr inbounds i32* %B, i64 %4
+ %5 = getelementptr inbounds i32, i32* %B, i64 %4
%6 = load i32* %5, align 4
%7 = shl i32 %6, 1
%8 = add i32 %3, %sum.02
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = trunc i64 %indvars.iv to i32
%7 = add i32 %sum.02, %6
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%prod.02 = phi i32 [ %9, %.lr.ph ], [ 1, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = trunc i64 %indvars.iv to i32
%7 = mul i32 %prod.02, %6
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = mul nsw i32 %5, %3
%7 = trunc i64 %indvars.iv to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
%sum.02 = phi i32 [ %9, %.lr.ph ], [ 19, %0 ]
- %2 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %4 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = trunc i64 %indvars.iv to i32
%7 = add i32 %3, %6
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.09 = phi i32 [ %add, %for.body ], [ 120, %entry ]
- %arrayidx = getelementptr inbounds i32* %in, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %in, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %coeff, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %coeff, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%mul = mul nsw i32 %1, %0
%add = add nsw i32 %mul, %sum.09
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%result.08 = phi i32 [ %and, %for.body ], [ -1, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%and = and i32 %add, %result.08
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%result.08 = phi i32 [ %or, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%or = or i32 %add, %result.08
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%result.08 = phi i32 [ %xor, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
%xor = xor i32 %add, %result.08
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%sub = sub nsw i32 %0, %x.05
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%x.05 = phi i32 [ %sub, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%sub = sub nsw i32 %x.05, %0
%indvars.iv.next = add i64 %indvars.iv, 1
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%sum.033 = phi float [ %S, %entry ], [ %sum.1, %for.inc ]
- %arrayidx = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
%cmp3 = fcmp ogt float %0, %1
br i1 %cmp3, label %if.then, label %for.inc
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%sum.033 = phi float [ %S, %entry ], [ %sum.1, %for.inc ]
- %arrayidx = getelementptr inbounds float* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %A, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %B, i64 %indvars.iv
%1 = load float* %arrayidx2, align 4
%cmp3 = fcmp ogt float %0, %1
br i1 %cmp3, label %if.then, label %for.inc
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum2.09 = phi float [ 0.000000e+00, %entry ], [ %add1, %for.body ]
%sum.08 = phi float [ %S, %entry ], [ %add, %for.body ]
- %arrayidx = getelementptr inbounds float* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%add = fadd fast float %sum.08, %0
%add1 = fadd fast float %sum2.09, %add
%i.06 = phi i32 [ 0, %entry ], [ %inc4, %for.body ]
%redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
%add.i = add i64 %add.i7, -1
- %kind_.i = getelementptr inbounds i32* %ptr, i64 %add.i
+ %kind_.i = getelementptr inbounds i32, i32* %ptr, i64 %add.i
%tmp.i1 = load i32* %kind_.i, align 4
%inc.redux = add i32 %tmp.i1, %redux5
%inc4 = add i32 %i.06, 1
%i.06 = phi i32 [ 0, %entry ], [ %inc4, %for.body ]
%redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
%add.i = add i128 %add.i7, -1
- %kind_.i = getelementptr inbounds i32* %ptr, i128 %add.i
+ %kind_.i = getelementptr inbounds i32, i32* %ptr, i128 %add.i
%tmp.i1 = load i32* %kind_.i, align 4
%inc.redux = add i32 %tmp.i1, %redux5
%inc4 = add i32 %i.06, 1
%i.06 = phi i32 [ 0, %entry ], [ %inc4, %for.body ]
%redux5 = phi i32 [ 0, %entry ], [ %inc.redux, %for.body ]
%add.i = add i16 %add.i7, -1
- %kind_.i = getelementptr inbounds i32* %ptr, i16 %add.i
+ %kind_.i = getelementptr inbounds i32, i32* %ptr, i16 %add.i
%tmp.i1 = load i32* %kind_.i, align 4
%inc.redux = add i32 %tmp.i1, %redux5
%inc4 = add i32 %i.06, 1
%forward_induction.05 = phi i8 [ 0, %entry ], [ %inc, %while.body ]
%inc = add i8 %forward_induction.05, 1
%conv = zext i8 %inc to i32
- %arrayidx = getelementptr inbounds [1024 x i32]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %conv, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, -1
%0 = trunc i64 %indvars.iv to i32
%forward_induction.05 = phi i8 [ -127, %entry ], [ %inc, %while.body ]
%inc = add i8 %forward_induction.05, 1
%conv = sext i8 %inc to i32
- %arrayidx = getelementptr inbounds [1024 x i32]* @a, i64 0, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %conv, i32* %arrayidx, align 4
%indvars.iv.next = add i64 %indvars.iv, -1
%0 = trunc i64 %indvars.iv to i32
%4 = trunc i64 %indvars.iv to i32
%5 = shl nsw i32 %4, 1
%6 = sext i32 %5 to i64
- %7 = getelementptr inbounds i32* %A, i64 %6
+ %7 = getelementptr inbounds i32, i32* %A, i64 %6
%8 = load i32* %7, align 4
%9 = add nsw i32 %8, %sum.01
%indvars.iv.next = add i64 %indvars.iv, -1
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
store i32 %mul, i32* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds i32* %b, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %idxprom1
store i32 %mul, i32* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds i32* %b, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %idxprom
%0 = load i32* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom
%0 = load i32 addrspace(1)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds i32* %b, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds i32, i32* %b, i64 %idxprom1
store i32 %mul, i32* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%idxprom = sext i32 %i.02 to i64
- %arrayidx = getelementptr inbounds [1024 x i32] addrspace(2)* @q_as2, i64 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(2)* @q_as2, i64 0, i64 %idxprom
%0 = load i32 addrspace(2)* %arrayidx, align 4
%mul = mul nsw i32 %0, 3
%idxprom1 = sext i32 %i.02 to i64
- %arrayidx2 = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
+ %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
%inc = add nsw i32 %i.02, 1
%cmp = icmp slt i32 %inc, %n
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
%0 = load i32 addrspace(1)* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.01
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %i.01
%1 = load i32 addrspace(1)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.01
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %i.01
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
%inc = add i64 %i.01, 1
%cmp = icmp ult i64 %inc, 200
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32* %b, i64 %i.01
+ %arrayidx = getelementptr inbounds i32, i32* %b, i64 %i.01
%0 = load i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %c, i64 %i.01
+ %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 %i.01
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.01
+ %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %a, i64 %i.01
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
%inc = add i64 %i.01, 1
%cmp = icmp ult i64 %inc, 200
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
%0 = load i32 addrspace(1)* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %c, i64 %i.01
+ %arrayidx1 = getelementptr inbounds i32, i32* %c, i64 %i.01
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.01
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.01
store i32 %add, i32* %arrayidx2, align 4
%inc = add i64 %i.01, 1
%cmp = icmp ult i64 %inc, 200
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
%0 = load i32 addrspace(1)* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.01
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %c, i64 %i.01
%1 = load i32 addrspace(1)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.01
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.01
store i32 %add, i32* %arrayidx2, align 4
%inc = add i64 %i.01, 1
%cmp = icmp ult i64 %inc, 200
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %b, i64 %i.01
%0 = load i32 addrspace(1)* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32 addrspace(2)* %c, i64 %i.01
+ %arrayidx1 = getelementptr inbounds i32, i32 addrspace(2)* %c, i64 %i.01
%1 = load i32 addrspace(2)* %arrayidx1, align 4
%add = add nsw i32 %0, %1
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.01
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %i.01
store i32 %add, i32* %arrayidx2, align 4
%inc = add i64 %i.01, 1
%cmp = icmp ult i64 %inc, 200
for.body:
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %B, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
- %arrayidx2 = getelementptr inbounds i32* %C, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4
%add = add nsw i32 %1, %0
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
store i32 %add, i32* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%mul = fmul float %0, 3.000000e+00
- %arrayidx2 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
store float %mul, float* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
%ind.sum = add i64 %iv, %offset
- %arr.idx = getelementptr inbounds float* %a, i64 %ind.sum
+ %arr.idx = getelementptr inbounds float, float* %a, i64 %ind.sum
%l1 = load float* %arr.idx, align 4
%ind.sum2 = add i64 %iv, %offset2
- %arr.idx2 = getelementptr inbounds float* %a, i64 %ind.sum2
+ %arr.idx2 = getelementptr inbounds float, float* %a, i64 %ind.sum2
%l2 = load float* %arr.idx2, align 4
%m = fmul fast float %b, %l2
%ad = fadd fast float %l1, %m
for.body: ; preds = %for.body, %entry
%i.016 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.016
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.016
%0 = load i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %B, i64 %i.016
+ %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 %i.016
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %1, %0
- %arrayidx2 = getelementptr inbounds i32* %C, i64 %i.016
+ %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %i.016
%2 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %add, %2
- %arrayidx4 = getelementptr inbounds i32* %E, i64 %i.016
+ %arrayidx4 = getelementptr inbounds i32, i32* %E, i64 %i.016
%3 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %add3, %3
- %arrayidx6 = getelementptr inbounds i32* %F, i64 %i.016
+ %arrayidx6 = getelementptr inbounds i32, i32* %F, i64 %i.016
%4 = load i32* %arrayidx6, align 4
%add7 = add nsw i32 %add5, %4
- %arrayidx8 = getelementptr inbounds i32* %out, i64 %i.016
+ %arrayidx8 = getelementptr inbounds i32, i32* %out, i64 %i.016
store i32 %add7, i32* %arrayidx8, align 4
%inc = add i64 %i.016, 1
%exitcond = icmp eq i64 %inc, 256
for.body: ; preds = %for.body, %entry
%i.037 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %i.037
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %i.037
%0 = load i32* %arrayidx, align 4
- %arrayidx1 = getelementptr inbounds i32* %B, i64 %i.037
+ %arrayidx1 = getelementptr inbounds i32, i32* %B, i64 %i.037
%1 = load i32* %arrayidx1, align 4
%add = add nsw i32 %1, %0
- %arrayidx2 = getelementptr inbounds i32* %C, i64 %i.037
+ %arrayidx2 = getelementptr inbounds i32, i32* %C, i64 %i.037
%2 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %add, %2
- %arrayidx4 = getelementptr inbounds i32* %E, i64 %i.037
+ %arrayidx4 = getelementptr inbounds i32, i32* %E, i64 %i.037
%3 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %add3, %3
- %arrayidx6 = getelementptr inbounds i32* %F, i64 %i.037
+ %arrayidx6 = getelementptr inbounds i32, i32* %F, i64 %i.037
%4 = load i32* %arrayidx6, align 4
%add7 = add nsw i32 %add5, %4
- %arrayidx8 = getelementptr inbounds i32* %out, i64 %i.037
+ %arrayidx8 = getelementptr inbounds i32, i32* %out, i64 %i.037
store i32 %add7, i32* %arrayidx8, align 4
%5 = load i32* %arrayidx, align 4
%6 = load i32* %arrayidx1, align 4
%add15 = add nsw i32 %add13, %8
%9 = load i32* %arrayidx6, align 4
%add17 = add nsw i32 %add15, %9
- %arrayidx18 = getelementptr inbounds i32* %out2, i64 %i.037
+ %arrayidx18 = getelementptr inbounds i32, i32* %out2, i64 %i.037
store i32 %add17, i32* %arrayidx18, align 4
%inc = add i64 %i.037, 1
%exitcond = icmp eq i64 %inc, 256
"<bb 3>":
%i_15 = phi i32 [ 0, %entry ], [ %i_19, %"<bb 3>" ]
- %pp3 = getelementptr float* %A, i32 %i_15
+ %pp3 = getelementptr float, float* %A, i32 %i_15
%D.1396_10 = load float* %pp3, align 4
- %pp24 = getelementptr float* %B, i32 %i_15
+ %pp24 = getelementptr float, float* %B, i32 %i_15
%D.1398_15 = load float* %pp24, align 4
%D.1399_17 = fadd float %D.1398_15, %K
%D.1400_18 = fmul float %D.1396_10, %D.1399_17
"<bb 3>":
%i_15 = phi i32 [ 0, %entry ], [ %i_19, %"<bb 3>" ]
- %pp3 = getelementptr float addrspace(5) * %A, i32 %i_15
+ %pp3 = getelementptr float, float addrspace(5) * %A, i32 %i_15
%D.1396_10 = load float addrspace(5) * %pp3, align 4
- %pp24 = getelementptr float* %B, i32 %i_15
+ %pp24 = getelementptr float, float* %B, i32 %i_15
%D.1398_15 = load float* %pp24, align 4
%D.1399_17 = fadd float %D.1398_15, %K
%D.1400_18 = fmul float %D.1396_10, %D.1399_17
%10 = sub nsw i32 %9, 1
%11 = sext i32 %10 to i64
%12 = load double** %1, align 8
- %13 = getelementptr inbounds double* %12, i64 %11
+ %13 = getelementptr inbounds double, double* %12, i64 %11
%14 = load double* %13, align 8
%15 = load i32* %k, align 4
%16 = sext i32 %15 to i64
%17 = load double** %2, align 8
- %18 = getelementptr inbounds double* %17, i64 %16
+ %18 = getelementptr inbounds double, double* %17, i64 %16
%19 = load double* %18, align 8
%20 = fadd double %14, %19
%21 = load i32* %k, align 4
%22 = sext i32 %21 to i64
%23 = load double** %1, align 8
- %24 = getelementptr inbounds double* %23, i64 %22
+ %24 = getelementptr inbounds double, double* %23, i64 %22
store double %20, double* %24, align 8
br label %25
; <label>:1 ; preds = %7, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %7 ]
%2 = mul nsw i64 %indvars.iv, 7
- %3 = getelementptr inbounds i32* %a, i64 %2
+ %3 = getelementptr inbounds i32, i32* %a, i64 %2
%4 = load i32* %3, align 4
%5 = icmp sgt i32 %4, 3
br i1 %5, label %6, label %7
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%sel = select i1 %cond, i32 %6, i32 zeroinitializer
store i32 %sel, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%add10 = add i32 %add9, %2
store i32 %add10, i32* @f, align 4, !tbaa !5
%idx.ext = sext i32 %add10 to i64
- %add.ptr = getelementptr inbounds i32* @a, i64 %idx.ext
+ %add.ptr = getelementptr inbounds i32, i32* @a, i64 %idx.ext
%tobool129 = icmp eq i32 %i.213, 0
br i1 %tobool129, label %for.inc19, label %for.body13.lr.ph
for.body13: ; preds = %for.body13.lr.ph, %for.body13
%indvars.iv = phi i64 [ %3, %for.body13.lr.ph ], [ %indvars.iv.next, %for.body13 ]
%add.ptr.sum = add i64 %idx.ext, %indvars.iv
- %arrayidx = getelementptr inbounds i32* @a, i64 %add.ptr.sum
+ %arrayidx = getelementptr inbounds i32, i32* @a, i64 %add.ptr.sum
%4 = load i32* %arrayidx, align 4, !tbaa !5
- %arrayidx15 = getelementptr inbounds i32* %0, i64 %indvars.iv
+ %arrayidx15 = getelementptr inbounds i32, i32* %0, i64 %indvars.iv
store i32 %4, i32* %arrayidx15, align 4, !tbaa !5
%indvars.iv.next = add i64 %indvars.iv, 1
%5 = trunc i64 %indvars.iv.next to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
; <label>:1 ; preds = %1, %0
%indvars.iv = phi i64 [ 0, %0 ], [ %indvars.iv.next, %1 ]
- %2 = getelementptr inbounds [2048 x i32]* @b, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @b, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
- %4 = getelementptr inbounds [2048 x i32]* @c, i64 0, i64 %indvars.iv
+ %4 = getelementptr inbounds [2048 x i32], [2048 x i32]* @c, i64 0, i64 %indvars.iv
%5 = load i32* %4, align 4
%6 = add nsw i32 %5, %3
- %7 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %7 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
store i32 %6, i32* %7, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
for.body: ; preds = %for.body.lr.ph, %for.body
%indvars.iv = phi i64 [ %0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx, align 4
%mul = mul nuw i32 %1, 333
store i32 %mul, i32* %arrayidx, align 4
for.body:
%indvars.iv = phi i64 [ 93, %entry ], [ %indvars.iv.next, %for.body ]
%0 = add i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds [100 x i32]* @uf, i64 0, i64 %0
- %arrayidx3 = getelementptr inbounds [100 x i32]* @xi, i64 0, i64 %0
+ %arrayidx = getelementptr inbounds [100 x i32], [100 x i32]* @uf, i64 0, i64 %0
+ %arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* @xi, i64 0, i64 %0
%1 = load i32* %arrayidx3, align 4
%2 = load i32* %arrayidx, align 4
%add4 = add nsw i32 %2, %1
store i32 %add4, i32* %arrayidx, align 4
- %arrayidx7 = getelementptr inbounds [100 x i32]* @q, i64 0, i64 %0
+ %arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* @q, i64 0, i64 %0
%3 = load i32* %arrayidx7, align 4
%add8 = add nsw i32 %add4, %3
store i32 %add8, i32* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %x = getelementptr inbounds %struct.coordinate* %A, i64 %indvars.iv, i32 0
+ %x = getelementptr inbounds %struct.coordinate, %struct.coordinate* %A, i64 %indvars.iv, i32 0
%0 = load i32* %x, align 4
%add = add nsw i32 %0, %sum.05
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.05 = phi i32 [ %add, %for.body ], [ 0, %entry ]
- %x = getelementptr inbounds %struct.lit* %A, i64 %indvars.iv, i32 0
+ %x = getelementptr inbounds %struct.lit, %struct.lit* %A, i64 %indvars.iv, i32 0
%0 = load i32* %x, align 4
%add = add nsw i32 %0, %sum.05
%indvars.iv.next = add i64 %indvars.iv, 1
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !tbaa !0
%conv = fptosi float %0 to i32
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
store i32 %conv, i32* %arrayidx2, align 4, !tbaa !4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
- %arrayidx = getelementptr inbounds float* %b, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
%0 = load float* %arrayidx, align 4, !tbaa !0
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32* %arrayidx2, align 4, !tbaa !4
%conv = sitofp i32 %1 to float
%mul = fmul float %0, %conv
- %arrayidx4 = getelementptr inbounds float* %c, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %c, i64 %indvars.iv
store float %mul, float* %arrayidx4, align 4, !tbaa !0
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 1600
; Loop invariant anchored in loop.
%idxprom21 = zext i32 undef to i64
- %arrayidx23 = getelementptr inbounds [100 x [100 x i32]]* undef, i64 0, i64 %idxprom21, i64 %indvars.iv17
+ %arrayidx23 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* undef, i64 0, i64 %idxprom21, i64 %indvars.iv17
store i32 undef, i32* %arrayidx23, align 4
%indvars.next= add i64 %indvars.iv17, -1
%0 = trunc i64 %indvars.next to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds [2048 x i32]* @a, i64 0, i64 %indvars.iv
+ %2 = getelementptr inbounds [2048 x i32], [2048 x i32]* @a, i64 0, i64 %indvars.iv
%3 = load i32* %2, align 4
%4 = trunc i64 %indvars.iv to i32
%5 = add nsw i32 %3, %4
%b.05 = phi i32 (...)* [ undef, %entry ], [ %1, %for.body ]
%a.04 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%0 = bitcast i32 (...)* %b.05 to i8*
- %add.ptr = getelementptr i8* %0, i64 1
+ %add.ptr = getelementptr i8, i8* %0, i64 1
%1 = bitcast i8* %add.ptr to i32 (...)*
; CHECK: %[[cst:.*]] = bitcast i32 (...)* {{.*}} to i8*
-; CHECK-NEXT: %[[gep:.*]] = getelementptr i8* %[[cst]], i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr i8, i8* %[[cst]], i64 1
%inc = add nsw i32 %a.04, 1
%exitcond = icmp eq i32 %a.04, 63
br i1 %exitcond, label %for.end, label %for.body
loop:
%inc = phi i32 [ %sub267, %entry ], [ %add, %loop]
%ext.inc = sext i32 %inc to i64
- %add.ptr265 = getelementptr inbounds i32* %ptr265, i64 %ext.inc
- %add.ptr266 = getelementptr inbounds i32* %ptr266, i64 %ext.inc
+ %add.ptr265 = getelementptr inbounds i32, i32* %ptr265, i64 %ext.inc
+ %add.ptr266 = getelementptr inbounds i32, i32* %ptr266, i64 %ext.inc
%add = add i32 %inc, 9
%cmp = icmp slt i32 %add, 140
br i1 %cmp, label %block1, label %loop
%row_width.5 = phi i32 [ %sub267.lcssa, %block1 ], [ %dec, %do.body272 ]
%sp.4 = phi i8* [ %tmp30, %block1 ], [ %incdec.ptr273, %do.body272 ]
%dp.addr.4 = phi i8* [ %tmp29, %block1 ], [ %incdec.ptr274, %do.body272 ]
- %incdec.ptr273 = getelementptr inbounds i8* %sp.4, i64 1
+ %incdec.ptr273 = getelementptr inbounds i8, i8* %sp.4, i64 1
%tmp31 = load i8* %sp.4, align 1
- %incdec.ptr274 = getelementptr inbounds i8* %dp.addr.4, i64 1
+ %incdec.ptr274 = getelementptr inbounds i8, i8* %dp.addr.4, i64 1
store i8 %tmp31, i8* %dp.addr.4, align 1
%dec = add i32 %row_width.5, -1
%cmp276 = icmp eq i32 %dec, 0
for.body:
%0 = add nsw i64 %indvars.iv, -5
- %arrayidx = getelementptr inbounds float* %a, i64 %0
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %0
%1 = load float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
%2 = add nsw i64 %indvars.iv, 2
- %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+ %arrayidx2 = getelementptr inbounds float, float* %a, i64 %2
%3 = load float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
%mul = fmul float %1, %3
- %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv
store float %mul, float* %arrayidx4, align 4, !llvm.mem.parallel_loop_access !1
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
for.body: ; preds = %entry, %for.body
%indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv2
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv2
%0 = load float* %arrayidx, align 4
%mul = fmul float %0, %0
store float %mul, float* %arrayidx, align 4
for.body: ; preds = %entry, %for.body
%indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%0 = add nsw i64 %indvars.iv2, -5
- %arrayidx = getelementptr inbounds float* %a, i64 %0
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %0
%1 = load float* %arrayidx, align 4
%2 = add nsw i64 %indvars.iv2, 2
- %arrayidx2 = getelementptr inbounds float* %a, i64 %2
+ %arrayidx2 = getelementptr inbounds float, float* %a, i64 %2
%3 = load float* %arrayidx2, align 4
%mul = fmul float %1, %3
- %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv2
+ %arrayidx4 = getelementptr inbounds float, float* %a, i64 %indvars.iv2
store float %mul, float* %arrayidx4, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv2, 1
%cmp2 = icmp sgt i64 %indvars.iv.next, %size
define i32 @_Z4foo1Pii(i32* %A, i32 %n) #0 {
entry:
%idx.ext = sext i32 %n to i64
- %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 %idx.ext
%cmp3.i = icmp eq i32 %n, 0
br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
%0 = load i32* %__first.addr.04.i, align 4
%add.i = add nsw i32 %0, %__init.addr.05.i
- %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+ %incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
define i32 @_Z4foo2Pii(i32* %A, i32 %n) #0 {
entry:
%idx.ext = sext i32 %n to i64
- %add.ptr = getelementptr inbounds i32* %A, i64 %idx.ext
+ %add.ptr = getelementptr inbounds i32, i32* %A, i64 %idx.ext
%cmp3.i = icmp eq i32 %n, 0
br i1 %cmp3.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i
%__first.addr.04.i = phi i32* [ %incdec.ptr.i, %for.body.i ], [ %A, %entry ]
%0 = load i32* %__first.addr.04.i, align 4
%add.i = add nsw i32 %0, %__init.addr.05.i
- %incdec.ptr.i = getelementptr inbounds i32* %__first.addr.04.i, i64 1
+ %incdec.ptr.i = getelementptr inbounds i32, i32* %__first.addr.04.i, i64 1
%cmp.i = icmp eq i32* %incdec.ptr.i, %add.ptr
br i1 %cmp.i, label %_ZSt10accumulateIPiiET0_T_S2_S1_.exit, label %for.body.i, !llvm.loop !0
%iv.trunc = trunc i64 %indvars.iv to i32
%mul = mul i32 %iv.trunc, %BStride
%mul64 = zext i32 %mul to i64
- %arrayidx = getelementptr inbounds i32* %B, i64 %mul64
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 %mul64
%0 = load i32* %arrayidx, align 4
%mul2 = mul nsw i64 %indvars.iv, %CStride
- %arrayidx3 = getelementptr inbounds i32* %C, i64 %mul2
+ %arrayidx3 = getelementptr inbounds i32, i32* %C, i64 %mul2
%1 = load i32* %arrayidx3, align 4
%mul4 = mul nsw i32 %1, %0
%mul3 = mul nsw i64 %indvars.iv, %AStride
- %arrayidx7 = getelementptr inbounds i32* %A, i64 %mul3
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
store i32 %mul4, i32* %arrayidx7, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%0 = trunc i64 %indvars.iv to i32
%mul = mul nsw i32 %0, %conv
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds double* %x, i64 %idxprom
+ %arrayidx = getelementptr inbounds double, double* %x, i64 %idxprom
%1 = load double* %arrayidx, align 8
- %arrayidx3 = getelementptr inbounds double* %c, i64 %indvars.iv
+ %arrayidx3 = getelementptr inbounds double, double* %c, i64 %indvars.iv
store double %1, double* %arrayidx3, align 8
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
.lr.ph: ; preds = %0, %.lr.ph
%indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 0, %0 ]
- %2 = getelementptr inbounds float* %a, i64 %indvars.iv
+ %2 = getelementptr inbounds float, float* %a, i64 %indvars.iv
%3 = load float* %2, align 4
%4 = fmul float %3, 3.000000e+00
store float %4, float* %2, align 4
; CHECK: br i1 [[R6]]
; CHECK: [[R8:%[^ ]*]] = lshr i32 [[R5]], 5
- ; CHECK: [[R9:%[^ ]*]] = getelementptr i32* bitcast ([9 x i8]* @bitset1.bits to i32*), i32 [[R8]]
+ ; CHECK: [[R9:%[^ ]*]] = getelementptr i32, i32* bitcast ([9 x i8]* @bitset1.bits to i32*), i32 [[R8]]
; CHECK: [[R10:%[^ ]*]] = load i32* [[R9]]
; CHECK: [[R11:%[^ ]*]] = and i32 [[R5]], 31
; CHECK: [[R12:%[^ ]*]] = shl i32 1, [[R11]]
; CHECK: br i1 [[T6]]
; CHECK: [[T8:%[^ ]*]] = lshr i32 [[T5]], 5
- ; CHECK: [[T9:%[^ ]*]] = getelementptr i32* bitcast ([9 x i8]* @bitset3.bits to i32*), i32 [[T8]]
+ ; CHECK: [[T9:%[^ ]*]] = getelementptr i32, i32* bitcast ([9 x i8]* @bitset3.bits to i32*), i32 [[T8]]
; CHECK: [[T10:%[^ ]*]] = load i32* [[T9]]
; CHECK: [[T11:%[^ ]*]] = and i32 [[T5]], 31
; CHECK: [[T12:%[^ ]*]] = shl i32 1, [[T11]]
%tmp.9 = load i8** %p_addr ; <i8*> [#uses=1]
%tmp.10 = load i32* %i ; <i32> [#uses=1]
%tmp.11 = sub i32 %tmp.10, 1 ; <i32> [#uses=1]
- %tmp.12 = getelementptr i8* %tmp.9, i32 %tmp.11 ; <i8*> [#uses=1]
+ %tmp.12 = getelementptr i8, i8* %tmp.9, i32 %tmp.11 ; <i8*> [#uses=1]
%tmp.13 = load i32* %out ; <i32> [#uses=1]
%tmp.14 = trunc i32 %tmp.13 to i8 ; <i8> [#uses=1]
store i8 %tmp.14, i8* %tmp.12
; CHECK: test2
; CHECK-NOT: alloca
%A = alloca {i8, i16}
- %B = getelementptr {i8, i16}* %A, i32 0, i32 0
+ %B = getelementptr {i8, i16}, {i8, i16}* %A, i32 0, i32 0
call void @llvm.lifetime.start(i64 2, i8* %B)
store {i8, i16} zeroinitializer, {i8, i16}* %A
call void @llvm.lifetime.end(i64 2, i8* %B)
define internal fastcc void @initialize(%0* noalias nocapture sret %agg.result) nounwind {
entry:
- %agg.result.03 = getelementptr %0* %agg.result, i32 0, i32 0
+ %agg.result.03 = getelementptr %0, %0* %agg.result, i32 0, i32 0
store x86_fp80 0xK00000000000000000000, x86_fp80* %agg.result.03
- %agg.result.15 = getelementptr %0* %agg.result, i32 0, i32 1
+ %agg.result.15 = getelementptr %0, %0* %agg.result, i32 0, i32 1
store x86_fp80 0xK00000000000000000000, x86_fp80* %agg.result.15
ret void
}
%a_i8 = bitcast %a* %a_var to i8*
%b_i8 = bitcast %b* %b_var to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_i8, i8* %a_i8, i32 4, i32 1, i1 false)
- %tmp1 = getelementptr %b* %b_var, i32 0, i32 0
+ %tmp1 = getelementptr %b, %b* %b_var, i32 0, i32 0
%tmp2 = load float* %tmp1
ret float %tmp2
}
call void @bar(%struct1* sret %x) nounwind
; CHECK: call void @bar(%struct1* sret %x)
- %gepn1 = getelementptr inbounds %struct2* %y, i32 0, i32 0, i32 0
+ %gepn1 = getelementptr inbounds %struct2, %struct2* %y, i32 0, i32 0, i32 0
store i32 0, i32* %gepn1, align 8
- %gepn2 = getelementptr inbounds %struct2* %y, i32 0, i32 0, i32 1
+ %gepn2 = getelementptr inbounds %struct2, %struct2* %y, i32 0, i32 0, i32 1
store i32 0, i32* %gepn2, align 4
%bit1 = bitcast %struct1* %x to i64*
; CHECK: %load = load i64* %bit1, align 8
; CHECK: store i64 %load, i64* %bit2, align 8
- %gep1 = getelementptr %struct2* %y, i32 0, i32 0, i32 0
+ %gep1 = getelementptr %struct2, %struct2* %y, i32 0, i32 0, i32 0
%ret = load i32* %gep1
ret i32 %ret
}
define void @foo(i32* %p) {
; CHECK-LABEL: @foo(
; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 16, i32 4, i1 false)
- %a0 = getelementptr i32* %p, i64 0
+ %a0 = getelementptr i32, i32* %p, i64 0
store i32 0, i32* %a0, align 4
- %a1 = getelementptr i32* %p, i64 1
+ %a1 = getelementptr i32, i32* %p, i64 1
store i32 0, i32* %a1, align 16
- %a2 = getelementptr i32* %p, i64 2
+ %a2 = getelementptr i32, i32* %p, i64 2
store i32 0, i32* %a2, align 4
- %a3 = getelementptr i32* %p, i64 3
+ %a3 = getelementptr i32, i32* %p, i64 3
store i32 0, i32* %a3, align 4
ret void
}
%x = alloca [101 x i32], align 16
%bc = bitcast [101 x i32]* %x to i8*
call void @llvm.memset.p0i8.i64(i8* %bc, i8 0, i64 400, i32 16, i1 false)
- %gep1 = getelementptr inbounds [101 x i32]* %x, i32 0, i32 100
+ %gep1 = getelementptr inbounds [101 x i32], [101 x i32]* %x, i32 0, i32 100
store atomic i32 0, i32* %gep1 unordered, align 4
- %gep2 = getelementptr inbounds [101 x i32]* %x, i32 0, i32 0
+ %gep2 = getelementptr inbounds [101 x i32], [101 x i32]* %x, i32 0, i32 0
call void @otherf(i32* %gep2)
ret void
}
; CHECK: call void @llvm.memset.p0i8.i64
; CHECK-NOT: call void @llvm.memcpy.p0i8.p0i8.i64
%src = alloca [4096 x i8], align 1
- %p = getelementptr inbounds [4096 x i8]* %src, i64 0, i64 0
+ %p = getelementptr inbounds [4096 x i8], [4096 x i8]* %src, i64 0, i64 0
call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4096, i32 1, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %p, i64 4096, i32 1, i1 false) #2
ret void
; CHECK: call void @llvm.memset.p0i8.i64
; CHECK: call void @llvm.memcpy.p0i8.p0i8.i64
%src = alloca [4096 x i8], align 1
- %p = getelementptr inbounds [4096 x i8]* %src, i64 0, i64 0
+ %p = getelementptr inbounds [4096 x i8], [4096 x i8]* %src, i64 0, i64 0
call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4096, i32 1, i1 false)
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %p, i64 4096, i32 1, i1 false) #2
ret void
; PR4882
define void @test1(%struct.bar* %this) {
entry:
- %0 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 0
+ %0 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 0
store float 0.000000e+00, float* %0, align 4
- %1 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 1
+ %1 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 1
store float 0.000000e+00, float* %1, align 4
- %2 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 2
+ %2 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 2
store float 0.000000e+00, float* %2, align 4
- %3 = getelementptr inbounds %struct.bar* %this, i32 0, i32 0, i32 0, i32 3
+ %3 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 0, i32 0, i32 3
store float 0.000000e+00, float* %3, align 4
- %4 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 0
+ %4 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 0
store float 0.000000e+00, float* %4, align 4
- %5 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 1
+ %5 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 1
store float 0.000000e+00, float* %5, align 4
- %6 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 2
+ %6 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 2
store float 0.000000e+00, float* %6, align 4
- %7 = getelementptr inbounds %struct.bar* %this, i32 0, i32 1, i32 0, i32 3
+ %7 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 1, i32 0, i32 3
store float 0.000000e+00, float* %7, align 4
- %8 = getelementptr inbounds %struct.bar* %this, i32 0, i32 3, i32 0, i32 1
+ %8 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 3, i32 0, i32 1
store float 0.000000e+00, float* %8, align 4
- %9 = getelementptr inbounds %struct.bar* %this, i32 0, i32 3, i32 0, i32 2
+ %9 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 3, i32 0, i32 2
store float 0.000000e+00, float* %9, align 4
- %10 = getelementptr inbounds %struct.bar* %this, i32 0, i32 3, i32 0, i32 3
+ %10 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 3, i32 0, i32 3
store float 0.000000e+00, float* %10, align 4
- %11 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 0
+ %11 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 0
store float 0.000000e+00, float* %11, align 4
- %12 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 1
+ %12 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 1
store float 0.000000e+00, float* %12, align 4
- %13 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 2
+ %13 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 2
store float 0.000000e+00, float* %13, align 4
- %14 = getelementptr inbounds %struct.bar* %this, i32 0, i32 4, i32 0, i32 3
+ %14 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 4, i32 0, i32 3
store float 0.000000e+00, float* %14, align 4
- %15 = getelementptr inbounds %struct.bar* %this, i32 0, i32 5
+ %15 = getelementptr inbounds %struct.bar, %struct.bar* %this, i32 0, i32 5
store float 0.000000e+00, float* %15, align 4
unreachable
}
define void @test1(i8 signext %c) nounwind {
entry:
%x = alloca [19 x i8] ; <[19 x i8]*> [#uses=20]
- %tmp = getelementptr [19 x i8]* %x, i32 0, i32 0 ; <i8*> [#uses=1]
+ %tmp = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 0 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp, align 1
- %tmp5 = getelementptr [19 x i8]* %x, i32 0, i32 1 ; <i8*> [#uses=1]
+ %tmp5 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 1 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp5, align 1
- %tmp9 = getelementptr [19 x i8]* %x, i32 0, i32 2 ; <i8*> [#uses=1]
+ %tmp9 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 2 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp9, align 1
- %tmp13 = getelementptr [19 x i8]* %x, i32 0, i32 3 ; <i8*> [#uses=1]
+ %tmp13 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 3 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp13, align 1
- %tmp17 = getelementptr [19 x i8]* %x, i32 0, i32 4 ; <i8*> [#uses=1]
+ %tmp17 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 4 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp17, align 1
- %tmp21 = getelementptr [19 x i8]* %x, i32 0, i32 5 ; <i8*> [#uses=1]
+ %tmp21 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 5 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp21, align 1
- %tmp25 = getelementptr [19 x i8]* %x, i32 0, i32 6 ; <i8*> [#uses=1]
+ %tmp25 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 6 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp25, align 1
- %tmp29 = getelementptr [19 x i8]* %x, i32 0, i32 7 ; <i8*> [#uses=1]
+ %tmp29 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 7 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp29, align 1
- %tmp33 = getelementptr [19 x i8]* %x, i32 0, i32 8 ; <i8*> [#uses=1]
+ %tmp33 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 8 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp33, align 1
- %tmp37 = getelementptr [19 x i8]* %x, i32 0, i32 9 ; <i8*> [#uses=1]
+ %tmp37 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 9 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp37, align 1
- %tmp41 = getelementptr [19 x i8]* %x, i32 0, i32 10 ; <i8*> [#uses=1]
+ %tmp41 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 10 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp41, align 1
- %tmp45 = getelementptr [19 x i8]* %x, i32 0, i32 11 ; <i8*> [#uses=1]
+ %tmp45 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 11 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp45, align 1
- %tmp49 = getelementptr [19 x i8]* %x, i32 0, i32 12 ; <i8*> [#uses=1]
+ %tmp49 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 12 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp49, align 1
- %tmp53 = getelementptr [19 x i8]* %x, i32 0, i32 13 ; <i8*> [#uses=1]
+ %tmp53 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 13 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp53, align 1
- %tmp57 = getelementptr [19 x i8]* %x, i32 0, i32 14 ; <i8*> [#uses=1]
+ %tmp57 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 14 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp57, align 1
- %tmp61 = getelementptr [19 x i8]* %x, i32 0, i32 15 ; <i8*> [#uses=1]
+ %tmp61 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 15 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp61, align 1
- %tmp65 = getelementptr [19 x i8]* %x, i32 0, i32 16 ; <i8*> [#uses=1]
+ %tmp65 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 16 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp65, align 1
- %tmp69 = getelementptr [19 x i8]* %x, i32 0, i32 17 ; <i8*> [#uses=1]
+ %tmp69 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 17 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp69, align 1
- %tmp73 = getelementptr [19 x i8]* %x, i32 0, i32 18 ; <i8*> [#uses=1]
+ %tmp73 = getelementptr [19 x i8], [19 x i8]* %x, i32 0, i32 18 ; <i8*> [#uses=1]
store i8 %c, i8* %tmp73, align 1
%tmp76 = call i32 (...)* @bar( [19 x i8]* %x ) nounwind
ret void
%ref_idx = alloca [8 x i8] ; <[8 x i8]*> [#uses=8]
%left_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
%up_mvd = alloca [8 x %struct.MV] ; <[8 x %struct.MV]*> [#uses=17]
- %tmp20 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 7 ; <i8*> [#uses=1]
+ %tmp20 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 7 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp20, align 1
- %tmp23 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 6 ; <i8*> [#uses=1]
+ %tmp23 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 6 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp23, align 1
- %tmp26 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 5 ; <i8*> [#uses=1]
+ %tmp26 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 5 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp26, align 1
- %tmp29 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 4 ; <i8*> [#uses=1]
+ %tmp29 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 4 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp29, align 1
- %tmp32 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 3 ; <i8*> [#uses=1]
+ %tmp32 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 3 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp32, align 1
- %tmp35 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 2 ; <i8*> [#uses=1]
+ %tmp35 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 2 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp35, align 1
- %tmp38 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 1 ; <i8*> [#uses=1]
+ %tmp38 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 1 ; <i8*> [#uses=1]
store i8 -1, i8* %tmp38, align 1
- %tmp41 = getelementptr [8 x i8]* %ref_idx, i32 0, i32 0 ; <i8*> [#uses=2]
+ %tmp41 = getelementptr [8 x i8], [8 x i8]* %ref_idx, i32 0, i32 0 ; <i8*> [#uses=2]
store i8 -1, i8* %tmp41, align 1
- %tmp43 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> [#uses=1]
+ %tmp43 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp43, align 2
- %tmp46 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> [#uses=1]
+ %tmp46 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 7, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp46, align 2
- %tmp57 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> [#uses=1]
+ %tmp57 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp57, align 2
- %tmp60 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> [#uses=1]
+ %tmp60 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 6, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp60, align 2
- %tmp71 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> [#uses=1]
+ %tmp71 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp71, align 2
- %tmp74 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> [#uses=1]
+ %tmp74 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 5, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp74, align 2
- %tmp85 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> [#uses=1]
+ %tmp85 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp85, align 2
- %tmp88 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 1 ; <i16*> [#uses=1]
+ %tmp88 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 4, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp88, align 2
- %tmp99 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 0 ; <i16*> [#uses=1]
+ %tmp99 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp99, align 2
- %tmp102 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 1 ; <i16*> [#uses=1]
+ %tmp102 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 3, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp102, align 2
- %tmp113 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 0 ; <i16*> [#uses=1]
+ %tmp113 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp113, align 2
- %tmp116 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 1 ; <i16*> [#uses=1]
+ %tmp116 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 2, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp116, align 2
- %tmp127 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 0 ; <i16*> [#uses=1]
+ %tmp127 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp127, align 2
- %tmp130 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 1 ; <i16*> [#uses=1]
+ %tmp130 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 1, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp130, align 2
- %tmp141 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp141 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp141, align 8
- %tmp144 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 1 ; <i16*> [#uses=1]
+ %tmp144 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 0, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp144, align 2
- %tmp148 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 0 ; <i16*> [#uses=1]
+ %tmp148 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp148, align 2
- %tmp151 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 1 ; <i16*> [#uses=1]
+ %tmp151 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 7, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp151, align 2
- %tmp162 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 0 ; <i16*> [#uses=1]
+ %tmp162 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp162, align 2
- %tmp165 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 1 ; <i16*> [#uses=1]
+ %tmp165 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 6, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp165, align 2
- %tmp176 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 0 ; <i16*> [#uses=1]
+ %tmp176 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp176, align 2
- %tmp179 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 1 ; <i16*> [#uses=1]
+ %tmp179 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 5, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp179, align 2
- %tmp190 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 0 ; <i16*> [#uses=1]
+ %tmp190 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp190, align 2
- %tmp193 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 1 ; <i16*> [#uses=1]
+ %tmp193 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 4, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp193, align 2
- %tmp204 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 0 ; <i16*> [#uses=1]
+ %tmp204 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp204, align 2
- %tmp207 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 1 ; <i16*> [#uses=1]
+ %tmp207 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 3, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp207, align 2
- %tmp218 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 0 ; <i16*> [#uses=1]
+ %tmp218 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp218, align 2
- %tmp221 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 1 ; <i16*> [#uses=1]
+ %tmp221 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 2, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp221, align 2
- %tmp232 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 0 ; <i16*> [#uses=1]
+ %tmp232 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp232, align 2
- %tmp235 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 1 ; <i16*> [#uses=1]
+ %tmp235 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 1, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp235, align 2
- %tmp246 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp246 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 0 ; <i16*> [#uses=1]
store i16 0, i16* %tmp246, align 8
- %tmp249 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 1 ; <i16*> [#uses=1]
+ %tmp249 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 0, i32 1 ; <i16*> [#uses=1]
store i16 0, i16* %tmp249, align 2
- %up_mvd252 = getelementptr [8 x %struct.MV]* %up_mvd, i32 0, i32 0 ; <%struct.MV*> [#uses=1]
- %left_mvd253 = getelementptr [8 x %struct.MV]* %left_mvd, i32 0, i32 0 ; <%struct.MV*> [#uses=1]
+ %up_mvd252 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %up_mvd, i32 0, i32 0 ; <%struct.MV*> [#uses=1]
+ %left_mvd253 = getelementptr [8 x %struct.MV], [8 x %struct.MV]* %left_mvd, i32 0, i32 0 ; <%struct.MV*> [#uses=1]
call void @foo( %struct.MV* %up_mvd252, %struct.MV* %left_mvd253, i8* %tmp41 ) nounwind
ret void
; Store followed by memset.
define void @test3(i32* nocapture %P) nounwind ssp {
entry:
- %arrayidx = getelementptr inbounds i32* %P, i64 1
+ %arrayidx = getelementptr inbounds i32, i32* %P, i64 1
store i32 0, i32* %arrayidx, align 4
- %add.ptr = getelementptr inbounds i32* %P, i64 2
+ %add.ptr = getelementptr inbounds i32, i32* %P, i64 2
%0 = bitcast i32* %add.ptr to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 11, i32 1, i1 false)
ret void
define void @test4(i32* nocapture %P) nounwind ssp {
entry:
store i32 0, i32* %P, align 4
- %add.ptr = getelementptr inbounds i32* %P, i64 1
+ %add.ptr = getelementptr inbounds i32, i32* %P, i64 1
%0 = bitcast i32* %add.ptr to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 11, i32 1, i1 false)
ret void
; Memset followed by store.
define void @test5(i32* nocapture %P) nounwind ssp {
entry:
- %add.ptr = getelementptr inbounds i32* %P, i64 2
+ %add.ptr = getelementptr inbounds i32, i32* %P, i64 2
%0 = bitcast i32* %add.ptr to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 11, i32 1, i1 false)
- %arrayidx = getelementptr inbounds i32* %P, i64 1
+ %arrayidx = getelementptr inbounds i32, i32* %P, i64 1
store i32 0, i32* %arrayidx, align 4
ret void
; CHECK-LABEL: @test5(
entry:
%0 = bitcast i32* %P to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 12, i32 1, i1 false)
- %add.ptr = getelementptr inbounds i32* %P, i64 3
+ %add.ptr = getelementptr inbounds i32, i32* %P, i64 3
%1 = bitcast i32* %add.ptr to i8*
tail call void @llvm.memset.p0i8.i64(i8* %1, i8 0, i64 12, i32 1, i1 false)
ret void
; rdar://9892684
define void @test7(i32* nocapture %c) nounwind optsize {
store i32 -1, i32* %c, align 4
- %1 = getelementptr inbounds i32* %c, i32 1
+ %1 = getelementptr inbounds i32, i32* %c, i32 1
store i32 -1, i32* %1, align 4
- %2 = getelementptr inbounds i32* %c, i32 2
+ %2 = getelementptr inbounds i32, i32* %c, i32 2
store i32 -1, i32* %2, align 4
- %3 = getelementptr inbounds i32* %c, i32 3
+ %3 = getelementptr inbounds i32, i32* %c, i32 3
store i32 -1, i32* %3, align 4
- %4 = getelementptr inbounds i32* %c, i32 4
+ %4 = getelementptr inbounds i32, i32* %c, i32 4
store i32 -1, i32* %4, align 4
; CHECK-LABEL: @test7(
; CHECK: call void @llvm.memset.p0i8.i64(i8* %5, i8 -1, i64 20, i32 4, i1 false)
; Memset followed by odd store.
define void @test11(i32* nocapture %P) nounwind ssp {
entry:
- %add.ptr = getelementptr inbounds i32* %P, i64 3
+ %add.ptr = getelementptr inbounds i32, i32* %P, i64 3
%0 = bitcast i32* %add.ptr to i8*
tail call void @llvm.memset.p0i8.i64(i8* %0, i8 1, i64 11, i32 1, i1 false)
- %arrayidx = getelementptr inbounds i32* %P, i64 0
+ %arrayidx = getelementptr inbounds i32, i32* %P, i64 0
%arrayidx.cast = bitcast i32* %arrayidx to i96*
store i96 310698676526526814092329217, i96* %arrayidx.cast, align 4
ret void
%temp.lvalue = alloca %"class.std::auto_ptr", align 8
; CHECK: call void @_Z3barv(%"class.std::auto_ptr"* sret %agg.result)
call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
- %tmp.i.i = getelementptr inbounds %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
+ %tmp.i.i = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
; CHECK-NOT: load
%tmp2.i.i = load i32** %tmp.i.i, align 8
- %tmp.i.i4 = getelementptr inbounds %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
+ %tmp.i.i4 = getelementptr inbounds %"class.std::auto_ptr", %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
; CHECK-NOT: store
store i32* %tmp2.i.i, i32** %tmp.i.i4, align 8
; CHECK: ret void
%arr = alloca [3 x i32], align 4
%arr_i8 = bitcast [3 x i32]* %arr to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %arr_i8, i8* bitcast ([3 x i32]* @cst to i8*), i64 12, i32 4, i1 false)
- %arraydecay = getelementptr inbounds [3 x i32]* %arr, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [3 x i32], [3 x i32]* %arr, i64 0, i64 0
call void @foo(i32* %arraydecay) nounwind
ret void
; CHECK-LABEL: @test1(
define i32 @test1(%struct.foo* nocapture %foobie) nounwind noinline ssp uwtable {
%bletch.sroa.1 = alloca [7 x i8], align 1
- %1 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 0
+ %1 = getelementptr inbounds %struct.foo, %struct.foo* %foobie, i64 0, i32 0
store i8 98, i8* %1, align 4
- %2 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 1, i64 0
- %3 = getelementptr inbounds [7 x i8]* %bletch.sroa.1, i64 0, i64 0
+ %2 = getelementptr inbounds %struct.foo, %struct.foo* %foobie, i64 0, i32 1, i64 0
+ %3 = getelementptr inbounds [7 x i8], [7 x i8]* %bletch.sroa.1, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %2, i8* %3, i64 7, i32 1, i1 false)
- %4 = getelementptr inbounds %struct.foo* %foobie, i64 0, i32 2
+ %4 = getelementptr inbounds %struct.foo, %struct.foo* %foobie, i64 0, i32 2
store i32 20, i32* %4, align 4
ret i32 undef
%y = alloca %struct.S, align 16
%tmp = bitcast %struct.S* %y to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* bitcast (%struct.S* @sS to i8*), i64 32, i32 16, i1 false)
- %a = getelementptr %struct.S* %y, i64 0, i32 1, i64 0
+ %a = getelementptr %struct.S, %struct.S* %y, i64 0, i32 1, i64 0
store i8 4, i8* %a
call void @test5a(%struct.S* align 16 byval %y)
ret i32 0
; CHECK: test8
; CHECK-NOT: memcpy
%A = tail call i8* @malloc(i32 10)
- %B = getelementptr inbounds i8* %A, i64 2
+ %B = getelementptr inbounds i8, i8* %A, i64 2
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %B, i8* getelementptr inbounds ([7 x i8]* @test8.str, i64 0, i64 0), i32 7, i32 1, i1 false)
%C = tail call i8* @malloc(i32 10)
- %D = getelementptr inbounds i8* %C, i64 2
+ %D = getelementptr inbounds i8, i8* %C, i64 2
tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %D, i8* %B, i32 7, i32 1, i1 false)
ret void
; CHECK: ret void
%malloccall = tail call i8* @malloc(i32 trunc (i64 mul nuw (i64 ptrtoint (i8* getelementptr (i8* null, i32 1) to i64), i64 13) to i32))
%call3 = bitcast i8* %malloccall to [13 x i8]*
- %call3.sub = getelementptr inbounds [13 x i8]* %call3, i64 0, i64 0
+ %call3.sub = getelementptr inbounds [13 x i8], [13 x i8]* %call3, i64 0, i64 0
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %call3.sub, i8* %src, i64 13, i32 1, i1 false)
ret i8* %call3.sub
}
entry:
; CHECK-LABEL: @test2(
; CHECK: call void @llvm.memcpy
- %add.ptr = getelementptr i8* %P, i64 16
+ %add.ptr = getelementptr i8, i8* %P, i64 16
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %add.ptr, i64 16, i32 1, i1 false)
ret void
}
entry:
; CHECK-LABEL: @test3(
; CHECK: call void @llvm.memmove
- %add.ptr = getelementptr i8* %P, i64 16
+ %add.ptr = getelementptr i8, i8* %P, i64 16
tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %add.ptr, i64 17, i32 1, i1 false)
ret void
}
%agg.tmp = alloca %struct.s, align 4
store i32 99, i32* getelementptr inbounds (%struct.s* @cell, i32 0, i32 1), align 4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.s* @cell, i32 0, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8]* @.str, i32 0, i32 0), i32 11, i32 1, i1 false)
- %tmp = getelementptr inbounds %struct.s* %agg.tmp, i32 0, i32 0, i32 0
+ %tmp = getelementptr inbounds %struct.s, %struct.s* %agg.tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.s* @cell, i32 0, i32 0, i32 0), i32 16, i32 4, i1 false)
call void @check(%struct.s* byval %agg.tmp)
ret void
entry:
%iz = alloca %0
%memtmp = alloca %0, align 16
- %tmp1 = getelementptr %0* %z, i32 0, i32 1
+ %tmp1 = getelementptr %0, %0* %z, i32 0, i32 1
%tmp2 = load x86_fp80* %tmp1, align 16
%tmp3 = fsub x86_fp80 0xK80000000000000000000, %tmp2
- %tmp4 = getelementptr %0* %iz, i32 0, i32 1
- %real = getelementptr %0* %iz, i32 0, i32 0
- %tmp7 = getelementptr %0* %z, i32 0, i32 0
+ %tmp4 = getelementptr %0, %0* %iz, i32 0, i32 1
+ %real = getelementptr %0, %0* %iz, i32 0, i32 0
+ %tmp7 = getelementptr %0, %0* %z, i32 0, i32 0
%tmp8 = load x86_fp80* %tmp7, align 16
store x86_fp80 %tmp3, x86_fp80* %real, align 16
store x86_fp80 %tmp8, x86_fp80* %tmp4, align 16
store %"struct.kc::impl_casestring__Str"* %_file, %"struct.kc::impl_casestring__Str"** %_file_addr
store i32 %_line, i32* %_line_addr
%0 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
+ %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
call void @_ZN2kc13impl_filelineC2Ev() nounwind
%2 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
- %4 = getelementptr inbounds %"struct.kc::impl_fileline"* %3, i32 0, i32 0
- %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
+ %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
+ %4 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %3, i32 0, i32 0
+ %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc22impl_fileline_FileLineE, i32 0, i32 2), i32 (...)*** %5, align 4
%6 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
%7 = icmp eq %"struct.kc::impl_casestring__Str"* %6, null
bb2: ; preds = %bb1, %invcont
%10 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
- %12 = getelementptr inbounds %"struct.kc::impl_fileline"* %11, i32 0, i32 1
+ %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
+ %12 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %11, i32 0, i32 1
%13 = load %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
store %"struct.kc::impl_casestring__Str"* %13, %"struct.kc::impl_casestring__Str"** %12, align 4
%14 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
- %16 = getelementptr inbounds %"struct.kc::impl_fileline"* %15, i32 0, i32 2
+ %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
+ %16 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %15, i32 0, i32 2
%17 = load i32* %_line_addr, align 4
store i32 %17, i32* %16, align 4
ret void
%"alloca point" = bitcast i32 0 to i32
store %"struct.kc::impl_fileline"* %this, %"struct.kc::impl_fileline"** %this_addr
%0 = load %"struct.kc::impl_fileline"** %this_addr, align 4
- %1 = getelementptr inbounds %"struct.kc::impl_fileline"* %0, i32 0, i32 0
- %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
+ %1 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %0, i32 0, i32 0
+ %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc13impl_filelineE, i32 0, i32 2), i32 (...)*** %2, align 4
%3 = trunc i32 0 to i8
%toBool = icmp ne i8 %3, 0
%"alloca point" = bitcast i32 0 to i32
store %"struct.kc::impl_fileline"* %this, %"struct.kc::impl_fileline"** %this_addr
%0 = load %"struct.kc::impl_fileline"** %this_addr, align 4
- %1 = getelementptr inbounds %"struct.kc::impl_fileline"* %0, i32 0, i32 0
- %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
+ %1 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %0, i32 0, i32 0
+ %2 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %1, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc13impl_filelineE, i32 0, i32 2), i32 (...)*** %2, align 4
%3 = trunc i32 0 to i8
%toBool = icmp ne i8 %3, 0
store %"struct.kc::impl_casestring__Str"* %_file, %"struct.kc::impl_casestring__Str"** %_file_addr
store i32 %_line, i32* %_line_addr
%0 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
+ %1 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %0, i32 0, i32 0
call void @_ZN2kc13impl_filelineC2Ev() nounwind
%2 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
- %4 = getelementptr inbounds %"struct.kc::impl_fileline"* %3, i32 0, i32 0
- %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
+ %3 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %2, i32 0, i32 0
+ %4 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %3, i32 0, i32 0
+ %5 = getelementptr inbounds %"struct.kc::impl_abstract_phylum", %"struct.kc::impl_abstract_phylum"* %4, i32 0, i32 0
store i32 (...)** getelementptr inbounds ([13 x i32 (...)*]* @_ZTVN2kc22impl_fileline_FileLineE, i32 0, i32 2), i32 (...)*** %5, align 4
%6 = load %"struct.kc::impl_casestring__Str"** %_file_addr, align 4
%7 = icmp eq %"struct.kc::impl_casestring__Str"* %6, null
bb2: ; preds = %bb1, %invcont
%10 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
- %12 = getelementptr inbounds %"struct.kc::impl_fileline"* %11, i32 0, i32 1
+ %11 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %10, i32 0, i32 0
+ %12 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %11, i32 0, i32 1
%13 = load %"struct.kc::impl_casestring__Str"** %iftmp.99, align 4
store %"struct.kc::impl_casestring__Str"* %13, %"struct.kc::impl_casestring__Str"** %12, align 4
%14 = load %"struct.kc::impl_fileline_FileLine"** %this_addr, align 4
- %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
- %16 = getelementptr inbounds %"struct.kc::impl_fileline"* %15, i32 0, i32 2
+ %15 = getelementptr inbounds %"struct.kc::impl_fileline_FileLine", %"struct.kc::impl_fileline_FileLine"* %14, i32 0, i32 0
+ %16 = getelementptr inbounds %"struct.kc::impl_fileline", %"struct.kc::impl_fileline"* %15, i32 0, i32 2
%17 = load i32* %_line_addr, align 4
store i32 %17, i32* %16, align 4
ret void
define i32 @store_as0(i32* %x) {
; CHECK-LABEL: @store_as0(
; CHECK: call void @foo(
- %gep = getelementptr i32* %x, i32 4
+ %gep = getelementptr i32, i32* %x, i32 4
%y = load i32* %gep
call void @foo(i32 %y) nounwind
ret i32 %y
define i32 @store_as1(i32 addrspace(1)* %x) {
; CHECK-LABEL: @store_as1(
; CHECK: call void @foo(
- %gep = getelementptr i32 addrspace(1)* %x, i32 4
+ %gep = getelementptr i32, i32 addrspace(1)* %x, i32 4
%y = load i32 addrspace(1)* %gep
call void @foo(i32 %y) nounwind
ret i32 %y
define i32 @store_as2(i32 addrspace(2)* %x) {
; CHECK-LABEL: @store_as2(
; CHECK: call void @foo(
- %gep = getelementptr i32 addrspace(2)* %x, i32 4
+ %gep = getelementptr i32, i32 addrspace(2)* %x, i32 4
%y = load i32 addrspace(2)* %gep
call void @foo(i32 %y) nounwind
ret i32 %y
}
define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
- %1 = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
+ %1 = getelementptr inbounds %.qux.2496, %.qux.2496* %this, i32 0, i32 1, i32 1
%2 = load i32* %1, align 4
ret i32 %2
}
}
define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
- %1 = getelementptr inbounds %.qux.2585* %this, i32 0
+ %1 = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0
ret i32* undef
}
define internal i8* @func35(%.qux.2585* nocapture %this) align 2 {
- %1 = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
+ %1 = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0, i32 2
%2 = load i8** %1, align 4
ret i8* %2
}
define internal i32 @func10(%.qux.2496 addrspace(1)* nocapture %this) align 2 {
bb:
- %tmp = getelementptr inbounds %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
+ %tmp = getelementptr inbounds %.qux.2496, %.qux.2496 addrspace(1)* %this, i32 0, i32 1, i32 1
%tmp1 = load i32 addrspace(1)* %tmp, align 4
ret i32 %tmp1
}
; CHECK: %[[V2:.+]] = bitcast %.qux.2585 addrspace(1)* %{{.*}} to %.qux.2496 addrspace(1)*
; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496 addrspace(1)* %[[V2]])
; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
- %tmp = getelementptr inbounds %.qux.2585 addrspace(1)* %this, i32 0, i32 2
+ %tmp = getelementptr inbounds %.qux.2585, %.qux.2585 addrspace(1)* %this, i32 0, i32 2
%tmp1 = load i8* addrspace(1)* %tmp, align 4
ret i8* %tmp1
}
define internal i32 @func10(%.qux.2496* nocapture %this) align 2 {
bb:
- %tmp = getelementptr inbounds %.qux.2496* %this, i32 0, i32 1, i32 1
+ %tmp = getelementptr inbounds %.qux.2496, %.qux.2496* %this, i32 0, i32 1, i32 1
%tmp1 = load i32* %tmp, align 4
ret i32 %tmp1
}
define internal i32* @func34(%.qux.2585* nocapture %this) align 2 {
bb:
- %tmp = getelementptr inbounds %.qux.2585* %this, i32 0
+ %tmp = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0
ret i32* undef
}
; CHECK: %[[V2:.+]] = bitcast %.qux.2585* %{{.*}} to %.qux.2496*
; CHECK: %[[V3:.+]] = tail call i32 @func10(%.qux.2496* %[[V2]])
; CHECK: %{{.*}} = inttoptr i32 %[[V3]] to i8*
- %tmp = getelementptr inbounds %.qux.2585* %this, i32 0, i32 2
+ %tmp = getelementptr inbounds %.qux.2585, %.qux.2585* %this, i32 0, i32 2
%tmp1 = load i8** %tmp, align 4
ret i8* %tmp1
}
define %kv1 @fn1() {
; CHECK-LABEL: @fn1(
%tmp = alloca %kv1
- %v1 = getelementptr %kv1* %tmp, i32 0, i32 0
+ %v1 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
store i32* null, i32** %v1
- %v2 = getelementptr %kv1* %tmp, i32 0, i32 0
+ %v2 = getelementptr %kv1, %kv1* %tmp, i32 0, i32 0
store i32* null, i32** %v2
call void @noop()
%v3 = load %kv1* %tmp
; CHECK: %3 = bitcast i32* %2 to i8*
; CHECK: %4 = insertvalue %kv2 undef, i8* %3, 0
%tmp = alloca %kv2
- %v1 = getelementptr %kv2* %tmp, i32 0, i32 0
+ %v1 = getelementptr %kv2, %kv2* %tmp, i32 0, i32 0
store i8* null, i8** %v1
- %v2 = getelementptr %kv2* %tmp, i32 0, i32 0
+ %v2 = getelementptr %kv2, %kv2* %tmp, i32 0, i32 0
store i8* null, i8** %v2
call void @noop()
; This used to cause a crash when compairing the GEPs
define void @foo(<2 x i64*>) {
- %tmp = getelementptr <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
+ %tmp = getelementptr i64, <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
ret void
}
define void @bar(<2 x i64*>) {
- %tmp = getelementptr <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
+ %tmp = getelementptr i64, <2 x i64*> %0, <2 x i64> <i64 0, i64 0>
ret void
}
define void @func_4_xxx(%struct.foo_xxx* sret %agg.result) nounwind uwtable ssp {
%1 = alloca %struct.foo_xxx, align 8
- %2 = getelementptr inbounds %struct.foo_xxx* %1, i32 0, i32 0
+ %2 = getelementptr inbounds %struct.foo_xxx, %struct.foo_xxx* %1, i32 0, i32 0
store i32 1, i32* %2, align 4
- %3 = getelementptr inbounds %struct.foo_xxx* %1, i32 0, i32 1
+ %3 = getelementptr inbounds %struct.foo_xxx, %struct.foo_xxx* %1, i32 0, i32 1
store float 2.000000e+00, float* %3, align 4
- %4 = getelementptr inbounds %struct.foo_xxx* %1, i32 0, i32 2
- %5 = getelementptr inbounds %struct.bar_xxx* %4, i32 0, i32 0
+ %4 = getelementptr inbounds %struct.foo_xxx, %struct.foo_xxx* %1, i32 0, i32 2
+ %5 = getelementptr inbounds %struct.bar_xxx, %struct.bar_xxx* %4, i32 0, i32 0
store i32 3, i32* %5, align 4
- %6 = getelementptr inbounds %struct.bar_xxx* %4, i32 0, i32 1
+ %6 = getelementptr inbounds %struct.bar_xxx, %struct.bar_xxx* %4, i32 0, i32 1
store double 4.000000e+00, double* %6, align 8
%7 = bitcast %struct.foo_xxx* %agg.result to i8*
%8 = bitcast %struct.foo_xxx* %1 to i8*
define void @test1b(i8* %x) {
entry:
%A = alloca i8*
- %gep = getelementptr i8** %A, i32 0
+ %gep = getelementptr i8*, i8** %A, i32 0
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
define void @test1c(i8* %x) {
entry:
%A = alloca i8*, i32 3
- %gep = getelementptr i8** %A, i32 2
+ %gep = getelementptr i8*, i8** %A, i32 2
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
exit:
%A = phi i8** [ %allocaA, %use_allocaA ], [ %allocaB, %use_allocaB ]
- %gep = getelementptr i8** %A, i32 0
+ %gep = getelementptr i8*, i8** %A, i32 0
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
exit:
%A = phi i8** [ %allocaA, %use_allocaA ], [ %allocaB, %use_allocaB ]
- %gep = getelementptr i8** %A, i32 2
+ %gep = getelementptr i8*, i8** %A, i32 2
tail call i8* @objc_retain(i8* %x)
tail call i8* @objc_retain(i8* %x)
store i8* %x, i8** %gep, align 8
define void @test2b(i8* %x) {
entry:
%A = alloca i8*
- %gep1 = getelementptr i8** %A, i32 0
+ %gep1 = getelementptr i8*, i8** %A, i32 0
store i8* %x, i8** %gep1, align 8
- %gep2 = getelementptr i8** %A, i32 0
+ %gep2 = getelementptr i8*, i8** %A, i32 0
%y = load i8** %gep2
br label %bb1
define void @test2c(i8* %x) {
entry:
%A = alloca i8*, i32 3
- %gep1 = getelementptr i8** %A, i32 2
+ %gep1 = getelementptr i8*, i8** %A, i32 2
store i8* %x, i8** %gep1, align 8
- %gep2 = getelementptr i8** %A, i32 2
+ %gep2 = getelementptr i8*, i8** %A, i32 2
%y = load i8** %gep2
tail call i8* @objc_retain(i8* %x)
br label %bb1
bb1:
%Abb1 = alloca i8*, i32 3
- %gepbb11 = getelementptr i8** %Abb1, i32 2
+ %gepbb11 = getelementptr i8*, i8** %Abb1, i32 2
store i8* %x, i8** %gepbb11, align 8
- %gepbb12 = getelementptr i8** %Abb1, i32 2
+ %gepbb12 = getelementptr i8*, i8** %Abb1, i32 2
%ybb1 = load i8** %gepbb12
br label %bb3
bb2:
%Abb2 = alloca i8*, i32 4
- %gepbb21 = getelementptr i8** %Abb2, i32 2
+ %gepbb21 = getelementptr i8*, i8** %Abb2, i32 2
store i8* %x, i8** %gepbb21, align 8
- %gepbb22 = getelementptr i8** %Abb2, i32 2
+ %gepbb22 = getelementptr i8*, i8** %Abb2, i32 2
%ybb2 = load i8** %gepbb22
br label %bb3
%call1 = call i8* @returner()
%tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call1)
- %objs.begin = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+ %objs.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
tail call i8* @objc_retain(i8* %call1)
store i8* %call1, i8** %objs.begin, align 8
- %objs.elt = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 1
+ %objs.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 1
tail call i8* @objc_retain(i8* %call1)
store i8* %call1, i8** %objs.elt
%call2 = call i8* @returner1()
%call3 = call i8* @returner2()
- %keys.begin = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+ %keys.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
tail call i8* @objc_retain(i8* %call2)
store i8* %call2, i8** %keys.begin, align 8
- %keys.elt = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 1
+ %keys.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 1
tail call i8* @objc_retain(i8* %call3)
store i8* %call3, i8** %keys.elt
- %gep = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 2
+ %gep = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 2
br label %arraydestroy.body
arraydestroy.body:
%arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
- %arraydestroy.element = getelementptr inbounds i8** %arraydestroy.elementPast, i64 -1
+ %arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
%destroy_tmp = load i8** %arraydestroy.element, align 8
call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
- %objs_ptr = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+ %objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
%arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
br i1 %arraydestroy.cmp, label %arraydestroy.done, label %arraydestroy.body
arraydestroy.done:
- %gep1 = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 2
+ %gep1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 2
br label %arraydestroy.body1
arraydestroy.body1:
%arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
- %arraydestroy.element1 = getelementptr inbounds i8** %arraydestroy.elementPast1, i64 -1
+ %arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
%destroy_tmp1 = load i8** %arraydestroy.element1, align 8
call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
- %keys_ptr = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+ %keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
%arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
br i1 %arraydestroy.cmp1, label %arraydestroy.done1, label %arraydestroy.body1
%tmp0 = tail call i8* @objc_retainAutoreleasedReturnValue(i8* %call1)
%tmp1 = tail call i8* @objc_retain(i8* %call1)
- %objs.begin = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+ %objs.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
tail call i8* @objc_retain(i8* %call1)
store i8* %call1, i8** %objs.begin, align 8
- %objs.elt = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 1
+ %objs.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 1
tail call i8* @objc_retain(i8* %call1)
store i8* %call1, i8** %objs.elt
%call2 = call i8* @returner1()
%call3 = call i8* @returner2()
- %keys.begin = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+ %keys.begin = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
tail call i8* @objc_retain(i8* %call2)
store i8* %call2, i8** %keys.begin, align 8
- %keys.elt = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 1
+ %keys.elt = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 1
tail call i8* @objc_retain(i8* %call3)
store i8* %call3, i8** %keys.elt
- %gep = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 2
+ %gep = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 2
br label %arraydestroy.body
arraydestroy.body:
%arraydestroy.elementPast = phi i8** [ %gep, %entry ], [ %arraydestroy.element, %arraydestroy.body ]
- %arraydestroy.element = getelementptr inbounds i8** %arraydestroy.elementPast, i64 -1
+ %arraydestroy.element = getelementptr inbounds i8*, i8** %arraydestroy.elementPast, i64 -1
%destroy_tmp = load i8** %arraydestroy.element, align 8
call void @objc_release(i8* %destroy_tmp), !clang.imprecise_release !0
- %objs_ptr = getelementptr inbounds [2 x i8*]* %objs, i64 0, i64 0
+ %objs_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %objs, i64 0, i64 0
%arraydestroy.cmp = icmp eq i8** %arraydestroy.element, %objs_ptr
br i1 %arraydestroy.cmp, label %arraydestroy.done, label %arraydestroy.body
arraydestroy.done:
- %gep1 = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 2
+ %gep1 = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 2
br label %arraydestroy.body1
arraydestroy.body1:
%arraydestroy.elementPast1 = phi i8** [ %gep1, %arraydestroy.done ], [ %arraydestroy.element1, %arraydestroy.body1 ]
- %arraydestroy.element1 = getelementptr inbounds i8** %arraydestroy.elementPast1, i64 -1
+ %arraydestroy.element1 = getelementptr inbounds i8*, i8** %arraydestroy.elementPast1, i64 -1
%destroy_tmp1 = load i8** %arraydestroy.element1, align 8
call void @objc_release(i8* %destroy_tmp1), !clang.imprecise_release !0
- %keys_ptr = getelementptr inbounds [2 x i8*]* %keys, i64 0, i64 0
+ %keys_ptr = getelementptr inbounds [2 x i8*], [2 x i8*]* %keys, i64 0, i64 0
%arraydestroy.cmp1 = icmp eq i8** %arraydestroy.element1, %keys_ptr
br i1 %arraydestroy.cmp1, label %arraydestroy.done1, label %arraydestroy.body1
done:
%g = bitcast i8* %p to i8*
- %h = getelementptr i8* %g, i64 0
+ %h = getelementptr i8, i8* %g, i64 0
call void @objc_release(i8* %g)
ret void
}
done:
%g = bitcast i8* %p to i8*
- %h = getelementptr i8* %g, i64 0
+ %h = getelementptr i8, i8* %g, i64 0
call void @objc_release(i8* %g)
ret void
}
done:
%g = bitcast i8* %p to i8*
- %h = getelementptr i8* %g, i64 0
+ %h = getelementptr i8, i8* %g, i64 0
call void @objc_release(i8* %g)
ret void
}
done:
%g = bitcast i8* %p to i8*
- %h = getelementptr i8* %g, i64 0
+ %h = getelementptr i8, i8* %g, i64 0
call void @objc_release(i8* %g), !clang.imprecise_release !0
ret void
}
done:
%g = bitcast i8* %p to i8*
- %h = getelementptr i8* %g, i64 0
+ %h = getelementptr i8, i8* %g, i64 0
call void @objc_release(i8* %g)
ret void
}
done:
%g = bitcast i8* %p to i8*
- %h = getelementptr i8* %g, i64 0
+ %h = getelementptr i8, i8* %g, i64 0
call void @objc_release(i8* %g), !clang.imprecise_release !0
ret void
}
tail call void @llvm.dbg.value(metadata {}* %self, i64 0, metadata !0, metadata !{})
tail call void @llvm.dbg.value(metadata {}* %self, i64 0, metadata !0, metadata !{})
%ivar = load i64* @"OBJC_IVAR_$_A.myZ", align 8
- %add.ptr = getelementptr i8* %0, i64 %ivar
+ %add.ptr = getelementptr i8, i8* %0, i64 %ivar
%tmp1 = bitcast i8* %add.ptr to float*
%tmp2 = load float* %tmp1, align 4
%conv = fpext float %tmp2 to double
%add.ptr.sum = add i64 %ivar, 4
- %tmp6 = getelementptr inbounds i8* %0, i64 %add.ptr.sum
+ %tmp6 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%2 = bitcast i8* %tmp6 to float*
%tmp7 = load float* %2, align 4
%conv8 = fpext float %tmp7 to double
%add.ptr.sum36 = add i64 %ivar, 8
- %tmp12 = getelementptr inbounds i8* %0, i64 %add.ptr.sum36
+ %tmp12 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum36
%arrayidx = bitcast i8* %tmp12 to float*
%tmp13 = load float* %arrayidx, align 4
%conv14 = fpext float %tmp13 to double
%tmp12.sum = add i64 %ivar, 12
- %arrayidx19 = getelementptr inbounds i8* %0, i64 %tmp12.sum
+ %arrayidx19 = getelementptr inbounds i8, i8* %0, i64 %tmp12.sum
%3 = bitcast i8* %arrayidx19 to float*
%tmp20 = load float* %3, align 4
%conv21 = fpext float %tmp20 to double
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([33 x i8]* @.str4, i64 0, i64 0), double %conv, double %conv8, double %conv14, double %conv21)
%ivar23 = load i64* @"OBJC_IVAR_$_A.myZ", align 8
- %add.ptr24 = getelementptr i8* %0, i64 %ivar23
+ %add.ptr24 = getelementptr i8, i8* %0, i64 %ivar23
%4 = bitcast i8* %add.ptr24 to i128*
%srcval = load i128* %4, align 4
tail call void @objc_release(i8* %0) nounwind
entry:
%ivar = load i64* @"OBJC_IVAR_$_Controller.preferencesController", align 8
%tmp = bitcast %0* %self to i8*
- %add.ptr = getelementptr inbounds i8* %tmp, i64 %ivar
+ %add.ptr = getelementptr inbounds i8, i8* %tmp, i64 %ivar
%tmp1 = bitcast i8* %add.ptr to %1**
%tmp2 = load %1** %tmp1, align 8
%tmp3 = bitcast %1* %preferencesController to i8*
entry:
%weakLogNTimes = alloca %struct.__block_byref_weakLogNTimes, align 8
%block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
- %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
+ %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
store i8* null, i8** %byref.isa, align 8
- %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
+ %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
store %struct.__block_byref_weakLogNTimes* %weakLogNTimes, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
- %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
+ %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
store i32 33554432, i32* %byref.flags, align 8
- %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
+ %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
store i32 48, i32* %byref.size, align 4
- %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
+ %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_ to i8*), i8** %tmp1, align 8
- %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
+ %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
- %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
+ %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
%tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
%tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
- %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
store i8* null, i8** %block.isa, align 8
- %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
store i32 1107296256, i32* %block.flags, align 8
- %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
store i32 0, i32* %block.reserved, align 4
- %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
store i8* bitcast (void (i8*, i32)* @__main_block_invoke_0 to i8*), i8** %block.invoke, align 8
- %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
+ %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
store %struct.__block_descriptor* null, %struct.__block_descriptor** %block.descriptor, align 8
- %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
+ %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
%tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
store i8* %tmp5, i8** %block.captured, align 8
%tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
%tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
%tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
- %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
+ %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
%tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
%tmp10 = call i8* @objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
- %tmp11 = getelementptr inbounds i8* %tmp7, i64 16
+ %tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
%tmp12 = bitcast i8* %tmp11 to i8**
%tmp13 = load i8** %tmp12, align 8
%tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
entry:
%weakLogNTimes = alloca %struct.__block_byref_weakLogNTimes, align 8
%block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, align 8
- %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
+ %byref.isa = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 0
store i8* null, i8** %byref.isa, align 8
- %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
+ %byref.forwarding = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 1
store %struct.__block_byref_weakLogNTimes* %weakLogNTimes, %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
- %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
+ %byref.flags = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 2
store i32 33554432, i32* %byref.flags, align 8
- %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
+ %byref.size = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 3
store i32 48, i32* %byref.size, align 4
- %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
+ %tmp1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 4
store i8* bitcast (void (i8*, i8*)* @__Block_byref_object_copy_ to i8*), i8** %tmp1, align 8
- %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
+ %tmp2 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 5
store i8* bitcast (void (i8*)* @__Block_byref_object_dispose_ to i8*), i8** %tmp2, align 8
- %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
+ %weakLogNTimes1 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %weakLogNTimes, i64 0, i32 6
%tmp3 = bitcast void (...)** %weakLogNTimes1 to i8**
%tmp4 = call i8* @objc_initWeak(i8** %tmp3, i8* null) nounwind
- %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 0
store i8* null, i8** %block.isa, align 8
- %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 1
store i32 1107296256, i32* %block.flags, align 8
- %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 2
store i32 0, i32* %block.reserved, align 4
- %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 3
store i8* bitcast (void (i8*, i32)* @__main_block_invoke_0 to i8*), i8** %block.invoke, align 8
- %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
+ %block.descriptor = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 4
store %struct.__block_descriptor* null, %struct.__block_descriptor** %block.descriptor, align 8
- %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
+ %block.captured = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block, i64 0, i32 5
%tmp5 = bitcast %struct.__block_byref_weakLogNTimes* %weakLogNTimes to i8*
store i8* %tmp5, i8** %block.captured, align 8
%tmp6 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i8* }>* %block to i8*
%tmp7 = call i8* @objc_retainBlock(i8* %tmp6) nounwind, !clang.arc.copy_on_escape !0
%tmp8 = load %struct.__block_byref_weakLogNTimes** %byref.forwarding, align 8
- %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
+ %weakLogNTimes3 = getelementptr inbounds %struct.__block_byref_weakLogNTimes, %struct.__block_byref_weakLogNTimes* %tmp8, i64 0, i32 6
%tmp9 = bitcast void (...)** %weakLogNTimes3 to i8**
%tmp10 = call i8* @not_really_objc_storeWeak(i8** %tmp9, i8* %tmp7) nounwind
- %tmp11 = getelementptr inbounds i8* %tmp7, i64 16
+ %tmp11 = getelementptr inbounds i8, i8* %tmp7, i64 16
%tmp12 = bitcast i8* %tmp11 to i8**
%tmp13 = load i8** %tmp12, align 8
%tmp14 = bitcast i8* %tmp13 to void (i8*, i32)*
%tmp70 = tail call %14* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to %14* (i8*, i8*, %23*, %18*)*)(i8* %tmp69, i8* %tmp68, %23* %tmp67, %18* %tmp47)
%tmp71 = bitcast %14* %tmp70 to i8*
; hack to prevent the optimize from using objc_retainAutoreleasedReturnValue.
- %tmp71x = getelementptr i8* %tmp71, i64 1
+ %tmp71x = getelementptr i8, i8* %tmp71, i64 1
%tmp72 = tail call i8* @objc_retain(i8* %tmp71x) nounwind
%tmp73 = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_402", align 8
tail call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, i8)*)(i8* %tmp72, i8* %tmp73, i8 signext 1)
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
call void @use(i8* %3)
%4 = add i64 %forcoll.index, 1
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
- %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
+ %stateitems.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 1
br label %forcoll.loopbody.outer
forcoll.loopbody.outer:
forcoll.notmutated:
%stateitems = load i8*** %stateitems.ptr, align 8
- %currentitem.ptr = getelementptr i8** %stateitems, i64 %forcoll.index
+ %currentitem.ptr = getelementptr i8*, i8** %stateitems, i64 %forcoll.index
%3 = load i8** %currentitem.ptr, align 8
%tobool = icmp eq i8* %3, null
br i1 %tobool, label %forcoll.next, label %if.then
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
br i1 %iszero, label %forcoll.empty, label %forcoll.loopinit
forcoll.loopinit:
- %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
+ %mutationsptr.ptr = getelementptr inbounds %struct.__objcFastEnumerationState, %struct.__objcFastEnumerationState* %state.ptr, i64 0, i32 2
%mutationsptr = load i64** %mutationsptr.ptr, align 8
%forcoll.initial-mutations = load i64* %mutationsptr, align 8
br label %forcoll.loopbody.outer
%block = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
%block9 = alloca <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, align 8
%call = call i8* @def(), !clang.arc.no_objc_arc_exceptions !0
- %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
- %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
+ %foo = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 5
+ %block.isa = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 0
store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
- %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
+ %block.flags = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 1
store i32 1107296256, i32* %block.flags, align 8
- %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
+ %block.reserved = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 2
store i32 0, i32* %block.reserved, align 4
- %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
+ %block.invoke = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 3
store i8* bitcast (void (i8*)* @__crasher_block_invoke to i8*), i8** %block.invoke, align 8
- %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
+ %block.d = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block, i64 0, i32 4
store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp to %struct.__block_d*), %struct.__block_d** %block.d, align 8
%foo2 = tail call i8* @objc_retain(i8* %call) nounwind
store i8* %foo2, i8** %foo, align 8
call void @objc_release(i8* %foo5) nounwind
%strongdestroy = load i8** %foo, align 8
call void @objc_release(i8* %strongdestroy) nounwind, !clang.imprecise_release !0
- %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
- %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
+ %foo10 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 5
+ %block.isa11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 0
store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa11, align 8
- %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
+ %block.flags12 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 1
store i32 1107296256, i32* %block.flags12, align 8
- %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
+ %block.reserved13 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 2
store i32 0, i32* %block.reserved13, align 4
- %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
+ %block.invoke14 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 3
store i8* bitcast (void (i8*)* @__crasher_block_invoke1 to i8*), i8** %block.invoke14, align 8
- %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
+ %block.d15 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_d*, i8* }>* %block9, i64 0, i32 4
store %struct.__block_d* bitcast ({ i64, i64, i8*, i8*, i8*, i8* }* @__block_d_tmp5 to %struct.__block_d*), %struct.__block_d** %block.d15, align 8
%foo18 = call i8* @objc_retain(i8* %call) nounwind
store i8* %call, i8** %foo10, align 8
; CHECK: %tmp16 = call i8* @objc_retainBlock(i8* %tmp15) [[NUW:#[0-9]+]]
; CHECK: %tmp17 = bitcast i8* %tmp16 to void ()*
; CHECK: %tmp18 = load %struct.__block_byref_repeater** %byref.forwarding, align 8
-; CHECK: %repeater12 = getelementptr inbounds %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
+; CHECK: %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
; CHECK: store void ()* %tmp17, void ()** %repeater12, align 8
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
entry:
%repeater = alloca %struct.__block_byref_repeater, align 8
%block = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>, align 8
- %byref.forwarding = getelementptr inbounds %struct.__block_byref_repeater* %repeater, i64 0, i32 1
- %tmp10 = getelementptr inbounds %struct.__block_byref_repeater* %repeater, i64 0, i32 6
+ %byref.forwarding = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %repeater, i64 0, i32 1
+ %tmp10 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %repeater, i64 0, i32 6
store void ()* null, void ()** %tmp10, align 8
- %block.captured11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block, i64 0, i32 6
+ %block.captured11 = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block, i64 0, i32 6
%tmp14 = bitcast %struct.__block_byref_repeater* %repeater to i8*
store i8* %tmp14, i8** %block.captured11, align 8
%tmp15 = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, %0*, i8* }>* %block to i8*
%tmp16 = call i8* @objc_retainBlock(i8* %tmp15) nounwind
%tmp17 = bitcast i8* %tmp16 to void ()*
%tmp18 = load %struct.__block_byref_repeater** %byref.forwarding, align 8
- %repeater12 = getelementptr inbounds %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
+ %repeater12 = getelementptr inbounds %struct.__block_byref_repeater, %struct.__block_byref_repeater* %tmp18, i64 0, i32 6
%tmp13 = load void ()** %repeater12, align 8
store void ()* %tmp17, void ()** %repeater12, align 8
ret void
%block = alloca %1, align 8
%0 = call i8* @objc_retain(i8* %me) nounwind
%1 = call i8* @objc_initWeak(i8** %w, i8* %0) nounwind
- %block.isa = getelementptr inbounds %1* %block, i64 0, i32 0
+ %block.isa = getelementptr inbounds %1, %1* %block, i64 0, i32 0
store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** %block.isa, align 8
- %block.flags = getelementptr inbounds %1* %block, i64 0, i32 1
+ %block.flags = getelementptr inbounds %1, %1* %block, i64 0, i32 1
store i32 1107296256, i32* %block.flags, align 8
- %block.reserved = getelementptr inbounds %1* %block, i64 0, i32 2
+ %block.reserved = getelementptr inbounds %1, %1* %block, i64 0, i32 2
store i32 0, i32* %block.reserved, align 4
- %block.invoke = getelementptr inbounds %1* %block, i64 0, i32 3
+ %block.invoke = getelementptr inbounds %1, %1* %block, i64 0, i32 3
store i8* bitcast (void (i8*)* @__qux_block_invoke_0 to i8*), i8** %block.invoke, align 8
- %block.descriptor = getelementptr inbounds %1* %block, i64 0, i32 4
+ %block.descriptor = getelementptr inbounds %1, %1* %block, i64 0, i32 4
store %struct.__block_descriptor* bitcast (%0* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** %block.descriptor, align 8
- %block.captured = getelementptr inbounds %1* %block, i64 0, i32 5
+ %block.captured = getelementptr inbounds %1, %1* %block, i64 0, i32 5
%2 = call i8* @objc_loadWeak(i8** %w) nounwind
%3 = call i8* @objc_initWeak(i8** %block.captured, i8* %2) nounwind
%4 = bitcast %1* %block to void ()*
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %this, %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr
%1 = load %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"** %this_addr, align 8 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
- %2 = getelementptr inbounds %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1, i32 0, i32 0 ; <i32*> [#uses=1]
+ %2 = getelementptr inbounds %"struct.boost::details::compressed_pair_imp<empty_t,int,1>", %"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %1, i32 0, i32 0 ; <i32*> [#uses=1]
store i32* %2, i32** %0, align 8
%3 = load i32** %0, align 8 ; <i32*> [#uses=1]
store i32* %3, i32** %retval, align 8
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr
%1 = load %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
- %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
+ %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
%3 = call i32* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE6secondEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <i32*> [#uses=1]
store i32* %3, i32** %0, align 8
%4 = load i32** %0, align 8 ; <i32*> [#uses=1]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store %"struct.boost::compressed_pair<empty_t,int>"* %this, %"struct.boost::compressed_pair<empty_t,int>"** %this_addr
%1 = load %"struct.boost::compressed_pair<empty_t,int>"** %this_addr, align 8 ; <%"struct.boost::compressed_pair<empty_t,int>"*> [#uses=1]
- %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
+ %2 = getelementptr inbounds %"struct.boost::compressed_pair<empty_t,int>", %"struct.boost::compressed_pair<empty_t,int>"* %1, i32 0, i32 0 ; <%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"*> [#uses=1]
%3 = call %struct.empty_base_t* @_ZN5boost7details19compressed_pair_impI7empty_tiLi1EE5firstEv(%"struct.boost::details::compressed_pair_imp<empty_t,int,1>"* %2) nounwind ; <%struct.empty_base_t*> [#uses=1]
store %struct.empty_base_t* %3, %struct.empty_base_t** %0, align 8
%4 = load %struct.empty_base_t** %0, align 8 ; <%struct.empty_base_t*> [#uses=1]
br i1 %cmp, label %land.lhs.true, label %if.end
land.lhs.true: ; preds = %entry
- %arrayidx4 = getelementptr inbounds i8* %arrayidx, i64 1
+ %arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1
%tmp5 = load i8* %arrayidx4, align 1
%conv6 = zext i8 %tmp5 to i32
%cmp7 = icmp eq i32 %conv6, 69
br i1 %cmp7, label %land.lhs.true9, label %if.end
land.lhs.true9: ; preds = %land.lhs.true
- %arrayidx12 = getelementptr inbounds i8* %arrayidx, i64 2
+ %arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2
%tmp13 = load i8* %arrayidx12, align 1
%conv14 = zext i8 %tmp13 to i32
%cmp15 = icmp eq i32 %conv14, 76
br i1 %cmp15, label %land.lhs.true17, label %if.end
land.lhs.true17: ; preds = %land.lhs.true9
- %arrayidx20 = getelementptr inbounds i8* %arrayidx, i64 3
+ %arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3
%tmp21 = load i8* %arrayidx20, align 1
%conv22 = zext i8 %tmp21 to i32
%cmp23 = icmp eq i32 %conv22, 70
br i1 %cmp, label %land.lhs.true, label %if.end
land.lhs.true: ; preds = %entry
- %arrayidx4 = getelementptr inbounds i8* %arrayidx, i64 1
+ %arrayidx4 = getelementptr inbounds i8, i8* %arrayidx, i64 1
%tmp5 = load i8* %arrayidx4, align 1
%conv6 = zext i8 %tmp5 to i32
%cmp7 = icmp eq i32 %conv6, 69
br i1 %cmp7, label %land.lhs.true9, label %if.end
land.lhs.true9: ; preds = %land.lhs.true
- %arrayidx12 = getelementptr inbounds i8* %arrayidx, i64 2
+ %arrayidx12 = getelementptr inbounds i8, i8* %arrayidx, i64 2
%tmp13 = load i8* %arrayidx12, align 1
%conv14 = zext i8 %tmp13 to i32
%cmp15 = icmp eq i32 %conv14, 76
br i1 %cmp15, label %land.lhs.true17, label %if.end
land.lhs.true17: ; preds = %land.lhs.true9
- %arrayidx20 = getelementptr inbounds i8* %arrayidx, i64 3
+ %arrayidx20 = getelementptr inbounds i8, i8* %arrayidx, i64 3
%tmp21 = load i8* %arrayidx20, align 1
%conv22 = zext i8 %tmp21 to i32
%cmp23 = icmp eq i32 %conv22, 70
define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp {
entry:
%div = udiv i32 %a, 4
- %arrayidx = getelementptr inbounds i32* %p, i64 0
+ %arrayidx = getelementptr inbounds i32, i32* %p, i64 0
store i32 %div, i32* %arrayidx, align 4
%add = add i32 %div, %div
- %arrayidx1 = getelementptr inbounds i32* %p, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
store i32 %add, i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32* %p, i64 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %p, i64 1
%0 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %p, i64 0
+ %arrayidx3 = getelementptr inbounds i32, i32* %p, i64 0
%1 = load i32* %arrayidx3, align 4
%mul = mul i32 2, %1
%sub = sub i32 %0, %mul
for.body: ; preds = %for.cond
store i32 0, i32* %p.addr.0, align 4
- %add.ptr = getelementptr inbounds i32* %p.addr.0, i64 %div
+ %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 %div
store i32 1, i32* %add.ptr, align 4
- %add.ptr1 = getelementptr inbounds i32* %add.ptr, i64 %div
+ %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i64 %div
br label %for.inc
for.inc: ; preds = %for.body
for.body: ; preds = %for.cond
store i32 0, i32* %p.addr.0, align 4
- %add.ptr = getelementptr inbounds i32* %p.addr.0, i64 %div
+ %add.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 %div
store i32 1, i32* %add.ptr, align 4
- %add.ptr1 = getelementptr inbounds i32* %add.ptr, i64 %div
+ %add.ptr1 = getelementptr inbounds i32, i32* %add.ptr, i64 %div
br label %for.inc
for.inc: ; preds = %for.body
unreachable
"8": ; preds = %"4"
- %8 = getelementptr inbounds i8* undef, i32 %6
+ %8 = getelementptr inbounds i8, i8* undef, i32 %6
br i1 undef, label %"13", label %"12"
"12": ; preds = %"8", %entry
%reg117 = phi i32 [ %reg118, %bb4 ], [ 0, %bb3 ] ; <i32> [#uses=2]
%reg113 = add i32 %reg115, %reg117 ; <i32> [#uses=1]
%reg114 = add i32 %reg113, %reg116 ; <i32> [#uses=1]
- %cast227 = getelementptr [4 x i8]* @.LC0, i64 0, i64 0 ; <i8*> [#uses=1]
+ %cast227 = getelementptr [4 x i8], [4 x i8]* @.LC0, i64 0, i64 0 ; <i8*> [#uses=1]
call i32 (i8*, ...)* @printf( i8* %cast227, i32 %reg114 ) ; <i32>:0 [#uses=0]
%reg118 = add i32 %reg117, 1 ; <i32> [#uses=2]
%cond224 = icmp ne i32 %reg118, %Num ; <i1> [#uses=1]
; CHECK-NEXT: load i8 addrspace(1)* %derived.relocated
; CHECK-NEXT: load i8 addrspace(1)* %obj.relocated
entry:
- %derived = getelementptr i8 addrspace(1)* %obj, i64 10
+ %derived = getelementptr i8, i8 addrspace(1)* %obj, i64 10
call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* @foo, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
%a = load i8 addrspace(1)* %derived
@G = external global [40 x i32] ; <[40 x i32]*> [#uses=1]
define i32* @test() {
- %X = getelementptr [40 x i32]* @G, i64 0, i64 0 ; <i32*> [#uses=1]
+ %X = getelementptr [40 x i32], [40 x i32]* @G, i64 0, i64 0 ; <i32*> [#uses=1]
ret i32* %X
}
loopexit: ; preds = %endif, %then, %entry
%j.1 = phi i32 [ 1, %entry ], [ %j.0, %endif ], [ %i.0, %then ] ; <i32> [#uses=1]
%i.1 = phi i32 [ 1, %entry ], [ %inc, %endif ], [ %inc1, %then ] ; <i32> [#uses=1]
- %tmp.17 = getelementptr i32* %data.1, i64 1 ; <i32*> [#uses=1]
+ %tmp.17 = getelementptr i32, i32* %data.1, i64 1 ; <i32*> [#uses=1]
store i32 %j.1, i32* %tmp.17
- %tmp.23 = getelementptr i32* %data.1, i64 2 ; <i32*> [#uses=1]
+ %tmp.23 = getelementptr i32, i32* %data.1, i64 2 ; <i32*> [#uses=1]
store i32 %i.1, i32* %tmp.23
ret void
}
ret void
cond_next252: ; preds = %cond_next208, %entry
%D.0.0 = phi i32 [ 0, %entry ], [ %tmp229, %cond_next208 ] ; <i32> [#uses=1]
- %tmp254 = getelementptr i8** null, i32 1 ; <i8**> [#uses=1]
+ %tmp254 = getelementptr i8*, i8** null, i32 1 ; <i8**> [#uses=1]
%tmp256 = load i8** %tmp254 ; <i8*> [#uses=1]
%tmp258 = load i8* %tmp256 ; <i8> [#uses=1]
%tmp259 = icmp eq i8 %tmp258, 45 ; <i1> [#uses=1]
define void @gldLLVMVecPointRender(%struct.GLDContextRec* %ctx) {
entry:
- %tmp.uip = getelementptr %struct.GLDContextRec* %ctx, i32 0, i32 22 ; <i32*> [#uses=1]
+ %tmp.uip = getelementptr %struct.GLDContextRec, %struct.GLDContextRec* %ctx, i32 0, i32 22 ; <i32*> [#uses=1]
%tmp = load i32* %tmp.uip ; <i32> [#uses=3]
%tmp91 = lshr i32 %tmp, 5 ; <i32> [#uses=1]
%tmp92 = trunc i32 %tmp91 to i1 ; <i1> [#uses=1]
br i1 %tmp92, label %cond_true93, label %cond_next116
cond_true93: ; preds = %entry
- %tmp.upgrd.1 = getelementptr %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14 ; <i32*> [#uses=1]
+ %tmp.upgrd.1 = getelementptr %struct.GLDContextRec, %struct.GLDContextRec* %ctx, i32 0, i32 31, i32 14 ; <i32*> [#uses=1]
%tmp95 = load i32* %tmp.upgrd.1 ; <i32> [#uses=1]
%tmp95.upgrd.2 = sitofp i32 %tmp95 to float ; <float> [#uses=1]
%tmp108 = fmul float undef, %tmp95.upgrd.2 ; <float> [#uses=1]
define i101 @array()
{
Head:
- %A = getelementptr [6 x i101]* @Y, i32 0, i32 1
+ %A = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 1
%B = load i101* %A
%C = icmp sge i101 %B, 1
True:
%D = and i101 %B, 1
%E = trunc i101 %D to i32
- %F = getelementptr [6 x i101]* @Y, i32 0, i32 %E
+ %F = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 %E
%G = load i101* %F
br label %False
False:
@G = global [1000000 x i10000] zeroinitializer
define internal i10000* @test(i10000 %Arg) {
- %X = getelementptr [1000000 x i10000]* @G, i32 0, i32 999
+ %X = getelementptr [1000000 x i10000], [1000000 x i10000]* @G, i32 0, i32 999
store i10000 %Arg, i10000* %X
ret i10000* %X
}
define i101 @array()
{
Head:
- %A = getelementptr [6 x i101]* @Y, i32 0, i32 1
+ %A = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 1
%B = load i101* %A
%D = and i101 %B, 1
%DD = or i101 %D, 1
%E = trunc i101 %DD to i32
- %F = getelementptr [6 x i101]* @Y, i32 0, i32 %E
+ %F = getelementptr [6 x i101], [6 x i101]* @Y, i32 0, i32 %E
%G = load i101* %F
ret i101 %G
{ i212, float } { i212 37, float 2.0 } ]
define internal float @test2() {
- %A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
+ %A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
%B = load float* %A
ret float %B
}
define internal float @test3() {
- %A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 0, i32 1
+ %A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 0, i32 1
%B = load float* %A
ret float %B
}
}
define internal float @test2() {
- %A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
+ %A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 1, i32 1
%B = load float* %A
ret float %B
}
define internal i212 @test3() {
- %A = getelementptr [2 x { i212, float}]* @Y, i32 0, i32 0, i32 0
+ %A = getelementptr [2 x { i212, float}], [2 x { i212, float}]* @Y, i32 0, i32 0, i32 0
%B = load i212* %A
ret i212 %B
}
@A = constant i32 10
define i712 @test1() {
- %P = getelementptr i32* @A, i32 0
+ %P = getelementptr i32, i32* @A, i32 0
%B = ptrtoint i32* %P to i64
%BB = and i64 %B, undef
%C = icmp sge i64 %BB, 0
}
define float @test2() {
- %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 1, i32 1 ; <float*> [#uses=1]
%B = load float* %A ; <float> [#uses=1]
ret float %B
}
define i32 @test3() {
- %A = getelementptr [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
+ %A = getelementptr [2 x { i32, float }], [2 x { i32, float }]* @Y, i64 0, i64 0, i32 0 ; <i32*> [#uses=1]
%B = load i32* %A
ret i32 %B
}
define void @test1(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
; CHECK-LABEL: test1
-; CHECK: %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
-; CHECK: %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
+; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
; CHECK: %4 = load <2 x float>* %3, align 4
; CHECK: %5 = fsub fast <2 x float> %2, %4
for.body3.lr.ph:
%conv5 = sitofp i32 %ymin to float
%conv = sitofp i32 %xmin to float
- %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
+ %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
%0 = load float* %arrayidx4, align 4
%sub = fsub fast float %conv, %0
- %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+ %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
%1 = load float* %arrayidx9, align 4
%sub10 = fsub fast float %conv5, %1
%mul11 = fmul fast float %sub, %sub
define void @test2(%structA* nocapture readonly %J, i32 %xmin, i32 %ymin) {
; CHECK-LABEL: test2
-; CHECK: %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
-; CHECK: %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+; CHECK: %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
+; CHECK: %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
; CHECK: %3 = bitcast float* %arrayidx4 to <2 x float>*
; CHECK: %4 = load <2 x float>* %3, align 4
; CHECK: %5 = fsub fast <2 x float> %2, %4
for.body3.lr.ph:
%conv5 = sitofp i32 %ymin to float
%conv = sitofp i32 %xmin to float
- %arrayidx4 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 0
+ %arrayidx4 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 0
%0 = load float* %arrayidx4, align 4
%sub = fsub fast float %conv, %0
- %arrayidx9 = getelementptr inbounds %structA* %J, i64 0, i32 0, i64 1
+ %arrayidx9 = getelementptr inbounds %structA, %structA* %J, i64 0, i32 0, i64 1
%1 = load float* %arrayidx9, align 4
%sub10 = fsub fast float %conv5, %1
%mul11 = fmul fast float %sub, %sub
; CHECK: store double
; CHECK: store double
define void @f(double* %p, double* %q) {
- %addr2 = getelementptr double* %q, i32 1
- %addr = getelementptr double* %p, i32 1
+ %addr2 = getelementptr double, double* %q, i32 1
+ %addr = getelementptr double, double* %p, i32 1
%x = load double* %p
%y = load double* %addr
call void @g()
loop:
%p1 = phi double [0.0, %entry], [%x, %loop]
%p2 = phi double [0.0, %entry], [%y, %loop]
- %addr2 = getelementptr double* %q, i32 1
- %addr = getelementptr double* %p, i32 1
+ %addr2 = getelementptr double, double* %q, i32 1
+ %addr = getelementptr double, double* %p, i32 1
store double %p1, double* %q
store double %p2, double* %addr2
%add = add nsw i32 %1, %0
%div = sdiv i32 %add, 2
store i32 %div, i32* %a, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
%2 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %c, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
%3 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %3, %2
%div6 = sdiv i32 %add5, 2
- %arrayidx7 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %div6, i32* %arrayidx7, align 4
- %arrayidx8 = getelementptr inbounds i32* %b, i64 2
+ %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
%4 = load i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i64 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
%5 = load i32* %arrayidx9, align 4
%add10 = add nsw i32 %5, %4
%div11 = sdiv i32 %add10, 2
- %arrayidx12 = getelementptr inbounds i32* %a, i64 2
+ %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
store i32 %div11, i32* %arrayidx12, align 4
- %arrayidx13 = getelementptr inbounds i32* %b, i64 3
+ %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
%6 = load i32* %arrayidx13, align 4
- %arrayidx14 = getelementptr inbounds i32* %c, i64 3
+ %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
%7 = load i32* %arrayidx14, align 4
%add15 = add nsw i32 %7, %6
%div16 = sdiv i32 %add15, 2
- %arrayidx17 = getelementptr inbounds i32* %a, i64 3
+ %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
store i32 %div16, i32* %arrayidx17, align 4
ret void
}
entry:
%0 = load double* %src, align 8
store double %0, double* %dst, align 8
- %arrayidx2 = getelementptr inbounds double* %src, i64 1
+ %arrayidx2 = getelementptr inbounds double, double* %src, i64 1
%1 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %dst, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %dst, i64 1
store double %1, double* %arrayidx3, align 8
ret void
}
%3 = bitcast i64 %b.sroa.3.12.insert.insert to double
%add = fadd double %0, %2
%add3 = fadd double %1, %3
- %re.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 0
+ %re.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 0
store double %add, double* %re.i.i, align 4
- %im.i.i = getelementptr inbounds %class.Complex* %agg.result, i32 0, i32 1
+ %im.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 1
store double %add3, double* %im.i.i, align 4
ret void
}
%i0 = load double addrspace(3)* %a, align 8
%i1 = load double addrspace(3)* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double addrspace(3)* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double addrspace(3)* %a, i64 1
%i3 = load double addrspace(3)* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double addrspace(3)* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double addrspace(3)* %b, i64 1
%i4 = load double addrspace(3)* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double addrspace(3)* %c, align 8
- %arrayidx5 = getelementptr inbounds double addrspace(3)* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double addrspace(3)* %c, i64 1
store double %mul5, double addrspace(3)* %arrayidx5, align 8
ret void
}
%i0 = load double addrspace(3)* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double addrspace(3)* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double addrspace(3)* %a, i64 1
%i3 = load double addrspace(3)* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double addrspace(3)* %c, align 8
- %arrayidx5 = getelementptr inbounds double addrspace(3)* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double addrspace(3)* %c, i64 1
store double %mul5, double addrspace(3)* %arrayidx5, align 8
ret void
}
%4 = load double* %d
%5 = fsub double %3, %4
store double %5, double* %c
- %6 = getelementptr inbounds double* %d, i64 1
+ %6 = getelementptr inbounds double, double* %d, i64 1
%7 = load double* %6
- %8 = getelementptr inbounds double* %a, i64 1
+ %8 = getelementptr inbounds double, double* %a, i64 1
%9 = load double* %8
- %10 = getelementptr inbounds double* %b, i64 1
+ %10 = getelementptr inbounds double, double* %b, i64 1
%11 = load double* %10
%12 = fadd double %9, %11
%13 = fadd double %7, %12
- %14 = getelementptr inbounds double* %c, i64 1
+ %14 = getelementptr inbounds double, double* %c, i64 1
store double %13, double* %14
ret void
}
%i0 = load double* %a
%i1 = load double* %b
%mul = fmul double %i0, %i1
- %store1 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
- %store2 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
+ %store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
define void @test2(float * %a, float * %b) {
entry:
%l0 = load float* %a
- %a1 = getelementptr inbounds float* %a, i64 1
+ %a1 = getelementptr inbounds float, float* %a, i64 1
%l1 = load float* %a1
- %a2 = getelementptr inbounds float* %a, i64 2
+ %a2 = getelementptr inbounds float, float* %a, i64 2
%l2 = load float* %a2
- %a3 = getelementptr inbounds float* %a, i64 3
+ %a3 = getelementptr inbounds float, float* %a, i64 3
%l3 = load float* %a3
store float %l0, float* %b
- %b1 = getelementptr inbounds float* %b, i64 1
+ %b1 = getelementptr inbounds float, float* %b, i64 1
store float %l1, float* %b1
- %b2 = getelementptr inbounds float* %b, i64 2
+ %b2 = getelementptr inbounds float, float* %b, i64 2
store float %l2, float* %b2
- %b3 = getelementptr inbounds float* %b, i64 3
+ %b3 = getelementptr inbounds float, float* %b, i64 3
store float %l3, float* %b3
ret void
}
%b.cast = bitcast x86_mmx %b to i64
%a.and = and i64 %a.cast, 42
%b.and = and i64 %b.cast, 42
- %gep = getelementptr i64* %ptr, i32 1
+ %gep = getelementptr i64, i64* %ptr, i32 1
store i64 %a.and, i64* %ptr
store i64 %b.and, i64* %gep
ret void
store i32 %add, i32* %A, align 4
%mul1 = mul nsw i32 %n, 9
%add2 = add nsw i32 %mul1, 9
- %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %add2, i32* %arrayidx3, align 4
%mul4 = shl i32 %n, 3
%add5 = add nsw i32 %mul4, 9
- %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %add5, i32* %arrayidx6, align 4
%mul7 = mul nsw i32 %n, 10
%add8 = add nsw i32 %mul7, 9
- %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %add8, i32* %arrayidx9, align 4
ret i32 undef
}
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @sin(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @sin(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @cos(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @cos(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @pow(double %mul,double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @pow(double %mul5,double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @exp2(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @exp2(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
%i1 = load i64* %b, align 8
%mul = mul i64 %i0, %i1
%call = tail call i64 @round(i64 %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds i64* %a, i64 1
+ %arrayidx3 = getelementptr inbounds i64, i64* %a, i64 1
%i3 = load i64* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds i64* %b, i64 1
+ %arrayidx4 = getelementptr inbounds i64, i64* %b, i64 1
%i4 = load i64* %arrayidx4, align 8
%mul5 = mul i64 %i3, %i4
%call5 = tail call i64 @round(i64 %mul5) nounwind readnone
store i64 %call, i64* %c, align 8
- %arrayidx5 = getelementptr inbounds i64* %c, i64 1
+ %arrayidx5 = getelementptr inbounds i64, i64* %c, i64 1
store i64 %call5, i64* %arrayidx5, align 8
ret void
}
%0 = load i8* %B, align 1
%conv = sext i8 %0 to i32
store i32 %conv, i32* %A, align 4
- %arrayidx2 = getelementptr inbounds i8* %B, i64 1
+ %arrayidx2 = getelementptr inbounds i8, i8* %B, i64 1
%1 = load i8* %arrayidx2, align 1
%conv3 = sext i8 %1 to i32
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %conv3, i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i8* %B, i64 2
+ %arrayidx5 = getelementptr inbounds i8, i8* %B, i64 2
%2 = load i8* %arrayidx5, align 1
%conv6 = sext i8 %2 to i32
- %arrayidx7 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %conv6, i32* %arrayidx7, align 4
- %arrayidx8 = getelementptr inbounds i8* %B, i64 3
+ %arrayidx8 = getelementptr inbounds i8, i8* %B, i64 3
%3 = load i8* %arrayidx8, align 1
%conv9 = sext i8 %3 to i32
- %arrayidx10 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %conv9, i32* %arrayidx10, align 4
ret i32 undef
}
;CHECK: ret i32 undef
define i32 @foo(double* noalias nocapture %A, double* noalias nocapture %B, double %G) {
entry:
- %arrayidx = getelementptr inbounds double* %B, i64 10
+ %arrayidx = getelementptr inbounds double, double* %B, i64 10
%0 = load double* %arrayidx, align 8
%tobool = fcmp une double %0, 0.000000e+00
%cond = select i1 %tobool, double %G, double 1.000000e+00
store double %cond, double* %A, align 8
- %arrayidx2 = getelementptr inbounds double* %B, i64 11
+ %arrayidx2 = getelementptr inbounds double, double* %B, i64 11
%1 = load double* %arrayidx2, align 8
%tobool3 = fcmp une double %1, 0.000000e+00
%cond7 = select i1 %tobool3, double %G, double 1.000000e+00
- %arrayidx8 = getelementptr inbounds double* %A, i64 1
+ %arrayidx8 = getelementptr inbounds double, double* %A, i64 1
store double %cond7, double* %arrayidx8, align 8
ret i32 undef
}
for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%0 = shl nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds double* %A, i64 %0
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %0
%1 = load double* %arrayidx, align 8
%mul1 = fmul double %conv, %1
%mul2 = fmul double %mul1, 7.000000e+00
%add = fadd double %mul2, 5.000000e+00
%2 = or i64 %0, 1
- %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
%3 = load double* %arrayidx6, align 8
%mul8 = fmul double %conv, %3
%mul9 = fmul double %mul8, 4.000000e+00
store i32 %u, i32* %u.addr, align 4
%mul = mul nsw i32 %u, 3
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
%0 = load double* %arrayidx, align 8
- %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+ %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
%1 = load double* %arrayidx4, align 8
%add5 = fadd double %0, %1
store double %add5, double* %arrayidx, align 8
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
- %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
%2 = load double* %arrayidx13, align 8
- %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+ %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
%3 = load double* %arrayidx17, align 8
%add18 = fadd double %2, %3
store double %add18, double* %arrayidx13, align 8
%add24 = add nsw i32 %mul, 2
%idxprom25 = sext i32 %add24 to i64
- %arrayidx26 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom25
+ %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
%4 = load double* %arrayidx26, align 8
- %arrayidx30 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom25
+ %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
%5 = load double* %arrayidx30, align 8
%add31 = fadd double %4, %5
store double %add31, double* %arrayidx26, align 8
store i32 %u, i32* %u.addr, align 4
%mul = mul nsw i32 %u, 2
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
%0 = load double* %arrayidx, align 8
- %arrayidx4 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom
+ %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
%1 = load double* %arrayidx4, align 8
%add5 = fadd double %0, %1
store double %add5, double* %arrayidx, align 8
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
- %arrayidx13 = getelementptr inbounds [2000 x double]* @A, i32 0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
%2 = load double* %arrayidx13, align 8
- %arrayidx17 = getelementptr inbounds [2000 x double]* @B, i32 0, i64 %idxprom12
+ %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
%3 = load double* %arrayidx17, align 8
%add18 = fadd double %2, %3
store double %add18, double* %arrayidx13, align 8
store i32 %u, i32* %u.addr, align 4
%mul = mul nsw i32 %u, 4
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom
+ %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
%0 = load float* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom
+ %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
%1 = load float* %arrayidx4, align 4
%add5 = fadd float %0, %1
store float %add5, float* %arrayidx, align 4
%add11 = add nsw i32 %mul, 1
%idxprom12 = sext i32 %add11 to i64
- %arrayidx13 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom12
+ %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
%2 = load float* %arrayidx13, align 4
- %arrayidx17 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom12
+ %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
%3 = load float* %arrayidx17, align 4
%add18 = fadd float %2, %3
store float %add18, float* %arrayidx13, align 4
%add24 = add nsw i32 %mul, 2
%idxprom25 = sext i32 %add24 to i64
- %arrayidx26 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom25
+ %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
%4 = load float* %arrayidx26, align 4
- %arrayidx30 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom25
+ %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
%5 = load float* %arrayidx30, align 4
%add31 = fadd float %4, %5
store float %add31, float* %arrayidx26, align 4
%add37 = add nsw i32 %mul, 3
%idxprom38 = sext i32 %add37 to i64
- %arrayidx39 = getelementptr inbounds [2000 x float]* @C, i32 0, i64 %idxprom38
+ %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
%6 = load float* %arrayidx39, align 4
- %arrayidx43 = getelementptr inbounds [2000 x float]* @D, i32 0, i64 %idxprom38
+ %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
%7 = load float* %arrayidx43, align 4
%add44 = fadd float %6, %7
store float %add44, float* %arrayidx39, align 4
%1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
%mul = mul nsw i32 %0, 2
%idxprom = sext i32 %mul to i64
- %arrayidx = getelementptr inbounds double* %A, i64 %idxprom
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
%2 = load double* %arrayidx, align 8
%mul1 = fmul double 7.000000e+00, %2
%add = add nsw i32 %mul, 1
%idxprom3 = sext i32 %add to i64
- %arrayidx4 = getelementptr inbounds double* %A, i64 %idxprom3
+ %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
%3 = load double* %arrayidx4, align 8
%mul5 = fmul double 7.000000e+00, %3
%add6 = fadd double %mul1, %mul5
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
%0 = bitcast double* %a to <4 x i32>*
%1 = load <4 x i32>* %0, align 8
define fastcc void @LzmaDec_DecodeReal2(%struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p) {
entry:
- %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
- %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
+ %range20.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 4
+ %code21.i = getelementptr inbounds %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334, %struct.CLzmaDec.1.28.55.82.103.124.145.166.181.196.229.259.334* %p, i64 0, i32 5
br label %do.body66.i
do.body66.i: ; preds = %do.cond.i, %entry
ret void
if.else: ; preds = %entry
- %m_numConstraintRows4 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 0
- %nub5 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 1
+ %m_numConstraintRows4 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 0
+ %nub5 = getelementptr inbounds %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960", %"struct.btTypedConstraint::btConstraintInfo1.17.157.357.417.477.960"* %info, i64 0, i32 1
br i1 undef, label %land.lhs.true.i.1, label %if.then7.1
land.lhs.true.i.1: ; preds = %if.else
define void @_ZN30GIM_TRIANGLE_CALCULATION_CACHE18triangle_collisionERK9btVector3S2_S2_fS2_S2_S2_fR25GIM_TRIANGLE_CONTACT_DATA(%class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this) {
entry:
- %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
- %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
+ %arrayidx26 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 1
+ %arrayidx36 = getelementptr inbounds %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332, %class.GIM_TRIANGLE_CALCULATION_CACHE.9.34.69.94.119.144.179.189.264.284.332* %this, i64 0, i32 2, i64 0, i32 0, i64 2
%0 = load float* %arrayidx36, align 4
%add587 = fadd float undef, undef
%sub600 = fsub float %add587, undef
%dy276.1 = phi float [ undef, %if.then329 ], [ undef, %if.end327 ], [ 0x3F847AE140000000, %if.then291 ]
%sub334 = fsub float %add294, %dx272.1
%sub338 = fsub float %add297, %dy276.1
- %arrayidx.i.i606 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 0
+ %arrayidx.i.i606 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113, %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 0
store float %sub334, float* %arrayidx.i.i606, align 4
- %arrayidx3.i607 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 1
+ %arrayidx3.i607 = getelementptr inbounds %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113, %class.btVector3.23.221.463.485.507.573.595.683.727.749.815.837.991.1585.1607.1629.1651.1849.2047.2069.2091.2113* %vertices, i64 0, i32 0, i64 1
store float %sub338, float* %arrayidx3.i607, align 4
br label %return
%acc1.056 = phi float [ 0.000000e+00, %entry ], [ %add13, %for.body ]
%s1.055 = phi float [ 0.000000e+00, %entry ], [ %cond.i40, %for.body ]
%s0.054 = phi float [ 0.000000e+00, %entry ], [ %cond.i44, %for.body ]
- %arrayidx = getelementptr inbounds float* %src, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %src, i64 %indvars.iv
%0 = load float* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
- %arrayidx2 = getelementptr inbounds float* %dest, i64 %indvars.iv
+ %arrayidx2 = getelementptr inbounds float, float* %dest, i64 %indvars.iv
store float %acc1.056, float* %arrayidx2, align 4
%add = fadd float %s0.054, %0
%add3 = fadd float %s1.055, %0
; Function Attrs: nounwind ssp uwtable
define void @_ZSt6uniqueISt15_Deque_iteratorIdRdPdEET_S4_S4_(%"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* nocapture %__last) {
entry:
- %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
+ %_M_cur2.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 0
%0 = load double** %_M_cur2.i.i, align 8
- %_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
- %_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
+ %_M_first3.i.i = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__first, i64 0, i32 1
+ %_M_cur2.i.i81 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 0
%1 = load double** %_M_cur2.i.i81, align 8
- %_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
+ %_M_first3.i.i83 = getelementptr inbounds %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731", %"struct.std::_Deque_iterator.4.157.174.208.259.276.344.731"* %__last, i64 0, i32 1
%2 = load double** %_M_first3.i.i83, align 8
br i1 undef, label %_ZSt13adjacent_findISt15_Deque_iteratorIdRdPdEET_S4_S4_.exit, label %while.cond.i.preheader
define i32 @fn1() {
entry:
%0 = load i64** @a, align 8
- %add.ptr = getelementptr inbounds i64* %0, i64 1
+ %add.ptr = getelementptr inbounds i64, i64* %0, i64 1
%1 = ptrtoint i64* %add.ptr to i64
- %arrayidx = getelementptr inbounds i64* %0, i64 2
+ %arrayidx = getelementptr inbounds i64, i64* %0, i64 2
store i64 %1, i64* %arrayidx, align 8
%2 = ptrtoint i64* %arrayidx to i64
store i64 %2, i64* %add.ptr, align 8
define void @intrapred_luma() {
entry:
%conv153 = trunc i32 undef to i16
- %arrayidx154 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 12
+ %arrayidx154 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 12
store i16 %conv153, i16* %arrayidx154, align 8
- %arrayidx155 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 11
+ %arrayidx155 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 11
store i16 %conv153, i16* %arrayidx155, align 2
- %arrayidx156 = getelementptr inbounds [13 x i16]* undef, i64 0, i64 10
+ %arrayidx156 = getelementptr inbounds [13 x i16], [13 x i16]* undef, i64 0, i64 10
store i16 %conv153, i16* %arrayidx156, align 4
ret void
}
define fastcc void @dct36(double* %inbuf) {
entry:
- %arrayidx41 = getelementptr inbounds double* %inbuf, i64 2
- %arrayidx44 = getelementptr inbounds double* %inbuf, i64 1
+ %arrayidx41 = getelementptr inbounds double, double* %inbuf, i64 2
+ %arrayidx44 = getelementptr inbounds double, double* %inbuf, i64 1
%0 = load double* %arrayidx44, align 8
%add46 = fadd double %0, undef
store double %add46, double* %arrayidx41, align 8
%tmp4 = fmul double %tmp3, undef
%tmp5 = fmul double %tmp3, undef
%tmp6 = fsub double %tmp5, undef
- %tmp7 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 1
+ %tmp7 = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 1
store double %tmp6, double* %tmp7, align 8
%tmp8 = fmul double %tmp1, undef
%tmp9 = fsub double %tmp8, undef
- %tmp10 = getelementptr inbounds %struct.hoge* %arg, i64 0, i32 2
+ %tmp10 = getelementptr inbounds %struct.hoge, %struct.hoge* %arg, i64 0, i32 2
store double %tmp9, double* %tmp10, align 8
br i1 undef, label %bb11, label %bb12
define void @rc4_crypt(%struct.rc4_state.0.24* nocapture %s) {
entry:
- %x1 = getelementptr inbounds %struct.rc4_state.0.24* %s, i64 0, i32 0
- %y2 = getelementptr inbounds %struct.rc4_state.0.24* %s, i64 0, i32 1
+ %x1 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 0
+ %y2 = getelementptr inbounds %struct.rc4_state.0.24, %struct.rc4_state.0.24* %s, i64 0, i32 1
br i1 undef, label %for.body, label %for.end
for.body: ; preds = %for.body, %entry
%p3.addr.0258 = phi double [ %add, %bb1 ], [ %add28, %for.body ]
%vecinit.i.i237 = insertelement <2 x double> undef, double %t.0259, i32 0
%x13 = tail call i32 @_xfn(<2 x double> %vecinit.i.i237) #2
- %arrayidx = getelementptr inbounds [256 x i32]* %tab1, i64 0, i64 %indvars.iv266
+ %arrayidx = getelementptr inbounds [256 x i32], [256 x i32]* %tab1, i64 0, i64 %indvars.iv266
store i32 %x13, i32* %arrayidx, align 4, !tbaa !4
%vecinit.i.i = insertelement <2 x double> undef, double %p3.addr.0258, i32 0
%x14 = tail call i32 @_xfn(<2 x double> %vecinit.i.i) #2
- %arrayidx26 = getelementptr inbounds [256 x i32]* %tab2, i64 0, i64 %indvars.iv266
+ %arrayidx26 = getelementptr inbounds [256 x i32], [256 x i32]* %tab2, i64 0, i64 %indvars.iv266
store i32 %x14, i32* %arrayidx26, align 4, !tbaa !4
%add27 = fadd double %mul19, %t.0259
%add28 = fadd double %mul21, %p3.addr.0258
unreachable
if.end98: ; preds = %if.then17
- %from299 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
+ %from299 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171, %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 1
br i1 undef, label %land.lhs.true167, label %if.then103
if.then103: ; preds = %if.end98
%.sub100 = select i1 undef, i32 250, i32 undef
%mul114 = shl nsw i32 %.sub100, 2
- %from1115 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
+ %from1115 = getelementptr inbounds %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171, %struct._exon_t.12.103.220.363.480.649.740.857.1039.1065.1078.1091.1117.1130.1156.1169.1195.1221.1234.1286.1299.1312.1338.1429.1455.1468.1494.1520.1884.1897.1975.2066.2105.2170.2171* undef, i64 0, i32 0
%cond125 = select i1 undef, i32 undef, i32 %mul114
br label %for.cond.i
br i1 undef, label %arrayctor.cont, label %invoke.cont
arrayctor.cont: ; preds = %invoke.cont
- %agg.tmp99208.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
- %agg.tmp99208.sroa.1.8.idx388 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 1
- %agg.tmp101211.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
- %agg.tmp101211.sroa.1.8.idx390 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 1
+ %agg.tmp99208.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 0
+ %agg.tmp99208.sroa.1.8.idx388 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 0, i32 1
+ %agg.tmp101211.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 0
+ %agg.tmp101211.sroa.1.8.idx390 = getelementptr inbounds %struct.Ray.5.11.53.113.119.137.149.185.329.389.416, %struct.Ray.5.11.53.113.119.137.149.185.329.389.416* undef, i64 0, i32 1, i32 1
br label %for.cond36.preheader
for.cond36.preheader: ; preds = %_Z5clampd.exit.1, %arrayctor.cont
%add4.i698 = fadd double undef, %add4.i719
%mul.i.i679 = fmul double undef, %add.i695
%mul2.i.i680 = fmul double undef, %add4.i698
- %agg.tmp74663.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
+ %agg.tmp74663.sroa.0.0.idx = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601, %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 0
store double %mul.i.i679, double* %agg.tmp74663.sroa.0.0.idx, align 8
- %agg.tmp74663.sroa.1.8.idx943 = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 1
+ %agg.tmp74663.sroa.1.8.idx943 = getelementptr inbounds %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601, %struct.Ray.5.11.53.95.137.191.197.203.239.257.263.269.275.281.287.293.383.437.443.455.461.599.601* undef, i64 0, i32 1, i32 1
store double %mul2.i.i680, double* %agg.tmp74663.sroa.1.8.idx943, align 8
br label %return
;define fastcc void @bar() {
define void @bar() {
- %1 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
- %2 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
- %3 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
- %4 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
- %5 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 0
- %6 = getelementptr inbounds %0* undef, i64 0, i32 1, i32 1
+ %1 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+ %2 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
+ %3 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+ %4 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
+ %5 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 0
+ %6 = getelementptr inbounds %0, %0* undef, i64 0, i32 1, i32 1
br label %7
; <label>:7 ; preds = %18, %17, %17, %0
define i32 @foo(double* nocapture %A, float* nocapture %B, i32 %g) {
entry:
%0 = load float* %B, align 4
- %arrayidx1 = getelementptr inbounds float* %B, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %B, i64 1
%1 = load float* %arrayidx1, align 4
%add = fadd float %0, 5.000000e+00
%add2 = fadd float %1, 8.000000e+00
%add4 = fadd double %conv, %2
store double %add4, double* %A, align 8
%conv5 = fpext float %add2 to double
- %arrayidx6 = getelementptr inbounds double* %A, i64 1
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 1
%3 = load double* %arrayidx6, align 8
%add7 = fadd double %conv5, %3
store double %add7, double* %arrayidx6, align 8
define i32 @test(double* nocapture %G) {
entry:
- %arrayidx = getelementptr inbounds double* %G, i64 5
+ %arrayidx = getelementptr inbounds double, double* %G, i64 5
%0 = load double* %arrayidx, align 8
%mul = fmul double %0, 4.000000e+00
%add = fadd double %mul, 1.000000e+00
store double %add, double* %G, align 8
- %arrayidx2 = getelementptr inbounds double* %G, i64 6
+ %arrayidx2 = getelementptr inbounds double, double* %G, i64 6
%1 = load double* %arrayidx2, align 8
%mul3 = fmul double %1, 3.000000e+00
%add4 = fadd double %mul3, 6.000000e+00
- %arrayidx5 = getelementptr inbounds double* %G, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %G, i64 1
store double %add4, double* %arrayidx5, align 8
%add8 = fadd double %mul, 7.000000e+00
- %arrayidx9 = getelementptr inbounds double* %G, i64 2
+ %arrayidx9 = getelementptr inbounds double, double* %G, i64 2
store double %add8, double* %arrayidx9, align 8
%mul11 = fmul double %1, 4.000000e+00
%add12 = fadd double %mul11, 8.000000e+00
- %arrayidx13 = getelementptr inbounds double* %G, i64 3
+ %arrayidx13 = getelementptr inbounds double, double* %G, i64 3
store double %add12, double* %arrayidx13, align 8
ret i32 undef
}
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
- %arrayidx3 = getelementptr inbounds double* %A, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
%1 = load double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.700000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 2.000000e+00
store double %add7, double* %arrayidx3, align 8
- %arrayidx9 = getelementptr inbounds double* %A, i64 2
+ %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
%2 = load double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.600000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 3.000000e+00
store double %add13, double* %arrayidx9, align 8
- %arrayidx15 = getelementptr inbounds double* %A, i64 3
+ %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
%3 = load double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.400000e+00
%mul18 = fmul double %conv, %mul16
; CHECK: ret
define i32 @test2(double* nocapture %G, i32 %k) {
%1 = icmp eq i32 %k, 0
- %2 = getelementptr inbounds double* %G, i64 5
+ %2 = getelementptr inbounds double, double* %G, i64 5
%3 = load double* %2, align 8
%4 = fmul double %3, 4.000000e+00
br i1 %1, label %12, label %5
; <label>:5 ; preds = %0
%6 = fadd double %4, 1.000000e+00
store double %6, double* %G, align 8
- %7 = getelementptr inbounds double* %G, i64 6
+ %7 = getelementptr inbounds double, double* %G, i64 6
%8 = load double* %7, align 8
%9 = fmul double %8, 3.000000e+00
%10 = fadd double %9, 6.000000e+00
- %11 = getelementptr inbounds double* %G, i64 1
+ %11 = getelementptr inbounds double, double* %G, i64 1
store double %10, double* %11, align 8
br label %20
; <label>:12 ; preds = %0
%13 = fadd double %4, 7.000000e+00
- %14 = getelementptr inbounds double* %G, i64 2
+ %14 = getelementptr inbounds double, double* %G, i64 2
store double %13, double* %14, align 8
- %15 = getelementptr inbounds double* %G, i64 6
+ %15 = getelementptr inbounds double, double* %G, i64 6
%16 = load double* %15, align 8
%17 = fmul double %16, 3.000000e+00
%18 = fadd double %17, 8.000000e+00
- %19 = getelementptr inbounds double* %G, i64 3
+ %19 = getelementptr inbounds double, double* %G, i64 3
store double %18, double* %19, align 8
br label %20
%mul1 = fmul double %conv, %mul
%add = fadd double %mul1, 6.000000e+00
store double %add, double* %A, align 8
- %arrayidx3 = getelementptr inbounds double* %A, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
%1 = load double* %arrayidx3, align 8
%mul4 = fmul double %1, 7.900000e+00
%mul6 = fmul double %conv, %mul4
%add7 = fadd double %mul6, 6.000000e+00
store double %add7, double* %arrayidx3, align 8
- %arrayidx9 = getelementptr inbounds double* %A, i64 2
+ %arrayidx9 = getelementptr inbounds double, double* %A, i64 2
%2 = load double* %arrayidx9, align 8
%mul10 = fmul double %2, 7.900000e+00
%mul12 = fmul double %conv, %mul10
%add13 = fadd double %mul12, 6.000000e+00
store double %add13, double* %arrayidx9, align 8
- %arrayidx15 = getelementptr inbounds double* %A, i64 3
+ %arrayidx15 = getelementptr inbounds double, double* %A, i64 3
%3 = load double* %arrayidx15, align 8
%mul16 = fmul double %3, 7.900000e+00
%mul18 = fmul double %conv, %mul16
%conv = sitofp i32 %n to double
%mul = fmul double %conv, %0
store double %mul, double* %A, align 8
- %arrayidx2 = getelementptr inbounds double* %A, i64 1
+ %arrayidx2 = getelementptr inbounds double, double* %A, i64 1
%1 = load double* %arrayidx2, align 8
%mul4 = fmul double %conv, %1
store double %mul4, double* %arrayidx2, align 8
br i1 %cmp, label %return, label %if.end
if.end: ; preds = %entry
- %arrayidx7 = getelementptr inbounds double* %A, i64 2
+ %arrayidx7 = getelementptr inbounds double, double* %A, i64 2
%2 = load double* %arrayidx7, align 8
%mul9 = fmul double %conv, %2
store double %mul9, double* %arrayidx7, align 8
- %arrayidx11 = getelementptr inbounds double* %A, i64 3
+ %arrayidx11 = getelementptr inbounds double, double* %A, i64 3
%3 = load double* %arrayidx11, align 8
%add = add nsw i32 %n, 4
%conv12 = sitofp i32 %add to double
br i1 undef, label %if.end13, label %if.end13
sw.epilog7: ; No predecessors!
- %.in = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 1
+ %.in = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 1
%0 = load double* %.in, align 8
%add = fadd double undef, 0.000000e+00
%add6 = fadd double %add, %0
%1 = load double* @a, align 8
%add8 = fadd double %1, 0.000000e+00
- %_dy = getelementptr inbounds %class.B.53.55* %this, i64 0, i32 0, i32 2
+ %_dy = getelementptr inbounds %class.B.53.55, %class.B.53.55* %this, i64 0, i32 0, i32 2
%2 = load double* %_dy, align 8
%add10 = fadd double %add8, %2
br i1 undef, label %if.then12, label %if.end13
define i32 @foo(i32* nocapture %A) #0 {
entry:
%0 = load i32* %A, align 4
- %arrayidx1 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %A, i64 1
%1 = load i32* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 2
%2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 3
%3 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 13
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 13
%4 = load i32* %arrayidx4, align 4
%cmp24 = icmp sgt i32 %4, 0
br i1 %cmp24, label %for.body, label %for.end
br i1 %cmp8, label %for.body.lr.ph, label %for.end, !dbg !23
for.body.lr.ph: ; preds = %entry
- %arrayidx = getelementptr inbounds double* %A, i64 4, !dbg !24
+ %arrayidx = getelementptr inbounds double, double* %A, i64 4, !dbg !24
%0 = load double* %arrayidx, align 8, !dbg !24
- %arrayidx1 = getelementptr inbounds double* %A, i64 5, !dbg !29
+ %arrayidx1 = getelementptr inbounds double, double* %A, i64 5, !dbg !29
%1 = load double* %arrayidx1, align 8, !dbg !29
br label %for.end, !dbg !23
for.end: ; preds = %for.body.lr.ph, %entry
%y1.0.lcssa = phi double [ %1, %for.body.lr.ph ], [ 1.000000e+00, %entry ]
%y0.0.lcssa = phi double [ %0, %for.body.lr.ph ], [ 0.000000e+00, %entry ]
- %arrayidx2 = getelementptr inbounds double* %A, i64 8, !dbg !30
+ %arrayidx2 = getelementptr inbounds double, double* %A, i64 8, !dbg !30
store double %y0.0.lcssa, double* %arrayidx2, align 8, !dbg !30
- %arrayidx3 = getelementptr inbounds double* %A, i64 9, !dbg !30
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 9, !dbg !30
store double %y1.0.lcssa, double* %arrayidx3, align 8, !dbg !30
ret i32 undef, !dbg !31
}
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
%1 = load i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
- %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
%2 = load i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
- %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
%3 = load i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
- %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
ret i32 0
}
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
%1 = load i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
- %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
%2 = load i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
- %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
%3 = load i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
- %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
ret i32 %0 ;<--------- This value has multiple users
}
%mul238 = add i32 %m, %n
%add = mul i32 %0, %mul238
store i32 %add, i32* %B, align 4
- %arrayidx4 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
%1 = load i32* %arrayidx4, align 4
%add8 = mul i32 %1, %mul238
- %arrayidx9 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx9 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %add8, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 2
%2 = load i32* %arrayidx10, align 4
%add14 = mul i32 %2, %mul238
- %arrayidx15 = getelementptr inbounds i32* %B, i64 2
+ %arrayidx15 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %add14, i32* %arrayidx15, align 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 3
%3 = load i32* %arrayidx16, align 4
%add20 = mul i32 %3, %mul238
- %arrayidx21 = getelementptr inbounds i32* %B, i64 3
+ %arrayidx21 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %add20, i32* %arrayidx21, align 4
ret i32 %1 ;<--------- This value has multiple users
}
define double @ext_user(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) {
entry:
- %arrayidx = getelementptr inbounds double* %A, i64 1
+ %arrayidx = getelementptr inbounds double, double* %A, i64 1
%0 = load double* %arrayidx, align 8
%1 = load double* %A, align 8
br label %for.body
for.end: ; preds = %for.body
store double %add5, double* %B, align 8
- %arrayidx7 = getelementptr inbounds double* %B, i64 1
+ %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
store double %add4, double* %arrayidx7, align 8
ret double %mul3
}
%LD = load <2 x double>* undef
%V0 = extractelement <2 x double> %LD, i32 0
%V1 = extractelement <2 x double> %LD, i32 1
- %P0 = getelementptr inbounds double* %ptr, i64 0
- %P1 = getelementptr inbounds double* %ptr, i64 1
+ %P0 = getelementptr inbounds double, double* %ptr, i64 0
+ %P1 = getelementptr inbounds double, double* %ptr, i64 1
%A0 = fadd double %V0, 0.0
%A1 = fadd double %V1, 1.1
store double %A0, double* %P0, align 4
%LD = load <2 x double>* undef
%V0 = extractelement <2 x double> %LD, i32 0
%V1 = extractelement <2 x double> %LD, i32 1
- %P0 = getelementptr inbounds double* %ptr, i64 1 ; <--- incorrect order
- %P1 = getelementptr inbounds double* %ptr, i64 0
+ %P0 = getelementptr inbounds double, double* %ptr, i64 1 ; <--- incorrect order
+ %P1 = getelementptr inbounds double, double* %ptr, i64 0
%A0 = fadd double %V0, 1.2
%A1 = fadd double %V1, 3.4
store double %A0, double* %P0, align 4
%LD = load <4 x double>* undef
%V0 = extractelement <4 x double> %LD, i32 0 ; <--- invalid size.
%V1 = extractelement <4 x double> %LD, i32 1
- %P0 = getelementptr inbounds double* %ptr, i64 0
- %P1 = getelementptr inbounds double* %ptr, i64 1
+ %P0 = getelementptr inbounds double, double* %ptr, i64 0
+ %P1 = getelementptr inbounds double, double* %ptr, i64 1
%A0 = fadd double %V0, 5.5
%A1 = fadd double %V1, 6.6
store double %A0, double* %P0, align 4
define i32 @fn1() {
entry:
%0 = load i64** @a, align 8
- %add.ptr = getelementptr inbounds i64* %0, i64 11
+ %add.ptr = getelementptr inbounds i64, i64* %0, i64 11
%1 = ptrtoint i64* %add.ptr to i64
store i64 %1, i64* %add.ptr, align 8
- %add.ptr1 = getelementptr inbounds i64* %0, i64 56
+ %add.ptr1 = getelementptr inbounds i64, i64* %0, i64 56
%2 = ptrtoint i64* %add.ptr1 to i64
- %arrayidx2 = getelementptr inbounds i64* %0, i64 12
+ %arrayidx2 = getelementptr inbounds i64, i64* %0, i64 12
store i64 %2, i64* %arrayidx2, align 8
ret i32 undef
; CHECK-LABEL: @fn1(
%fp1 = sitofp i32 %add1 to float
%call1 = tail call float @llvm.powi.f32(float %fp1,i32 %add1) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
%i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
%i3 = load i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%fp2 = sitofp i32 %add2 to float
%call2 = tail call float @llvm.powi.f32(float %fp2,i32 %add1) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
%i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
%i5 = load i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%fp3 = sitofp i32 %add3 to float
%call3 = tail call float @llvm.powi.f32(float %fp3,i32 %add1) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
%i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
%i7 = load i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%fp4 = sitofp i32 %add4 to float
%call4 = tail call float @llvm.powi.f32(float %fp4,i32 %add1) nounwind readnone
store float %call1, float* %c, align 4
- %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
store float %call2, float* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
store float %call3, float* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
store float %call4, float* %arrayidx10, align 4
ret void
store i32 %add, i32* %A, align 4
%mul1 = mul nsw i32 %n, 9
%add2 = add nsw i32 %mul1, 9
- %arrayidx3 = getelementptr inbounds i32* %A, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
store i32 %add2, i32* %arrayidx3, align 4
%mul4 = shl i32 %n, 3
%add5 = add nsw i32 %mul4, 9
- %arrayidx6 = getelementptr inbounds i32* %A, i64 2
+ %arrayidx6 = getelementptr inbounds i32, i32* %A, i64 2
store i32 %add5, i32* %arrayidx6, align 4
%mul7 = mul nsw i32 %n, 10
%add8 = add nsw i32 %mul7, 9
- %arrayidx9 = getelementptr inbounds i32* %A, i64 3
+ %arrayidx9 = getelementptr inbounds i32, i32* %A, i64 3
store i32 %add8, i32* %arrayidx9, align 4
%externaluse1 = add nsw i32 %add, %m
%externaluse2 = mul nsw i32 %add, %m ; we should add the extract cost only once and the store will be vectorized
.lr.ph: ; preds = %0, %.lr.ph
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
- %3 = getelementptr inbounds i32* %in, i64 %2
+ %3 = getelementptr inbounds i32, i32* %in, i64 %2
%4 = load i32* %3, align 4
%5 = or i64 %2, 1
- %6 = getelementptr inbounds i32* %in, i64 %5
+ %6 = getelementptr inbounds i32, i32* %in, i64 %5
%7 = load i32* %6, align 4
%8 = or i64 %2, 2
- %9 = getelementptr inbounds i32* %in, i64 %8
+ %9 = getelementptr inbounds i32, i32* %in, i64 %8
%10 = load i32* %9, align 4
%11 = or i64 %2, 3
- %12 = getelementptr inbounds i32* %in, i64 %11
+ %12 = getelementptr inbounds i32, i32* %in, i64 %11
%13 = load i32* %12, align 4
%14 = mul i32 %4, 7
%15 = add i32 %14, 7
%19 = add i32 %18, 21
%20 = mul i32 %13, 7
%21 = add i32 %20, 28
- %22 = getelementptr inbounds i32* %out, i64 %2
+ %22 = getelementptr inbounds i32, i32* %out, i64 %2
store i32 %15, i32* %22, align 4
- %23 = getelementptr inbounds i32* %out, i64 %5
+ %23 = getelementptr inbounds i32, i32* %out, i64 %5
store i32 %17, i32* %23, align 4
- %24 = getelementptr inbounds i32* %out, i64 %8
+ %24 = getelementptr inbounds i32, i32* %out, i64 %8
store i32 %19, i32* %24, align 4
- %25 = getelementptr inbounds i32* %out, i64 %11
+ %25 = getelementptr inbounds i32, i32* %out, i64 %11
store i32 %21, i32* %25, align 4
%26 = add i64 %i.019, 1
%exitcond = icmp eq i64 %26, %n
; CHECK-LABEL: foo1
; CHECK: <2 x i32*>
define void @foo1 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y) {
- %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
+ %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
%2 = load i32** %1, align 8
- %3 = getelementptr inbounds i32* %2, i64 16
- %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+ %3 = getelementptr inbounds i32, i32* %2, i64 16
+ %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
store i32* %3, i32** %4, align 8
- %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
+ %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
%6 = load i32** %5, align 8
- %7 = getelementptr inbounds i32* %6, i64 16
- %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+ %7 = getelementptr inbounds i32, i32* %6, i64 16
+ %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
store i32* %7, i32** %8, align 8
ret void
}
; CHECK-LABEL: foo2
; CHECK-NOT: <2 x i32*>
define void @foo2 ({ i32*, i32* }* noalias %x, { i32*, i32* }* noalias %y, i32 %i) {
- %1 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 0
+ %1 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 0
%2 = load i32** %1, align 8
- %3 = getelementptr inbounds i32* %2, i32 %i
- %4 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 0
+ %3 = getelementptr inbounds i32, i32* %2, i32 %i
+ %4 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 0
store i32* %3, i32** %4, align 8
- %5 = getelementptr inbounds { i32*, i32* }* %y, i64 0, i32 1
+ %5 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %y, i64 0, i32 1
%6 = load i32** %5, align 8
- %7 = getelementptr inbounds i32* %6, i32 %i
- %8 = getelementptr inbounds { i32*, i32* }* %x, i64 0, i32 1
+ %7 = getelementptr inbounds i32, i32* %6, i32 %i
+ %8 = getelementptr inbounds { i32*, i32* }, { i32*, i32* }* %x, i64 0, i32 1
store i32* %7, i32** %8, align 8
ret void
}
for.body: ; preds = %entry, %for.body
%i.024 = phi i32 [ 0, %entry ], [ %add10, %for.body ]
- %arrayidx = getelementptr inbounds i32* %A, i32 %i.024
+ %arrayidx = getelementptr inbounds i32, i32* %A, i32 %i.024
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %n
store i32 %add, i32* %arrayidx, align 4
%add121 = or i32 %i.024, 1
- %arrayidx2 = getelementptr inbounds i32* %A, i32 %add121
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i32 %add121
%1 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %1, %k
store i32 %add3, i32* %arrayidx2, align 4
%add422 = or i32 %i.024, 2
- %arrayidx5 = getelementptr inbounds i32* %A, i32 %add422
+ %arrayidx5 = getelementptr inbounds i32, i32* %A, i32 %add422
%2 = load i32* %arrayidx5, align 4
%add6 = add nsw i32 %2, %n
store i32 %add6, i32* %arrayidx5, align 4
%add723 = or i32 %i.024, 3
- %arrayidx8 = getelementptr inbounds i32* %A, i32 %add723
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i32 %add723
%3 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %3, %k
store i32 %add9, i32* %arrayidx8, align 4
%i.033 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.032 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add17, %for.body ]
%mul = shl nsw i64 %i.033, 2
- %arrayidx = getelementptr inbounds float* %A, i64 %mul
+ %arrayidx = getelementptr inbounds float, float* %A, i64 %mul
%1 = load float* %arrayidx, align 4
%mul2 = fmul float %1, 7.000000e+00
%add28 = or i64 %mul, 1
- %arrayidx4 = getelementptr inbounds float* %A, i64 %add28
+ %arrayidx4 = getelementptr inbounds float, float* %A, i64 %add28
%2 = load float* %arrayidx4, align 4
%mul5 = fmul float %2, 7.000000e+00
%add6 = fadd fast float %mul2, %mul5
%add829 = or i64 %mul, 2
- %arrayidx9 = getelementptr inbounds float* %A, i64 %add829
+ %arrayidx9 = getelementptr inbounds float, float* %A, i64 %add829
%3 = load float* %arrayidx9, align 4
%mul10 = fmul float %3, 7.000000e+00
%add11 = fadd fast float %add6, %mul10
%add1330 = or i64 %mul, 3
- %arrayidx14 = getelementptr inbounds float* %A, i64 %add1330
+ %arrayidx14 = getelementptr inbounds float, float* %A, i64 %add1330
%4 = load float* %arrayidx14, align 4
%mul15 = fmul float %4, 7.000000e+00
%add16 = fadd fast float %add11, %mul15
for.body.lr.ph:
%0 = load float* %B, align 4
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
%1 = load float* %arrayidx4, align 4
- %arrayidx9 = getelementptr inbounds float* %B, i64 2
+ %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
%2 = load float* %arrayidx9, align 4
- %arrayidx15 = getelementptr inbounds float* %B, i64 3
+ %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
%3 = load float* %arrayidx15, align 4
%4 = sext i32 %n to i64
br label %for.body
%i.040 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.039 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %mul21, %for.body ]
%mul = shl nsw i64 %i.040, 2
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
%5 = load float* %arrayidx2, align 4
%mul3 = fmul float %0, %5
%add35 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds float* %A, i64 %add35
+ %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add35
%6 = load float* %arrayidx6, align 4
%mul7 = fmul float %1, %6
%add8 = fadd fast float %mul3, %mul7
%add1136 = or i64 %mul, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %add1136
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1136
%7 = load float* %arrayidx12, align 4
%mul13 = fmul float %2, %7
%add14 = fadd fast float %add8, %mul13
%add1737 = or i64 %mul, 3
- %arrayidx18 = getelementptr inbounds float* %A, i64 %add1737
+ %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1737
%8 = load float* %arrayidx18, align 4
%mul19 = fmul float %3, %8
%add20 = fadd fast float %add14, %mul19
for.body.lr.ph:
%0 = load float* %B, align 4
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
%1 = load float* %arrayidx4, align 4
- %arrayidx9 = getelementptr inbounds float* %B, i64 2
+ %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
%2 = load float* %arrayidx9, align 4
- %arrayidx15 = getelementptr inbounds float* %B, i64 3
+ %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
%3 = load float* %arrayidx15, align 4
- %arrayidx21 = getelementptr inbounds float* %B, i64 4
+ %arrayidx21 = getelementptr inbounds float, float* %B, i64 4
%4 = load float* %arrayidx21, align 4
- %arrayidx27 = getelementptr inbounds float* %B, i64 5
+ %arrayidx27 = getelementptr inbounds float, float* %B, i64 5
%5 = load float* %arrayidx27, align 4
- %arrayidx33 = getelementptr inbounds float* %B, i64 6
+ %arrayidx33 = getelementptr inbounds float, float* %B, i64 6
%6 = load float* %arrayidx33, align 4
- %arrayidx39 = getelementptr inbounds float* %B, i64 7
+ %arrayidx39 = getelementptr inbounds float, float* %B, i64 7
%7 = load float* %arrayidx39, align 4
- %arrayidx45 = getelementptr inbounds float* %B, i64 8
+ %arrayidx45 = getelementptr inbounds float, float* %B, i64 8
%8 = load float* %arrayidx45, align 4
%9 = sext i32 %n to i64
br label %for.body
%i.083 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.082 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add51, %for.body ]
%mul = mul nsw i64 %i.083, 6
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
%10 = load float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %10
%add80 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds float* %A, i64 %add80
+ %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add80
%11 = load float* %arrayidx6, align 4
%mul7 = fmul fast float %1, %11
%add8 = fadd fast float %mul3, %mul7
%add11 = add nsw i64 %mul, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %add11
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add11
%12 = load float* %arrayidx12, align 4
%mul13 = fmul fast float %2, %12
%add14 = fadd fast float %add8, %mul13
%add17 = add nsw i64 %mul, 3
- %arrayidx18 = getelementptr inbounds float* %A, i64 %add17
+ %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add17
%13 = load float* %arrayidx18, align 4
%mul19 = fmul fast float %3, %13
%add20 = fadd fast float %add14, %mul19
%add23 = add nsw i64 %mul, 4
- %arrayidx24 = getelementptr inbounds float* %A, i64 %add23
+ %arrayidx24 = getelementptr inbounds float, float* %A, i64 %add23
%14 = load float* %arrayidx24, align 4
%mul25 = fmul fast float %4, %14
%add26 = fadd fast float %add20, %mul25
%add29 = add nsw i64 %mul, 5
- %arrayidx30 = getelementptr inbounds float* %A, i64 %add29
+ %arrayidx30 = getelementptr inbounds float, float* %A, i64 %add29
%15 = load float* %arrayidx30, align 4
%mul31 = fmul fast float %5, %15
%add32 = fadd fast float %add26, %mul31
%add35 = add nsw i64 %mul, 6
- %arrayidx36 = getelementptr inbounds float* %A, i64 %add35
+ %arrayidx36 = getelementptr inbounds float, float* %A, i64 %add35
%16 = load float* %arrayidx36, align 4
%mul37 = fmul fast float %6, %16
%add38 = fadd fast float %add32, %mul37
%add41 = add nsw i64 %mul, 7
- %arrayidx42 = getelementptr inbounds float* %A, i64 %add41
+ %arrayidx42 = getelementptr inbounds float, float* %A, i64 %add41
%17 = load float* %arrayidx42, align 4
%mul43 = fmul fast float %7, %17
%add44 = fadd fast float %add38, %mul43
%add47 = add nsw i64 %mul, 8
- %arrayidx48 = getelementptr inbounds float* %A, i64 %add47
+ %arrayidx48 = getelementptr inbounds float, float* %A, i64 %add47
%18 = load float* %arrayidx48, align 4
%mul49 = fmul fast float %8, %18
%add50 = fadd fast float %add44, %mul49
for.body.lr.ph:
%0 = load float* %B, align 4
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
%1 = load float* %arrayidx4, align 4
- %arrayidx10 = getelementptr inbounds float* %B, i64 2
+ %arrayidx10 = getelementptr inbounds float, float* %B, i64 2
%2 = load float* %arrayidx10, align 4
- %arrayidx16 = getelementptr inbounds float* %B, i64 3
+ %arrayidx16 = getelementptr inbounds float, float* %B, i64 3
%3 = load float* %arrayidx16, align 4
%4 = sext i32 %n to i64
br label %for.body
%i.043 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%sum.042 = phi float [ 0.000000e+00, %for.body.lr.ph ], [ %add21, %for.body ]
%mul = shl nsw i64 %i.043, 2
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
%5 = load float* %arrayidx2, align 4
%mul3 = fmul fast float %0, %5
%add = fadd fast float %sum.042, %mul3
%add638 = or i64 %mul, 1
- %arrayidx7 = getelementptr inbounds float* %A, i64 %add638
+ %arrayidx7 = getelementptr inbounds float, float* %A, i64 %add638
%6 = load float* %arrayidx7, align 4
%mul8 = fmul fast float %1, %6
%add9 = fadd fast float %add, %mul8
%add1239 = or i64 %mul, 2
- %arrayidx13 = getelementptr inbounds float* %A, i64 %add1239
+ %arrayidx13 = getelementptr inbounds float, float* %A, i64 %add1239
%7 = load float* %arrayidx13, align 4
%mul14 = fmul fast float %2, %7
%add15 = fadd fast float %add9, %mul14
%add1840 = or i64 %mul, 3
- %arrayidx19 = getelementptr inbounds float* %A, i64 %add1840
+ %arrayidx19 = getelementptr inbounds float, float* %A, i64 %add1840
%8 = load float* %arrayidx19, align 4
%mul20 = fmul fast float %3, %8
%add21 = fadd fast float %add15, %mul20
br i1 %cmp37, label %for.body.lr.ph, label %for.end
for.body.lr.ph:
- %arrayidx4 = getelementptr inbounds float* %B, i64 1
- %arrayidx9 = getelementptr inbounds float* %B, i64 2
- %arrayidx15 = getelementptr inbounds float* %B, i64 3
+ %arrayidx4 = getelementptr inbounds float, float* %B, i64 1
+ %arrayidx9 = getelementptr inbounds float, float* %B, i64 2
+ %arrayidx15 = getelementptr inbounds float, float* %B, i64 3
%0 = sext i32 %n to i64
br label %for.body
%C.addr.038 = phi float* [ %C, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
%1 = load float* %B, align 4
%mul = shl nsw i64 %i.039, 2
- %arrayidx2 = getelementptr inbounds float* %A, i64 %mul
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 %mul
%2 = load float* %arrayidx2, align 4
%mul3 = fmul fast float %1, %2
%3 = load float* %arrayidx4, align 4
%add34 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds float* %A, i64 %add34
+ %arrayidx6 = getelementptr inbounds float, float* %A, i64 %add34
%4 = load float* %arrayidx6, align 4
%mul7 = fmul fast float %3, %4
%add8 = fadd fast float %mul3, %mul7
%5 = load float* %arrayidx9, align 4
%add1135 = or i64 %mul, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %add1135
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %add1135
%6 = load float* %arrayidx12, align 4
%mul13 = fmul fast float %5, %6
%add14 = fadd fast float %add8, %mul13
%7 = load float* %arrayidx15, align 4
%add1736 = or i64 %mul, 3
- %arrayidx18 = getelementptr inbounds float* %A, i64 %add1736
+ %arrayidx18 = getelementptr inbounds float, float* %A, i64 %add1736
%8 = load float* %arrayidx18, align 4
%mul19 = fmul fast float %7, %8
%add20 = fadd fast float %add14, %mul19
store float %add20, float* %C.addr.038, align 4
- %incdec.ptr = getelementptr inbounds float* %C.addr.038, i64 1
+ %incdec.ptr = getelementptr inbounds float, float* %C.addr.038, i64 1
%inc = add nsw i64 %i.039, 1
%exitcond = icmp eq i64 %inc, %0
br i1 %exitcond, label %for.end, label %for.body
for.body.lr.ph:
%0 = load double* %B, align 8
- %arrayidx4 = getelementptr inbounds double* %B, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %B, i64 1
%1 = load double* %arrayidx4, align 8
%2 = sext i32 %n to i64
br label %for.body
for.body:
%i.018 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
%mul = shl nsw i64 %i.018, 2
- %arrayidx2 = getelementptr inbounds double* %A, i64 %mul
+ %arrayidx2 = getelementptr inbounds double, double* %A, i64 %mul
%3 = load double* %arrayidx2, align 8
%mul3 = fmul fast double %0, %3
%add16 = or i64 %mul, 1
- %arrayidx6 = getelementptr inbounds double* %A, i64 %add16
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 %add16
%4 = load double* %arrayidx6, align 8
%mul7 = fmul fast double %1, %4
%add8 = fadd fast double %mul3, %mul7
- %arrayidx9 = getelementptr inbounds double* %C, i64 %i.018
+ %arrayidx9 = getelementptr inbounds double, double* %C, i64 %i.018
store double %add8, double* %arrayidx9, align 8
%inc = add nsw i64 %i.018, 1
%exitcond = icmp eq i64 %inc, %2
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
for.body: ; preds = %for.inc, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
%0 = shl nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds double* %A, i64 %0
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %0
%1 = load double* %arrayidx, align 8
%mul1 = fmul double %conv, %1
%mul2 = fmul double %mul1, 7.000000e+00
%add = fadd double %mul2, 5.000000e+00
%InTreeUser = fadd double %add, %add ; <------------------ In tree user.
%2 = or i64 %0, 1
- %arrayidx6 = getelementptr inbounds double* %A, i64 %2
+ %arrayidx6 = getelementptr inbounds double, double* %A, i64 %2
%3 = load double* %arrayidx6, align 8
%mul8 = fmul double %conv, %3
%mul9 = fmul double %mul8, 4.000000e+00
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
%call = tail call double @llvm.fabs.f64(double %mul) nounwind readnone
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%call5 = tail call double @llvm.fabs.f64(double %mul5) nounwind readnone
store double %call, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %call5, double* %arrayidx5, align 8
ret void
}
%call0 = tail call float @llvm.copysign.f32(float %0, float %1) nounwind readnone
store float %call0, float* %c, align 4
- %ix2 = getelementptr inbounds float* %a, i64 1
+ %ix2 = getelementptr inbounds float, float* %a, i64 1
%2 = load float* %ix2, align 4
- %ix3 = getelementptr inbounds float* %b, i64 1
+ %ix3 = getelementptr inbounds float, float* %b, i64 1
%3 = load float* %ix3, align 4
%call1 = tail call float @llvm.copysign.f32(float %2, float %3) nounwind readnone
- %c1 = getelementptr inbounds float* %c, i64 1
+ %c1 = getelementptr inbounds float, float* %c, i64 1
store float %call1, float* %c1, align 4
- %ix4 = getelementptr inbounds float* %a, i64 2
+ %ix4 = getelementptr inbounds float, float* %a, i64 2
%4 = load float* %ix4, align 4
- %ix5 = getelementptr inbounds float* %b, i64 2
+ %ix5 = getelementptr inbounds float, float* %b, i64 2
%5 = load float* %ix5, align 4
%call2 = tail call float @llvm.copysign.f32(float %4, float %5) nounwind readnone
- %c2 = getelementptr inbounds float* %c, i64 2
+ %c2 = getelementptr inbounds float, float* %c, i64 2
store float %call2, float* %c2, align 4
- %ix6 = getelementptr inbounds float* %a, i64 3
+ %ix6 = getelementptr inbounds float, float* %a, i64 3
%6 = load float* %ix6, align 4
- %ix7 = getelementptr inbounds float* %b, i64 3
+ %ix7 = getelementptr inbounds float, float* %b, i64 3
%7 = load float* %ix7, align 4
%call3 = tail call float @llvm.copysign.f32(float %6, float %7) nounwind readnone
- %c3 = getelementptr inbounds float* %c, i64 3
+ %c3 = getelementptr inbounds float, float* %c, i64 3
store float %call3, float* %c3, align 4
ret void
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.bswap.i32(i32 %add1) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
%i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
%i3 = load i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.bswap.i32(i32 %add2) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
%i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
%i5 = load i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.bswap.i32(i32 %add3) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
%i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
%i7 = load i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.bswap.i32(i32 %add4) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
%i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
%i3 = load i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 true) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
%i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
%i5 = load i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
%i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
%i7 = load i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 true) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.ctlz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
%i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
%i3 = load i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.ctlz.i32(i32 %add2,i1 false) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
%i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
%i5 = load i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.ctlz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
%i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
%i7 = load i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.ctlz.i32(i32 %add4,i1 false) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
%i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
%i3 = load i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 true) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
%i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
%i5 = load i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
%i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
%i7 = load i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 true) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
%add1 = add i32 %i0, %i1
%call1 = tail call i32 @llvm.cttz.i32(i32 %add1,i1 true) nounwind readnone
- %arrayidx2 = getelementptr inbounds i32* %a, i32 1
+ %arrayidx2 = getelementptr inbounds i32, i32* %a, i32 1
%i2 = load i32* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i32 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i32 1
%i3 = load i32* %arrayidx3, align 4
%add2 = add i32 %i2, %i3
%call2 = tail call i32 @llvm.cttz.i32(i32 %add2,i1 false) nounwind readnone
- %arrayidx4 = getelementptr inbounds i32* %a, i32 2
+ %arrayidx4 = getelementptr inbounds i32, i32* %a, i32 2
%i4 = load i32* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds i32* %b, i32 2
+ %arrayidx5 = getelementptr inbounds i32, i32* %b, i32 2
%i5 = load i32* %arrayidx5, align 4
%add3 = add i32 %i4, %i5
%call3 = tail call i32 @llvm.cttz.i32(i32 %add3,i1 true) nounwind readnone
- %arrayidx6 = getelementptr inbounds i32* %a, i32 3
+ %arrayidx6 = getelementptr inbounds i32, i32* %a, i32 3
%i6 = load i32* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds i32* %b, i32 3
+ %arrayidx7 = getelementptr inbounds i32, i32* %b, i32 3
%i7 = load i32* %arrayidx7, align 4
%add4 = add i32 %i6, %i7
%call4 = tail call i32 @llvm.cttz.i32(i32 %add4,i1 false) nounwind readnone
store i32 %call1, i32* %c, align 4
- %arrayidx8 = getelementptr inbounds i32* %c, i32 1
+ %arrayidx8 = getelementptr inbounds i32, i32* %c, i32 1
store i32 %call2, i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i32 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i32 2
store i32 %call3, i32* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds i32* %c, i32 3
+ %arrayidx10 = getelementptr inbounds i32, i32* %c, i32 3
store i32 %call4, i32* %arrayidx10, align 4
ret void
%add1 = fadd float %i0, %i1
%call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %a, i32 1
+ %arrayidx2 = getelementptr inbounds float, float* %a, i32 1
%i2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %b, i32 1
+ %arrayidx3 = getelementptr inbounds float, float* %b, i32 1
%i3 = load float* %arrayidx3, align 4
%add2 = fadd float %i2, %i3
%call2 = tail call float @llvm.powi.f32(float %add2,i32 %P) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %a, i32 2
+ %arrayidx4 = getelementptr inbounds float, float* %a, i32 2
%i4 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %b, i32 2
+ %arrayidx5 = getelementptr inbounds float, float* %b, i32 2
%i5 = load float* %arrayidx5, align 4
%add3 = fadd float %i4, %i5
%call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
- %arrayidx6 = getelementptr inbounds float* %a, i32 3
+ %arrayidx6 = getelementptr inbounds float, float* %a, i32 3
%i6 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %b, i32 3
+ %arrayidx7 = getelementptr inbounds float, float* %b, i32 3
%i7 = load float* %arrayidx7, align 4
%add4 = fadd float %i6, %i7
%call4 = tail call float @llvm.powi.f32(float %add4,i32 %P) nounwind readnone
store float %call1, float* %c, align 4
- %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
store float %call2, float* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
store float %call3, float* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
store float %call4, float* %arrayidx10, align 4
ret void
%add1 = fadd float %i0, %i1
%call1 = tail call float @llvm.powi.f32(float %add1,i32 %P) nounwind readnone
- %arrayidx2 = getelementptr inbounds float* %a, i32 1
+ %arrayidx2 = getelementptr inbounds float, float* %a, i32 1
%i2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %b, i32 1
+ %arrayidx3 = getelementptr inbounds float, float* %b, i32 1
%i3 = load float* %arrayidx3, align 4
%add2 = fadd float %i2, %i3
%call2 = tail call float @llvm.powi.f32(float %add2,i32 %Q) nounwind readnone
- %arrayidx4 = getelementptr inbounds float* %a, i32 2
+ %arrayidx4 = getelementptr inbounds float, float* %a, i32 2
%i4 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %b, i32 2
+ %arrayidx5 = getelementptr inbounds float, float* %b, i32 2
%i5 = load float* %arrayidx5, align 4
%add3 = fadd float %i4, %i5
%call3 = tail call float @llvm.powi.f32(float %add3,i32 %P) nounwind readnone
- %arrayidx6 = getelementptr inbounds float* %a, i32 3
+ %arrayidx6 = getelementptr inbounds float, float* %a, i32 3
%i6 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %b, i32 3
+ %arrayidx7 = getelementptr inbounds float, float* %b, i32 3
%i7 = load float* %arrayidx7, align 4
%add4 = fadd float %i6, %i7
%call4 = tail call float @llvm.powi.f32(float %add4,i32 %Q) nounwind readnone
store float %call1, float* %c, align 4
- %arrayidx8 = getelementptr inbounds float* %c, i32 1
+ %arrayidx8 = getelementptr inbounds float, float* %c, i32 1
store float %call2, float* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds float* %c, i32 2
+ %arrayidx9 = getelementptr inbounds float, float* %c, i32 2
store float %call3, float* %arrayidx9, align 4
- %arrayidx10 = getelementptr inbounds float* %c, i32 3
+ %arrayidx10 = getelementptr inbounds float, float* %c, i32 3
store float %call4, float* %arrayidx10, align 4
ret void
define i32 @test(double* nocapture %A, i8* nocapture %B) {
entry:
%0 = load i8* %B, align 1
- %arrayidx1 = getelementptr inbounds i8* %B, i64 1
+ %arrayidx1 = getelementptr inbounds i8, i8* %B, i64 1
%1 = load i8* %arrayidx1, align 1
%add = add i8 %0, 3
%add4 = add i8 %1, 3
%mul25 = fmul double %add22, %add22
%add26 = fadd double %mul25, 1.000000e+00
store double %add24, double* %A, align 8
- %arrayidx28 = getelementptr inbounds double* %A, i64 1
+ %arrayidx28 = getelementptr inbounds double, double* %A, i64 1
store double %add26, double* %arrayidx28, align 8
ret i32 undef
}
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
- %arrayidx = getelementptr inbounds i32* %A, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
%0 = load i32* %arrayidx, align 4
%add1 = add nsw i32 %0, %n
store i32 %add1, i32* %arrayidx, align 4
%1 = or i64 %indvars.iv, 1
- %arrayidx4 = getelementptr inbounds i32* %A, i64 %1
+ %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %1
%2 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %2, %n
store i32 %add5, i32* %arrayidx4, align 4
%3 = or i64 %indvars.iv, 2
- %arrayidx8 = getelementptr inbounds i32* %A, i64 %3
+ %arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %3
%4 = load i32* %arrayidx8, align 4
%add9 = add nsw i32 %4, %n
store i32 %add9, i32* %arrayidx8, align 4
%5 = or i64 %indvars.iv, 3
- %arrayidx12 = getelementptr inbounds i32* %A, i64 %5
+ %arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %5
%6 = load i32* %arrayidx12, align 4
%add13 = add nsw i32 %6, %n
store i32 %add13, i32* %arrayidx12, align 4
%7 = or i64 %indvars.iv, 4
- %arrayidx16 = getelementptr inbounds i32* %A, i64 %7
+ %arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %7
%8 = load i32* %arrayidx16, align 4
%add17 = add nsw i32 %8, %n
store i32 %add17, i32* %arrayidx16, align 4
%9 = or i64 %indvars.iv, 5
- %arrayidx20 = getelementptr inbounds i32* %A, i64 %9
+ %arrayidx20 = getelementptr inbounds i32, i32* %A, i64 %9
%10 = load i32* %arrayidx20, align 4
%add21 = add nsw i32 %10, %n
store i32 %add21, i32* %arrayidx20, align 4
%11 = or i64 %indvars.iv, 6
- %arrayidx24 = getelementptr inbounds i32* %A, i64 %11
+ %arrayidx24 = getelementptr inbounds i32, i32* %A, i64 %11
%12 = load i32* %arrayidx24, align 4
%add25 = add nsw i32 %12, %n
store i32 %add25, i32* %arrayidx24, align 4
%13 = or i64 %indvars.iv, 7
- %arrayidx28 = getelementptr inbounds i32* %A, i64 %13
+ %arrayidx28 = getelementptr inbounds i32, i32* %A, i64 %13
%14 = load i32* %arrayidx28, align 4
%add29 = add nsw i32 %14, %n
store i32 %add29, i32* %arrayidx28, align 4
%i0 = load double* %a, align 8, !tbaa !4
%i1 = load double* %b, align 8, !tbaa !4
%mul = fmul double %i0, %i1, !fpmath !0
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8, !tbaa !4
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8, !tbaa !4
%mul5 = fmul double %i3, %i4, !fpmath !0
store double %mul, double* %c, align 8, !tbaa !4
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8, !tbaa !4
ret void
}
%i0 = load double* %a, align 8, !tbaa !4
%i1 = load double* %b, align 8, !tbaa !4
%mul = fmul double %i0, %i1, !fpmath !1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8, !tbaa !4
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8, !tbaa !4
%mul5 = fmul double %i3, %i4, !fpmath !1
%c = bitcast i8* %e to double*
store double %mul, double* %c, align 8, !tbaa !4
- %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+ %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
%arrayidx5 = bitcast i8* %carrayidx5 to double*
store double %mul5, double* %arrayidx5, align 8, !tbaa !4
ret void
;CHECK: ret
define i32 @bar(double* nocapture %A, i32 %d) {
%1 = load double* %A, align 8
- %2 = getelementptr inbounds double* %A, i64 1
+ %2 = getelementptr inbounds double, double* %A, i64 1
%3 = load double* %2, align 8
%4 = fptrunc double %1 to float
%5 = fptrunc double %3 to float
%11 = fadd float %5, 5.000000e+00
%12 = fpext float %10 to double
%13 = fadd double %12, 9.000000e+00
- %14 = getelementptr inbounds double* %A, i64 8
+ %14 = getelementptr inbounds double, double* %A, i64 8
store double %13, double* %14, align 8
%15 = fpext float %11 to double
%16 = fadd double %15, 5.000000e+00
- %17 = getelementptr inbounds double* %A, i64 9
+ %17 = getelementptr inbounds double, double* %A, i64 9
store double %16, double* %17, align 8
ret i32 undef
}
%4 = add nsw i32 %2, %3
store i32 %4, i32* %A, align 4
%5 = add nsw i32 %1, 8
- %6 = getelementptr inbounds i32* %A, i64 1
+ %6 = getelementptr inbounds i32, i32* %A, i64 1
%7 = load i32* %6, align 4
%8 = add nsw i32 %5, %7
store i32 %8, i32* %6, align 4
%9 = add nsw i32 %1, 9
- %10 = getelementptr inbounds i32* %A, i64 2
+ %10 = getelementptr inbounds i32, i32* %A, i64 2
%11 = load i32* %10, align 4
%12 = add nsw i32 %9, %11
store i32 %12, i32* %10, align 4
%13 = add nsw i32 %1, 10
- %14 = getelementptr inbounds i32* %A, i64 3
+ %14 = getelementptr inbounds i32, i32* %A, i64 3
%15 = load i32* %14, align 4
%16 = add nsw i32 %13, %15
store i32 %16, i32* %14, align 4
%17 = add nsw i32 %1, 11
- %18 = getelementptr inbounds i32* %A, i64 4
+ %18 = getelementptr inbounds i32, i32* %A, i64 4
%19 = load i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %18, align 4
;CHECK-NOT: store <3 x i8>
;CHECK: ret
define i32 @foo(i8* noalias nocapture %A, float* noalias nocapture %B, float %T) {
- %1 = getelementptr inbounds float* %B, i64 10
+ %1 = getelementptr inbounds float, float* %B, i64 10
%2 = load float* %1, align 4
%3 = fmul float %2, %T
%4 = fpext float %3 to double
%5 = fadd double %4, 4.000000e+00
%6 = fptosi double %5 to i8
store i8 %6, i8* %A, align 1
- %7 = getelementptr inbounds float* %B, i64 11
+ %7 = getelementptr inbounds float, float* %B, i64 11
%8 = load float* %7, align 4
%9 = fmul float %8, %T
%10 = fpext float %9 to double
%11 = fadd double %10, 5.000000e+00
%12 = fptosi double %11 to i8
- %13 = getelementptr inbounds i8* %A, i64 1
+ %13 = getelementptr inbounds i8, i8* %A, i64 1
store i8 %12, i8* %13, align 1
- %14 = getelementptr inbounds float* %B, i64 12
+ %14 = getelementptr inbounds float, float* %B, i64 12
%15 = load float* %14, align 4
%16 = fmul float %15, %T
%17 = fpext float %16 to double
%18 = fadd double %17, 6.000000e+00
%19 = fptosi double %18 to i8
- %20 = getelementptr inbounds i8* %A, i64 2
+ %20 = getelementptr inbounds i8, i8* %A, i64 2
store i8 %19, i8* %20, align 1
ret i32 undef
}
define void @shuffle_operands1(double * noalias %from, double * noalias %to,
double %v1, double %v2) {
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %v0_1, %v1
%v1_2 = fadd double %v2, %v0_2
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
ret void
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %v0_1, %p
%v1_2 = fadd double %v0_1, %v0_2
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %p, %v0_1
%v1_2 = fadd double %v0_2, %v0_1
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %p, %v0_1
%v1_2 = fadd double %v0_1, %v0_2
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %v0_2, %v0_1
%v1_2 = fadd double %p, %v0_1
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %v0_1, %v0_2
%v1_2 = fadd double %p, %v0_1
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
lp:
%p = phi double [ 1.000000e+00, %lp ], [ 0.000000e+00, %entry ]
- %from_1 = getelementptr double *%from, i64 1
+ %from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double * %from
%v0_2 = load double * %from_1
%v1_1 = fadd double %v0_1, %v0_2
%v1_2 = fadd double %v0_1, %p
- %to_2 = getelementptr double * %to, i64 1
+ %to_2 = getelementptr double, double * %to, i64 1
store double %v1_1, double *%to
store double %v1_2, double *%to_2
br i1 undef, label %lp, label %ext
%1 = phi float [ %0, %for.cond1.preheader ], [ %10, %for.body3 ]
%indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next, %for.body3 ]
%2 = add nsw i64 %indvars.iv, 1
- %arrayidx = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %2
+ %arrayidx = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %2
%3 = load float* %arrayidx, align 4
- %arrayidx5 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv
+ %arrayidx5 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv
%mul6 = fmul float %3, %1
store float %mul6, float* %arrayidx5, align 4
%4 = add nsw i64 %indvars.iv, 2
- %arrayidx11 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %4
+ %arrayidx11 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %4
%5 = load float* %arrayidx11, align 4
%mul15 = fmul float %5, %3
store float %mul15, float* %arrayidx, align 4
%6 = add nsw i64 %indvars.iv, 3
- %arrayidx21 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %6
+ %arrayidx21 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %6
%7 = load float* %arrayidx21, align 4
%mul25 = fmul float %7, %5
store float %mul25, float* %arrayidx11, align 4
%8 = add nsw i64 %indvars.iv, 4
- %arrayidx31 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %8
+ %arrayidx31 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %8
%9 = load float* %arrayidx31, align 4
%mul35 = fmul float %9, %7
store float %mul35, float* %arrayidx21, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 5
- %arrayidx41 = getelementptr inbounds [32000 x float]* @a, i64 0, i64 %indvars.iv.next
+ %arrayidx41 = getelementptr inbounds [32000 x float], [32000 x float]* @a, i64 0, i64 %indvars.iv.next
%10 = load float* %arrayidx41, align 4
%mul45 = fmul float %10, %9
store float %mul45, float* %arrayidx31, align 4
%2 = load double* %b
%3 = fadd double %1, %2
store double %3, double* %c
- %4 = getelementptr inbounds double* %b, i64 1
+ %4 = getelementptr inbounds double, double* %b, i64 1
%5 = load double* %4
- %6 = getelementptr inbounds double* %a, i64 1
+ %6 = getelementptr inbounds double, double* %a, i64 1
%7 = load double* %6
%8 = fadd double %5, %7
- %9 = getelementptr inbounds double* %c, i64 1
+ %9 = getelementptr inbounds double, double* %c, i64 1
store double %8, double* %9
ret void
}
%2 = load float* %b
%3 = fadd float %1, %2
store float %3, float* %c
- %4 = getelementptr inbounds float* %b, i64 1
+ %4 = getelementptr inbounds float, float* %b, i64 1
%5 = load float* %4
- %6 = getelementptr inbounds float* %a, i64 1
+ %6 = getelementptr inbounds float, float* %a, i64 1
%7 = load float* %6
%8 = fadd float %5, %7
- %9 = getelementptr inbounds float* %c, i64 1
+ %9 = getelementptr inbounds float, float* %c, i64 1
store float %8, float* %9
- %10 = getelementptr inbounds float* %a, i64 2
+ %10 = getelementptr inbounds float, float* %a, i64 2
%11 = load float* %10
- %12 = getelementptr inbounds float* %b, i64 2
+ %12 = getelementptr inbounds float, float* %b, i64 2
%13 = load float* %12
%14 = fadd float %11, %13
- %15 = getelementptr inbounds float* %c, i64 2
+ %15 = getelementptr inbounds float, float* %c, i64 2
store float %14, float* %15
- %16 = getelementptr inbounds float* %a, i64 3
+ %16 = getelementptr inbounds float, float* %a, i64 3
%17 = load float* %16
- %18 = getelementptr inbounds float* %b, i64 3
+ %18 = getelementptr inbounds float, float* %b, i64 3
%19 = load float* %18
%20 = fadd float %17, %19
- %21 = getelementptr inbounds float* %c, i64 3
+ %21 = getelementptr inbounds float, float* %c, i64 3
store float %20, float* %21
ret void
}
%4 = load float* %d
%5 = fadd float %3, %4
store float %5, float* %a
- %6 = getelementptr inbounds float* %d, i64 1
+ %6 = getelementptr inbounds float, float* %d, i64 1
%7 = load float* %6
- %8 = getelementptr inbounds float* %b, i64 1
+ %8 = getelementptr inbounds float, float* %b, i64 1
%9 = load float* %8
- %10 = getelementptr inbounds float* %c, i64 1
+ %10 = getelementptr inbounds float, float* %c, i64 1
%11 = load float* %10
%12 = fadd float %9, %11
%13 = fadd float %7, %12
- %14 = getelementptr inbounds float* %a, i64 1
+ %14 = getelementptr inbounds float, float* %a, i64 1
store float %13, float* %14
- %15 = getelementptr inbounds float* %b, i64 2
+ %15 = getelementptr inbounds float, float* %b, i64 2
%16 = load float* %15
- %17 = getelementptr inbounds float* %c, i64 2
+ %17 = getelementptr inbounds float, float* %c, i64 2
%18 = load float* %17
%19 = fadd float %16, %18
- %20 = getelementptr inbounds float* %d, i64 2
+ %20 = getelementptr inbounds float, float* %d, i64 2
%21 = load float* %20
%22 = fadd float %19, %21
- %23 = getelementptr inbounds float* %a, i64 2
+ %23 = getelementptr inbounds float, float* %a, i64 2
store float %22, float* %23
- %24 = getelementptr inbounds float* %b, i64 3
+ %24 = getelementptr inbounds float, float* %b, i64 3
%25 = load float* %24
- %26 = getelementptr inbounds float* %c, i64 3
+ %26 = getelementptr inbounds float, float* %c, i64 3
%27 = load float* %26
%28 = fadd float %25, %27
- %29 = getelementptr inbounds float* %d, i64 3
+ %29 = getelementptr inbounds float, float* %d, i64 3
%30 = load float* %29
%31 = fadd float %28, %30
- %32 = getelementptr inbounds float* %a, i64 3
+ %32 = getelementptr inbounds float, float* %a, i64 3
store float %31, float* %32
ret void
}
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
br i1 %tobool, label %if.else, label %if.end
if.else: ; preds = %entry
- %arrayidx = getelementptr inbounds double* %A, i64 10
+ %arrayidx = getelementptr inbounds double, double* %A, i64 10
%0 = load double* %arrayidx, align 8
- %arrayidx1 = getelementptr inbounds double* %A, i64 11
+ %arrayidx1 = getelementptr inbounds double, double* %A, i64 11
%1 = load double* %arrayidx1, align 8
br label %if.end
%A0.0 = phi double [ %0, %if.else ], [ 3.000000e+00, %entry ]
%A1.0 = phi double [ %1, %if.else ], [ 5.000000e+00, %entry ]
store double %A0.0, double* %A, align 8
- %arrayidx3 = getelementptr inbounds double* %A, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %A, i64 1
store double %A1.0, double* %arrayidx3, align 8
ret i32 undef
}
;CHECK: ret
define i32 @foo2(double* noalias nocapture %B, double* noalias nocapture %A, i32 %n, i32 %m) #0 {
entry:
- %arrayidx = getelementptr inbounds double* %A, i64 1
+ %arrayidx = getelementptr inbounds double, double* %A, i64 1
%0 = load double* %arrayidx, align 8
%1 = load double* %A, align 8
br label %for.body
for.end: ; preds = %for.body
store double %add5, double* %B, align 8
- %arrayidx7 = getelementptr inbounds double* %B, i64 1
+ %arrayidx7 = getelementptr inbounds double, double* %B, i64 1
store double %add4, double* %arrayidx7, align 8
ret i32 0
}
define float @foo3(float* nocapture readonly %A) #0 {
entry:
%0 = load float* %A, align 4
- %arrayidx1 = getelementptr inbounds float* %A, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
%1 = load float* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds float* %A, i64 2
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
%2 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %A, i64 3
+ %arrayidx3 = getelementptr inbounds float, float* %A, i64 3
%3 = load float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %A, i64 4
+ %arrayidx4 = getelementptr inbounds float, float* %A, i64 4
%4 = load float* %arrayidx4, align 4
br label %for.body
%mul10 = fmul float %5, 8.000000e+00
%add11 = fadd float %G.053, %mul10
%7 = add nsw i64 %indvars.iv, 2
- %arrayidx14 = getelementptr inbounds float* %A, i64 %7
+ %arrayidx14 = getelementptr inbounds float, float* %A, i64 %7
%8 = load float* %arrayidx14, align 4
%mul15 = fmul float %8, 9.000000e+00
%add16 = fadd float %B.054, %mul15
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 3
- %arrayidx19 = getelementptr inbounds float* %A, i64 %indvars.iv.next
+ %arrayidx19 = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
%9 = load float* %arrayidx19, align 4
%mul20 = fmul float %9, 1.000000e+01
%add21 = fadd float %Y.055, %mul20
%10 = add nsw i64 %indvars.iv, 4
- %arrayidx24 = getelementptr inbounds float* %A, i64 %10
+ %arrayidx24 = getelementptr inbounds float, float* %A, i64 %10
%11 = load float* %arrayidx24, align 4
%mul25 = fmul float %11, 1.100000e+01
%add26 = fadd float %P.056, %mul25
entry:
%i1.0 = load x86_fp80* %i1, align 16
- %i1.gep1 = getelementptr x86_fp80* %i1, i64 1
+ %i1.gep1 = getelementptr x86_fp80, x86_fp80* %i1, i64 1
%i1.1 = load x86_fp80* %i1.gep1, align 16
; CHECK: load x86_fp80*
; CHECK: load x86_fp80*
br i1 undef, label %then, label %end
then:
- %i2.gep0 = getelementptr inbounds x86_fp80* %i2, i64 0
+ %i2.gep0 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 0
%i2.0 = load x86_fp80* %i2.gep0, align 16
- %i2.gep1 = getelementptr inbounds x86_fp80* %i2, i64 1
+ %i2.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %i2, i64 1
%i2.1 = load x86_fp80* %i2.gep1, align 16
; CHECK: load x86_fp80*
; CHECK: load x86_fp80*
; CHECK-NOT: extractelement <2 x x86_fp80>
; CHECK-NOT: extractelement <2 x x86_fp80>
store x86_fp80 %phi0, x86_fp80* %o, align 16
- %o.gep1 = getelementptr inbounds x86_fp80* %o, i64 1
+ %o.gep1 = getelementptr inbounds x86_fp80, x86_fp80* %o, i64 1
store x86_fp80 %phi1, x86_fp80* %o.gep1, align 16
ret void
}
entry:
%i1.0 = load double* %i1, align 16
- %i1.gep1 = getelementptr double* %i1, i64 1
+ %i1.gep1 = getelementptr double, double* %i1, i64 1
%i1.1 = load double* %i1.gep1, align 16
; CHECK: load double*
; CHECK: load double*
br i1 undef, label %then, label %end
then:
- %i2.gep0 = getelementptr inbounds double* %i2, i64 0
+ %i2.gep0 = getelementptr inbounds double, double* %i2, i64 0
%i2.0 = load double* %i2.gep0, align 16
- %i2.gep1 = getelementptr inbounds double* %i2, i64 1
+ %i2.gep1 = getelementptr inbounds double, double* %i2, i64 1
%i2.1 = load double* %i2.gep1, align 16
; CHECK: load double*
; CHECK: load double*
; CHECK: extractelement <2 x double>
; CHECK: extractelement <2 x double>
store double %phi0, double* %o, align 16
- %o.gep1 = getelementptr inbounds double* %o, i64 1
+ %o.gep1 = getelementptr inbounds double, double* %o, i64 1
store double %phi1, double* %o.gep1, align 16
ret void
}
%add = add nsw i32 %1, %0
%div = sdiv i32 %add, 2
store i32 %div, i32* %a, align 4
- %arrayidx3 = getelementptr inbounds i32* %b, i64 1
+ %arrayidx3 = getelementptr inbounds i32, i32* %b, i64 1
%2 = load i32* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds i32* %c, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %c, i64 1
%3 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %3, %2
%div6 = sdiv i32 %add5, 2
- %arrayidx7 = getelementptr inbounds i32* %a, i64 1
+ %arrayidx7 = getelementptr inbounds i32, i32* %a, i64 1
store i32 %div6, i32* %arrayidx7, align 4
- %arrayidx8 = getelementptr inbounds i32* %b, i64 2
+ %arrayidx8 = getelementptr inbounds i32, i32* %b, i64 2
%4 = load i32* %arrayidx8, align 4
- %arrayidx9 = getelementptr inbounds i32* %c, i64 2
+ %arrayidx9 = getelementptr inbounds i32, i32* %c, i64 2
%5 = load i32* %arrayidx9, align 4
%add10 = add nsw i32 %5, %4
%div11 = sdiv i32 %add10, 2
- %arrayidx12 = getelementptr inbounds i32* %a, i64 2
+ %arrayidx12 = getelementptr inbounds i32, i32* %a, i64 2
store i32 %div11, i32* %arrayidx12, align 4
- %arrayidx13 = getelementptr inbounds i32* %b, i64 3
+ %arrayidx13 = getelementptr inbounds i32, i32* %b, i64 3
%6 = load i32* %arrayidx13, align 4
- %arrayidx14 = getelementptr inbounds i32* %c, i64 3
+ %arrayidx14 = getelementptr inbounds i32, i32* %c, i64 3
%7 = load i32* %arrayidx14, align 4
%add15 = add nsw i32 %7, %6
%div16 = sdiv i32 %add15, 2
- %arrayidx17 = getelementptr inbounds i32* %a, i64 3
+ %arrayidx17 = getelementptr inbounds i32, i32* %a, i64 3
store i32 %div16, i32* %arrayidx17, align 4
ret void
}
entry:
%0 = load i32** @a, align 4, !tbaa !4
%1 = load i32* %0, align 4, !tbaa !5
- %arrayidx1 = getelementptr inbounds i32* %0, i32 1
+ %arrayidx1 = getelementptr inbounds i32, i32* %0, i32 1
%2 = load i32* %arrayidx1, align 4, !tbaa !5
br label %do.body
%1 = alloca double*, align 8
store double* %x, double** %1, align 8
%2 = load double** %1, align 8
- %3 = getelementptr inbounds double* %2, i64 0
+ %3 = getelementptr inbounds double, double* %2, i64 0
%4 = load double* %3, align 8
%5 = load double** %1, align 8
- %6 = getelementptr inbounds double* %5, i64 0
+ %6 = getelementptr inbounds double, double* %5, i64 0
%7 = load double* %6, align 8
%8 = fadd double %4, %7
%9 = load double** %1, align 8
- %10 = getelementptr inbounds double* %9, i64 0
+ %10 = getelementptr inbounds double, double* %9, i64 0
%11 = load double* %10, align 8
%12 = fadd double %8, %11
%13 = load double** %1, align 8
- %14 = getelementptr inbounds double* %13, i64 0
+ %14 = getelementptr inbounds double, double* %13, i64 0
store double %12, double* %14, align 8
%15 = load double** %1, align 8
- %16 = getelementptr inbounds double* %15, i64 1
+ %16 = getelementptr inbounds double, double* %15, i64 1
%17 = load double* %16, align 8
%18 = load double** %1, align 8
- %19 = getelementptr inbounds double* %18, i64 1
+ %19 = getelementptr inbounds double, double* %18, i64 1
%20 = load double* %19, align 8
%21 = fadd double %17, %20
%22 = load double** %1, align 8
- %23 = getelementptr inbounds double* %22, i64 1
+ %23 = getelementptr inbounds double, double* %22, i64 1
%24 = load double* %23, align 8
%25 = fadd double %21, %24
%26 = load double** %1, align 8
- %27 = getelementptr inbounds double* %26, i64 1
+ %27 = getelementptr inbounds double, double* %26, i64 1
store double %25, double* %27, align 8
%28 = load double** %1, align 8
- %29 = getelementptr inbounds double* %28, i64 2
+ %29 = getelementptr inbounds double, double* %28, i64 2
%30 = load double* %29, align 8
%31 = load double** %1, align 8
- %32 = getelementptr inbounds double* %31, i64 2
+ %32 = getelementptr inbounds double, double* %31, i64 2
%33 = load double* %32, align 8
%34 = fadd double %30, %33
%35 = load double** %1, align 8
- %36 = getelementptr inbounds double* %35, i64 2
+ %36 = getelementptr inbounds double, double* %35, i64 2
%37 = load double* %36, align 8
%38 = fadd double %34, %37
%39 = load double** %1, align 8
- %40 = getelementptr inbounds double* %39, i64 2
+ %40 = getelementptr inbounds double, double* %39, i64 2
store double %38, double* %40, align 8
%41 = load double** %1, align 8
- %42 = getelementptr inbounds double* %41, i64 3
+ %42 = getelementptr inbounds double, double* %41, i64 3
%43 = load double* %42, align 8
%44 = load double** %1, align 8
- %45 = getelementptr inbounds double* %44, i64 3
+ %45 = getelementptr inbounds double, double* %44, i64 3
%46 = load double* %45, align 8
%47 = fadd double %43, %46
%48 = load double** %1, align 8
- %49 = getelementptr inbounds double* %48, i64 3
+ %49 = getelementptr inbounds double, double* %48, i64 3
%50 = load double* %49, align 8
%51 = fadd double %47, %50
%52 = load double** %1, align 8
- %53 = getelementptr inbounds double* %52, i64 3
+ %53 = getelementptr inbounds double, double* %52, i64 3
store double %51, double* %53, align 8
ret void
}
; CHECK-LABEL: @exact(
; CHECK: lshr exact <4 x i32>
define void @exact(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK-LABEL: @not_exact(
; CHECK: lshr <4 x i32>
define void @not_exact(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK-LABEL: @nsw(
; CHECK: add nsw <4 x i32>
define void @nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK-LABEL: @not_nsw(
; CHECK: add <4 x i32>
define void @not_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK-LABEL: @nuw(
; CHECK: add nuw <4 x i32>
define void @nuw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK-LABEL: @not_nuw(
; CHECK: add <4 x i32>
define void @not_nuw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK-LABEL: @nnan(
; CHECK: fadd nnan <4 x float>
define void @nnan(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
%load1 = load float* %idx1, align 4
%load2 = load float* %idx2, align 4
; CHECK-LABEL: @not_nnan(
; CHECK: fadd <4 x float>
define void @not_nnan(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
%load1 = load float* %idx1, align 4
%load2 = load float* %idx2, align 4
; CHECK-LABEL: @only_fast(
; CHECK: fadd fast <4 x float>
define void @only_fast(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
%load1 = load float* %idx1, align 4
%load2 = load float* %idx2, align 4
; CHECK-LABEL: @only_arcp(
; CHECK: fadd arcp <4 x float>
define void @only_arcp(float* %x) {
- %idx1 = getelementptr inbounds float* %x, i64 0
- %idx2 = getelementptr inbounds float* %x, i64 1
- %idx3 = getelementptr inbounds float* %x, i64 2
- %idx4 = getelementptr inbounds float* %x, i64 3
+ %idx1 = getelementptr inbounds float, float* %x, i64 0
+ %idx2 = getelementptr inbounds float, float* %x, i64 1
+ %idx3 = getelementptr inbounds float, float* %x, i64 2
+ %idx4 = getelementptr inbounds float, float* %x, i64 3
%load1 = load float* %idx1, align 4
%load2 = load float* %idx2, align 4
; CHECK: add nsw <4 x i32>
; CHECK: sub nsw <4 x i32>
define void @addsub_all_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK: add nsw <4 x i32>
; CHECK: sub <4 x i32>
define void @addsub_some_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
; CHECK: add <4 x i32>
; CHECK: sub <4 x i32>
define void @addsub_no_nsw(i32* %x) {
- %idx1 = getelementptr inbounds i32* %x, i64 0
- %idx2 = getelementptr inbounds i32* %x, i64 1
- %idx3 = getelementptr inbounds i32* %x, i64 2
- %idx4 = getelementptr inbounds i32* %x, i64 3
+ %idx1 = getelementptr inbounds i32, i32* %x, i64 0
+ %idx2 = getelementptr inbounds i32, i32* %x, i64 1
+ %idx3 = getelementptr inbounds i32, i32* %x, i64 2
+ %idx4 = getelementptr inbounds i32, i32* %x, i64 3
%load1 = load i32* %idx1, align 4
%load2 = load i32* %idx2, align 4
%i.015 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
%sum.014 = phi double [ %add6, %for.body ], [ 0.000000e+00, %entry ]
%mul = shl nsw i32 %i.015, 1
- %arrayidx = getelementptr inbounds double* %A, i32 %mul
+ %arrayidx = getelementptr inbounds double, double* %A, i32 %mul
%0 = load double* %arrayidx, align 4
%mul1 = fmul double %0, 7.000000e+00
%add12 = or i32 %mul, 1
- %arrayidx3 = getelementptr inbounds double* %A, i32 %add12
+ %arrayidx3 = getelementptr inbounds double, double* %A, i32 %add12
%1 = load double* %arrayidx3, align 4
%mul4 = fmul double %1, 7.000000e+00
%add5 = fadd double %mul1, %mul4
%i.02 = phi i32 [ 0, %0 ], [ %10, %1 ]
%sum.01 = phi double [ 0.000000e+00, %0 ], [ %9, %1 ]
%2 = shl nsw i32 %i.02, 1
- %3 = getelementptr inbounds double* %D, i32 %2
+ %3 = getelementptr inbounds double, double* %D, i32 %2
%4 = load double* %3, align 4
%A4 = fmul double %4, %4
%A42 = fmul double %A4, %A4
%5 = or i32 %2, 1
- %6 = getelementptr inbounds double* %D, i32 %5
+ %6 = getelementptr inbounds double, double* %D, i32 %5
%7 = load double* %6, align 4
%A7 = fmul double %7, %7
%A72 = fmul double %A7, %A7
define double @return2(double* nocapture readonly %x) {
entry:
%x0 = load double* %x, align 4
- %arrayidx1 = getelementptr inbounds double* %x, i32 2
+ %arrayidx1 = getelementptr inbounds double, double* %x, i32 2
%x2 = load double* %arrayidx1, align 4
%add3 = fadd double %x0, %x2
- %arrayidx2 = getelementptr inbounds double* %x, i32 1
+ %arrayidx2 = getelementptr inbounds double, double* %x, i32 1
%x1 = load double* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds double* %x, i32 3
+ %arrayidx3 = getelementptr inbounds double, double* %x, i32 3
%x3 = load double* %arrayidx3, align 4
%add4 = fadd double %x1, %x3
%add5 = fadd double %add3, %add4
define float @foo(float* nocapture readonly %A) {
entry:
%0 = load float* %A, align 4
- %arrayidx1 = getelementptr inbounds float* %A, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %A, i64 1
%1 = load float* %arrayidx1, align 4
- %arrayidx2 = getelementptr inbounds float* %A, i64 2
+ %arrayidx2 = getelementptr inbounds float, float* %A, i64 2
%2 = load float* %arrayidx2, align 4
br label %for.body
%mul = fmul float %3, 7.000000e+00
%add4 = fadd float %R.030, %mul
%4 = add nsw i64 %indvars.iv, 1
- %arrayidx7 = getelementptr inbounds float* %A, i64 %4
+ %arrayidx7 = getelementptr inbounds float, float* %A, i64 %4
%5 = load float* %arrayidx7, align 4
%mul8 = fmul float %5, 8.000000e+00
%add9 = fadd float %G.031, %mul8
%6 = add nsw i64 %indvars.iv, 2
- %arrayidx12 = getelementptr inbounds float* %A, i64 %6
+ %arrayidx12 = getelementptr inbounds float, float* %A, i64 %6
%7 = load float* %arrayidx12, align 4
%mul13 = fmul float %7, 9.000000e+00
%add14 = fadd float %B.032, %mul13
br i1 %cmp, label %for.body.for.body_crit_edge, label %for.end
for.body.for.body_crit_edge: ; preds = %for.body
- %arrayidx3.phi.trans.insert = getelementptr inbounds float* %A, i64 %indvars.iv.next
+ %arrayidx3.phi.trans.insert = getelementptr inbounds float, float* %A, i64 %indvars.iv.next
%.pre = load float* %arrayidx3.phi.trans.insert, align 4
br label %for.body
;CHECK: ret
define void @SAXPY(i32* noalias nocapture %x, i32* noalias nocapture %y, i32 %a, i64 %i) {
- %1 = getelementptr inbounds i32* %x, i64 %i
+ %1 = getelementptr inbounds i32, i32* %x, i64 %i
%2 = load i32* %1, align 4
%3 = mul nsw i32 %2, %a
- %4 = getelementptr inbounds i32* %y, i64 %i
+ %4 = getelementptr inbounds i32, i32* %y, i64 %i
%5 = load i32* %4, align 4
%6 = add nsw i32 %3, %5
store i32 %6, i32* %1, align 4
%7 = add i64 %i, 1
- %8 = getelementptr inbounds i32* %x, i64 %7
+ %8 = getelementptr inbounds i32, i32* %x, i64 %7
%9 = load i32* %8, align 4
%10 = mul nsw i32 %9, %a
- %11 = getelementptr inbounds i32* %y, i64 %7
+ %11 = getelementptr inbounds i32, i32* %y, i64 %7
%12 = load i32* %11, align 4
%13 = add nsw i32 %10, %12
store i32 %13, i32* %8, align 4
%14 = add i64 %i, 2
- %15 = getelementptr inbounds i32* %x, i64 %14
+ %15 = getelementptr inbounds i32, i32* %x, i64 %14
%16 = load i32* %15, align 4
%17 = mul nsw i32 %16, %a
- %18 = getelementptr inbounds i32* %y, i64 %14
+ %18 = getelementptr inbounds i32, i32* %y, i64 %14
%19 = load i32* %18, align 4
%20 = add nsw i32 %17, %19
store i32 %20, i32* %15, align 4
%21 = add i64 %i, 3
- %22 = getelementptr inbounds i32* %x, i64 %21
+ %22 = getelementptr inbounds i32, i32* %x, i64 %21
%23 = load i32* %22, align 4
%24 = mul nsw i32 %23, %a
- %25 = getelementptr inbounds i32* %y, i64 %21
+ %25 = getelementptr inbounds i32, i32* %y, i64 %21
%26 = load i32* %25, align 4
%27 = add nsw i32 %24, %26
store i32 %27, i32* %22, align 4
; Make sure we don't crash on this one.
define void @SAXPY_crash(i32* noalias nocapture %x, i32* noalias nocapture %y, i64 %i) {
%1 = add i64 %i, 1
- %2 = getelementptr inbounds i32* %x, i64 %1
- %3 = getelementptr inbounds i32* %y, i64 %1
+ %2 = getelementptr inbounds i32, i32* %x, i64 %1
+ %3 = getelementptr inbounds i32, i32* %y, i64 %1
%4 = load i32* %3, align 4
%5 = add nsw i32 undef, %4
store i32 %5, i32* %2, align 4
%6 = add i64 %i, 2
- %7 = getelementptr inbounds i32* %x, i64 %6
- %8 = getelementptr inbounds i32* %y, i64 %6
+ %7 = getelementptr inbounds i32, i32* %x, i64 %6
+ %8 = getelementptr inbounds i32, i32* %y, i64 %6
%9 = load i32* %8, align 4
%10 = add nsw i32 undef, %9
store i32 %10, i32* %7, align 4
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%a.088 = phi i32 [ 0, %entry ], [ %add52, %for.body ]
%1 = shl i64 %indvars.iv, 3
- %arrayidx = getelementptr inbounds i32* %diff, i64 %1
+ %arrayidx = getelementptr inbounds i32, i32* %diff, i64 %1
%2 = load i32* %arrayidx, align 4
%3 = or i64 %1, 4
- %arrayidx2 = getelementptr inbounds i32* %diff, i64 %3
+ %arrayidx2 = getelementptr inbounds i32, i32* %diff, i64 %3
%4 = load i32* %arrayidx2, align 4
%add3 = add nsw i32 %4, %2
- %arrayidx6 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
+ %arrayidx6 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 0
store i32 %add3, i32* %arrayidx6, align 16
%add10 = add nsw i32 %add3, %a.088
%5 = or i64 %1, 1
- %arrayidx13 = getelementptr inbounds i32* %diff, i64 %5
+ %arrayidx13 = getelementptr inbounds i32, i32* %diff, i64 %5
%6 = load i32* %arrayidx13, align 4
%7 = or i64 %1, 5
- %arrayidx16 = getelementptr inbounds i32* %diff, i64 %7
+ %arrayidx16 = getelementptr inbounds i32, i32* %diff, i64 %7
%8 = load i32* %arrayidx16, align 4
%add17 = add nsw i32 %8, %6
- %arrayidx20 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
+ %arrayidx20 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 1
store i32 %add17, i32* %arrayidx20, align 4
%add24 = add nsw i32 %add10, %add17
%9 = or i64 %1, 2
- %arrayidx27 = getelementptr inbounds i32* %diff, i64 %9
+ %arrayidx27 = getelementptr inbounds i32, i32* %diff, i64 %9
%10 = load i32* %arrayidx27, align 4
%11 = or i64 %1, 6
- %arrayidx30 = getelementptr inbounds i32* %diff, i64 %11
+ %arrayidx30 = getelementptr inbounds i32, i32* %diff, i64 %11
%12 = load i32* %arrayidx30, align 4
%add31 = add nsw i32 %12, %10
- %arrayidx34 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
+ %arrayidx34 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 2
store i32 %add31, i32* %arrayidx34, align 8
%add38 = add nsw i32 %add24, %add31
%13 = or i64 %1, 3
- %arrayidx41 = getelementptr inbounds i32* %diff, i64 %13
+ %arrayidx41 = getelementptr inbounds i32, i32* %diff, i64 %13
%14 = load i32* %arrayidx41, align 4
%15 = or i64 %1, 7
- %arrayidx44 = getelementptr inbounds i32* %diff, i64 %15
+ %arrayidx44 = getelementptr inbounds i32, i32* %diff, i64 %15
%16 = load i32* %arrayidx44, align 4
%add45 = add nsw i32 %16, %14
- %arrayidx48 = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
+ %arrayidx48 = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 %indvars.iv, i64 3
store i32 %add45, i32* %arrayidx48, align 4
%add52 = add nsw i32 %add38, %add45
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
- %arraydecay = getelementptr inbounds [8 x [8 x i32]]* %m2, i64 0, i64 0
+ %arraydecay = getelementptr inbounds [8 x [8 x i32]], [8 x [8 x i32]]* %m2, i64 0, i64 0
call void @ff([8 x i32]* %arraydecay) #1
ret i32 %add52
}
.lr.ph: ; preds = %0, %.lr.ph
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
- %3 = getelementptr inbounds i32* %in, i64 %2
+ %3 = getelementptr inbounds i32, i32* %in, i64 %2
;CHECK:load <4 x i32>
%4 = load i32* %3, align 4
%5 = or i64 %2, 1
- %6 = getelementptr inbounds i32* %in, i64 %5
+ %6 = getelementptr inbounds i32, i32* %in, i64 %5
%7 = load i32* %6, align 4
%8 = or i64 %2, 2
- %9 = getelementptr inbounds i32* %in, i64 %8
+ %9 = getelementptr inbounds i32, i32* %in, i64 %8
%10 = load i32* %9, align 4
%11 = or i64 %2, 3
- %12 = getelementptr inbounds i32* %in, i64 %11
+ %12 = getelementptr inbounds i32, i32* %in, i64 %11
%13 = load i32* %12, align 4
;CHECK:mul <4 x i32>
%14 = mul i32 %4, 7
%19 = add i32 %18, 21
%20 = mul i32 %13, 7
%21 = add i32 %20, 28
- %22 = getelementptr inbounds i32* %out, i64 %2
+ %22 = getelementptr inbounds i32, i32* %out, i64 %2
;CHECK:store <4 x i32>
store i32 %15, i32* %22, align 4
- %23 = getelementptr inbounds i32* %out, i64 %5
+ %23 = getelementptr inbounds i32, i32* %out, i64 %5
store i32 %17, i32* %23, align 4
- %24 = getelementptr inbounds i32* %out, i64 %8
+ %24 = getelementptr inbounds i32, i32* %out, i64 %8
store i32 %19, i32* %24, align 4
- %25 = getelementptr inbounds i32* %out, i64 %11
+ %25 = getelementptr inbounds i32, i32* %out, i64 %11
store i32 %21, i32* %25, align 4
%26 = add i64 %i.019, 1
%exitcond = icmp eq i64 %26, %n
.lr.ph: ; preds = %0, %.lr.ph
%i.019 = phi i64 [ %26, %.lr.ph ], [ 0, %0 ]
%2 = shl i64 %i.019, 2
- %3 = getelementptr inbounds i32* %in, i64 %2
+ %3 = getelementptr inbounds i32, i32* %in, i64 %2
%4 = load i32* %3, align 4
%5 = or i64 %2, 1
- %6 = getelementptr inbounds i32* %in, i64 %5
+ %6 = getelementptr inbounds i32, i32* %in, i64 %5
%7 = load i32* %6, align 4
%8 = or i64 %2, 2
- %9 = getelementptr inbounds i32* %in, i64 %8
+ %9 = getelementptr inbounds i32, i32* %in, i64 %8
%10 = load i32* %9, align 4
%11 = or i64 %2, 3
- %12 = getelementptr inbounds i32* %in, i64 %11
+ %12 = getelementptr inbounds i32, i32* %in, i64 %11
%13 = load i32* %12, align 4
%14 = mul i32 %4, 7
%15 = add i32 %14, 7
%19 = add i32 %18, 21
%20 = mul i32 %13, 7
%21 = add i32 %20, 28
- %22 = getelementptr inbounds i32* %out, i64 %2
+ %22 = getelementptr inbounds i32, i32* %out, i64 %2
store i32 %15, i32* %22, align 4
- %23 = getelementptr inbounds i32* %out, i64 %5
+ %23 = getelementptr inbounds i32, i32* %out, i64 %5
store i32 %17, i32* %23, align 4
%barrier = call i32 @goo(i32 0) ; <---------------- memory barrier.
- %24 = getelementptr inbounds i32* %out, i64 %8
+ %24 = getelementptr inbounds i32, i32* %out, i64 %8
store i32 %19, i32* %24, align 4
- %25 = getelementptr inbounds i32* %out, i64 %11
+ %25 = getelementptr inbounds i32, i32* %out, i64 %11
store i32 %21, i32* %25, align 4
%26 = add i64 %i.019, 1
%exitcond = icmp eq i64 %26, %n
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
%c = bitcast i8* %e to double*
store double %mul, double* %c, align 8
- %carrayidx5 = getelementptr inbounds i8* %e, i64 8
+ %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
%arrayidx5 = bitcast i8* %carrayidx5 to double*
store double %mul5, double* %arrayidx5, align 8
ret void
%i0 = load volatile double* %a, align 8
%i1 = load volatile double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store volatile double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store volatile double %mul5, double* %arrayidx5, align 8
ret void
}
%src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
%0 = load double* %src.addr.013, align 8
store double %0, double* %dst.addr.014, align 8
- %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 1
+ %arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 1
%1 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
store double %1, double* %arrayidx3, align 8
- %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
- %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+ %add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
+ %add.ptr4 = getelementptr inbounds double, double* %dst.addr.014, i64 %i.015
%inc = add i64 %i.015, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
%src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
%0 = load float* %src.addr.021, align 4
store float %0, float* %dst.addr.022, align 4
- %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 1
+ %arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 1
%1 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+ %arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
store float %1, float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
+ %arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
%2 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+ %arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
store float %2, float* %arrayidx5, align 4
- %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
+ %arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
%3 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+ %arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
store float %3, float* %arrayidx7, align 4
- %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
- %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+ %add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
+ %add.ptr8 = getelementptr inbounds float, float* %dst.addr.022, i64 %i.023
%inc = add i64 %i.023, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
%src.addr.013 = phi double* [ %add.ptr, %for.body ], [ %src, %entry ]
%0 = load double* %src.addr.013, align 8
store double %0, double* %dst.addr.014, align 8
- %arrayidx2 = getelementptr inbounds double* %src.addr.013, i64 2
+ %arrayidx2 = getelementptr inbounds double, double* %src.addr.013, i64 2
%1 = load double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double* %dst.addr.014, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %dst.addr.014, i64 1
store double %1, double* %arrayidx3, align 8
- %add.ptr = getelementptr inbounds double* %src.addr.013, i64 %i.015
- %add.ptr4 = getelementptr inbounds double* %dst.addr.014, i64 %i.015
+ %add.ptr = getelementptr inbounds double, double* %src.addr.013, i64 %i.015
+ %add.ptr4 = getelementptr inbounds double, double* %dst.addr.014, i64 %i.015
%inc = add i64 %i.015, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
%src.addr.021 = phi float* [ %add.ptr, %for.body ], [ %src, %entry ]
%0 = load float* %src.addr.021, align 4
store float %0, float* %dst.addr.022, align 4
- %arrayidx2 = getelementptr inbounds float* %src.addr.021, i64 4
+ %arrayidx2 = getelementptr inbounds float, float* %src.addr.021, i64 4
%1 = load float* %arrayidx2, align 4
- %arrayidx3 = getelementptr inbounds float* %dst.addr.022, i64 1
+ %arrayidx3 = getelementptr inbounds float, float* %dst.addr.022, i64 1
store float %1, float* %arrayidx3, align 4
- %arrayidx4 = getelementptr inbounds float* %src.addr.021, i64 2
+ %arrayidx4 = getelementptr inbounds float, float* %src.addr.021, i64 2
%2 = load float* %arrayidx4, align 4
- %arrayidx5 = getelementptr inbounds float* %dst.addr.022, i64 2
+ %arrayidx5 = getelementptr inbounds float, float* %dst.addr.022, i64 2
store float %2, float* %arrayidx5, align 4
- %arrayidx6 = getelementptr inbounds float* %src.addr.021, i64 3
+ %arrayidx6 = getelementptr inbounds float, float* %src.addr.021, i64 3
%3 = load float* %arrayidx6, align 4
- %arrayidx7 = getelementptr inbounds float* %dst.addr.022, i64 3
+ %arrayidx7 = getelementptr inbounds float, float* %dst.addr.022, i64 3
store float %3, float* %arrayidx7, align 4
- %add.ptr = getelementptr inbounds float* %src.addr.021, i64 %i.023
- %add.ptr8 = getelementptr inbounds float* %dst.addr.022, i64 %i.023
+ %add.ptr = getelementptr inbounds float, float* %src.addr.021, i64 %i.023
+ %add.ptr8 = getelementptr inbounds float, float* %dst.addr.022, i64 %i.023
%inc = add i64 %i.023, 1
%exitcond = icmp eq i64 %inc, %count
br i1 %exitcond, label %for.end, label %for.body
; CHECK-LABEL: store_splat
; CHECK: store <4 x float>
define void @store_splat(float*, float) {
- %3 = getelementptr inbounds float* %0, i64 0
+ %3 = getelementptr inbounds float, float* %0, i64 0
store float %1, float* %3, align 4
- %4 = getelementptr inbounds float* %0, i64 1
+ %4 = getelementptr inbounds float, float* %0, i64 1
store float %1, float* %4, align 4
- %5 = getelementptr inbounds float* %0, i64 2
+ %5 = getelementptr inbounds float, float* %0, i64 2
store float %1, float* %5, align 4
- %6 = getelementptr inbounds float* %0, i64 3
+ %6 = getelementptr inbounds float, float* %0, i64 3
store float %1, float* %6, align 4
ret void
}
br label %bb2
bb1: ; an unreachable block
- %t3 = getelementptr inbounds i32* %x, i64 4
+ %t3 = getelementptr inbounds i32, i32* %x, i64 4
%t4 = load i32* %t3, align 4
- %t5 = getelementptr inbounds i32* %x, i64 5
+ %t5 = getelementptr inbounds i32, i32* %x, i64 5
%t6 = load i32* %t5, align 4
%bad = fadd float %bad, 0.000000e+00 ; <- an instruction with self dependency,
; but legal in unreachable code
- %t7 = getelementptr inbounds i32* %x, i64 6
+ %t7 = getelementptr inbounds i32, i32* %x, i64 6
%t8 = load i32* %t7, align 4
- %t9 = getelementptr inbounds i32* %x, i64 7
+ %t9 = getelementptr inbounds i32, i32* %x, i64 7
%t10 = load i32* %t9, align 4
br label %bb2
%t3.0 = phi i32 [ %t8, %bb1 ], [ 2, %entry ]
%t4.0 = phi i32 [ %t10, %bb1 ], [ 2, %entry ]
store i32 %t1.0, i32* %x, align 4
- %t12 = getelementptr inbounds i32* %x, i64 1
+ %t12 = getelementptr inbounds i32, i32* %x, i64 1
store i32 %t2.0, i32* %t12, align 4
- %t13 = getelementptr inbounds i32* %x, i64 2
+ %t13 = getelementptr inbounds i32, i32* %x, i64 2
store i32 %t3.0, i32* %t13, align 4
- %t14 = getelementptr inbounds i32* %x, i64 3
+ %t14 = getelementptr inbounds i32, i32* %x, i64 3
store i32 %t4.0, i32* %t14, align 4
ret void
}
%i0 = load double* %a, align 8
%i1 = load double* %b, align 8
%mul = fmul double %i0, %i1
- %arrayidx3 = getelementptr inbounds double* %a, i64 1
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
%i3 = load double* %arrayidx3, align 8
- %arrayidx4 = getelementptr inbounds double* %b, i64 1
+ %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
%i4 = load double* %arrayidx4, align 8
%mul5 = fmul double %i3, %i4
store double %mul, double* %c, align 8
- %arrayidx5 = getelementptr inbounds double* %c, i64 1
+ %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
store double %mul5, double* %arrayidx5, align 8
ret void
}
%in = alloca %struct.struct_test_27.0.13, align 8
%0 = bitcast %struct.struct_test_27.0.13* %in to [5 x i64]*
store [5 x i64] %in.coerce, [5 x i64]* %0, align 8
- %scevgep9 = getelementptr %struct.struct_test_27.0.13* %in, i32 0, i32 4, i32 0
+ %scevgep9 = getelementptr %struct.struct_test_27.0.13, %struct.struct_test_27.0.13* %in, i32 0, i32 4, i32 0
%scevgep910 = bitcast i32* %scevgep9 to i8*
call void @llvm.memcpy.p1i8.p0i8.i32(i8 addrspace(1)* undef, i8* %scevgep910, i32 16, i32 4, i1 false)
ret void
define void @test1({ i8, i8 }* %a, { i8, i8 }* %b) {
; CHECK-LABEL: @test1(
-; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 0
+; CHECK: %[[gep_a0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 0
; CHECK: %[[a0:.*]] = load i8* %[[gep_a0]], align 16
-; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }* %a, i64 0, i32 1
+; CHECK: %[[gep_a1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %a, i64 0, i32 1
; CHECK: %[[a1:.*]] = load i8* %[[gep_a1]], align 1
-; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 0
+; CHECK: %[[gep_b0:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 0
; CHECK: store i8 %[[a0]], i8* %[[gep_b0]], align 16
-; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }* %b, i64 0, i32 1
+; CHECK: %[[gep_b1:.*]] = getelementptr inbounds { i8, i8 }, { i8, i8 }* %b, i64 0, i32 1
; CHECK: store i8 %[[a1]], i8* %[[gep_b1]], align 1
; CHECK: ret void
entry:
%alloca = alloca { i8, i8 }, align 16
- %gep_a = getelementptr { i8, i8 }* %a, i32 0, i32 0
- %gep_alloca = getelementptr { i8, i8 }* %alloca, i32 0, i32 0
- %gep_b = getelementptr { i8, i8 }* %b, i32 0, i32 0
+ %gep_a = getelementptr { i8, i8 }, { i8, i8 }* %a, i32 0, i32 0
+ %gep_alloca = getelementptr { i8, i8 }, { i8, i8 }* %alloca, i32 0, i32 0
+ %gep_b = getelementptr { i8, i8 }, { i8, i8 }* %b, i32 0, i32 0
store i8 420, i8* %gep_alloca, align 16
entry:
%a = alloca { i8, i8, i8, i8 }, align 2
- %gep1 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 1
+ %gep1 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 1
%cast1 = bitcast i8* %gep1 to i16*
store volatile i16 0, i16* %cast1
- %gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
+ %gep2 = getelementptr { i8, i8, i8, i8 }, { i8, i8, i8, i8 }* %a, i32 0, i32 2
%result = load i8* %gep2
store i8 42, i8* %gep2
ret void
%a_raw = bitcast { i8*, i8*, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a_raw, i8* %x, i32 22, i32 8, i1 false)
%b_raw = bitcast { i8*, i8*, i8* }* %b to i8*
- %b_gep = getelementptr i8* %b_raw, i32 6
+ %b_gep = getelementptr i8, i8* %b_raw, i32 6
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b_gep, i8* %x, i32 18, i32 2, i1 false)
ret void
}
entry:
%a = alloca [18 x i8]
- %raw1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 0
+ %raw1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 0
%ptr1 = bitcast i8* %raw1 to double*
store volatile double 0.0, double* %ptr1, align 1
- %weird_gep1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 7
+ %weird_gep1 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 7
%weird_cast1 = bitcast i8* %weird_gep1 to i16*
%weird_load1 = load volatile i16* %weird_cast1, align 1
- %raw2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 9
+ %raw2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 9
%ptr2 = bitcast i8* %raw2 to double*
%d1 = load double* %ptr1, align 1
store volatile double %d1, double* %ptr2, align 1
- %weird_gep2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 16
+ %weird_gep2 = getelementptr inbounds [18 x i8], [18 x i8]* %a, i32 0, i32 16
%weird_cast2 = bitcast i8* %weird_gep2 to i16*
%weird_load2 = load volatile i16* %weird_cast2, align 1
entry:
%a = alloca [16 x i8]
- %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+ %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
%ptr1 = bitcast i8* %raw1 to double*
store volatile double 0.0, double* %ptr1, align 1
- %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+ %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
%ptr2 = bitcast i8* %raw2 to double*
%val = load double* %ptr1, align 1
store volatile double %val, double* %ptr2, align 1
entry:
%a = alloca [16 x i8]
- %raw1 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 0
+ %raw1 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 0
%ptr1 = bitcast i8* %raw1 to double*
- %raw2 = getelementptr inbounds [16 x i8]* %a, i32 0, i32 8
+ %raw2 = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
%ptr2 = bitcast i8* %raw2 to double*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %raw1, i8* %out, i32 16, i32 0, i1 false)
entry:
%X = alloca { i32, float }
- %Y = getelementptr { i32, float }* %X, i64 0, i32 0
+ %Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0
store i32 0, i32* %Y
%Z = load i32* %Y
ret i32 %Z
; CHECK-NEXT: %[[test3_a6:.*]] = alloca [7 x i8]
; CHECK-NEXT: %[[test3_a7:.*]] = alloca [85 x i8]
- %b = getelementptr [300 x i8]* %a, i64 0, i64 0
+ %b = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 300, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 42
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
; CHECK-NEXT: %[[test3_r1:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 43
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8]* %[[test3_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 142
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 142
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 158
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 158
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 200
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 200
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 207
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 207
; CHECK-NEXT: %[[test3_r2:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 208
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 208
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 215
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 215
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
; Clobber a single element of the array, this should be promotable.
- %c = getelementptr [300 x i8]* %a, i64 0, i64 42
+ %c = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 42
store i8 0, i8* %c
; Make a sequence of overlapping stores to the array. These overlap both in
; forward strides and in shrinking accesses.
- %overlap.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 142
- %overlap.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 143
- %overlap.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 144
- %overlap.4.i8 = getelementptr [300 x i8]* %a, i64 0, i64 145
- %overlap.5.i8 = getelementptr [300 x i8]* %a, i64 0, i64 146
- %overlap.6.i8 = getelementptr [300 x i8]* %a, i64 0, i64 147
- %overlap.7.i8 = getelementptr [300 x i8]* %a, i64 0, i64 148
- %overlap.8.i8 = getelementptr [300 x i8]* %a, i64 0, i64 149
- %overlap.9.i8 = getelementptr [300 x i8]* %a, i64 0, i64 150
+ %overlap.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 142
+ %overlap.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 143
+ %overlap.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 144
+ %overlap.4.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 145
+ %overlap.5.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 146
+ %overlap.6.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 147
+ %overlap.7.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 148
+ %overlap.8.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 149
+ %overlap.9.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 150
%overlap.1.i16 = bitcast i8* %overlap.1.i8 to i16*
%overlap.1.i32 = bitcast i8* %overlap.1.i8 to i32*
%overlap.1.i64 = bitcast i8* %overlap.1.i8 to i64*
%overlap.8.i64 = bitcast i8* %overlap.8.i8 to i64*
%overlap.9.i64 = bitcast i8* %overlap.9.i8 to i64*
store i8 1, i8* %overlap.1.i8
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
; CHECK-NEXT: store i8 1, i8* %[[gep]]
store i16 1, i16* %overlap.1.i16
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i16*
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [16 x i8]* %[[test3_a3]] to i64*
; CHECK-NEXT: store i64 1, i64* %[[bitcast]]
store i64 2, i64* %overlap.2.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 1
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 2, i64* %[[bitcast]]
store i64 3, i64* %overlap.3.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 2
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 3, i64* %[[bitcast]]
store i64 4, i64* %overlap.4.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 3
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 3
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 4, i64* %[[bitcast]]
store i64 5, i64* %overlap.5.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 4
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 4
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 5, i64* %[[bitcast]]
store i64 6, i64* %overlap.6.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 5
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 5
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 6, i64* %[[bitcast]]
store i64 7, i64* %overlap.7.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 6
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 6
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 7, i64* %[[bitcast]]
store i64 8, i64* %overlap.8.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 7
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 7
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 8, i64* %[[bitcast]]
store i64 9, i64* %overlap.9.i64
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 8
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 8
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i64*
; CHECK-NEXT: store i64 9, i64* %[[bitcast]]
; Make two sequences of overlapping stores with more gaps and irregularities.
- %overlap2.1.0.i8 = getelementptr [300 x i8]* %a, i64 0, i64 200
- %overlap2.1.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 201
- %overlap2.1.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 202
- %overlap2.1.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 203
+ %overlap2.1.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 200
+ %overlap2.1.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 201
+ %overlap2.1.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 202
+ %overlap2.1.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 203
- %overlap2.2.0.i8 = getelementptr [300 x i8]* %a, i64 0, i64 208
- %overlap2.2.1.i8 = getelementptr [300 x i8]* %a, i64 0, i64 209
- %overlap2.2.2.i8 = getelementptr [300 x i8]* %a, i64 0, i64 210
- %overlap2.2.3.i8 = getelementptr [300 x i8]* %a, i64 0, i64 211
+ %overlap2.2.0.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 208
+ %overlap2.2.1.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 209
+ %overlap2.2.2.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 210
+ %overlap2.2.3.i8 = getelementptr [300 x i8], [300 x i8]* %a, i64 0, i64 211
%overlap2.1.0.i16 = bitcast i8* %overlap2.1.0.i8 to i16*
%overlap2.1.0.i32 = bitcast i8* %overlap2.1.0.i8 to i32*
%overlap2.1.2.i32 = bitcast i8* %overlap2.1.2.i8 to i32*
%overlap2.1.3.i32 = bitcast i8* %overlap2.1.3.i8 to i32*
store i8 1, i8* %overlap2.1.0.i8
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
; CHECK-NEXT: store i8 1, i8* %[[gep]]
store i16 1, i16* %overlap2.1.0.i16
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i16*
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a5]] to i32*
; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
store i32 2, i32* %overlap2.1.1.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 1
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
; CHECK-NEXT: store i32 2, i32* %[[bitcast]]
store i32 3, i32* %overlap2.1.2.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
store i32 4, i32* %overlap2.1.3.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 3
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 3
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
; CHECK-NEXT: %[[bitcast:.*]] = bitcast [7 x i8]* %[[test3_a6]] to i32*
; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
store i8 1, i8* %overlap2.2.1.i8
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
; CHECK-NEXT: store i8 1, i8* %[[gep]]
store i16 1, i16* %overlap2.2.1.i16
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: store i16 1, i16* %[[bitcast]]
store i32 1, i32* %overlap2.2.1.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
; CHECK-NEXT: store i32 1, i32* %[[bitcast]]
store i32 3, i32* %overlap2.2.2.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
; CHECK-NEXT: store i32 3, i32* %[[bitcast]]
store i32 4, i32* %overlap2.2.3.i32
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 3
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 3
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i32*
; CHECK-NEXT: store i32 4, i32* %[[bitcast]]
- %overlap2.prefix = getelementptr i8* %overlap2.1.1.i8, i64 -4
+ %overlap2.prefix = getelementptr i8, i8* %overlap2.1.1.i8, i64 -4
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.prefix, i8* %src, i32 8, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 39
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 39
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %src, i32 3
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 3
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 3
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 5
; Bridge between the overlapping areas
call void @llvm.memset.p0i8.i32(i8* %overlap2.1.2.i8, i8 42, i32 8, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 5
; ...promoted i8 store...
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[gep]], i8 42, i32 2
; Entirely within the second overlap.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.1.i8, i8* %src, i32 5, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 1
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
; Trailing past the second overlap.
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %overlap2.2.2.i8, i8* %src, i32 8, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 2
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 5
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 5
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 5
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 3
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 300, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a1]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 42
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
; CHECK-NEXT: store i8 0, i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 43
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8]* %[[test3_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [99 x i8], [99 x i8]* %[[test3_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 99
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 142
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8]* %[[test3_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 142
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[test3_a3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 16
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 158
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8]* %[[test3_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 158
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [42 x i8], [42 x i8]* %[[test3_a4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 42
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 200
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 200
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 207
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 207
; CHECK-NEXT: store i8 42, i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 208
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test3_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 208
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test3_a6]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 215
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8]* %[[test3_a7]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 215
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [85 x i8], [85 x i8]* %[[test3_a7]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 85
ret void
; CHECK-NEXT: %[[test4_a5:.*]] = alloca [7 x i8]
; CHECK-NEXT: %[[test4_a6:.*]] = alloca [40 x i8]
- %b = getelementptr [100 x i8]* %a, i64 0, i64 0
+ %b = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b, i8* %src, i32 100, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8]* %[[test4_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep]], i8* %src, i32 20
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 20
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 20
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: %[[test4_r1:.*]] = load i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 22
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 22
; CHECK-NEXT: %[[test4_r2:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 23
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 23
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 30
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8]* %[[test4_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 30
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 40
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 40
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: %[[test4_r3:.*]] = load i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 42
; CHECK-NEXT: %[[test4_r4:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 43
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 43
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 50
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 50
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: %[[test4_r5:.*]] = load i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %src, i64 52
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %src, i64 52
; CHECK-NEXT: %[[test4_r6:.*]] = load i8* %[[gep]]
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 53
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 53
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8* %src, i64 60
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8]* %[[test4_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds i8, i8* %src, i64 60
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
- %a.src.1 = getelementptr [100 x i8]* %a, i64 0, i64 20
- %a.dst.1 = getelementptr [100 x i8]* %a, i64 0, i64 40
+ %a.src.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 20
+ %a.dst.1 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 40
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.1, i32 10, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
; Clobber a single element of the array, this should be promotable, and be deleted.
- %c = getelementptr [100 x i8]* %a, i64 0, i64 42
+ %c = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 42
store i8 0, i8* %c
- %a.src.2 = getelementptr [100 x i8]* %a, i64 0, i64 50
+ %a.src.2 = getelementptr [100 x i8], [100 x i8]* %a, i64 0, i64 50
call void @llvm.memmove.p0i8.p0i8.i32(i8* %a.dst.1, i8* %a.src.2, i32 10, i32 1, i1 false)
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %b, i32 100, i32 1, i1 false)
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8]* %[[test4_a1]], i64 0, i64 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds [20 x i8], [20 x i8]* %[[test4_a1]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[gep]], i32 20
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 20
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 20
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: store i16 %[[test4_r1]], i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 22
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 22
; CHECK-NEXT: store i8 %[[test4_r2]], i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 23
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a2]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 23
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a2]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 30
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8]* %[[test4_a3]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 30
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [10 x i8], [10 x i8]* %[[test4_a3]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 10
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 40
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 40
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 42
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 42
; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 43
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a4]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 43
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a4]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 50
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 50
; CHECK-NEXT: %[[bitcast:.*]] = bitcast i8* %[[gep]] to i16*
; CHECK-NEXT: store i16 %[[test4_r5]], i16* %[[bitcast]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8* %dst, i64 52
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds i8, i8* %dst, i64 52
; CHECK-NEXT: store i8 %[[test4_r6]], i8* %[[gep]]
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 53
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8]* %[[test4_a5]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 53
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [7 x i8], [7 x i8]* %[[test4_a5]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 7
-; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8* %dst, i64 60
-; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8]* %[[test4_a6]], i64 0, i64 0
+; CHECK-NEXT: %[[gep_dst:.*]] = getelementptr inbounds i8, i8* %dst, i64 60
+; CHECK-NEXT: %[[gep_src:.*]] = getelementptr inbounds [40 x i8], [40 x i8]* %[[test4_a6]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[gep_dst]], i8* %[[gep_src]], i32 40
ret void
%a = alloca [4 x i8]
%fptr = bitcast [4 x i8]* %a to float*
store float 0.0, float* %fptr
- %ptr = getelementptr [4 x i8]* %a, i32 0, i32 2
+ %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 2
%iptr = bitcast i8* %ptr to i16*
%val = load i16* %iptr
ret i16 %val
entry:
%a = alloca [4 x i8]
- %ptr = getelementptr [4 x i8]* %a, i32 0, i32 0
+ %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 42, i32 4, i32 1, i1 true)
%iptr = bitcast i8* %ptr to i32*
%val = load i32* %iptr
entry:
%a = alloca [4 x i8]
- %ptr = getelementptr [4 x i8]* %a, i32 0, i32 0
+ %ptr = getelementptr [4 x i8], [4 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
ret void
%new = alloca %S2
; CHECK-NOT: alloca
- %s2.next.ptr = getelementptr %S2* %s2, i64 0, i32 1
+ %s2.next.ptr = getelementptr %S2, %S2* %s2, i64 0, i32 1
%s2.next = load %S2** %s2.next.ptr
-; CHECK: %[[gep:.*]] = getelementptr %S2* %s2, i64 0, i32 1
+; CHECK: %[[gep:.*]] = getelementptr %S2, %S2* %s2, i64 0, i32 1
; CHECK-NEXT: %[[next:.*]] = load %S2** %[[gep]]
- %s2.next.s1.ptr = getelementptr %S2* %s2.next, i64 0, i32 0
+ %s2.next.s1.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 0
%s2.next.s1 = load %S1** %s2.next.s1.ptr
- %new.s1.ptr = getelementptr %S2* %new, i64 0, i32 0
+ %new.s1.ptr = getelementptr %S2, %S2* %new, i64 0, i32 0
store %S1* %s2.next.s1, %S1** %new.s1.ptr
- %s2.next.next.ptr = getelementptr %S2* %s2.next, i64 0, i32 1
+ %s2.next.next.ptr = getelementptr %S2, %S2* %s2.next, i64 0, i32 1
%s2.next.next = load %S2** %s2.next.next.ptr
- %new.next.ptr = getelementptr %S2* %new, i64 0, i32 1
+ %new.next.ptr = getelementptr %S2, %S2* %new, i64 0, i32 1
store %S2* %s2.next.next, %S2** %new.next.ptr
-; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2* %[[next]], i64 0, i32 0
+; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 0
; CHECK-NEXT: %[[next_s1:.*]] = load %S1** %[[gep]]
-; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2* %[[next]], i64 0, i32 1
+; CHECK-NEXT: %[[gep:.*]] = getelementptr %S2, %S2* %[[next]], i64 0, i32 1
; CHECK-NEXT: %[[next_next:.*]] = load %S2** %[[gep]]
%new.s1 = load %S1** %new.s1.ptr
entry:
%a = alloca { [3 x i8] }, align 8
- %gep1 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 0
+ %gep1 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 0
store i8 0, i8* %gep1, align 1
- %gep2 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 1
+ %gep2 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 1
store i8 0, i8* %gep2, align 1
- %gep3 = getelementptr inbounds { [3 x i8] }* %a, i32 0, i32 0, i32 2
+ %gep3 = getelementptr inbounds { [3 x i8] }, { [3 x i8] }* %a, i32 0, i32 0, i32 2
store i8 26, i8* %gep3, align 1
%cast = bitcast { [3 x i8] }* %a to { i64 }*
- %elt = getelementptr inbounds { i64 }* %cast, i32 0, i32 0
+ %elt = getelementptr inbounds { i64 }, { i64 }* %cast, i32 0, i32 0
%load = load i64* %elt
%result = and i64 %load, 16777215
ret i64 %result
entry:
%a = alloca [8 x i8]
- %ptr = getelementptr [8 x i8]* %a, i32 0, i32 0
+ %ptr = getelementptr [8 x i8], [8 x i8]* %a, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %ptr, i8 0, i32 8, i32 1, i1 false)
%s2ptrptr = bitcast i8* %ptr to %S2**
%s2ptr = load %S2** %s2ptrptr
br i1 undef, label %good, label %bad
good:
- %Y = getelementptr i32* %X, i64 0
+ %Y = getelementptr i32, i32* %X, i64 0
store i32 0, i32* %Y
%Z = load i32* %Y
ret i32 %Z
bad:
- %Y2 = getelementptr i32* %X, i64 1
+ %Y2 = getelementptr i32, i32* %X, i64 1
store i32 0, i32* %Y2
%Z2 = load i32* %Y2
ret i32 %Z2
%b = alloca [3 x i8]
; CHECK-NOT: alloca
- %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ %a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %a0ptr
- %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ %a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %a1ptr
- %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ %a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
%aiptr = bitcast [3 x i8]* %a to i24*
%ai = load i24* %aiptr
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
- %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+ %b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
%b0 = load i8* %b0ptr
- %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+ %b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
%b1 = load i8* %b1ptr
- %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+ %b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
%b2 = load i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
entry:
%a = alloca [3 x i8], align 2
- %b0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ %b0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %b0ptr
- %b1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ %b1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %b1ptr
- %b2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ %b2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %b2ptr
%iptrcast = bitcast [3 x i8]* %a to i16*
- %iptrgep = getelementptr i16* %iptrcast, i64 1
+ %iptrgep = getelementptr i16, i16* %iptrcast, i64 1
%i = load i16* %iptrgep
%ret = zext i16 %i to i32
ret i32 %ret
%a = alloca %test14.struct
%p = alloca %test14.struct*
%0 = bitcast %test14.struct* %a to i8*
- %1 = getelementptr i8* %0, i64 12
+ %1 = getelementptr i8, i8* %0, i64 12
%2 = bitcast i8* %1 to %test14.struct*
- %3 = getelementptr inbounds %test14.struct* %2, i32 0, i32 0
- %4 = getelementptr inbounds %test14.struct* %a, i32 0, i32 0
+ %3 = getelementptr inbounds %test14.struct, %test14.struct* %2, i32 0, i32 0
+ %4 = getelementptr inbounds %test14.struct, %test14.struct* %a, i32 0, i32 0
%5 = bitcast [3 x i32]* %3 to i32*
%6 = bitcast [3 x i32]* %4 to i32*
%7 = load i32* %6, align 4
store i32 %7, i32* %5, align 4
- %8 = getelementptr inbounds i32* %5, i32 1
- %9 = getelementptr inbounds i32* %6, i32 1
+ %8 = getelementptr inbounds i32, i32* %5, i32 1
+ %9 = getelementptr inbounds i32, i32* %6, i32 1
%10 = load i32* %9, align 4
store i32 %10, i32* %8, align 4
- %11 = getelementptr inbounds i32* %5, i32 2
- %12 = getelementptr inbounds i32* %6, i32 2
+ %11 = getelementptr inbounds i32, i32* %5, i32 2
+ %12 = getelementptr inbounds i32, i32* %6, i32 2
%13 = load i32* %12, align 4
store i32 %13, i32* %11, align 4
ret void
store i64 1879048192, i64* %l0, align 8
%bc0 = bitcast i64* %l0 to i8*
- %gep0 = getelementptr i8* %bc0, i64 3
+ %gep0 = getelementptr i8, i8* %bc0, i64 3
%dead0 = bitcast i8* %gep0 to i64*
store i64 1879048192, i64* %l1, align 8
%bc1 = bitcast i64* %l1 to i8*
- %gep1 = getelementptr i8* %bc1, i64 3
- %dead1 = getelementptr i8* %gep1, i64 1
+ %gep1 = getelementptr i8, i8* %bc1, i64 3
+ %dead1 = getelementptr i8, i8* %gep1, i64 1
store i64 1879048192, i64* %l2, align 8
%bc2 = bitcast i64* %l2 to i8*
- %gep2.1 = getelementptr i8* %bc2, i64 1
- %gep2.2 = getelementptr i8* %bc2, i64 3
+ %gep2.1 = getelementptr i8, i8* %bc2, i64 1
+ %gep2.2 = getelementptr i8, i8* %bc2, i64 3
; Note that this select should get visited multiple times due to using two
; different GEPs off the same alloca. We should only delete it once.
%dead2 = select i1 %flag, i8* %gep2.1, i8* %gep2.2
store i64 1879048192, i64* %l3, align 8
%bc3 = bitcast i64* %l3 to i8*
- %gep3 = getelementptr i8* %bc3, i64 3
+ %gep3 = getelementptr i8, i8* %bc3, i64 3
br label %loop
}
entry:
%a = alloca [3 x i8]
- %ptr = getelementptr [3 x i8]* %a, i32 0, i32 0
+ %ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 false)
%cast = bitcast i8* %ptr to i24*
store i24 0, i24* %cast
; the alloca.
; CHECK-LABEL: @test17(
; CHECK: %[[a:.*]] = alloca [3 x i8]
-; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8]* %[[a]], i32 0, i32 0
+; CHECK-NEXT: %[[ptr:.*]] = getelementptr [3 x i8], [3 x i8]* %[[a]], i32 0, i32 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[ptr]], i8* %src,
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[ptr]],
; CHECK-NEXT: ret void
entry:
%a = alloca [3 x i8]
- %ptr = getelementptr [3 x i8]* %a, i32 0, i32 0
+ %ptr = getelementptr [3 x i8], [3 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 4, i32 1, i1 true)
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %ptr, i32 4, i32 1, i1 true)
ret void
; the variable sized intrinsic.
; CHECK-LABEL: @test18(
; CHECK: %[[a:.*]] = alloca [34 x i8]
-; CHECK: %[[srcgep1:.*]] = getelementptr inbounds i8* %src, i64 4
+; CHECK: %[[srcgep1:.*]] = getelementptr inbounds i8, i8* %src, i64 4
; CHECK-NEXT: %[[srccast1:.*]] = bitcast i8* %[[srcgep1]] to i32*
; CHECK-NEXT: %[[srcload:.*]] = load i32* %[[srccast1]]
-; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: %[[agep1:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[agep1]], i8* %src, i32 %size,
-; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: %[[agep2:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* %[[agep2]], i8 42, i32 %size,
; CHECK-NEXT: %[[dstcast1:.*]] = bitcast i8* %dst to i32*
; CHECK-NEXT: store i32 42, i32* %[[dstcast1]]
-; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8* %dst, i64 4
+; CHECK-NEXT: %[[dstgep1:.*]] = getelementptr inbounds i8, i8* %dst, i64 4
; CHECK-NEXT: %[[dstcast2:.*]] = bitcast i8* %[[dstgep1]] to i32*
; CHECK-NEXT: store i32 %[[srcload]], i32* %[[dstcast2]]
-; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8]* %[[a]], i64 0, i64 0
+; CHECK-NEXT: %[[agep3:.*]] = getelementptr inbounds [34 x i8], [34 x i8]* %[[a]], i64 0, i64 0
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %[[agep3]], i32 %size,
; CHECK-NEXT: ret void
entry:
%a = alloca [42 x i8]
- %ptr = getelementptr [42 x i8]* %a, i32 0, i32 0
+ %ptr = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr, i8* %src, i32 8, i32 1, i1 false)
- %ptr2 = getelementptr [42 x i8]* %a, i32 0, i32 8
+ %ptr2 = getelementptr [42 x i8], [42 x i8]* %a, i32 0, i32 8
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %ptr2, i8* %src, i32 %size, i32 1, i1 false)
call void @llvm.memset.p0i8.i32(i8* %ptr2, i8 42, i32 %size, i32 1, i1 false)
%cast = bitcast i8* %ptr to i32*
%cast1 = bitcast %opaque* %x to i8*
%cast2 = bitcast { i64, i8* }* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast2, i8* %cast1, i32 16, i32 1, i1 false)
- %gep = getelementptr inbounds { i64, i8* }* %a, i32 0, i32 0
+ %gep = getelementptr inbounds { i64, i8* }, { i64, i8* }* %a, i32 0, i32 0
%val = load i64* %gep
ret i32 undef
}
entry:
%a = alloca [3 x i32]
- %gep1 = getelementptr [3 x i32]* %a, i32 0, i32 0
+ %gep1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 0
store i32 1, i32* %gep1
- %gep2.1 = getelementptr [3 x i32]* %a, i32 0, i32 -2
- %gep2.2 = getelementptr i32* %gep2.1, i32 3
+ %gep2.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 -2
+ %gep2.2 = getelementptr i32, i32* %gep2.1, i32 3
store i32 2, i32* %gep2.2
- %gep3.1 = getelementptr [3 x i32]* %a, i32 0, i32 14
- %gep3.2 = getelementptr i32* %gep3.1, i32 -12
+ %gep3.1 = getelementptr [3 x i32], [3 x i32]* %a, i32 0, i32 14
+ %gep3.2 = getelementptr i32, i32* %gep3.1, i32 -12
store i32 3, i32* %gep3.2
%load1 = load i32* %gep1
entry:
%a = alloca [2305843009213693951 x i8]
- %gep0 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
+ %gep0 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 2305843009213693949
store i8 255, i8* %gep0
- %gep1 = getelementptr [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
- %gep2 = getelementptr i8* %gep1, i64 -1
+ %gep1 = getelementptr [2305843009213693951 x i8], [2305843009213693951 x i8]* %a, i64 0, i64 -9223372036854775807
+ %gep2 = getelementptr i8, i8* %gep1, i64 -1
call void @llvm.memset.p0i8.i64(i8* %gep2, i8 0, i64 18446744073709551615, i32 1, i1 false)
- %gep3 = getelementptr i8* %gep1, i64 9223372036854775807
- %gep4 = getelementptr i8* %gep3, i64 9223372036854775807
- %gep5 = getelementptr i8* %gep4, i64 -6917529027641081857
+ %gep3 = getelementptr i8, i8* %gep1, i64 9223372036854775807
+ %gep4 = getelementptr i8, i8* %gep3, i64 9223372036854775807
+ %gep5 = getelementptr i8, i8* %gep4, i64 -6917529027641081857
store i8 255, i8* %gep5
%cast1 = bitcast i8* %gep4 to i32*
store i32 0, i32* %cast1
%load = load i8* %gep0
- %gep6 = getelementptr i8* %gep0, i32 1
+ %gep6 = getelementptr i8, i8* %gep0, i32 1
%load2 = load i8* %gep6
%result = or i8 %load, %load2
ret i8 %result
br label %if.end
if.end:
- %gep = getelementptr %PR13916.struct* %a, i32 0, i32 0
+ %gep = getelementptr %PR13916.struct, %PR13916.struct* %a, i32 0, i32 0
%tmp2 = load i8* %gep
ret void
}
entry:
%a = alloca %PR14034.struct
- %list = getelementptr %PR14034.struct* %a, i32 0, i32 2
- %prev = getelementptr %PR14034.list* %list, i32 0, i32 1
+ %list = getelementptr %PR14034.struct, %PR14034.struct* %a, i32 0, i32 2
+ %prev = getelementptr %PR14034.list, %PR14034.list* %list, i32 0, i32 1
store %PR14034.list* undef, %PR14034.list** %prev
%cast0 = bitcast %PR14034.struct* undef to i8*
%cast1 = bitcast %PR14034.struct* %a to i8*
; CHECK-NOT: alloca
%wrap1 = insertvalue [1 x { i32 }] undef, i32 %x, 0, 0
- %gep1 = getelementptr { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
+ %gep1 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0, i32 0
store [1 x { i32 }] %wrap1, [1 x { i32 }]* %gep1
- %gep2 = getelementptr { { [1 x { i32 }] } }* %a1, i32 0, i32 0
+ %gep2 = getelementptr { { [1 x { i32 }] } }, { { [1 x { i32 }] } }* %a1, i32 0, i32 0
%ptrcast1 = bitcast { [1 x { i32 }] }* %gep2 to { [1 x { float }] }*
%load1 = load { [1 x { float }] }* %ptrcast1
%unwrap1 = extractvalue { [1 x { float }] } %load1, 0, 0
%wrap2 = insertvalue { {}, { float }, [0 x i8] } undef, { float } %unwrap1, 1
store { {}, { float }, [0 x i8] } %wrap2, { {}, { float }, [0 x i8] }* %a2
- %gep3 = getelementptr { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
+ %gep3 = getelementptr { {}, { float }, [0 x i8] }, { {}, { float }, [0 x i8] }* %a2, i32 0, i32 1, i32 0
%ptrcast2 = bitcast float* %gep3 to <4 x i8>*
%load3 = load <4 x i8>* %ptrcast2
%valcast1 = bitcast <4 x i8> %load3 to i32
%wrap3 = insertvalue [1 x [1 x i32]] undef, i32 %valcast1, 0, 0
%wrap4 = insertvalue { [1 x [1 x i32]], {} } undef, [1 x [1 x i32]] %wrap3, 0
- %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
+ %gep4 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1
%ptrcast3 = bitcast { [0 x double], [1 x [1 x <4 x i8>]], {} }* %gep4 to { [1 x [1 x i32]], {} }*
store { [1 x [1 x i32]], {} } %wrap4, { [1 x [1 x i32]], {} }* %ptrcast3
- %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
+ %gep5 = getelementptr { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }, { [0 x i8], { [0 x double], [1 x [1 x <4 x i8>]], {} }, { { {} } } }* %a3, i32 0, i32 1, i32 1, i32 0
%ptrcast4 = bitcast [1 x <4 x i8>]* %gep5 to { {}, float, {} }*
%load4 = load { {}, float, {} }* %ptrcast4
%unwrap2 = extractvalue { {}, float, {} } %load4, 1
store i32 0, i32* %X.sroa.0.0.cast2.i, align 8
; Also use a memset to the middle 32-bits for fun.
- %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8* %0, i32 2
+ %X.sroa.0.2.raw_idx2.i = getelementptr inbounds i8, i8* %0, i32 2
call void @llvm.memset.p0i8.i64(i8* %X.sroa.0.2.raw_idx2.i, i8 0, i64 4, i32 1, i1 false)
; Or a memset of the whole thing.
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 8, i32 1, i1 false)
; Write to the high 32-bits with a memcpy.
- %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8* %0, i32 4
+ %X.sroa.0.4.raw_idx4.i = getelementptr inbounds i8, i8* %0, i32 4
%d.raw = bitcast double* %d to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %X.sroa.0.4.raw_idx4.i, i8* %d.raw, i32 4, i32 1, i1 false)
store i64 0, i64* %0
; CHECK-NOT: store
- %phi.realp = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+ %phi.realp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
%phi.real = load float* %phi.realp
- %phi.imagp = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+ %phi.imagp = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
%phi.imag = load float* %phi.imagp
- ; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 0
+ ; CHECK: %[[realp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 0
; CHECK-NEXT: %[[real:.*]] = load float* %[[realp]]
- ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }* %phi, i32 0, i32 1
+ ; CHECK-NEXT: %[[imagp:.*]] = getelementptr inbounds { float, float }, { float, float }* %phi, i32 0, i32 1
; CHECK-NEXT: %[[imag:.*]] = load float* %[[imagp]]
- %real = getelementptr inbounds { float, float }* %retval, i32 0, i32 0
- %imag = getelementptr inbounds { float, float }* %retval, i32 0, i32 1
+ %real = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 0
+ %imag = getelementptr inbounds { float, float }, { float, float }* %retval, i32 0, i32 1
store float %phi.real, float* %real
store float %phi.imag, float* %imag
; CHECK-NEXT: %[[real_convert:.*]] = bitcast float %[[real]] to i32
%a = alloca { [16 x i8] }, align 8
; CHECK: alloca [16 x i8], align 8
- %gep = getelementptr inbounds { [16 x i8] }* %ptr, i64 -1
-; CHECK-NEXT: getelementptr inbounds { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
+ %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1
+; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] }* %ptr, i64 -1, i32 0, i64 0
%cast1 = bitcast { [16 x i8 ] }* %gep to i8*
%cast2 = bitcast { [16 x i8 ] }* %a to i8*
%a = alloca { [16 x i8] }, align 8
; CHECK: alloca [16 x i8], align 8
- %gep = getelementptr inbounds { [16 x i8] } addrspace(1)* %ptr, i64 -1
-; CHECK-NEXT: getelementptr inbounds { [16 x i8] } addrspace(1)* %ptr, i16 -1, i32 0, i16 0
+ %gep = getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i64 -1
+; CHECK-NEXT: getelementptr inbounds { [16 x i8] }, { [16 x i8] } addrspace(1)* %ptr, i16 -1, i32 0, i16 0
%cast1 = bitcast { [16 x i8 ] } addrspace(1)* %gep to i8 addrspace(1)*
%cast2 = bitcast { [16 x i8 ] }* %a to i8*
%a.i8 = bitcast <{ i1 }>* %a to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.i8, i8* %b.i8, i32 1, i32 1, i1 false) nounwind
%bar = load i8* %a.i8, align 1
- %a.i1 = getelementptr inbounds <{ i1 }>* %a, i32 0, i32 0
+ %a.i1 = getelementptr inbounds <{ i1 }>, <{ i1 }>* %a, i32 0, i32 0
%baz = load i1* %a.i1, align 1
; CHECK-NEXT: %[[a_cast:.*]] = bitcast i8* %[[a]] to i1*
; CHECK-NEXT: {{.*}} = load i1* %[[a_cast]], align 8
]
bb4:
- %src.gep3 = getelementptr inbounds i8* %src, i32 3
+ %src.gep3 = getelementptr inbounds i8, i8* %src, i32 3
%src.3 = load i8* %src.gep3
- %tmp.gep3 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 3
+ %tmp.gep3 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 3
store i8 %src.3, i8* %tmp.gep3
; CHECK: store i8
br label %bb3
bb3:
- %src.gep2 = getelementptr inbounds i8* %src, i32 2
+ %src.gep2 = getelementptr inbounds i8, i8* %src, i32 2
%src.2 = load i8* %src.gep2
- %tmp.gep2 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 2
+ %tmp.gep2 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 2
store i8 %src.2, i8* %tmp.gep2
; CHECK: store i8
br label %bb2
bb2:
- %src.gep1 = getelementptr inbounds i8* %src, i32 1
+ %src.gep1 = getelementptr inbounds i8, i8* %src, i32 1
%src.1 = load i8* %src.gep1
- %tmp.gep1 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 1
+ %tmp.gep1 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 1
store i8 %src.1, i8* %tmp.gep1
; CHECK: store i8
br label %bb1
bb1:
- %src.gep0 = getelementptr inbounds i8* %src, i32 0
+ %src.gep0 = getelementptr inbounds i8, i8* %src, i32 0
%src.0 = load i8* %src.gep0
- %tmp.gep0 = getelementptr inbounds [4 x i8]* %tmp, i32 0, i32 0
+ %tmp.gep0 = getelementptr inbounds [4 x i8], [4 x i8]* %tmp, i32 0, i32 0
store i8 %src.0, i8* %tmp.gep0
; CHECK: store i8
%b = alloca i32, align 4
%b.cast = bitcast i32* %b to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %b.cast, i8* %a, i32 4, i32 4, i1 true)
- %b.gep = getelementptr inbounds i8* %b.cast, i32 2
+ %b.gep = getelementptr inbounds i8, i8* %b.cast, i32 2
load i8* %b.gep, align 2
unreachable
}
entry:
%tv1 = alloca { <2 x float>, <2 x float> }, align 8
- %0 = getelementptr { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
+ %0 = getelementptr { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1
store <2 x float> undef, <2 x float>* %0, align 8
- %1 = getelementptr inbounds { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
+ %1 = getelementptr inbounds { <2 x float>, <2 x float> }, { <2 x float>, <2 x float> }* %tv1, i64 0, i32 1, i64 0
%cond105.in.i.i = select i1 undef, float* null, float* %1
%cond105.i.i = load float* %cond105.in.i.i, align 8
ret void
entry:
%a = alloca i32, align 4
store i32 %x, i32* %a, align 4
- %gep1 = getelementptr inbounds i32* %a, i32 1
- %gep0 = getelementptr inbounds i32* %a, i32 0
+ %gep1 = getelementptr inbounds i32, i32* %a, i32 1
+ %gep0 = getelementptr inbounds i32, i32* %a, i32 0
%cast1 = bitcast i32* %gep1 to i8*
%cast0 = bitcast i32* %gep0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %cast1, i8* %cast0, i32 4, i32 1, i1 false)
; CHECK: ret void
entry:
%f = alloca i8
- %gep = getelementptr i8* %f, i64 -1
+ %gep = getelementptr i8, i8* %f, i64 -1
call void @llvm.memcpy.p0i8.p0i8.i32(i8* undef, i8* %gep, i32 1, i32 1, i1 false)
ret void
}
%a = alloca i64
%b = alloca i64
%a.cast = bitcast i64* %a to [2 x float]*
- %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
- %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+ %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
%b.cast = bitcast i64* %b to [2 x float]*
- %b.gep1 = getelementptr [2 x float]* %b.cast, i32 0, i32 0
- %b.gep2 = getelementptr [2 x float]* %b.cast, i32 0, i32 1
+ %b.gep1 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 0
+ %b.gep2 = getelementptr [2 x float], [2 x float]* %b.cast, i32 0, i32 1
store float 0.0, float* %a.gep1
store float 1.0, float* %a.gep2
%v = load i64* %a
entry:
%a = alloca i64
%a.cast = bitcast i64* %a to [2 x float]*
- %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
- %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+ %a.gep1 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float], [2 x float]* %a.cast, i32 0, i32 1
%v1 = load i64* bitcast ([2 x float]* @complex1 to i64*)
store i64 %v1, i64* %a
%f1 = load float* %a.gep1
entry:
%a = alloca [12 x i8]
- %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
- %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
- %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
+ %gep1 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 0
+ %gep2 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 4
+ %gep3 = getelementptr [12 x i8], [12 x i8]* %a, i32 0, i32 8
%iptr1 = bitcast i8* %gep1 to i64*
%iptr2 = bitcast i8* %gep2 to i64*
%fptr1 = bitcast i8* %gep1 to float*
store volatile i16 42, i16* %a.cast2
%load = load i32* %a.cast1
store i32 %load, i32* %a.cast1
- %a.gep1 = getelementptr i32* %a.cast1, i32 1
+ %a.gep1 = getelementptr i32, i32* %a.cast1, i32 1
%a.cast3 = bitcast i32* %a.gep1 to i8*
store volatile i8 13, i8* %a.cast3
store i32 %load, i32* %a.gep1
%b = alloca [3 x i8]
; CHECK-NOT: alloca
- %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ %a0ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 0
store i8 0, i8* %a0ptr
- %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ %a1ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 1
store i8 0, i8* %a1ptr
- %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ %a2ptr = getelementptr [3 x i8], [3 x i8]* %a, i64 0, i32 2
store i8 0, i8* %a2ptr
%aiptr = bitcast [3 x i8]* %a to i24*
%ai = load i24* %aiptr
%biptr = bitcast [3 x i8]* %b to i24*
store i24 %ai, i24* %biptr
- %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+ %b0ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 0
%b0 = load i8* %b0ptr
- %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+ %b1ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 1
%b1 = load i8* %b1ptr
- %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+ %b2ptr = getelementptr [3 x i8], [3 x i8]* %b, i64 0, i32 2
%b2 = load i8* %b2ptr
; CHECK-NOT: store
; CHECK-NOT: load
%a = alloca [7 x i8]
; CHECK-NOT: alloca
- %a0ptr = getelementptr [7 x i8]* %a, i64 0, i32 0
- %a1ptr = getelementptr [7 x i8]* %a, i64 0, i32 1
- %a2ptr = getelementptr [7 x i8]* %a, i64 0, i32 2
- %a3ptr = getelementptr [7 x i8]* %a, i64 0, i32 3
+ %a0ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 0
+ %a1ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 1
+ %a2ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 2
+ %a3ptr = getelementptr [7 x i8], [7 x i8]* %a, i64 0, i32 3
; CHECK-NOT: store
; CHECK-NOT: load
store { i32, i32 } undef, { i32, i32 }* %a
- %gep1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
+ %gep1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 0
store i32 %x, i32* %gep1
- %gep2 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
+ %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
store i32 %y, i32* %gep2
%result = load { i32, i32 }* %a
%a = alloca { i32, i32 }
%b = alloca { i32, i32 }
- %gep1 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 0
+ %gep1 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 0
store i32 %x, i32* %gep1
- %gep2 = getelementptr inbounds { i32, i32 }* %a, i32 0, i32 1
+ %gep2 = getelementptr inbounds { i32, i32 }, { i32, i32 }* %a, i32 0, i32 1
store i32 %y, i32* %gep2
%result = load volatile { i32, i32 }* %a
%a = alloca [2 x i32]
; CHECK-NOT: alloca
- %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
- %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
%v0 = load i32* %a0
%a = alloca [2 x i32]
; CHECK-NOT: alloca
- %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
- %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
%v0 = load i32* %a0
; Note that we build redundant GEPs here to ensure that having different GEPs
; into the same alloca partation continues to work with PHI speculation. This
; was the underlying cause of PR13926.
- %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
- %a0b = getelementptr [2 x i32]* %a, i64 0, i32 0
- %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
- %a1b = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+ %a0b = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
+ %a1b = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
; CHECK-NOT: store
%a = alloca [2 x i32]
; CHECK-NOT: alloca
- %a0 = getelementptr [2 x i32]* %a, i64 0, i32 0
- %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a0 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 0
+ %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 0, i32* %a0
store i32 1, i32* %a1
%v0 = load i32* %a0
%a = alloca [2 x i32]
; CHECK-NOT: alloca
- %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 1, i32* %a1
; CHECK-NOT: store
%c = alloca i32
; CHECK-NOT: alloca
- %a1 = getelementptr [2 x i32]* %a, i64 0, i32 1
+ %a1 = getelementptr [2 x i32], [2 x i32]* %a, i64 0, i32 1
store i32 1, i32* %a1
%select = select i1 true, i32* %a1, i32* %b
br i1 undef, label %good, label %bad
good:
- %Y1 = getelementptr i32* %X, i64 0
+ %Y1 = getelementptr i32, i32* %X, i64 0
store i32 0, i32* %Y1
br label %exit
bad:
- %Y2 = getelementptr i32* %X, i64 1
+ %Y2 = getelementptr i32, i32* %X, i64 1
store i32 0, i32* %Y2
br label %exit
else:
%a.raw = bitcast i64* %a to i8*
- %a.raw.4 = getelementptr i8* %a.raw, i64 4
+ %a.raw.4 = getelementptr i8, i8* %a.raw, i64 4
%a.raw.4.f = bitcast i8* %a.raw.4 to float*
br label %end
; CHECK: %[[hi_cast:.*]] = bitcast i32 %[[hi]] to float
br i1 %cond, label %then, label %else
then:
- %0 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+ %0 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
store float 1.000000e+00, float* %0, align 4
br label %merge
else:
- %1 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+ %1 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
store float 2.000000e+00, float* %1, align 4
br label %merge
br i1 %cond, label %then, label %else
then:
- %0 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+ %0 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
store float 1.000000e+00, float* %0, align 4
br label %then2
br label %merge
else:
- %2 = getelementptr inbounds [4 x float]* %arr, i64 0, i64 3
+ %2 = getelementptr inbounds [4 x float], [4 x float]* %arr, i64 0, i64 3
store float 3.000000e+00, float* %2, align 4
br label %merge
%2 = bitcast { i16*, i32 }* %0 to i8*
%3 = bitcast { i16*, i32 }* %arg to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
- %b = getelementptr inbounds { i16*, i32 }* %arg, i64 0, i32 0
+ %b = getelementptr inbounds { i16*, i32 }, { i16*, i32 }* %arg, i64 0, i32 0
%pb0 = bitcast i16** %b to i63*
%b0 = load i63* %pb0
%pb1 = bitcast i16** %b to i8**
%2 = bitcast { i16*, i32 }* %0 to i8*
%3 = bitcast { i16*, i32 }* %arg to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %3, i8* %2, i32 16, i32 8, i1 false)
- %b = getelementptr inbounds { i16*, i32 }* %arg, i64 0, i32 0
+ %b = getelementptr inbounds { i16*, i32 }, { i16*, i32 }* %arg, i64 0, i32 0
%pb1 = bitcast i16** %b to i8**
%b1 = load i8** %pb1
%pb0 = bitcast i16** %b to i63*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x_i8, i8* bitcast (%union.Foo* @foo_copy_source to i8*), i32 32, i32 16, i1 false)
; Access a slice of the alloca to trigger SROA.
- %mid_p = getelementptr %union.Foo* %x, i32 0, i32 1
+ %mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
%elt = load i64* %mid_p
store i64 %elt, i64* @i64_sink
ret void
call void @llvm.memset.p0i8.i32(i8* %x_i8, i8 -1, i32 32, i32 16, i1 false)
; Access a slice of the alloca to trigger SROA.
- %mid_p = getelementptr %union.Foo* %x, i32 0, i32 1
+ %mid_p = getelementptr %union.Foo, %union.Foo* %x, i32 0, i32 1
%elt = load i64* %mid_p
store i64 %elt, i64* @i64_sink
ret void
; The following block does nothing; but appears to confuse SROA
%unused1 = bitcast %S.vec3float* %tmp1 to %U.vec3float*
- %unused2 = getelementptr inbounds %U.vec3float* %unused1, i32 0, i32 0
+ %unused2 = getelementptr inbounds %U.vec3float, %U.vec3float* %unused1, i32 0, i32 0
%unused3 = load <4 x float>* %unused2, align 1
; Create a second temporary and copy %tmp1 into it
%a = alloca [2 x <4 x i32>]
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
store <4 x i32> %x, <4 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
store <4 x i32> %y, <4 x i32>* %a.y
; CHECK-NOT: store
- %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%tmp3 = load i32* %a.tmp3
; CHECK-NOT: load
; CHECK: extractelement <4 x i32> %x, i32 2
%a = alloca [2 x <4 x i32>]
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
store <4 x i32> %x, <4 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
store <4 x i32> %y, <4 x i32>* %a.y
; CHECK-NOT: store
- %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%a.tmp3.cast = bitcast i32* %a.tmp3 to <2 x i32>*
%tmp3.vec = load <2 x i32>* %a.tmp3.cast
%tmp3 = extractelement <2 x i32> %tmp3.vec, i32 0
%a = alloca [2 x <4 x i32>]
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
store <4 x i32> %x, <4 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
store <4 x i32> %y, <4 x i32>* %a.y
; CHECK-NOT: store
call void @llvm.memset.p0i8.i32(i8* %a.y.cast, i8 0, i32 16, i32 1, i1 false)
; CHECK-NOT: memset
- %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.tmp1.cast, i8 -1, i32 4, i32 1, i1 false)
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%tmp3 = load i32* %a.tmp3
; CHECK-NOT: load
; CHECK: %[[insert:.*]] = insertelement <4 x i32> %x, i32 -1, i32 2
%a = alloca [2 x <4 x i32>]
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
store <4 x i32> %x, <4 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
store <4 x i32> %y, <4 x i32>* %a.y
; CHECK-NOT: store
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.y.cast, i8* %z.cast, i32 16, i32 1, i1 false)
; CHECK-NOT: memcpy
- %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
- %z.tmp1 = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+ %z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
%z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.tmp1.cast, i8* %z.tmp1.cast, i32 4, i32 1, i1 false)
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%tmp3 = load i32* %a.tmp3
; CHECK-NOT: memcpy
; CHECK: %[[load:.*]] = load <4 x i32>* %z
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
; CHECK-NEXT: %[[element_load:.*]] = load i32* %[[gep]]
; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
%a = alloca [2 x <4 x i32>]
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
store <4 x i32> %x, <4 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
store <4 x i32> %y, <4 x i32>* %a.y
; CHECK-NOT: store
call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.y.cast, i8 addrspace(1)* %z.cast, i32 16, i32 1, i1 false)
; CHECK-NOT: memcpy
- %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
- %z.tmp1 = getelementptr inbounds <4 x i32> addrspace(1)* %z, i16 0, i16 2
+ %z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %z, i16 0, i16 2
%z.tmp1.cast = bitcast i32 addrspace(1)* %z.tmp1 to i8 addrspace(1)*
call void @llvm.memcpy.p0i8.p1i8.i32(i8* %a.tmp1.cast, i8 addrspace(1)* %z.tmp1.cast, i32 4, i32 1, i1 false)
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%tmp3 = load i32* %a.tmp3
; CHECK-NOT: memcpy
; CHECK: %[[load:.*]] = load <4 x i32> addrspace(1)* %z
-; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32> addrspace(1)* %z, i64 0, i64 2
+; CHECK-NEXT: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %z, i64 0, i64 2
; CHECK-NEXT: %[[element_load:.*]] = load i32 addrspace(1)* %[[gep]]
; CHECK-NEXT: %[[insert:.*]] = insertelement <4 x i32> %x, i32 %[[element_load]], i32 2
; CHECK-NEXT: extractelement <4 x i32> %[[insert]], i32 2
%a = alloca [2 x <4 x i32>]
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0
store <4 x i32> %x, <4 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1
store <4 x i32> %y, <4 x i32>* %a.y
; CHECK-NOT: store
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.x.cast, i8* %a.y.cast, i32 16, i32 1, i1 false)
; CHECK-NOT: memcpy
- %a.tmp1 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
+ %a.tmp1 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 0, i64 2
%a.tmp1.cast = bitcast i32* %a.tmp1 to i8*
- %z.tmp1 = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+ %z.tmp1 = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
%z.tmp1.cast = bitcast i32* %z.tmp1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %z.tmp1.cast, i8* %a.tmp1.cast, i32 4, i32 1, i1 false)
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
+ %a.tmp2 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 3
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <4 x i32>], [2 x <4 x i32>]* %a, i64 0, i64 1, i64 0
%tmp3 = load i32* %a.tmp3
; CHECK-NOT: memcpy
-; CHECK: %[[gep:.*]] = getelementptr inbounds <4 x i32>* %z, i64 0, i64 2
+; CHECK: %[[gep:.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* %z, i64 0, i64 2
; CHECK-NEXT: %[[extract:.*]] = extractelement <4 x i32> %y, i32 2
; CHECK-NEXT: store i32 %[[extract]], i32* %[[gep]]
; CHECK-NEXT: extractelement <4 x i32> %y, i32 2
; The old scalarrepl pass would wrongly drop the store to the second alloca.
; PR13254
%tmp = alloca { <4 x i64>, <4 x i64> }
- %p0 = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0
+ %p0 = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0
store <4 x i64> %x, <4 x i64>* %p0
; CHECK: store <4 x i64> %x,
- %p1 = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 1
+ %p1 = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 1
store <4 x i64> %y, <4 x i64>* %p1
; CHECK: store <4 x i64> %y,
- %addr = getelementptr inbounds { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0, i64 %n
+ %addr = getelementptr inbounds { <4 x i64>, <4 x i64> }, { <4 x i64>, <4 x i64> }* %tmp, i32 0, i32 0, i64 %n
%res = load i64* %addr, align 4
ret i64 %res
}
%a = alloca <4 x i32>
; CHECK-NOT: alloca
- %a.gep0 = getelementptr <4 x i32>* %a, i32 0, i32 0
+ %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
%a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
store <2 x i32> <i32 0, i32 0>, <2 x i32>* %a.cast0
; CHECK-NOT: store
; CHECK: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
- %a.gep1 = getelementptr <4 x i32>* %a, i32 0, i32 1
+ %a.gep1 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 1
%a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
store <2 x i32> <i32 1, i32 1>, <2 x i32>* %a.cast1
; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
- %a.gep2 = getelementptr <4 x i32>* %a, i32 0, i32 2
+ %a.gep2 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 2
%a.cast2 = bitcast i32* %a.gep2 to <2 x i32>*
store <2 x i32> <i32 2, i32 2>, <2 x i32>* %a.cast2
; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
- %a.gep3 = getelementptr <4 x i32>* %a, i32 0, i32 3
+ %a.gep3 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 3
store i32 3, i32* %a.gep3
; CHECK-NEXT: insertelement <4 x i32>
store <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32>* %a
; CHECK-NOT: store
- %a.gep0 = getelementptr <4 x i32>* %a, i32 0, i32 0
+ %a.gep0 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 0
%a.cast0 = bitcast i32* %a.gep0 to <2 x i32>*
%first = load <2 x i32>* %a.cast0
; CHECK-NOT: load
; CHECK: %[[extract1:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
- %a.gep1 = getelementptr <4 x i32>* %a, i32 0, i32 1
+ %a.gep1 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 1
%a.cast1 = bitcast i32* %a.gep1 to <2 x i32>*
%second = load <2 x i32>* %a.cast1
; CHECK-NEXT: %[[extract2:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 1, i32 2>
- %a.gep2 = getelementptr <4 x i32>* %a, i32 0, i32 2
+ %a.gep2 = getelementptr <4 x i32>, <4 x i32>* %a, i32 0, i32 2
%a.cast2 = bitcast i32* %a.gep2 to <2 x i32>*
%third = load <2 x i32>* %a.cast2
; CHECK-NEXT: %[[extract3:.*]] = shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%a = alloca <4 x float>
; CHECK-NOT: alloca
- %a.gep0 = getelementptr <4 x float>* %a, i32 0, i32 0
+ %a.gep0 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 0
%a.cast0 = bitcast float* %a.gep0 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast0, i8 0, i32 8, i32 0, i1 false)
; CHECK-NOT: store
; CHECK: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
- %a.gep1 = getelementptr <4 x float>* %a, i32 0, i32 1
+ %a.gep1 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 1
%a.cast1 = bitcast float* %a.gep1 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast1, i8 1, i32 8, i32 0, i1 false)
; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
- %a.gep2 = getelementptr <4 x float>* %a, i32 0, i32 2
+ %a.gep2 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 2
%a.cast2 = bitcast float* %a.gep2 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast2, i8 3, i32 8, i32 0, i1 false)
; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
- %a.gep3 = getelementptr <4 x float>* %a, i32 0, i32 3
+ %a.gep3 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 3
%a.cast3 = bitcast float* %a.gep3 to i8*
call void @llvm.memset.p0i8.i32(i8* %a.cast3, i8 7, i32 4, i32 0, i1 false)
; CHECK-NEXT: insertelement <4 x float>
%a = alloca <4 x float>
; CHECK-NOT: alloca
- %a.gep0 = getelementptr <4 x float>* %a, i32 0, i32 0
+ %a.gep0 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 0
%a.cast0 = bitcast float* %a.gep0 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast0, i8* %x, i32 8, i32 0, i1 false)
; CHECK: %[[xptr:.*]] = bitcast i8* %x to <2 x float>*
; CHECK-NEXT: %[[expand_x:.*]] = shufflevector <2 x float> %[[x]], <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
; CHECK-NEXT: select <4 x i1> <i1 true, i1 true, i1 false, i1 false>
- %a.gep1 = getelementptr <4 x float>* %a, i32 0, i32 1
+ %a.gep1 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 1
%a.cast1 = bitcast float* %a.gep1 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast1, i8* %y, i32 8, i32 0, i1 false)
; CHECK-NEXT: %[[yptr:.*]] = bitcast i8* %y to <2 x float>*
; CHECK-NEXT: %[[expand_y:.*]] = shufflevector <2 x float> %[[y]], <2 x float> undef, <4 x i32> <i32 undef, i32 0, i32 1, i32 undef>
; CHECK-NEXT: select <4 x i1> <i1 false, i1 true, i1 true, i1 false>
- %a.gep2 = getelementptr <4 x float>* %a, i32 0, i32 2
+ %a.gep2 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 2
%a.cast2 = bitcast float* %a.gep2 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast2, i8* %z, i32 8, i32 0, i1 false)
; CHECK-NEXT: %[[zptr:.*]] = bitcast i8* %z to <2 x float>*
; CHECK-NEXT: %[[expand_z:.*]] = shufflevector <2 x float> %[[z]], <2 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 1>
; CHECK-NEXT: select <4 x i1> <i1 false, i1 false, i1 true, i1 true>
- %a.gep3 = getelementptr <4 x float>* %a, i32 0, i32 3
+ %a.gep3 = getelementptr <4 x float>, <4 x float>* %a, i32 0, i32 3
%a.cast3 = bitcast float* %a.gep3 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %a.cast3, i8* %f, i32 4, i32 0, i1 false)
; CHECK-NEXT: %[[fptr:.*]] = bitcast i8* %f to float*
%a.cast = bitcast [2 x i64]* %a to [2 x <2 x i32>]*
; CHECK-NOT: alloca
- %a.x = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 0
+ %a.x = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 0
store <2 x i32> %x, <2 x i32>* %a.x
- %a.y = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 1
+ %a.y = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1
store <2 x i32> %y, <2 x i32>* %a.y
; CHECK-NOT: store
- %a.tmp1 = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 0, i64 1
+ %a.tmp1 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 0, i64 1
%tmp1 = load i32* %a.tmp1
- %a.tmp2 = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 1
+ %a.tmp2 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 1
%tmp2 = load i32* %a.tmp2
- %a.tmp3 = getelementptr inbounds [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 0
+ %a.tmp3 = getelementptr inbounds [2 x <2 x i32>], [2 x <2 x i32>]* %a.cast, i64 0, i64 1, i64 0
%tmp3 = load i32* %a.tmp3
; CHECK-NOT: load
; CHECK: extractelement <2 x i32> %x, i32 1
; CHECK-NOT: store
%tmp1 = load i32* %a.i32
- %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+ %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
%tmp2 = load i32* %a.tmp2
; CHECK-NOT: load
; CHECK: extractelement <2 x i32> %x, i32 0
; CHECK-NOT: alloca
store i32 %x, i32* %a.i32
- %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+ %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
store i32 %y, i32* %a.tmp2
; CHECK-NOT: store
; CHECK: %[[V1:.*]] = insertelement <2 x i32> undef, i32 %x, i32 0
; CHECK-NOT: alloca
store <4 x i16> %x, <4 x i16>* %a.vec2
- %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+ %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
store i32 %y, i32* %a.tmp2
; CHECK-NOT: store
; CHECK: %[[V1:.*]] = bitcast <4 x i16> %x to <2 x i32>
; CHECK-NOT: alloca
store <4 x i16> %x, <4 x i16>* %a.vec2
- %a.tmp2 = getelementptr inbounds i32* %a.i32, i64 1
+ %a.tmp2 = getelementptr inbounds i32, i32* %a.i32, i64 1
store i32 %y, i32* %a.tmp2
; CHECK-NOT: store
; CHECK: %[[V1:.*]] = bitcast i32 %y to <2 x i16>
; CHECK: edge entry -> if.end probability is 1 / 2 = 50%
if.end: ; preds = %entry
- %arrayidx = getelementptr inbounds i8** %argv, i64 1, !dbg !30
+ %arrayidx = getelementptr inbounds i8*, i8** %argv, i64 1, !dbg !30
%0 = load i8** %arrayidx, align 8, !dbg !30, !tbaa !31
%call = tail call i32 @atoi(i8* %0) #4, !dbg !30
tail call void @llvm.dbg.value(metadata i32 %call, i64 0, metadata !17, metadata !{}), !dbg !30
define i32 @test() nounwind {
%X = alloca [4 x i32] ; <[4 x i32]*> [#uses=1]
- %Y = getelementptr [4 x i32]* %X, i64 0, i64 0 ; <i32*> [#uses=1]
+ %Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 0 ; <i32*> [#uses=1]
; Must preserve arrayness!
- %Z = getelementptr i32* %Y, i64 1 ; <i32*> [#uses=1]
+ %Z = getelementptr i32, i32* %Y, i64 1 ; <i32*> [#uses=1]
%A = load i32* %Z ; <i32> [#uses=1]
ret i32 %A
}
define i8* @test() {
%A = alloca [30 x i8] ; <[30 x i8]*> [#uses=1]
- %B = getelementptr [30 x i8]* %A, i64 0, i64 0 ; <i8*> [#uses=2]
- %C = getelementptr i8* %B, i64 1 ; <i8*> [#uses=1]
+ %B = getelementptr [30 x i8], [30 x i8]* %A, i64 0, i64 0 ; <i8*> [#uses=2]
+ %C = getelementptr i8, i8* %B, i64 1 ; <i8*> [#uses=1]
store i8 0, i8* %B
ret i8* %C
}
define i32 @main() {
%d = alloca %T ; <{ [80 x i8], i32, i32 }*> [#uses=2]
- %tmp.0 = getelementptr %T* %d, i64 0, i32 2 ; <i32*> [#uses=1]
+ %tmp.0 = getelementptr %T, %T* %d, i64 0, i32 2 ; <i32*> [#uses=1]
store i32 0, i32* %tmp.0
- %tmp.1 = getelementptr %T* %d, i64 0, i32 0, i64 0 ; <i8*> [#uses=1]
+ %tmp.1 = getelementptr %T, %T* %d, i64 0, i32 0, i64 0 ; <i8*> [#uses=1]
call void @.iter_2( i32 (i8*)* @.callback_1, i8* %tmp.1 )
ret i32 0
}
%vsiidx = alloca [2 x <4 x i32>], align 16 ; <[2 x <4 x i32>]*> [#uses=3]
%tmp = call <4 x i32> @llvm.x86.sse2.cvttps2dq( <4 x float> %v0 ) ; <<4 x i32>> [#uses=2]
%tmp.upgrd.1 = bitcast <4 x i32> %tmp to <2 x i64> ; <<2 x i64>> [#uses=0]
- %tmp.upgrd.2 = getelementptr [2 x <4 x i32>]* %vsiidx, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
+ %tmp.upgrd.2 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
store <4 x i32> %tmp, <4 x i32>* %tmp.upgrd.2
%tmp10 = call <4 x i32> @llvm.x86.sse2.cvttps2dq( <4 x float> %v1 ) ; <<4 x i32>> [#uses=2]
%tmp10.upgrd.3 = bitcast <4 x i32> %tmp10 to <2 x i64> ; <<2 x i64>> [#uses=0]
- %tmp14 = getelementptr [2 x <4 x i32>]* %vsiidx, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
+ %tmp14 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 1 ; <<4 x i32>*> [#uses=1]
store <4 x i32> %tmp10, <4 x i32>* %tmp14
- %tmp15 = getelementptr [2 x <4 x i32>]* %vsiidx, i32 0, i32 0, i32 4 ; <i32*> [#uses=1]
+ %tmp15 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %vsiidx, i32 0, i32 0, i32 4 ; <i32*> [#uses=1]
%tmp.upgrd.4 = load i32* %tmp15 ; <i32> [#uses=1]
ret i32 %tmp.upgrd.4
}
define void @_Z4testP9UnionTypePS0_(%struct.UnionType* %p, %struct.UnionType** %pointerToUnion) {
entry:
%tmp = alloca %struct.UnionType, align 8
- %tmp2 = getelementptr %struct.UnionType* %tmp, i32 0, i32 0, i32 0
- %tmp13 = getelementptr %struct.UnionType* %p, i32 0, i32 0, i32 0
+ %tmp2 = getelementptr %struct.UnionType, %struct.UnionType* %tmp, i32 0, i32 0, i32 0
+ %tmp13 = getelementptr %struct.UnionType, %struct.UnionType* %p, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp2, i8* %tmp13, i32 8, i32 0, i1 false)
%tmp5 = load %struct.UnionType** %pointerToUnion
- %tmp56 = getelementptr %struct.UnionType* %tmp5, i32 0, i32 0, i32 0
- %tmp7 = getelementptr %struct.UnionType* %tmp, i32 0, i32 0, i32 0
+ %tmp56 = getelementptr %struct.UnionType, %struct.UnionType* %tmp5, i32 0, i32 0, i32 0
+ %tmp7 = getelementptr %struct.UnionType, %struct.UnionType* %tmp, i32 0, i32 0, i32 0
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp56, i8* %tmp7, i32 8, i32 0, i1 false)
ret void
}
%tmp = alloca i32 ; <i32*> [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i16 %b, i16* %b_addr
- %tmp1 = getelementptr %struct.S* %s, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp1 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <i16*> [#uses=1]
%tmp2 = load i16* %b_addr, align 2 ; <i16> [#uses=1]
store i16 %tmp2, i16* %tmp1, align 2
- %tmp3 = getelementptr %struct.S* %s, i32 0, i32 0 ; <i16*> [#uses=1]
+ %tmp3 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <i16*> [#uses=1]
%tmp34 = bitcast i16* %tmp3 to [2 x i1]* ; <[2 x i1]*> [#uses=1]
- %tmp5 = getelementptr [2 x i1]* %tmp34, i32 0, i32 1 ; <i1*> [#uses=1]
+ %tmp5 = getelementptr [2 x i1], [2 x i1]* %tmp34, i32 0, i32 1 ; <i1*> [#uses=1]
%tmp6 = load i1* %tmp5, align 1 ; <i1> [#uses=1]
%tmp67 = zext i1 %tmp6 to i32 ; <i32> [#uses=1]
store i32 %tmp67, i32* %tmp, align 4
%T3 = bitcast [1 x %struct.T]* %s to i32*
store i32 -61184, i32* %T3
- %tmp16 = getelementptr [1 x %struct.T]* %s, i32 0, i32 0 ; <%struct.T*> [#uses=1]
- %tmp17 = getelementptr %struct.T* %tmp16, i32 0, i32 1 ; <[3 x i8]*> [#uses=1]
+ %tmp16 = getelementptr [1 x %struct.T], [1 x %struct.T]* %s, i32 0, i32 0 ; <%struct.T*> [#uses=1]
+ %tmp17 = getelementptr %struct.T, %struct.T* %tmp16, i32 0, i32 1 ; <[3 x i8]*> [#uses=1]
%tmp1718 = bitcast [3 x i8]* %tmp17 to i32* ; <i32*> [#uses=1]
%tmp19 = load i32* %tmp1718, align 4 ; <i32> [#uses=1]
%mask = and i32 %tmp19, 16777215 ; <i32> [#uses=2]
define i32 @main(i32 %argc, i8** %argv) {
entry:
%c = alloca %struct..0anon ; <%struct..0anon*> [#uses=2]
- %tmp2 = getelementptr %struct..0anon* %c, i32 0, i32 0 ; <<1 x i64>*> [#uses=1]
+ %tmp2 = getelementptr %struct..0anon, %struct..0anon* %c, i32 0, i32 0 ; <<1 x i64>*> [#uses=1]
store <1 x i64> zeroinitializer, <1 x i64>* %tmp2, align 8
- %tmp7 = getelementptr %struct..0anon* %c, i32 0, i32 0 ; <<1 x i64>*> [#uses=1]
+ %tmp7 = getelementptr %struct..0anon, %struct..0anon* %c, i32 0, i32 0 ; <<1 x i64>*> [#uses=1]
%tmp78 = bitcast <1 x i64>* %tmp7 to [2 x i32]* ; <[2 x i32]*> [#uses=1]
- %tmp9 = getelementptr [2 x i32]* %tmp78, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp9 = getelementptr [2 x i32], [2 x i32]* %tmp78, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp10 = load i32* %tmp9, align 4 ; <i32> [#uses=0]
unreachable
}
; And store it
store { i32, i32 } %res2, { i32, i32 }* %target
; Actually use %target, so it doesn't get removed altogether
- %ptr = getelementptr { i32, i32 }* %target, i32 0, i32 0
+ %ptr = getelementptr { i32, i32 }, { i32, i32 }* %target, i32 0, i32 0
%val = load i32* %ptr
ret i32 %val
}
; And store it
store [ 2 x i32 ] %res2, [ 2 x i32 ]* %target
; Actually use %target, so it doesn't get removed altogether
- %ptr = getelementptr [ 2 x i32 ]* %target, i32 0, i32 0
+ %ptr = getelementptr [ 2 x i32 ], [ 2 x i32 ]* %target, i32 0, i32 0
%val = load i32* %ptr
ret i32 %val
}
%r1 = bitcast %struct.x* %r to i8*
%s2 = bitcast %struct.x* %s to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %r1, i8* %s2, i32 12, i32 8, i1 false)
- %1 = getelementptr %struct.x* %r, i32 0, i32 0, i32 1
+ %1 = getelementptr %struct.x, %struct.x* %r, i32 0, i32 0, i32 1
%2 = load i32* %1, align 4
ret i32 %2
}
define void @main(%struct.two* %D, i16 %V) {
entry:
%S = alloca %struct.two
- %S.2 = getelementptr %struct.two* %S, i32 0, i32 1
+ %S.2 = getelementptr %struct.two, %struct.two* %S, i32 0, i32 1
store i16 %V, i16* %S.2
; This gep is effectively a bitcast to i8*, but is sometimes generated
; because the type of the first element in %struct.two is i8.
- %tmpS = getelementptr %struct.two* %S, i32 0, i32 0, i32 0
+ %tmpS = getelementptr %struct.two, %struct.two* %S, i32 0, i32 0, i32 0
%tmpD = bitcast %struct.two* %D to i8*
call void @llvm.memmove.p0i8.p0i8.i32(i8* %tmpD, i8* %tmpS, i32 4, i32 1, i1 false)
ret void
define i32 @f(i32 %x, i32 %y) {
%instance = alloca %pair
- %first = getelementptr %pair* %instance, i32 0, i32 0
+ %first = getelementptr %pair, %pair* %instance, i32 0, i32 0
%cast = bitcast [1 x i32]* %first to i32*
store i32 %x, i32* %cast
- %second = getelementptr %pair* %instance, i32 0, i32 1
+ %second = getelementptr %pair, %pair* %instance, i32 0, i32 1
store i32 %y, i32* %second
%v = load i32* %cast
ret i32 %v
define void @f(i8* %p) nounwind {
entry:
%s = alloca %struct.st, align 4 ; <%struct.st*> [#uses=2]
- %0 = getelementptr %struct.st* %s, i32 0, i32 0 ; <i16*> [#uses=1]
+ %0 = getelementptr %struct.st, %struct.st* %s, i32 0, i32 0 ; <i16*> [#uses=1]
store i16 1, i16* %0, align 4
%s1 = bitcast %struct.st* %s to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %p, i8* %s1, i32 2, i32 1, i1 false)
%tmp2 = alloca %struct.int16x8x2_t
%0 = alloca %struct.int16x8x2_t
%"alloca point" = bitcast i32 0 to i32
- %1 = getelementptr inbounds %struct.int16x8_t* %tmp_addr, i32 0, i32 0
+ %1 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
store <8 x i16> %tmp.0, <8 x i16>* %1
store %struct.int16x8x2_t* %dst, %struct.int16x8x2_t** %dst_addr
- %2 = getelementptr inbounds %struct.int16x8_t* %__ax, i32 0, i32 0
- %3 = getelementptr inbounds %struct.int16x8_t* %tmp_addr, i32 0, i32 0
+ %2 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__ax, i32 0, i32 0
+ %3 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
%4 = load <8 x i16>* %3, align 16
store <8 x i16> %4, <8 x i16>* %2, align 16
- %5 = getelementptr inbounds %struct.int16x8_t* %__bx, i32 0, i32 0
- %6 = getelementptr inbounds %struct.int16x8_t* %tmp_addr, i32 0, i32 0
+ %5 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__bx, i32 0, i32 0
+ %6 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %tmp_addr, i32 0, i32 0
%7 = load <8 x i16>* %6, align 16
store <8 x i16> %7, <8 x i16>* %5, align 16
- %8 = getelementptr inbounds %struct.int16x8_t* %__ax, i32 0, i32 0
+ %8 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__ax, i32 0, i32 0
%9 = load <8 x i16>* %8, align 16
- %10 = getelementptr inbounds %struct.int16x8_t* %__bx, i32 0, i32 0
+ %10 = getelementptr inbounds %struct.int16x8_t, %struct.int16x8_t* %__bx, i32 0, i32 0
%11 = load <8 x i16>* %10, align 16
- %12 = getelementptr inbounds %union..0anon* %__rv, i32 0, i32 0
+ %12 = getelementptr inbounds %union..0anon, %union..0anon* %__rv, i32 0, i32 0
%13 = bitcast %struct.int16x8x2_t* %12 to %struct.__neon_int16x8x2_t*
%14 = shufflevector <8 x i16> %9, <8 x i16> %11, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
- %15 = getelementptr inbounds %struct.__neon_int16x8x2_t* %13, i32 0, i32 0
+ %15 = getelementptr inbounds %struct.__neon_int16x8x2_t, %struct.__neon_int16x8x2_t* %13, i32 0, i32 0
store <8 x i16> %14, <8 x i16>* %15
%16 = shufflevector <8 x i16> %9, <8 x i16> %11, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
- %17 = getelementptr inbounds %struct.__neon_int16x8x2_t* %13, i32 0, i32 1
+ %17 = getelementptr inbounds %struct.__neon_int16x8x2_t, %struct.__neon_int16x8x2_t* %13, i32 0, i32 1
store <8 x i16> %16, <8 x i16>* %17
- %18 = getelementptr inbounds %union..0anon* %__rv, i32 0, i32 0
+ %18 = getelementptr inbounds %union..0anon, %union..0anon* %__rv, i32 0, i32 0
%19 = bitcast %struct.int16x8x2_t* %0 to i8*
%20 = bitcast %struct.int16x8x2_t* %18 to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %19, i8* %20, i32 32, i32 16, i1 false)
%tmp = bitcast [4 x i32]* %l_10 to i8*
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* bitcast ([4 x i32]* @func_1.l_10 to i8*), i64 16, i32 16, i1 false)
; CHECK: call void @llvm.memcpy
- %arrayidx = getelementptr inbounds [4 x i32]* %l_10, i64 0, i64 0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %l_10, i64 0, i64 0
%call = call i32* @noop(i32* %arrayidx)
store i32 0, i32* %call
ret i32 0
entry:
%ref.tmp2 = alloca %0, align 16
%tmpcast = bitcast %0* %ref.tmp2 to %struct.Point_3*
- %0 = getelementptr %0* %ref.tmp2, i64 0, i32 0
+ %0 = getelementptr %0, %0* %ref.tmp2, i64 0, i32 0
store <2 x float> zeroinitializer, <2 x float>* %0, align 16
- %1 = getelementptr inbounds %struct.Point_3* %tmpcast, i64 0, i32 0
- %base.i.i.i = getelementptr inbounds %struct.PointC3* %1, i64 0, i32 0
- %arrayidx.i.i.i.i = getelementptr inbounds %struct.array* %base.i.i.i, i64 0, i32 0, i64 0
+ %1 = getelementptr inbounds %struct.Point_3, %struct.Point_3* %tmpcast, i64 0, i32 0
+ %base.i.i.i = getelementptr inbounds %struct.PointC3, %struct.PointC3* %1, i64 0, i32 0
+ %arrayidx.i.i.i.i = getelementptr inbounds %struct.array, %struct.array* %base.i.i.i, i64 0, i32 0, i64 0
%tmp5.i.i = load float* %arrayidx.i.i.i.i, align 4
ret void
}
entry:
%ref.tmp2 = alloca {<2 x float>, float}, align 16
%tmpcast = bitcast {<2 x float>, float}* %ref.tmp2 to float*
- %0 = getelementptr {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
+ %0 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
store <2 x float> zeroinitializer, <2 x float>* %0, align 16
%tmp5.i.i = load float* %tmpcast, align 4
ret void
entry:
%ref.tmp2 = alloca {<2 x float>, float}, align 16
%tmpcast = bitcast {<2 x float>, float}* %ref.tmp2 to float*
- %tmpcast2 = getelementptr {<2 x float>, float}* %ref.tmp2, i64 0, i32 1
- %0 = getelementptr {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
+ %tmpcast2 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 1
+ %0 = getelementptr {<2 x float>, float}, {<2 x float>, float}* %ref.tmp2, i64 0, i32 0
store <2 x float> zeroinitializer, <2 x float>* %0, align 16
store float 1.0, float* %tmpcast2, align 4
%r1 = load float* %tmpcast, align 4
%ai = alloca { <2 x float>, <2 x float> }, align 8
store { <2 x float>, <2 x float> } {<2 x float> <float 0.0, float 1.0>, <2 x float> <float 2.0, float 3.0>}, { <2 x float>, <2 x float> }* %ai, align 8
%tmpcast = bitcast { <2 x float>, <2 x float> }* %ai to [4 x float]*
- %arrayidx = getelementptr inbounds [4 x float]* %tmpcast, i64 0, i64 3
+ %arrayidx = getelementptr inbounds [4 x float], [4 x float]* %tmpcast, i64 0, i64 3
%f = load float* %arrayidx, align 4
ret float %f
}
%p = bitcast { <4 x float> }* %a to i8*
call void @llvm.memset.p0i8.i32(i8* %p, i8 0, i32 16, i32 16, i1 false)
%q = bitcast { <4 x float> }* %a to [2 x <2 x float>]*
- %arrayidx = getelementptr inbounds [2 x <2 x float>]* %q, i32 0, i32 0
+ %arrayidx = getelementptr inbounds [2 x <2 x float>], [2 x <2 x float>]* %q, i32 0, i32 0
store <2 x float> undef, <2 x float>* %arrayidx, align 8
ret void
}
entry:
%u = alloca %union.anon, align 16
%u164 = bitcast %union.anon* %u to [4 x i32]*
- %arrayidx165 = getelementptr inbounds [4 x i32]* %u164, i32 0, i32 0
+ %arrayidx165 = getelementptr inbounds [4 x i32], [4 x i32]* %u164, i32 0, i32 0
store i32 undef, i32* %arrayidx165, align 4
%v186 = bitcast %union.anon* %u to <4 x float>*
store <4 x float> undef, <4 x float>* %v186, align 16
entry:
%retval = alloca %struct.S, align 8
%ret = alloca %struct.S, align 8
- %b = getelementptr inbounds %struct.S* %ret, i32 0, i32 1
+ %b = getelementptr inbounds %struct.S, %struct.S* %ret, i32 0, i32 1
store double 1.000000e+00, double* %b, align 8
%0 = bitcast %struct.S* %retval to i8*
%1 = bitcast %struct.S* %ret to i8*
%X_addr = alloca i64 ; <i64*> [#uses=2]
store i64 %X, i64* %X_addr
%tmp.0 = bitcast i64* %X_addr to i32* ; <i32*> [#uses=1]
- %tmp.1 = getelementptr i32* %tmp.0, i32 1 ; <i32*> [#uses=1]
+ %tmp.1 = getelementptr i32, i32* %tmp.0, i32 1 ; <i32*> [#uses=1]
%tmp.2 = bitcast i32* %tmp.1 to i8* ; <i8*> [#uses=1]
- %tmp.3 = getelementptr i8* %tmp.2, i32 3 ; <i8*> [#uses=1]
+ %tmp.3 = getelementptr i8, i8* %tmp.2, i32 3 ; <i8*> [#uses=1]
%tmp.2.upgrd.1 = load i8* %tmp.3 ; <i8> [#uses=1]
ret i8 %tmp.2.upgrd.1
}
define i16 @crafty(i64 %X) {
%a = alloca { i64 } ; <{ i64 }*> [#uses=2]
- %tmp.0 = getelementptr { i64 }* %a, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp.0 = getelementptr { i64 }, { i64 }* %a, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %X, i64* %tmp.0
%tmp.3 = bitcast { i64 }* %a to [4 x i16]* ; <[4 x i16]*> [#uses=2]
- %tmp.4 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 3 ; <i16*> [#uses=1]
+ %tmp.4 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 3 ; <i16*> [#uses=1]
%tmp.5 = load i16* %tmp.4 ; <i16> [#uses=1]
- %tmp.8 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 2 ; <i16*> [#uses=1]
+ %tmp.8 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 2 ; <i16*> [#uses=1]
%tmp.9 = load i16* %tmp.8 ; <i16> [#uses=1]
%tmp.10 = or i16 %tmp.9, %tmp.5 ; <i16> [#uses=1]
ret i16 %tmp.10
%a = alloca i64 ; <i64*> [#uses=2]
store i64 %X, i64* %a
%tmp.3 = bitcast i64* %a to [4 x i16]* ; <[4 x i16]*> [#uses=2]
- %tmp.4 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 3 ; <i16*> [#uses=1]
+ %tmp.4 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 3 ; <i16*> [#uses=1]
%tmp.5 = load i16* %tmp.4 ; <i16> [#uses=1]
- %tmp.8 = getelementptr [4 x i16]* %tmp.3, i32 0, i32 2 ; <i16*> [#uses=1]
+ %tmp.8 = getelementptr [4 x i16], [4 x i16]* %tmp.3, i32 0, i32 2 ; <i16*> [#uses=1]
%tmp.9 = load i16* %tmp.8 ; <i16> [#uses=1]
%tmp.10 = or i16 %tmp.9, %tmp.5 ; <i16> [#uses=1]
ret i16 %tmp.10
define void @Test(%struct.anon addrspace(2)* %pPtr) nounwind {
entry:
%s = alloca %struct.anon, align 4 ; <%struct.anon*> [#uses=3]
- %arrayidx = getelementptr inbounds %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
+ %arrayidx = getelementptr inbounds %struct.anon, %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
%tmp1 = bitcast %struct.anon* %s to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.anon addrspace(2)* %arrayidx to i8 addrspace(2)* ; <i8 addrspace(2)*> [#uses=1]
call void @llvm.memcpy.p0i8.p2i8.i64(i8* %tmp1, i8 addrspace(2)* %tmp2, i64 4, i32 4, i1 false)
- %tmp3 = getelementptr inbounds %struct.anon* %s, i32 0, i32 0 ; <[1 x float]*> [#uses=1]
- %arrayidx4 = getelementptr inbounds [1 x float]* %tmp3, i32 0, i64 0 ; <float*> [#uses=2]
+ %tmp3 = getelementptr inbounds %struct.anon, %struct.anon* %s, i32 0, i32 0 ; <[1 x float]*> [#uses=1]
+ %arrayidx4 = getelementptr inbounds [1 x float], [1 x float]* %tmp3, i32 0, i64 0 ; <float*> [#uses=2]
%tmp5 = load float* %arrayidx4 ; <float> [#uses=1]
%sub = fsub float %tmp5, 5.000000e+00 ; <float> [#uses=1]
store float %sub, float* %arrayidx4
- %arrayidx7 = getelementptr inbounds %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
+ %arrayidx7 = getelementptr inbounds %struct.anon, %struct.anon addrspace(2)* %pPtr, i64 0 ; <%struct.anon addrspace(2)*> [#uses=1]
%tmp8 = bitcast %struct.anon addrspace(2)* %arrayidx7 to i8 addrspace(2)* ; <i8 addrspace(2)*> [#uses=1]
%tmp9 = bitcast %struct.anon* %s to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p2i8.p0i8.i64(i8 addrspace(2)* %tmp8, i8* %tmp9, i64 4, i32 4, i1 false)
define i32 @test() {
%X = alloca [4 x i32] ; <[4 x i32]*> [#uses=1]
- %Y = getelementptr [4 x i32]* %X, i64 0, i64 0 ; <i32*> [#uses=2]
+ %Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 0 ; <i32*> [#uses=2]
store i32 0, i32* %Y
%Z = load i32* %Y ; <i32> [#uses=1]
ret i32 %Z
; CHECK-LABEL: @test1(
; CHECK-NOT: = alloca
%X = alloca [4 x i32]
- %Y = getelementptr [4 x i32]* %X, i64 0, i64 6 ; <i32*> [#uses=2]
+ %Y = getelementptr [4 x i32], [4 x i32]* %X, i64 0, i64 6 ; <i32*> [#uses=2]
store i32 0, i32* %Y
%Z = load i32* %Y ; <i32> [#uses=1]
ret i32 %Z
; CHECK-NOT: = alloca
; CHECK: store i64
%var_1 = alloca %padded, align 8 ; <%padded*> [#uses=3]
- %0 = getelementptr inbounds %padded* %var_1, i32 0, i32 0 ; <%base*> [#uses=2]
+ %0 = getelementptr inbounds %padded, %padded* %var_1, i32 0, i32 0 ; <%base*> [#uses=2]
- %p2 = getelementptr inbounds %base* %0, i32 0, i32 1, i32 0 ; <i8*> [#uses=1]
+ %p2 = getelementptr inbounds %base, %base* %0, i32 0, i32 1, i32 0 ; <i8*> [#uses=1]
store i8 72, i8* %p2, align 1
; 72 -> a[0].
define i32 @test1() {
%X = alloca { i32, float } ; <{ i32, float }*> [#uses=1]
- %Y = getelementptr { i32, float }* %X, i64 0, i32 0 ; <i32*> [#uses=2]
+ %Y = getelementptr { i32, float }, { i32, float }* %X, i64 0, i32 0 ; <i32*> [#uses=2]
store i32 0, i32* %Y
%Z = load i32* %Y ; <i32> [#uses=1]
ret i32 %Z
%ALL = alloca %t, align 8
%tmp59172 = bitcast %t* %ALL to i64*
store i64 %A, i64* %tmp59172, align 8
- %C = getelementptr %t* %ALL, i32 0, i32 0, i32 1
+ %C = getelementptr %t, %t* %ALL, i32 0, i32 0, i32 1
%D = bitcast i16* %C to i32*
%E = load i32* %D, align 4
%F = bitcast %t* %ALL to i8*
%Y = bitcast {{i32,i32}}* %X to i64*
store i64 %V, i64* %Y
- %A = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 0
- %B = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 1
+ %A = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 0
+ %B = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 1
%a = load i32* %A
%b = load i32* %B
%c = add i32 %a, %b
%Y = bitcast {[4 x float]}* %X to i128*
store i128 %V, i128* %Y
- %A = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 0
- %B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
+ %A = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 0
+ %B = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 3
%a = load float* %A
%b = load float* %B
%c = fadd float %a, %b
; CHECK-NOT: alloca
%X = alloca {{i32, i32}}
- %A = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 0
- %B = getelementptr {{i32,i32}}* %X, i32 0, i32 0, i32 1
+ %A = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 0
+ %B = getelementptr {{i32,i32}}, {{i32,i32}}* %X, i32 0, i32 0, i32 1
store i32 %a, i32* %A
store i32 %b, i32* %B
; CHECK: test4
; CHECK-NOT: alloca
%X = alloca {[4 x float]}
- %A = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 0
- %B = getelementptr {[4 x float]}* %X, i32 0, i32 0, i32 3
+ %A = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 0
+ %B = getelementptr {[4 x float]}, {[4 x float]}* %X, i32 0, i32 0, i32 3
store float %a, float* %A
store float %b, float* %B
define void @test2() {
%E = alloca { { i32, float, double, i64 }, { i32, float, double, i64 } } ; <{ { i32, float, double, i64 }, { i32, float, double, i64 } }*> [#uses=1]
- %tmp.151 = getelementptr { { i32, float, double, i64 }, { i32, float, double, i64 } }* %E, i64 0, i32 1, i32 3 ; <i64*> [#uses=0]
+ %tmp.151 = getelementptr { { i32, float, double, i64 }, { i32, float, double, i64 } }, { { i32, float, double, i64 }, { i32, float, double, i64 } }* %E, i64 0, i32 1, i32 3 ; <i64*> [#uses=0]
ret void
}
define i32 @test3() {
%X = alloca { [4 x i32] } ; <{ [4 x i32] }*> [#uses=1]
- %Y = getelementptr { [4 x i32] }* %X, i64 0, i32 0, i64 2 ; <i32*> [#uses=2]
+ %Y = getelementptr { [4 x i32] }, { [4 x i32] }* %X, i64 0, i32 0, i64 2 ; <i32*> [#uses=2]
store i32 4, i32* %Y
%Z = load i32* %Y ; <i32> [#uses=1]
ret i32 %Z
%tmp.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
%tmp.i.upgrd.1 = bitcast %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp.i to %"struct.__gnu_cxx::bitmap_allocator<char>"* ; <%"struct.__gnu_cxx::bitmap_allocator<char>"*> [#uses=0]
%tmp1.i = load %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"** %this_addr.i ; <%"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"*> [#uses=1]
- %tmp.i.upgrd.2 = getelementptr %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp1.i, i32 0, i32 0 ; <%"struct.__gnu_cxx::bitmap_allocator<char>::_Alloc_block"**> [#uses=0]
+ %tmp.i.upgrd.2 = getelementptr %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>", %"struct.__gnu_cxx::balloc::_Inclusive_between<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*>"* %tmp1.i, i32 0, i32 0 ; <%"struct.__gnu_cxx::bitmap_allocator<char>::_Alloc_block"**> [#uses=0]
unreachable
}
cond_next34: ; preds = %cond_next
%i.2.reload22 = load i32* null ; <i32> [#uses=1]
- %tmp51 = getelementptr %struct.aal_spanbucket_t* %SB, i32 0, i32 2, i32 0, i32 0, i32 %i.2.reload22, i32 1
+ %tmp51 = getelementptr %struct.aal_spanbucket_t, %struct.aal_spanbucket_t* %SB, i32 0, i32 2, i32 0, i32 0, i32 %i.2.reload22, i32 1
; <i16*> [#uses=0]
ret void
define void @test8() {
entry:
%v = alloca %struct.c37304a__vrec
- %0 = getelementptr %struct.c37304a__vrec* %v, i32 0, i32 0
+ %0 = getelementptr %struct.c37304a__vrec, %struct.c37304a__vrec* %v, i32 0, i32 0
store i8 8, i8* %0, align 1
unreachable
}
define i32 @test9() {
entry:
%.compoundliteral = alloca %0
- %tmp228 = getelementptr %0* %.compoundliteral, i32 0, i32 7
+ %tmp228 = getelementptr %0, %0* %.compoundliteral, i32 0, i32 7
%tmp229 = bitcast [0 x i16]* %tmp228 to i8*
call void @llvm.memset.p0i8.i64(i8* %tmp229, i8 0, i64 0, i32 2, i1 false)
unreachable
define void @test10() {
entry:
%w = alloca %wrapper, align 8 ; <%wrapper*> [#uses=1]
- %0 = getelementptr %wrapper* %w, i64 0, i32 0 ; <i1*>
+ %0 = getelementptr %wrapper, %wrapper* %w, i64 0, i32 0 ; <i1*>
store i1 true, i1* %0
ret void
}
%a = alloca %struct.singlebool, align 1 ; <%struct.singlebool*> [#uses=2]
%storetmp.i = bitcast %struct.singlebool* %a to i1* ; <i1*> [#uses=1]
store i1 true, i1* %storetmp.i
- %tmp = getelementptr %struct.singlebool* %a, i64 0, i32 0 ; <i8*> [#uses=1]
+ %tmp = getelementptr %struct.singlebool, %struct.singlebool* %a, i64 0, i32 0 ; <i8*> [#uses=1]
%tmp1 = load i8* %tmp ; <i8> [#uses=1]
ret i8 %tmp1
}
bb4.i:
%malloccall = tail call i8* @malloc(i32 0)
%0 = bitcast i8* %malloccall to [0 x %struct.Item]*
- %.sub.i.c.i = getelementptr [0 x %struct.Item]* %0, i32 0, i32 0 ; <%struct.Item*> [#uses=0]
+ %.sub.i.c.i = getelementptr [0 x %struct.Item], [0 x %struct.Item]* %0, i32 0, i32 0 ; <%struct.Item*> [#uses=0]
unreachable
}
declare noalias i8* @malloc(i32)
entry:
%alloc.0.0 = alloca <4 x float>, align 16
%bitcast = bitcast <4 x float>* %alloc.0.0 to [4 x float]*
- %idx3 = getelementptr inbounds [4 x float]* %bitcast, i32 0, i32 3
+ %idx3 = getelementptr inbounds [4 x float], [4 x float]* %bitcast, i32 0, i32 3
store float 0.000000e+00, float* %idx3, align 4
br label %for.body10
for.body10: ; preds = %for.body10, %entry
%loopidx = phi i32 [ 0, %entry ], [ undef, %for.body10 ]
- %unusedidx = getelementptr inbounds <4 x float>* %alloc.0.0, i32 0, i32 %loopidx
+ %unusedidx = getelementptr inbounds <4 x float>, <4 x float>* %alloc.0.0, i32 0, i32 %loopidx
br i1 undef, label %for.end, label %for.body10
for.end: ; preds = %for.body10
br label %for.cond
for.end: ; preds = %for.cond
- %x = getelementptr inbounds %struct.Vector4* %vector, i32 0, i32 0
+ %x = getelementptr inbounds %struct.Vector4, %struct.Vector4* %vector, i32 0, i32 0
%tmp5 = load float* %x, align 16
%conv = fpext float %tmp5 to double
%call = call i32 (...)* @printf(double %conv) nounwind
define void @test1() {
; CHECK-LABEL: @test1(
%A = alloca %t1
- %A1 = getelementptr %t1* %A, i32 0, i32 0
- %A2 = getelementptr %t1* %A, i32 0, i32 1
- %A3 = getelementptr %t1* %A, i32 0, i32 2
+ %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+ %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+ %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
%B = bitcast i32* %A1 to i8*
store i32 0, i32* %A1
call void @llvm.lifetime.start(i64 -1, i8* %B)
define void @test2() {
; CHECK-LABEL: @test2(
%A = alloca %t1
- %A1 = getelementptr %t1* %A, i32 0, i32 0
- %A2 = getelementptr %t1* %A, i32 0, i32 1
- %A3 = getelementptr %t1* %A, i32 0, i32 2
+ %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+ %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+ %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
%B = bitcast i32* %A2 to i8*
store i32 0, i32* %A2
call void @llvm.lifetime.start(i64 -1, i8* %B)
define void @test3() {
; CHECK-LABEL: @test3(
%A = alloca %t1
- %A1 = getelementptr %t1* %A, i32 0, i32 0
- %A2 = getelementptr %t1* %A, i32 0, i32 1
- %A3 = getelementptr %t1* %A, i32 0, i32 2
+ %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+ %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+ %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
%B = bitcast i32* %A2 to i8*
store i32 0, i32* %A2
call void @llvm.lifetime.start(i64 6, i8* %B)
define void @test4() {
; CHECK-LABEL: @test4(
%A = alloca %t1
- %A1 = getelementptr %t1* %A, i32 0, i32 0
- %A2 = getelementptr %t1* %A, i32 0, i32 1
- %A3 = getelementptr %t1* %A, i32 0, i32 2
+ %A1 = getelementptr %t1, %t1* %A, i32 0, i32 0
+ %A2 = getelementptr %t1, %t1* %A, i32 0, i32 1
+ %A3 = getelementptr %t1, %t1* %A, i32 0, i32 2
%B = bitcast i32* %A2 to i8*
store i32 0, i32* %A2
call void @llvm.lifetime.start(i64 1, i8* %B)
; CHECK: alloca{{.*}}i8
; CHECK: alloca{{.*}}i8
- %A21 = getelementptr %t2* %A, i32 0, i32 1, i32 0
- %A22 = getelementptr %t2* %A, i32 0, i32 1, i32 1
- %A23 = getelementptr %t2* %A, i32 0, i32 1, i32 2
- %A24 = getelementptr %t2* %A, i32 0, i32 1, i32 3
+ %A21 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 0
+ %A22 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 1
+ %A23 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 2
+ %A24 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 3
; CHECK-NOT: store i8 1
store i8 1, i8* %A21
store i8 2, i8* %A22
store i8 3, i8* %A23
store i8 4, i8* %A24
- %A1 = getelementptr %t2* %A, i32 0, i32 0
- %A2 = getelementptr %t2* %A, i32 0, i32 1, i32 1
- %A3 = getelementptr %t2* %A, i32 0, i32 2
+ %A1 = getelementptr %t2, %t2* %A, i32 0, i32 0
+ %A2 = getelementptr %t2, %t2* %A, i32 0, i32 1, i32 1
+ %A3 = getelementptr %t2, %t2* %A, i32 0, i32 2
store i8 0, i8* %A2
call void @llvm.lifetime.start(i64 5, i8* %A2)
; CHECK: llvm.lifetime{{.*}}i64 1
; CHECK: alloca i8
; CHECK: alloca i8
- %A11 = getelementptr %t3* %A, i32 0, i32 0, i32 0
- %A12 = getelementptr %t3* %A, i32 0, i32 0, i32 1
- %A13 = getelementptr %t3* %A, i32 0, i32 0, i32 2
- %A14 = getelementptr %t3* %A, i32 0, i32 0, i32 3
+ %A11 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 0
+ %A12 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 1
+ %A13 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 2
+ %A14 = getelementptr %t3, %t3* %A, i32 0, i32 0, i32 3
store i16 11, i16* %A11
store i16 12, i16* %A12
store i16 13, i16* %A13
; CHECK-NOT: store i16 13
; CHECK-NOT: store i16 14
- %A21 = getelementptr %t3* %A, i32 0, i32 1, i32 0
- %A22 = getelementptr %t3* %A, i32 0, i32 1, i32 1
- %A23 = getelementptr %t3* %A, i32 0, i32 1, i32 2
- %A24 = getelementptr %t3* %A, i32 0, i32 1, i32 3
+ %A21 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 0
+ %A22 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 1
+ %A23 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 2
+ %A24 = getelementptr %t3, %t3* %A, i32 0, i32 1, i32 3
store i8 21, i8* %A21
store i8 22, i8* %A22
store i8 23, i8* %A23
%V = load %struct.foo* %P
store %struct.foo %V, %struct.foo* %L
- %tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
ret i32 %tmp5
}
define %struct.foo @test2(i32 %A, i32 %B) {
entry:
%L = alloca %struct.foo, align 8 ; <%struct.foo*> [#uses=2]
- %L.0 = getelementptr %struct.foo* %L, i32 0, i32 0
+ %L.0 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0
store i32 %A, i32* %L.0
- %L.1 = getelementptr %struct.foo* %L, i32 0, i32 1
+ %L.1 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 1
store i32 %B, i32* %L.1
%V = load %struct.foo* %L
ret %struct.foo %V
define i32 @test1(%struct.foo* %P) {
entry:
%L = alloca %struct.foo, align 2 ; <%struct.foo*> [#uses=1]
- %L2 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i8*> [#uses=2]
- %tmp13 = getelementptr %struct.foo* %P, i32 0, i32 0 ; <i8*> [#uses=1]
+ %L2 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0 ; <i8*> [#uses=2]
+ %tmp13 = getelementptr %struct.foo, %struct.foo* %P, i32 0, i32 0 ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i32( i8* %L2, i8* %tmp13, i32 2, i32 1, i1 false)
%tmp5 = load i8* %L2 ; <i8> [#uses=1]
%tmp56 = sext i8 %tmp5 to i32 ; <i32> [#uses=1]
%L2 = bitcast %struct.foo* %L to i8* ; <i8*> [#uses=1]
%tmp13 = bitcast %struct.foo* %P to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %L2, i8* %tmp13, i32 8, i32 4, i1 false)
- %tmp4 = getelementptr %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr %struct.foo, %struct.foo* %L, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
ret i32 %tmp5
}
%L = alloca [4 x %struct.foo], align 16 ; <[4 x %struct.foo]*> [#uses=2]
%L12 = bitcast [4 x %struct.foo]* %L to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i32(i8* %L12, i8 0, i32 32, i32 16, i1 false)
- %tmp4 = getelementptr [4 x %struct.foo]* %L, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr [4 x %struct.foo], [4 x %struct.foo]* %L, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
%tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
ret i32 %tmp5
}
%B = alloca %struct.bar, align 16 ; <%struct.bar*> [#uses=4]
%B1 = bitcast %struct.bar* %B to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i32(i8* %B1, i8 1, i32 24, i32 16, i1 false)
- %tmp3 = getelementptr %struct.bar* %B, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp3
- %tmp4 = getelementptr %struct.bar* %B, i32 0, i32 2 ; <double*> [#uses=1]
+ %tmp4 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 2 ; <double*> [#uses=1]
store double 1.000000e+01, double* %tmp4
- %tmp6 = getelementptr %struct.bar* %B, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp6 = getelementptr %struct.bar, %struct.bar* %B, i32 0, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp7 = load i32* %tmp6 ; <i32> [#uses=1]
ret i32 %tmp7
}
define i16 @test4() nounwind {
entry:
%A = alloca %struct.f, align 8 ; <%struct.f*> [#uses=3]
- %0 = getelementptr %struct.f* %A, i32 0, i32 0 ; <i32*> [#uses=1]
+ %0 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %0, align 8
- %1 = getelementptr %struct.f* %A, i32 0, i32 1 ; <i32*> [#uses=1]
+ %1 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 1 ; <i32*> [#uses=1]
%2 = bitcast i32* %1 to i8* ; <i8*> [#uses=1]
call void @llvm.memset.p0i8.i32(i8* %2, i8 2, i32 12, i32 4, i1 false)
- %3 = getelementptr %struct.f* %A, i32 0, i32 2 ; <i32*> [#uses=1]
+ %3 = getelementptr %struct.f, %struct.f* %A, i32 0, i32 2 ; <i32*> [#uses=1]
%4 = load i32* %3, align 8 ; <i32> [#uses=1]
%retval12 = trunc i32 %4 to i16 ; <i16> [#uses=1]
ret i16 %retval12
store i32 0, i32* %retval
%0 = bitcast [1 x i8]* %buff to i8*
call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 1, i32 1, i1 false)
- %arraydecay = getelementptr inbounds [1 x i8]* %buff, i32 0, i32 0
+ %arraydecay = getelementptr inbounds [1 x i8], [1 x i8]* %buff, i32 0, i32 0
call void @llvm.memset.p0i8.i32(i8* %arraydecay, i8 -1, i32 -8, i32 1, i1 false) ; Negative 8!
ret i32 0
}
; CHECK-NOT: = i160
; CHECK: ret i32 undef
%A = alloca %nested
- %B = getelementptr %nested* %A, i32 0, i32 1, i32 0
- %C = getelementptr i32* %B, i32 2
+ %B = getelementptr %nested, %nested* %A, i32 0, i32 1, i32 0
+ %C = getelementptr i32, i32* %B, i32 2
%D = load i32* %C
ret i32 %D
}
; CHECK-LABEL: @test2(
; CHECK: i160
%A = alloca %nested
- %B = getelementptr %nested* %A, i32 0, i32 1, i32 0
- %C = getelementptr i32* %B, i32 4
+ %B = getelementptr %nested, %nested* %A, i32 0, i32 1, i32 0
+ %C = getelementptr i32, i32* %B, i32 4
%D = load i32* %C
ret i32 %D
}
; CHECK: ret i32 undef
%A = alloca %nested
%B = bitcast %nested* %A to i32*
- %C = getelementptr i32* %B, i32 2
+ %C = getelementptr i32, i32* %B, i32 2
%D = load i32* %C
ret i32 %D
}
; CHECK: i160
%A = alloca %nested
%B = bitcast %nested* %A to i32*
- %C = getelementptr i32* %B, i32 -1
+ %C = getelementptr i32, i32* %B, i32 -1
%D = load i32* %C
ret i32 %D
}
%C = bitcast [7 x i64]* %ARR to double*
store double %A, double* %C
- %D = getelementptr [7 x i64]* %ARR, i32 0, i32 4
+ %D = getelementptr [7 x i64], [7 x i64]* %ARR, i32 0, i32 4
%E = bitcast i64* %D to double*
store double %B, double* %E
- %F = getelementptr double* %C, i32 4
+ %F = getelementptr double, double* %C, i32 4
%G = load double* %F
ret double %G
}
define i32 @main(i32 %argc, i8** nocapture %argv) nounwind uwtable {
entry:
%f = alloca %struct.foo, align 4
- %x.i = getelementptr inbounds %struct.foo* %f, i64 0, i32 0
+ %x.i = getelementptr inbounds %struct.foo, %struct.foo* %f, i64 0, i32 0
store i32 1, i32* %x.i, align 4
- %y.i = getelementptr inbounds %struct.foo* %f, i64 0, i32 1
+ %y.i = getelementptr inbounds %struct.foo, %struct.foo* %f, i64 0, i32 1
br label %while.cond.i
; CHECK: while.cond.i:
entry:
%a = alloca %struct.X, align 8 ; <%struct.X*> [#uses=2]
%b = alloca %struct.X, align 8 ; <%struct.X*> [#uses=2]
- %0 = getelementptr inbounds %struct.X* %a, i64 0, i32 0 ; <i32*> [#uses=1]
+ %0 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %0, align 8
- %1 = getelementptr inbounds %struct.X* %b, i64 0, i32 0 ; <i32*> [#uses=1]
+ %1 = getelementptr inbounds %struct.X, %struct.X* %b, i64 0, i32 0 ; <i32*> [#uses=1]
store i32 2, i32* %1, align 8
%2 = icmp eq i32 %x, 0 ; <i1> [#uses=1]
%p.0 = select i1 %2, %struct.X* %b, %struct.X* %a ; <%struct.X*> [#uses=1]
- %3 = getelementptr inbounds %struct.X* %p.0, i64 0, i32 0 ; <i32*> [#uses=1]
+ %3 = getelementptr inbounds %struct.X, %struct.X* %p.0, i64 0, i32 0 ; <i32*> [#uses=1]
%4 = load i32* %3, align 8 ; <i32> [#uses=1]
ret i32 %4
}
define i32 @test2(i1 %c) {
entry:
%A = alloca {i32, i32}
- %B = getelementptr {i32, i32}* %A, i32 0, i32 0
+ %B = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 0
store i32 1, i32* %B
br i1 %c, label %T, label %F
T:
- %C = getelementptr {i32, i32}* %A, i32 0, i32 1
+ %C = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 1
store i32 2, i32* %C
br label %F
F:
; rdar://8904039
define i32 @test3(i1 %c) {
%A = alloca {i32, i32}
- %B = getelementptr {i32, i32}* %A, i32 0, i32 0
+ %B = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 0
store i32 1, i32* %B
- %C = getelementptr {i32, i32}* %A, i32 0, i32 1
+ %C = getelementptr {i32, i32}, {i32, i32}* %A, i32 0, i32 1
store i32 2, i32* %C
%X = select i1 %c, i32* %B, i32* %C
%A = alloca %PairTy
; CHECK-LABEL: @test4(
; CHECK: %A = alloca %PairTy
- %B = getelementptr %PairTy* %A, i32 0, i32 0
+ %B = getelementptr %PairTy, %PairTy* %A, i32 0, i32 0
store i32 1, i32* %B
- %C = getelementptr %PairTy* %A, i32 0, i32 1
+ %C = getelementptr %PairTy, %PairTy* %A, i32 0, i32 1
store i32 2, i32* %B
%X = select i1 %c, i32* %B, i32* %C
define i32 @test(i32 %X) {
%Arr = alloca [2 x i32] ; <[2 x i32]*> [#uses=3]
- %tmp.0 = getelementptr [2 x i32]* %Arr, i32 0, i32 0 ; <i32*> [#uses=1]
+ %tmp.0 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp.0
- %tmp.1 = getelementptr [2 x i32]* %Arr, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp.1 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 1 ; <i32*> [#uses=1]
store i32 2, i32* %tmp.1
- %tmp.3 = getelementptr [2 x i32]* %Arr, i32 0, i32 %X ; <i32*> [#uses=1]
+ %tmp.3 = getelementptr [2 x i32], [2 x i32]* %Arr, i32 0, i32 %X ; <i32*> [#uses=1]
%tmp.4 = load i32* %tmp.3 ; <i32> [#uses=1]
ret i32 %tmp.4
}
; CHECK-NEXT: %2 = inttoptr i16 %1 to i8 addrspace(1)*
; CHECK-NEXT: ret i8 addrspace(1)* %2
%as_ptr_array = alloca [4 x i16 addrspace(1)*]
- %elem1 = getelementptr [4 x i16 addrspace(1)*]* %as_ptr_array, i32 0, i32 1
+ %elem1 = getelementptr [4 x i16 addrspace(1)*], [4 x i16 addrspace(1)*]* %as_ptr_array, i32 0, i32 1
store i16 addrspace(1)* %x, i16 addrspace(1)** %elem1
%elem1.cast = bitcast i16 addrspace(1)** %elem1 to i8 addrspace(1)**
%tmp = load i8 addrspace(1)** %elem1.cast
%tmp.upgrd.2 = call i64 @_Z3foov( ) ; <i64> [#uses=1]
%tmp1.upgrd.3 = bitcast %struct.Val* %tmp1 to i64* ; <i64*> [#uses=1]
store i64 %tmp.upgrd.2, i64* %tmp1.upgrd.3
- %tmp.upgrd.4 = getelementptr %struct.Val* %tmp, i32 0, i32 0 ; <i32**> [#uses=1]
- %tmp2 = getelementptr %struct.Val* %tmp1, i32 0, i32 0 ; <i32**> [#uses=1]
+ %tmp.upgrd.4 = getelementptr %struct.Val, %struct.Val* %tmp, i32 0, i32 0 ; <i32**> [#uses=1]
+ %tmp2 = getelementptr %struct.Val, %struct.Val* %tmp1, i32 0, i32 0 ; <i32**> [#uses=1]
%tmp.upgrd.5 = load i32** %tmp2 ; <i32*> [#uses=1]
store i32* %tmp.upgrd.5, i32** %tmp.upgrd.4
- %tmp3 = getelementptr %struct.Val* %tmp, i32 0, i32 1 ; <i32*> [#uses=1]
- %tmp4 = getelementptr %struct.Val* %tmp1, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr %struct.Val, %struct.Val* %tmp, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr %struct.Val, %struct.Val* %tmp1, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp.upgrd.6 = load i32* %tmp4 ; <i32> [#uses=1]
store i32 %tmp.upgrd.6, i32* %tmp3
%tmp7 = bitcast %struct.Val* %tmp to { i64 }* ; <{ i64 }*> [#uses=1]
- %tmp8 = getelementptr { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
+ %tmp8 = getelementptr { i64 }, { i64 }* %tmp7, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp9 = load i64* %tmp8 ; <i64> [#uses=1]
call void @_Z3bar3ValS_( i64 %Op.0, i64 %tmp9 )
ret void
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
- %G.upgrd.1 = getelementptr <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
+ %G.upgrd.1 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
store float %f, float* %G.upgrd.1
%tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
- %tmp.upgrd.2 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
+ %tmp.upgrd.2 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
store float %f, float* %tmp.upgrd.2
%tmp4 = load <4 x float>* %G ; <<4 x float>> [#uses=2]
%tmp6 = fadd <4 x float> %tmp4, %tmp4 ; <<4 x float>> [#uses=1]
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
- %tmp.upgrd.3 = getelementptr <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
+ %tmp.upgrd.3 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 2 ; <float*> [#uses=1]
%tmp.upgrd.4 = load float* %tmp.upgrd.3 ; <float> [#uses=1]
store float %tmp.upgrd.4, float* %f
ret void
%tmp = load <4 x float>* %F ; <<4 x float>> [#uses=2]
%tmp3 = fadd <4 x float> %tmp, %tmp ; <<4 x float>> [#uses=1]
store <4 x float> %tmp3, <4 x float>* %G
- %G.upgrd.5 = getelementptr <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
+ %G.upgrd.5 = getelementptr <4 x float>, <4 x float>* %G, i32 0, i32 0 ; <float*> [#uses=1]
%tmp.upgrd.6 = load float* %G.upgrd.5 ; <float> [#uses=1]
store float %tmp.upgrd.6, float* %f
ret void
define i32 @test5(float %X) { ;; should turn into bitcast.
%X_addr = alloca [4 x float]
- %X1 = getelementptr [4 x float]* %X_addr, i32 0, i32 2
+ %X1 = getelementptr [4 x float], [4 x float]* %X_addr, i32 0, i32 2
store float %X, float* %X1
%a = bitcast float* %X1 to i32*
%tmp = load i32* %a
%memtmp = alloca %struct.test7, align 16
%0 = bitcast %struct.test7* %memtmp to <4 x i32>*
store <4 x i32> zeroinitializer, <4 x i32>* %0, align 16
- %1 = getelementptr inbounds %struct.test7* %memtmp, i64 0, i32 0, i64 5
+ %1 = getelementptr inbounds %struct.test7, %struct.test7* %memtmp, i64 0, i32 0, i64 5
store i32 0, i32* %1, align 4
ret void
; CHECK-LABEL: @test7(
define i32 @voltest(i32 %T) {
%A = alloca {i32, i32}
- %B = getelementptr {i32,i32}* %A, i32 0, i32 0
+ %B = getelementptr {i32,i32}, {i32,i32}* %A, i32 0, i32 0
store volatile i32 %T, i32* %B
; CHECK: store volatile
- %C = getelementptr {i32,i32}* %A, i32 0, i32 1
+ %C = getelementptr {i32,i32}, {i32,i32}* %A, i32 0, i32 1
%X = load volatile i32* %C
; CHECK: load volatile
ret i32 %X
; CHECK: %acc.i2 = phi float [ %init.i2, %entry ], [ %sel.i2, %loop ]
; CHECK: %acc.i3 = phi float [ %init.i3, %entry ], [ %sel.i3, %loop ]
; CHECK: %nexti = sub i32 %i, 1
-; CHECK: %ptr = getelementptr <4 x float>* %base, i32 %i
+; CHECK: %ptr = getelementptr <4 x float>, <4 x float>* %base, i32 %i
; CHECK: %ptr.i0 = bitcast <4 x float>* %ptr to float*
; CHECK: %val.i0 = load float* %ptr.i0, align 16
-; CHECK: %ptr.i1 = getelementptr float* %ptr.i0, i32 1
+; CHECK: %ptr.i1 = getelementptr float, float* %ptr.i0, i32 1
; CHECK: %val.i1 = load float* %ptr.i1, align 4
-; CHECK: %ptr.i2 = getelementptr float* %ptr.i0, i32 2
+; CHECK: %ptr.i2 = getelementptr float, float* %ptr.i0, i32 2
; CHECK: %val.i2 = load float* %ptr.i2, align 8
-; CHECK: %ptr.i3 = getelementptr float* %ptr.i0, i32 3
+; CHECK: %ptr.i3 = getelementptr float, float* %ptr.i0, i32 3
; CHECK: %val.i3 = load float* %ptr.i3, align 4
; CHECK: %add.i0 = fadd float %val.i0, %val.i2
; CHECK: %add.i1 = fadd float %val.i1, %val.i3
%acc = phi <4 x float> [ %init, %entry ], [ %sel, %loop ]
%nexti = sub i32 %i, 1
- %ptr = getelementptr <4 x float> *%base, i32 %i
+ %ptr = getelementptr <4 x float>, <4 x float> *%base, i32 %i
%val = load <4 x float> *%ptr
%dval = bitcast <4 x float> %val to <2 x double>
%dacc = bitcast <4 x float> %acc to <2 x double>
; CHECK: %acc.i2 = phi i32 [ %init.i2, %entry ], [ %sel.i2, %loop ]
; CHECK: %acc.i3 = phi i32 [ %init.i3, %entry ], [ %sel.i3, %loop ]
; CHECK: %nexti = sub i32 %i, 1
-; CHECK: %ptr = getelementptr <4 x i8>* %base, i32 %i
+; CHECK: %ptr = getelementptr <4 x i8>, <4 x i8>* %base, i32 %i
; CHECK: %ptr.i0 = bitcast <4 x i8>* %ptr to i8*
; CHECK: %val.i0 = load i8* %ptr.i0, align 4
-; CHECK: %ptr.i1 = getelementptr i8* %ptr.i0, i32 1
+; CHECK: %ptr.i1 = getelementptr i8, i8* %ptr.i0, i32 1
; CHECK: %val.i1 = load i8* %ptr.i1, align 1
-; CHECK: %ptr.i2 = getelementptr i8* %ptr.i0, i32 2
+; CHECK: %ptr.i2 = getelementptr i8, i8* %ptr.i0, i32 2
; CHECK: %val.i2 = load i8* %ptr.i2, align 2
-; CHECK: %ptr.i3 = getelementptr i8* %ptr.i0, i32 3
+; CHECK: %ptr.i3 = getelementptr i8, i8* %ptr.i0, i32 3
; CHECK: %val.i3 = load i8* %ptr.i3, align 1
; CHECK: %ext.i0 = sext i8 %val.i0 to i32
; CHECK: %ext.i1 = sext i8 %val.i1 to i32
%acc = phi <4 x i32> [ %init, %entry ], [ %sel, %loop ]
%nexti = sub i32 %i, 1
- %ptr = getelementptr <4 x i8> *%base, i32 %i
+ %ptr = getelementptr <4 x i8>, <4 x i8> *%base, i32 %i
%val = load <4 x i8> *%ptr
%ext = sext <4 x i8> %val to <4 x i32>
%add = add <4 x i32> %ext, %acc
loop:
%index = phi i32 [ 0, %entry ], [ %next_index, %loop ]
- %this_src = getelementptr <4 x i32> *%src, i32 %index
- %this_dst = getelementptr <4 x i32> *%dst, i32 %index
+ %this_src = getelementptr <4 x i32>, <4 x i32> *%src, i32 %index
+ %this_dst = getelementptr <4 x i32>, <4 x i32> *%dst, i32 %index
%val = load <4 x i32> *%this_src, !llvm.mem.parallel_loop_access !3
%add = add <4 x i32> %val, %val
store <4 x i32> %add, <4 x i32> *%this_dst, !llvm.mem.parallel_loop_access !3
float *%other) {
; CHECK-LABEL: @f8(
; CHECK: %dest.i0 = bitcast <4 x float*>* %dest to float**
-; CHECK: %dest.i1 = getelementptr float** %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float** %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float** %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float*, float** %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float*, float** %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float*, float** %dest.i0, i32 3
; CHECK: %i0.i1 = extractelement <4 x i32> %i0, i32 1
; CHECK: %i0.i3 = extractelement <4 x i32> %i0, i32 3
; CHECK: %ptr0.i0 = extractelement <4 x float*> %ptr0, i32 0
-; CHECK: %val.i0 = getelementptr float* %ptr0.i0, i32 100
-; CHECK: %val.i1 = getelementptr float* %other, i32 %i0.i1
+; CHECK: %val.i0 = getelementptr float, float* %ptr0.i0, i32 100
+; CHECK: %val.i1 = getelementptr float, float* %other, i32 %i0.i1
; CHECK: %ptr0.i2 = extractelement <4 x float*> %ptr0, i32 2
-; CHECK: %val.i2 = getelementptr float* %ptr0.i2, i32 100
+; CHECK: %val.i2 = getelementptr float, float* %ptr0.i2, i32 100
; CHECK: %ptr0.i3 = extractelement <4 x float*> %ptr0, i32 3
-; CHECK: %val.i3 = getelementptr float* %ptr0.i3, i32 %i0.i3
+; CHECK: %val.i3 = getelementptr float, float* %ptr0.i3, i32 %i0.i3
; CHECK: store float* %val.i0, float** %dest.i0, align 32
; CHECK: store float* %val.i1, float** %dest.i1, align 8
; CHECK: store float* %val.i2, float** %dest.i2, align 16
%i1 = insertelement <4 x i32> %i0, i32 100, i32 0
%i2 = insertelement <4 x i32> %i1, i32 100, i32 2
%ptr1 = insertelement <4 x float *> %ptr0, float *%other, i32 1
- %val = getelementptr <4 x float *> %ptr1, <4 x i32> %i2
+ %val = getelementptr float, <4 x float *> %ptr1, <4 x i32> %i2
store <4 x float *> %val, <4 x float *> *%dest
ret void
}
define void @f9(<4 x float> *%dest, <4 x float> *%src) {
; CHECK: @f9(
; CHECK: %dest.i0 = bitcast <4 x float>* %dest to float*
-; CHECK: %dest.i1 = getelementptr float* %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float* %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float* %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float, float* %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float, float* %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float, float* %dest.i0, i32 3
; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
; CHECK: %val.i0 = load float* %src.i0, align 4
-; CHECK: %src.i1 = getelementptr float* %src.i0, i32 1
+; CHECK: %src.i1 = getelementptr float, float* %src.i0, i32 1
; CHECK: %val.i1 = load float* %src.i1, align 4
-; CHECK: %src.i2 = getelementptr float* %src.i0, i32 2
+; CHECK: %src.i2 = getelementptr float, float* %src.i0, i32 2
; CHECK: %val.i2 = load float* %src.i2, align 4
-; CHECK: %src.i3 = getelementptr float* %src.i0, i32 3
+; CHECK: %src.i3 = getelementptr float, float* %src.i0, i32 3
; CHECK: %val.i3 = load float* %src.i3, align 4
; CHECK: store float %val.i0, float* %dest.i0, align 8
; CHECK: store float %val.i1, float* %dest.i1, align 4
define void @f10(<4 x float> *%dest, <4 x float> *%src) {
; CHECK: @f10(
; CHECK: %dest.i0 = bitcast <4 x float>* %dest to float*
-; CHECK: %dest.i1 = getelementptr float* %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float* %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float* %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float, float* %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float, float* %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float, float* %dest.i0, i32 3
; CHECK: %src.i0 = bitcast <4 x float>* %src to float*
; CHECK: %val.i0 = load float* %src.i0, align 1
-; CHECK: %src.i1 = getelementptr float* %src.i0, i32 1
+; CHECK: %src.i1 = getelementptr float, float* %src.i0, i32 1
; CHECK: %val.i1 = load float* %src.i1, align 1
-; CHECK: %src.i2 = getelementptr float* %src.i0, i32 2
+; CHECK: %src.i2 = getelementptr float, float* %src.i0, i32 2
; CHECK: %val.i2 = load float* %src.i2, align 1
-; CHECK: %src.i3 = getelementptr float* %src.i0, i32 3
+; CHECK: %src.i3 = getelementptr float, float* %src.i0, i32 3
; CHECK: %val.i3 = load float* %src.i3, align 1
; CHECK: store float %val.i0, float* %dest.i0, align 2
; CHECK: store float %val.i1, float* %dest.i1, align 2
; CHECK: %val1 = load <32 x i1>* %src1
; CHECK: store <32 x i1> %and, <32 x i1>* %dest
; CHECK: ret void
- %src1 = getelementptr <32 x i1> *%src0, i32 1
+ %src1 = getelementptr <32 x i1>, <32 x i1> *%src0, i32 1
%val0 = load <32 x i1> *%src0
%val1 = load <32 x i1> *%src1
%and = and <32 x i1> %val0, %val1
float *%other) {
; CHECK-LABEL: @f13(
; CHECK: %dest.i0 = bitcast <4 x float*>* %dest to float**
-; CHECK: %dest.i1 = getelementptr float** %dest.i0, i32 1
-; CHECK: %dest.i2 = getelementptr float** %dest.i0, i32 2
-; CHECK: %dest.i3 = getelementptr float** %dest.i0, i32 3
+; CHECK: %dest.i1 = getelementptr float*, float** %dest.i0, i32 1
+; CHECK: %dest.i2 = getelementptr float*, float** %dest.i0, i32 2
+; CHECK: %dest.i3 = getelementptr float*, float** %dest.i0, i32 3
; CHECK: %i.i0 = extractelement <4 x i32> %i, i32 0
; CHECK: %ptr.i0 = extractelement <4 x [4 x float]*> %ptr, i32 0
-; CHECK: %val.i0 = getelementptr inbounds [4 x float]* %ptr.i0, i32 0, i32 %i.i0
+; CHECK: %val.i0 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i0, i32 0, i32 %i.i0
; CHECK: %i.i1 = extractelement <4 x i32> %i, i32 1
; CHECK: %ptr.i1 = extractelement <4 x [4 x float]*> %ptr, i32 1
-; CHECK: %val.i1 = getelementptr inbounds [4 x float]* %ptr.i1, i32 1, i32 %i.i1
+; CHECK: %val.i1 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i1, i32 1, i32 %i.i1
; CHECK: %i.i2 = extractelement <4 x i32> %i, i32 2
; CHECK: %ptr.i2 = extractelement <4 x [4 x float]*> %ptr, i32 2
-; CHECK: %val.i2 = getelementptr inbounds [4 x float]* %ptr.i2, i32 2, i32 %i.i2
+; CHECK: %val.i2 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i2, i32 2, i32 %i.i2
; CHECK: %i.i3 = extractelement <4 x i32> %i, i32 3
; CHECK: %ptr.i3 = extractelement <4 x [4 x float]*> %ptr, i32 3
-; CHECK: %val.i3 = getelementptr inbounds [4 x float]* %ptr.i3, i32 3, i32 %i.i3
+; CHECK: %val.i3 = getelementptr inbounds [4 x float], [4 x float]* %ptr.i3, i32 3, i32 %i.i3
; CHECK: store float* %val.i0, float** %dest.i0, align 32
; CHECK: store float* %val.i1, float** %dest.i1, align 8
; CHECK: store float* %val.i2, float** %dest.i2, align 16
; CHECK: store float* %val.i3, float** %dest.i3, align 8
; CHECK: ret void
- %val = getelementptr inbounds <4 x [4 x float] *> %ptr,
+ %val = getelementptr inbounds [4 x float], <4 x [4 x float] *> %ptr,
<4 x i32> <i32 0, i32 1, i32 2, i32 3>,
<4 x i32> %i
store <4 x float *> %val, <4 x float *> *%dest
define void @f1(<4 x i32>* nocapture %a, <4 x i32>* nocapture readonly %b, <4 x i32>* nocapture readonly %c) #0 {
; CHECK: @f1(
; CHECK: %a.i0 = bitcast <4 x i32>* %a to i32*
-; CHECK: %a.i1 = getelementptr i32* %a.i0, i32 1
-; CHECK: %a.i2 = getelementptr i32* %a.i0, i32 2
-; CHECK: %a.i3 = getelementptr i32* %a.i0, i32 3
+; CHECK: %a.i1 = getelementptr i32, i32* %a.i0, i32 1
+; CHECK: %a.i2 = getelementptr i32, i32* %a.i0, i32 2
+; CHECK: %a.i3 = getelementptr i32, i32* %a.i0, i32 3
; CHECK: %c.i0 = bitcast <4 x i32>* %c to i32*
-; CHECK: %c.i1 = getelementptr i32* %c.i0, i32 1
-; CHECK: %c.i2 = getelementptr i32* %c.i0, i32 2
-; CHECK: %c.i3 = getelementptr i32* %c.i0, i32 3
+; CHECK: %c.i1 = getelementptr i32, i32* %c.i0, i32 1
+; CHECK: %c.i2 = getelementptr i32, i32* %c.i0, i32 2
+; CHECK: %c.i3 = getelementptr i32, i32* %c.i0, i32 3
; CHECK: %b.i0 = bitcast <4 x i32>* %b to i32*
-; CHECK: %b.i1 = getelementptr i32* %b.i0, i32 1
-; CHECK: %b.i2 = getelementptr i32* %b.i0, i32 2
-; CHECK: %b.i3 = getelementptr i32* %b.i0, i32 3
+; CHECK: %b.i1 = getelementptr i32, i32* %b.i0, i32 1
+; CHECK: %b.i2 = getelementptr i32, i32* %b.i0, i32 2
+; CHECK: %b.i3 = getelementptr i32, i32* %b.i0, i32 3
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %a, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %b, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
; CHECK: tail call void @llvm.dbg.value(metadata <4 x i32>* %c, i64 0, metadata !{{[0-9]+}}, metadata {{.*}}), !dbg !{{[0-9]+}}
.preheader:
%0 = sext i32 %y to i64
%1 = sext i32 %x to i64
- %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
%4 = load float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i32 %y, 1
%7 = sext i32 %6 to i64
- %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
%9 = addrspacecast float addrspace(3)* %8 to float*
%10 = load float* %9, align 4
%11 = fadd float %5, %10
%12 = add i32 %x, 1
%13 = sext i32 %12 to i64
- %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
%15 = addrspacecast float addrspace(3)* %14 to float*
%16 = load float* %15, align 4
%17 = fadd float %11, %16
- %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
%19 = addrspacecast float addrspace(3)* %18 to float*
%20 = load float* %19, align 4
%21 = fadd float %17, %20
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
; IR-LABEL: @sum_of_array(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
; the order of "sext" and "add" when computing the array indices. @sum_of_array
.preheader:
%0 = sext i32 %y to i64
%1 = sext i32 %x to i64
- %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
%4 = load float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i64 %0, 1
- %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+ %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
%8 = addrspacecast float addrspace(3)* %7 to float*
%9 = load float* %8, align 4
%10 = fadd float %5, %9
%11 = add i64 %1, 1
- %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+ %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
%13 = addrspacecast float addrspace(3)* %12 to float*
%14 = load float* %13, align 4
%15 = fadd float %10, %14
- %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+ %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
%17 = addrspacecast float addrspace(3)* %16 to float*
%18 = load float* %17, align 4
%19 = fadd float %15, %18
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
; IR-LABEL: @sum_of_array2(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
; This function loads
.preheader:
%0 = zext i32 %y to i64
%1 = zext i32 %x to i64
- %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
%4 = load float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add nuw i32 %y, 1
%7 = zext i32 %6 to i64
- %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
+ %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
%9 = addrspacecast float addrspace(3)* %8 to float*
%10 = load float* %9, align 4
%11 = fadd float %5, %10
%12 = add nuw i32 %x, 1
%13 = zext i32 %12 to i64
- %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
+ %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
%15 = addrspacecast float addrspace(3)* %14 to float*
%16 = load float* %15, align 4
%17 = fadd float %11, %16
- %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
+ %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
%19 = addrspacecast float addrspace(3)* %18 to float*
%20 = load float* %19, align 4
%21 = fadd float %17, %20
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
; IR-LABEL: @sum_of_array3(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
; This function loads
.preheader:
%0 = zext i32 %y to i64
%1 = zext i32 %x to i64
- %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
+ %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
%3 = addrspacecast float addrspace(3)* %2 to float*
%4 = load float* %3, align 4
%5 = fadd float %4, 0.000000e+00
%6 = add i64 %0, 1
- %7 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
+ %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
%8 = addrspacecast float addrspace(3)* %7 to float*
%9 = load float* %8, align 4
%10 = fadd float %5, %9
%11 = add i64 %1, 1
- %12 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
+ %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
%13 = addrspacecast float addrspace(3)* %12 to float*
%14 = load float* %13, align 4
%15 = fadd float %10, %14
- %16 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
+ %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
%17 = addrspacecast float addrspace(3)* %16 to float*
%18 = load float* %17, align 4
%19 = fadd float %15, %18
; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
; IR-LABEL: @sum_of_array4(
-; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 1
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 32
-; IR: getelementptr float addrspace(3)* [[BASE_PTR]], i64 33
+; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 1
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 32
+; IR: getelementptr float, float addrspace(3)* [[BASE_PTR]], i64 33
entry:
%add = add nsw i32 %i, 5
%idxprom = sext i32 %add to i64
- %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
+ %p = getelementptr inbounds [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
ret double* %p
}
; CHECK-LABEL: @struct(
-; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
+; CHECK: getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1
; We should be able to trace into sext(a + b) if a + b is non-negative
; (e.g., used as an index of an inbounds GEP) and one of a and b is
%2 = add i32 %j, -2
; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
%3 = sext i32 %2 to i64
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
+ %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
ret float* %p
}
; CHECK-LABEL: @sext_add(
; CHECK-NOT: = add
; CHECK: add i32 %j, -2
; CHECK: sext
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 32
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* %{{[a-zA-Z0-9]+}}, i64 32
; We should be able to trace into sext/zext if it can be distributed to both
; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
%d1 = add nuw i32 %d, 1
%d2 = zext i32 %d1 to i64
%j = add i64 %c, %d2 ; j = c + zext(d +nuw 1)
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
ret float* %p
}
; CHECK-LABEL: @ext_add_no_overflow(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 33
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 33
; Verifies we handle nested sext/zext correctly.
define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
%3 = add nsw i32 %b, 2
%4 = sext i32 %3 to i48
%5 = zext i48 %4 to i64 ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
- %p1 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
+ %p1 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
store float* %p1, float** %out1
%6 = add nuw i32 %a, 3
%7 = zext i32 %6 to i48
%9 = add nsw i32 %b, 4
%10 = zext i32 %9 to i48
%11 = sext i48 %10 to i64 ; sext(zext(b +nsw 4)) != zext(b) + 4
- %p2 = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
+ %p2 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
store float* %p2, float** %out2
ret void
}
; CHECK-LABEL: @sext_zext(
-; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR_1]], i64 32
-; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR_2]], i64 96
+; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR_1]], i64 32
+; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR_2]], i64 96
; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
; its operand is an OR and the two operands of the OR have no common bits.
%b3.ext = sext i32 %b3 to i64
%i = add i64 %a, %b2.ext
%j = add i64 %a, %b3.ext
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+ %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
ret float* %p
}
; CHECK-LABEL: @sext_or(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 32
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 32
; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
entry:
%b5 = add i64 %b, 5
%i = add i64 %b5, %a
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
+ %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
store i64 %b5, i64* %out
ret float* %p
}
; CHECK-LABEL: @expr(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
-; CHECK: getelementptr float* [[BASE_PTR]], i64 160
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 160
; CHECK: store i64 %b5, i64* %out
; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
%2 = add nsw i32 %a, %1
%3 = sext i32 %2 to i64
%i = add i64 %d, %3
- %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+ %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
ret float* %p
}
; CHECK-LABEL: @sext_expr(
; CHECK: sext i32
; CHECK: sext i32
; CHECK: sext i32
-; CHECK: getelementptr float* %{{[a-zA-Z0-9]+}}, i64 8
+; CHECK: getelementptr float, float* %{{[a-zA-Z0-9]+}}, i64 8
; Verifies we handle "sub" correctly.
; Verifies SeparateConstOffsetFromGEP handles "sub" correctly: the constant
; parts of (i - 5) and (5 - j) fold into a single trailing offset of
; -5*32 + 5 = -155 floats, leaving a variable-only base GEP.
; NOTE(review): this span contained unapplied diff residue (both the "-" old
; line and the "+" new line with markers); normalized to the post-migration
; getelementptr form that spells out the source element type explicitly.
define float* @sub(i64 %i, i64 %j) {
  %i2 = sub i64 %i, 5 ; i - 5
  %j2 = sub i64 5, %j ; 5 - j
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
  ret float* %p
}
; CHECK-LABEL: @sub(
; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
-; CHECK: getelementptr float* [[BASE_PTR]], i64 -155
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 -155
%struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed
%idxprom = sext i32 %add to i64
%add1 = add nsw i32 %i, 1
%idxprom2 = sext i32 %add1 to i64
- %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
+ %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
ret i64* %arrayidx3
}
; CHECK-LABEL: @packed_struct(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
-; CHECK: %uglygep = getelementptr i8* [[CASTED_PTR]], i64 100
+; CHECK: %uglygep = getelementptr i8, i8* [[CASTED_PTR]], i64 100
; CHECK: bitcast i8* %uglygep to i64*
; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
%0 = add i32 %b, 8
%1 = add nuw i32 %a, %0
%i = zext i32 %1 to i64
- %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+ %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
ret float* %p
}
; CHECK-LABEL: zext_expr(
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
; Per http://llvm.org/docs/LangRef.html#id181, the indices of a off-bound gep
; should be considered sign-extended to the pointer size. Therefore,
; Per LangRef, indices of an inbounds-less GEP narrower than the pointer width
; are sign-extended to pointer size; the pass must therefore sext %i to i64
; before splitting out any constant offset (checked by the CHECK lines below
; this function elsewhere in the file).
; NOTE(review): this span contained unapplied diff residue (paired "-"/"+"
; lines); normalized to the post-migration getelementptr form with the
; explicit source element type.
define float* @i32_add(i32 %a) {
entry:
  %i = add i32 %a, 8
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
  ret float* %p
}
; CHECK-LABEL: @i32_add(
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
; CHECK-NOT: getelementptr
; Verifies that we compute the correct constant offset when the index is
%0 = add nsw nuw i1 %a, 1
%1 = sext i1 %0 to i4
%2 = zext i4 %1 to i64 ; zext (sext i1 1 to i4) to i64 = 15
- %p = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
+ %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
ret float* %p
}
; CHECK-LABEL: @apint(
-; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
-; CHECK: getelementptr float* [[BASE_PTR]], i64 15
+; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
+; CHECK: getelementptr float, float* [[BASE_PTR]], i64 15
; Do not trace into binary operators other than ADD, SUB, and OR.
; Do not trace into binary operators other than ADD, SUB, and OR: the AND here
; must block constant-offset extraction, so the GEP stays unsplit (the CHECK
; lines below assert no second getelementptr is emitted).
; NOTE(review): this span contained unapplied diff residue (paired "-"/"+"
; lines); normalized to the post-migration getelementptr form with the
; explicit source element type.
define float* @and(i64 %a) {
entry:
  %0 = shl i64 %a, 2
  %1 = and i64 %0, 1
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
  ret float* %p
}
; CHECK-LABEL: @and(
-; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array
+; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array
; CHECK-NOT: getelementptr
; The code that rebuilds an OR expression used to be buggy, and failed on this
; ((a << 2) + 12) and 1 have no common bits. Therefore,
; SeparateConstOffsetFromGEP is able to extract the 12.
; TODO(jingyue): We could reassociate the expression to combine 12 and 1.
- %p = getelementptr float* %ptr, i64 %or
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float* %ptr, i64 [[OR]]
-; CHECK: getelementptr float* [[PTR]], i64 12
+ %p = getelementptr float, float* %ptr, i64 %or
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float, float* %ptr, i64 [[OR]]
+; CHECK: getelementptr float, float* [[PTR]], i64 12
ret float* %p
; CHECK-NEXT: ret
}
entry:
%arrayidx = add nsw i64 %idx, -2
; CHECK-NOT: add
- %ptr2 = getelementptr inbounds %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
-; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
+ %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
+; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
; CHECK: [[PTR1:%[a-zA-Z0-9]+]] = bitcast %struct2* [[PTR]] to i8*
-; CHECK: getelementptr i8* [[PTR1]], i64 -64
+; CHECK: getelementptr i8, i8* [[PTR1]], i64 -64
; CHECK: bitcast
ret %struct2* %ptr2
; CHECK-NEXT: ret
getfree.exit: ; preds = %endif.1.i, %then.1.i
ret void
endif.1: ; preds = %read_min.exit
- %tmp.27.i = getelementptr i32* null, i32 0 ; <i32*> [#uses=0]
+ %tmp.27.i = getelementptr i32, i32* null, i32 0 ; <i32*> [#uses=0]
br i1 false, label %loopexit.0.i15, label %no_exit.0.i14
no_exit.0.i14: ; preds = %endif.1
ret void
ret i32 1
cond_next27: ; preds = %cond_next13
%tmp29 = load %struct.anon** %num ; <%struct.anon*> [#uses=1]
- %tmp30 = getelementptr %struct.anon* %tmp29, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp30 = getelementptr %struct.anon, %struct.anon* %tmp29, i32 0, i32 2 ; <i32*> [#uses=1]
%tmp31 = load i32* %tmp30 ; <i32> [#uses=2]
%tmp33 = icmp sge i32 %tmp31, %scale ; <i1> [#uses=1]
%max = select i1 %tmp33, i32 %tmp31, i32 %scale ; <i32> [#uses=4]
call void @init_num( %struct.anon** %guess1 )
%tmp36 = call %struct.anon* @new_num( i32 1, i32 1 ) ; <%struct.anon*> [#uses=2]
store %struct.anon* %tmp36, %struct.anon** %point5
- %tmp.upgrd.3 = getelementptr %struct.anon* %tmp36, i32 0, i32 4, i32 1 ; <i8*> [#uses=1]
+ %tmp.upgrd.3 = getelementptr %struct.anon, %struct.anon* %tmp36, i32 0, i32 4, i32 1 ; <i8*> [#uses=1]
store i8 5, i8* %tmp.upgrd.3
%tmp39 = icmp slt i32 %tmp17, 0 ; <i1> [#uses=1]
br i1 %tmp39, label %cond_true40, label %cond_false43
cond_false43: ; preds = %cond_next27
call void @int2num( %struct.anon** %guess, i32 10 )
%tmp45 = load %struct.anon** %num ; <%struct.anon*> [#uses=1]
- %tmp46 = getelementptr %struct.anon* %tmp45, i32 0, i32 1 ; <i32*> [#uses=1]
+ %tmp46 = getelementptr %struct.anon, %struct.anon* %tmp45, i32 0, i32 1 ; <i32*> [#uses=1]
%tmp47 = load i32* %tmp46 ; <i32> [#uses=1]
call void @int2num( %struct.anon** %guess1, i32 %tmp47 )
%tmp48 = load %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
%tmp49 = load %struct.anon** %point5 ; <%struct.anon*> [#uses=1]
call void @bc_multiply( %struct.anon* %tmp48, %struct.anon* %tmp49, %struct.anon** %guess1, i32 %max )
%tmp51 = load %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
- %tmp52 = getelementptr %struct.anon* %tmp51, i32 0, i32 2 ; <i32*> [#uses=1]
+ %tmp52 = getelementptr %struct.anon, %struct.anon* %tmp51, i32 0, i32 2 ; <i32*> [#uses=1]
store i32 0, i32* %tmp52
%tmp53 = load %struct.anon** %guess ; <%struct.anon*> [#uses=1]
%tmp54 = load %struct.anon** %guess1 ; <%struct.anon*> [#uses=1]
ret void
bb145: ; preds = %entry
%tmp146 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
- %tmp148 = getelementptr %struct.tree_node* %tmp146, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
+ %tmp148 = getelementptr %struct.tree_node, %struct.tree_node* %tmp146, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
%tmp149 = load %struct.tree_node** %tmp148 ; <%struct.tree_node*> [#uses=1]
%tmp150 = bitcast %struct.tree_node* %tmp149 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
- %tmp151 = getelementptr %struct.tree_type* %tmp150, i32 0, i32 6 ; <i16*> [#uses=1]
+ %tmp151 = getelementptr %struct.tree_type, %struct.tree_type* %tmp150, i32 0, i32 6 ; <i16*> [#uses=1]
%tmp151.upgrd.1 = bitcast i16* %tmp151 to i32* ; <i32*> [#uses=1]
%tmp152 = load i32* %tmp151.upgrd.1 ; <i32> [#uses=1]
%tmp154 = lshr i32 %tmp152, 16 ; <i32> [#uses=1]
%tmp154.mask = and i32 %tmp154, 127 ; <i32> [#uses=1]
%gep.upgrd.2 = zext i32 %tmp154.mask to i64 ; <i64> [#uses=1]
- %tmp155 = getelementptr [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.2 ; <i8*> [#uses=1]
+ %tmp155 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.2 ; <i8*> [#uses=1]
%tmp156 = load i8* %tmp155 ; <i8> [#uses=1]
%tmp157 = icmp eq i8 %tmp156, 4 ; <i1> [#uses=1]
br i1 %tmp157, label %cond_next241, label %cond_true158
cond_true158: ; preds = %bb145
%tmp172 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
- %tmp174 = getelementptr %struct.tree_node* %tmp172, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
+ %tmp174 = getelementptr %struct.tree_node, %struct.tree_node* %tmp172, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
%tmp175 = load %struct.tree_node** %tmp174 ; <%struct.tree_node*> [#uses=1]
%tmp176 = bitcast %struct.tree_node* %tmp175 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
- %tmp177 = getelementptr %struct.tree_type* %tmp176, i32 0, i32 6 ; <i16*> [#uses=1]
+ %tmp177 = getelementptr %struct.tree_type, %struct.tree_type* %tmp176, i32 0, i32 6 ; <i16*> [#uses=1]
%tmp177.upgrd.3 = bitcast i16* %tmp177 to i32* ; <i32*> [#uses=1]
%tmp178 = load i32* %tmp177.upgrd.3 ; <i32> [#uses=1]
%tmp180 = lshr i32 %tmp178, 16 ; <i32> [#uses=1]
%tmp180.mask = and i32 %tmp180, 127 ; <i32> [#uses=1]
%gep.upgrd.4 = zext i32 %tmp180.mask to i64 ; <i64> [#uses=1]
- %tmp181 = getelementptr [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.4 ; <i8*> [#uses=1]
+ %tmp181 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.4 ; <i8*> [#uses=1]
%tmp182 = load i8* %tmp181 ; <i8> [#uses=1]
%tmp183 = icmp eq i8 %tmp182, 8 ; <i1> [#uses=1]
br i1 %tmp183, label %cond_next241, label %cond_true184
cond_true184: ; preds = %cond_true158
%tmp185 = load %struct.tree_node** null ; <%struct.tree_node*> [#uses=1]
- %tmp187 = getelementptr %struct.tree_node* %tmp185, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
+ %tmp187 = getelementptr %struct.tree_node, %struct.tree_node* %tmp185, i32 0, i32 0, i32 0, i32 1 ; <%struct.tree_node**> [#uses=1]
%tmp188 = load %struct.tree_node** %tmp187 ; <%struct.tree_node*> [#uses=1]
%tmp189 = bitcast %struct.tree_node* %tmp188 to %struct.tree_type* ; <%struct.tree_type*> [#uses=1]
- %tmp190 = getelementptr %struct.tree_type* %tmp189, i32 0, i32 6 ; <i16*> [#uses=1]
+ %tmp190 = getelementptr %struct.tree_type, %struct.tree_type* %tmp189, i32 0, i32 6 ; <i16*> [#uses=1]
%tmp190.upgrd.5 = bitcast i16* %tmp190 to i32* ; <i32*> [#uses=1]
%tmp191 = load i32* %tmp190.upgrd.5 ; <i32> [#uses=1]
%tmp193 = lshr i32 %tmp191, 16 ; <i32> [#uses=1]
%tmp193.mask = and i32 %tmp193, 127 ; <i32> [#uses=1]
%gep.upgrd.6 = zext i32 %tmp193.mask to i64 ; <i64> [#uses=1]
- %tmp194 = getelementptr [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.6 ; <i8*> [#uses=1]
+ %tmp194 = getelementptr [35 x i8], [35 x i8]* @mode_class, i32 0, i64 %gep.upgrd.6 ; <i8*> [#uses=1]
%tmp195 = load i8* %tmp194 ; <i8> [#uses=1]
%tmp196 = icmp eq i8 %tmp195, 4 ; <i1> [#uses=1]
br i1 %tmp196, label %cond_next241, label %cond_true197
store %struct.trie_s* %t, %struct.trie_s** %t_addr
store %struct.FILE* %f, %struct.FILE** %f_addr
store i32 0, i32* %wstate
- %tmp = getelementptr %struct.charsequence* %cs, i64 0, i32 0 ; <i8**> [#uses=1]
- %tmp1 = getelementptr %struct.charsequence* @C.0.2294, i64 0, i32 0 ; <i8**> [#uses=1]
+ %tmp = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 0 ; <i8**> [#uses=1]
+ %tmp1 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 0 ; <i8**> [#uses=1]
%tmp.upgrd.5 = load i8** %tmp1 ; <i8*> [#uses=1]
store i8* %tmp.upgrd.5, i8** %tmp
- %tmp.upgrd.6 = getelementptr %struct.charsequence* %cs, i64 0, i32 1 ; <i32*> [#uses=1]
- %tmp2 = getelementptr %struct.charsequence* @C.0.2294, i64 0, i32 1 ; <i32*> [#uses=1]
+ %tmp.upgrd.6 = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 1 ; <i32*> [#uses=1]
+ %tmp2 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 1 ; <i32*> [#uses=1]
%tmp.upgrd.7 = load i32* %tmp2 ; <i32> [#uses=1]
store i32 %tmp.upgrd.7, i32* %tmp.upgrd.6
- %tmp3 = getelementptr %struct.charsequence* %cs, i64 0, i32 2 ; <i32*> [#uses=1]
- %tmp4 = getelementptr %struct.charsequence* @C.0.2294, i64 0, i32 2 ; <i32*> [#uses=1]
+ %tmp3 = getelementptr %struct.charsequence, %struct.charsequence* %cs, i64 0, i32 2 ; <i32*> [#uses=1]
+ %tmp4 = getelementptr %struct.charsequence, %struct.charsequence* @C.0.2294, i64 0, i32 2 ; <i32*> [#uses=1]
%tmp5 = load i32* %tmp4 ; <i32> [#uses=1]
store i32 %tmp5, i32* %tmp3
br label %bb33
; CHECK: entry:
; CHECK-NEXT: sub i3 %arg, -4
; CHECK-NEXT: zext i3 %switch.tableidx to i4
-; CHECK-NEXT: getelementptr inbounds [8 x i64]* @switch.table, i32 0, i4 %switch.tableidx.zext
+; CHECK-NEXT: getelementptr inbounds [8 x i64], [8 x i64]* @switch.table, i32 0, i4 %switch.tableidx.zext
; CHECK-NEXT: load i64* %switch.gep
; CHECK-NEXT: add i64
; CHECK-NEXT: ret i64
; CHECK: entry:
; CHECK-NEXT: sub i2 %0, -2
; CHECK-NEXT: zext i2 %switch.tableidx to i3
-; CHECK-NEXT: getelementptr inbounds [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
+; CHECK-NEXT: getelementptr inbounds [4 x i64], [4 x i64]* @switch.table, i32 0, i3 %switch.tableidx.zext
; CHECK-NEXT: load i64* %switch.gep
; CHECK-NEXT: ret i64 %switch.load
define i64 @_TFO6reduce1E5toRawfS0_FT_Si(i2) {
; CHECK-NEXT: %0 = icmp ult i32 %switch.tableidx, 7
; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32]* @switch.table, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
; CHECK: return:
; CHECK-NEXT: %switch.shiftamt = mul i32 %switch.tableidx, 8
; CHECK-NEXT: %switch.downshift = lshr i32 89655594, %switch.shiftamt
; CHECK-NEXT: %switch.masked = trunc i32 %switch.downshift to i8
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float]* @switch.table1, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x float], [4 x float]* @switch.table1, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load float* %switch.gep
; CHECK-NEXT: br label %sw.epilog
; CHECK: sw.epilog:
; CHECK-NEXT: %0 = icmp ult i32 %switch.tableidx, 4
; CHECK-NEXT: br i1 %0, label %switch.lookup, label %return
; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*]* @switch.table2, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i8*], [4 x i8*]* @switch.table2, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i8** %switch.gep
; CHECK-NEXT: ret i8* %switch.load
}
; CHECK-LABEL: @earlyreturncrash(
; CHECK: switch.lookup:
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32]* @switch.table3, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table3, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
; CHECK: sw.epilog:
; CHECK-LABEL: @cprop(
; CHECK: switch.lookup:
-; CHECK: %switch.gep = getelementptr inbounds [7 x i32]* @switch.table5, i32 0, i32 %switch.tableidx
+; CHECK: %switch.gep = getelementptr inbounds [7 x i32], [7 x i32]* @switch.table5, i32 0, i32 %switch.tableidx
}
define i32 @unreachable_case(i32 %x) {
; CHECK-LABEL: @unreachable_case(
; CHECK: switch.lookup:
-; CHECK: getelementptr inbounds [9 x i32]* @switch.table6, i32 0, i32 %switch.tableidx
+; CHECK: getelementptr inbounds [9 x i32], [9 x i32]* @switch.table6, i32 0, i32 %switch.tableidx
}
define i32 @unreachable_default(i32 %x) {
; CHECK-NEXT: %switch.tableidx = sub i32 %x, 0
; CHECK-NOT: icmp
; CHECK-NOT: br 1i
-; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32]* @switch.table7, i32 0, i32 %switch.tableidx
+; CHECK-NEXT: %switch.gep = getelementptr inbounds [4 x i32], [4 x i32]* @switch.table7, i32 0, i32 %switch.tableidx
; CHECK-NEXT: %switch.load = load i32* %switch.gep
; CHECK-NEXT: ret i32 %switch.load
}
; CHECK-NOT: call void @barrier
define void @noduplicate(i32 %cond, i32* %out) {
entry:
- %out1 = getelementptr i32* %out, i32 1
- %out2 = getelementptr i32* %out, i32 2
+ %out1 = getelementptr i32, i32* %out, i32 1
+ %out2 = getelementptr i32, i32* %out, i32 2
%cmp = icmp eq i32 %cond, 0
br i1 %cmp, label %if.then, label %if.end
;CHECK-NEXT: icmp eq
BB3: ; preds = %BB2
- %6 = getelementptr inbounds [5 x %0]* @0, i32 0, i32 %0, !dbg !6
+ %6 = getelementptr inbounds [5 x %0], [5 x %0]* @0, i32 0, i32 %0, !dbg !6
call void @llvm.dbg.value(metadata %0* %6, i64 0, metadata !7, metadata !{}), !dbg !12
%7 = icmp eq %0* %6, null, !dbg !13
br i1 %7, label %BB5, label %BB4, !dbg !13
xlab8x: ; preds = %xlab5x
%xvaluex = call i32 @xselectorx()
- %xblkx.x = getelementptr [9 x i8*]* @xblkx.bbs, i32 0, i32 %xvaluex
+ %xblkx.x = getelementptr [9 x i8*], [9 x i8*]* @xblkx.bbs, i32 0, i32 %xvaluex
%xblkx.load = load i8** %xblkx.x
indirectbr i8* %xblkx.load, [label %xblkx.begin, label %xblkx.begin3, label %xblkx.begin4, label %xblkx.begin5, label %xblkx.begin6, label %xblkx.begin7, label %xblkx.begin8, label %xblkx.begin9, label %xblkx.end]
%add = add i32 %low.0, %high.addr.0
%div = udiv i32 %add, 2
%idxprom = zext i32 %div to i64
- %arrayidx = getelementptr inbounds i32* %r, i64 %idxprom
+ %arrayidx = getelementptr inbounds i32, i32* %r, i64 %idxprom
%0 = load i32* %arrayidx
%cmp1 = icmp ult i32 %k, %0
br i1 %cmp1, label %if.then, label %if.else
if.end7: ; preds = %if.else, %if.then4, %if.then
%x.0 = phi i32* [ %a, %if.then ], [ null, %if.then4 ], [ null, %if.else ]
- %gep = getelementptr i32* %x.0, i32 10
+ %gep = getelementptr i32, i32* %x.0, i32 10
%tmp9 = load i32* %gep
%tmp10 = or i32 %tmp9, 1
store i32 %tmp10, i32* %gep
br i1 %cmp, label %if.then, label %if.end
if.then:
- %incdec.ptr = getelementptr %ST* %x, i32 0, i32 1
+ %incdec.ptr = getelementptr %ST, %ST* %x, i32 0, i32 1
br label %if.end
if.end:
define void @ifconvertstore(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
- %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
- %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; First store to the location.
store i32 %add, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
%1 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
define void @noifconvertstore1(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
- %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
- %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; Store to a different location.
store i32 %add, i32* %arrayidx, align 4
- %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
%1 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
define void @noifconvertstore2(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
- %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
- %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; First store to the location.
store i32 %add, i32* %arrayidx2, align 4
call void @unknown_fun()
- %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
%1 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
define void @noifconvertstore_volatile(i32 %m, i32* %A, i32* %B, i32 %C, i32 %D) {
entry:
- %arrayidx = getelementptr inbounds i32* %B, i64 0
+ %arrayidx = getelementptr inbounds i32, i32* %B, i64 0
%0 = load i32* %arrayidx, align 4
%add = add nsw i32 %0, %C
- %arrayidx2 = getelementptr inbounds i32* %A, i64 0
+ %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 0
; First store to the location.
store i32 %add, i32* %arrayidx2, align 4
- %arrayidx4 = getelementptr inbounds i32* %B, i64 1
+ %arrayidx4 = getelementptr inbounds i32, i32* %B, i64 1
%1 = load i32* %arrayidx4, align 4
%add5 = add nsw i32 %1, %D
%cmp6 = icmp sgt i32 %add5, %C
define void @yes(i1 %c) nounwind {
entry:
%a = alloca [4 x i64*], align 8
- %__a.addr = getelementptr [4 x i64*]* %a, i64 0, i64 3
+ %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 0, i64 3
call void @frob(i64** %__a.addr)
br i1 %c, label %if.then, label %if.end
define void @no0(i1 %c) nounwind {
entry:
%a = alloca [4 x i64*], align 8
- %__a.addr = getelementptr [4 x i64*]* %a, i64 0, i64 4
+ %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 0, i64 4
call void @frob(i64** %__a.addr)
br i1 %c, label %if.then, label %if.end
define void @no1(i1 %c, i64 %n) nounwind {
entry:
%a = alloca [4 x i64*], align 8
- %__a.addr = getelementptr [4 x i64*]* %a, i64 0, i64 %n
+ %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 0, i64 %n
call void @frob(i64** %__a.addr)
br i1 %c, label %if.then, label %if.end
define void @no2(i1 %c, i64 %n) nounwind {
entry:
%a = alloca [4 x i64*], align 8
- %__a.addr = getelementptr [4 x i64*]* %a, i64 1, i64 0
+ %__a.addr = getelementptr [4 x i64*], [4 x i64*]* %a, i64 1, i64 0
call void @frob(i64** %__a.addr)
br i1 %c, label %if.then, label %if.end
define i1 @test6({ i32, i32 }* %I) {
entry:
- %tmp.1.i = getelementptr { i32, i32 }* %I, i64 0, i32 1 ; <i32*> [#uses=1]
+ %tmp.1.i = getelementptr { i32, i32 }, { i32, i32 }* %I, i64 0, i32 1 ; <i32*> [#uses=1]
%tmp.2.i = load i32* %tmp.1.i ; <i32> [#uses=6]
%tmp.2 = icmp eq i32 %tmp.2.i, 14 ; <i1> [#uses=1]
br i1 %tmp.2, label %shortcirc_done.4, label %shortcirc_next.0
while_block: ; preds = %and_if_cont2, %and_if_cont
%newlen = sub i32 %newlen, 1
- %newptr = getelementptr i8* %newptr, i64 1
+ %newptr = getelementptr i8, i8* %newptr, i64 1
%test = icmp sgt i32 %newlen, 0
br i1 %test, label %and_if1, label %and_if_cont2
%Addr.017 = phi i8* [ %incdec.ptr, %while.body ], [ null, %if.then ], [ null, %entry ]
%x.016 = phi i8 [ %inc, %while.body ], [ 0, %if.then ], [ 0, %entry ]
%inc = add i8 %x.016, 1
- %incdec.ptr = getelementptr inbounds i8* %Addr.017, i64 1
+ %incdec.ptr = getelementptr inbounds i8, i8* %Addr.017, i64 1
store volatile i8 %x.016, i8* %Addr.017, align 1
%0 = ptrtoint i8* %incdec.ptr to i64
%1 = trunc i64 %0 to i32
br i1 %1, label %if, label %endif
if:
- %2 = getelementptr i32* %0, i32 1
+ %2 = getelementptr i32, i32* %0, i32 1
store i32 0, i32* %0
store i32 1, i32* %2
- %3 = getelementptr i32* %0, i32 %b
+ %3 = getelementptr i32, i32* %0, i32 %b
%4 = load i32* %3
ret i32 %4
br i1 %1, label %if, label %endif
if:
- %2 = getelementptr i32* %0, i32 1
+ %2 = getelementptr i32, i32* %0, i32 1
store i32 0, i32* %0
store i32 1, i32* %2
- %3 = getelementptr i32* %0, i32 %b
+ %3 = getelementptr i32, i32* %0, i32 %b
%4 = load i32* %3
ret i32 %4
br i1 %1, label %if, label %endif
if:
- %2 = getelementptr i32* %0, i32 1
+ %2 = getelementptr i32, i32* %0, i32 1
store i32 0, i32* %0
store i32 1, i32* %2
- %3 = getelementptr i32* %0, i32 %b
+ %3 = getelementptr i32, i32* %0, i32 %b
%4 = load i32* %3
ret i32 %4
for.body:
%i = phi i32 [0, %entry], [%i.inc, %end.loop]
- %ptr = getelementptr i32 addrspace(1)* %out, i32 %i
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %i
store i32 %i, i32 addrspace(1)* %ptr, align 4
br i1 %arg, label %mid.loop, label %end.loop
; CHECK: for.body:
for.body: ; preds = %for.cond
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %out, i32 %i.0
+ %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %i.0
store i32 %i.0, i32 addrspace(1)* %arrayidx, align 4
%cmp1 = icmp ugt i32 %i.0, %cond_b
; CHECK: br i1 %{{[0-9a-zA-Z_]+}}, label %for.inc, label %[[FLOW1:[0-9a-zA-Z_]+]]
for.inc: ; preds = %for.body
%0 = add i32 %cond_a, %i.0
- %arrayidx3 = getelementptr inbounds i32 addrspace(1)* %out, i32 %0
+ %arrayidx3 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 %0
store i32 %i.0, i32 addrspace(1)* %arrayidx3, align 4
%inc = add i32 %i.0, 1
br label %for.cond
; CHECK: lor.lhs.false:
; CHECK: br label %Flow
lor.lhs.false: ; preds = %for.body
- %arrayidx = getelementptr inbounds float* %nr, i64 %indvars.iv
+ %arrayidx = getelementptr inbounds float, float* %nr, i64 %indvars.iv
%tmp1 = load float* %arrayidx, align 4
%tmp2 = add nsw i64 %indvars.iv, -1
- %arrayidx2 = getelementptr inbounds float* %nr, i64 %tmp2
+ %arrayidx2 = getelementptr inbounds float, float* %nr, i64 %tmp2
%tmp3 = load float* %arrayidx2, align 4
%cmp3 = fcmp une float %tmp1, %tmp3
br i1 %cmp3, label %if.then, label %for.body.1
if.then: ; preds = %lor.lhs.false, %for.body
%sub4 = sub nsw i32 %tmp0, %prev_start.026
%tmp4 = add nsw i64 %indvars.iv, -1
- %arrayidx8 = getelementptr inbounds float* %nr, i64 %tmp4
+ %arrayidx8 = getelementptr inbounds float, float* %nr, i64 %tmp4
%tmp5 = load float* %arrayidx8, align 4
br i1 %cmp1, label %for.end, label %for.body.1
; CHECK: if.then6.6
; CHECK: br label %for.body.backedge
if.then6.6: ; preds = %for.body.6
- %arrayidx8.6 = getelementptr inbounds float* %nr, i64 %indvars.iv.next.454
+ %arrayidx8.6 = getelementptr inbounds float, float* %nr, i64 %indvars.iv.next.454
%tmp29 = load float* %arrayidx8.6, align 4
br label %for.body.backedge
; contained WITHIN a structure.
define void @test({i32, i32*} * %X) {
- getelementptr {i32, i32*} * %X, i32 0, i32 1, i32 0
+ getelementptr {i32, i32*}, {i32, i32*} * %X, i32 0, i32 1, i32 0
ret void
}
@bb = global [16 x i8] zeroinitializer, align 1
define void @x() nounwind {
L.0:
- %0 = getelementptr [32 x i8]* @aa, i32 0, i32 4
+ %0 = getelementptr [32 x i8], [32 x i8]* @aa, i32 0, i32 4
%1 = bitcast i8* %0 to [16 x i8]*
%2 = bitcast [16 x i8]* %1 to [0 x i8]*
- %3 = getelementptr [16 x i8]* @bb
+ %3 = getelementptr [16 x i8], [16 x i8]* @bb
%4 = bitcast [16 x i8]* %3 to [0 x i8]*
call void @llvm.memcpy.i32([0 x i8]* %2, [0 x i8]* %4, i32 16, i32 1)
br label %return
; Check that we can find inttoptr -> illegal bitcasts when hidden
; inside constantexpr pointer operands
define i32 addrspace(2)* @illegal_bitcast_inttoptr_as_1_to_2_inside_gep() {
- %cast = getelementptr i32 addrspace(2)* bitcast (i32 addrspace(1)* inttoptr (i32 1234 to i32 addrspace(1)*) to i32 addrspace(2)*), i32 3
+ %cast = getelementptr i32, i32 addrspace(2)* bitcast (i32 addrspace(1)* inttoptr (i32 1234 to i32 addrspace(1)*) to i32 addrspace(2)*), i32 3
ret i32 addrspace(2)* %cast
}
%tmp = load float* %x, align 4
%add = fadd float %tmp, 1.000000e+00
store float %add, float* %x, align 4
- %arrayidx1 = getelementptr inbounds float* %x, i64 1
+ %arrayidx1 = getelementptr inbounds float, float* %x, i64 1
%tmp1 = load float* %arrayidx1, align 4
%add2 = fadd float %tmp1, 1.000000e+00
store float %add2, float* %arrayidx1, align 4
- %arrayidx3 = getelementptr inbounds float* %x, i64 2
+ %arrayidx3 = getelementptr inbounds float, float* %x, i64 2
%tmp2 = load float* %arrayidx3, align 4
%add4 = fadd float %tmp2, 1.000000e+00
store float %add4, float* %arrayidx3, align 4
- %arrayidx5 = getelementptr inbounds float* %x, i64 3
+ %arrayidx5 = getelementptr inbounds float, float* %x, i64 3
%tmp3 = load float* %arrayidx5, align 4
%add6 = fadd float %tmp3, 1.000000e+00
store float %add6, float* %arrayidx5, align 4
bb1:
%i.0 = phi i64 [ 0, %bb ], [ %tmp4, %bb1 ]
- %tmp = getelementptr inbounds float* %x, i64 %i.0
+ %tmp = getelementptr inbounds float, float* %x, i64 %i.0
%tmp2 = load float* %tmp, align 4
%tmp3 = fadd float %tmp2, 1.000000e+00
store float %tmp3, float* %tmp, align 4
// FIXME: getGetElementPtr() actually creates an inbounds ConstantGEP,
// not a normal one!
//CHECK(ConstantExpr::getGetElementPtr(Global, V, false),
- // "getelementptr i32** @dummy, i32 1");
+ // "getelementptr i32*, i32** @dummy, i32 1");
CHECK(ConstantExpr::getInBoundsGetElementPtr(Global, V),
- "getelementptr inbounds i32** @dummy, i32 1");
+ "getelementptr inbounds i32*, i32** @dummy, i32 1");
CHECK(ConstantExpr::getExtractElement(P6, One), "extractelement <2 x i16> "
P6STR ", i32 1");